source
stringlengths 3
86
| python
stringlengths 75
1.04M
|
|---|---|
test_bugs.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009, 2020, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License, version 2.0, as
# published by the Free Software Foundation.
#
# This program is also distributed with certain software (including
# but not limited to OpenSSL) that is licensed under separate terms,
# as designated in a particular file or component or in included license
# documentation. The authors of MySQL hereby grant you an
# additional permission to link the program and your derivative works
# with the separately licensed software that they have included with
# MySQL.
#
# Without limiting anything contained in the foregoing, this file,
# which is part of MySQL Connector/Python, is also subject to the
# Universal FOSS Exception, version 1.0, a copy of which can be found at
# http://oss.oracle.com/licenses/universal-foss-exception.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License, version 2.0, for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""Test module for bugs
Bug test cases specific to a particular Python (major) version are loaded
from py2.bugs or py3.bugs.
This module was originally located in python2/tests and python3/tests. It
should contain bug test cases which work for both Python v2 and v3.
Whenever a bug is bound to a specific Python version, put the test cases
in tests/py2/bugs.py or tests/py3/bugs.py. It might be that these files need
to be created first.
"""
import io
import os
import gc
import tempfile
from datetime import date, datetime, timedelta, time
from threading import Thread
import traceback
import time
import unittest
import pickle
import sys
import tests
if tests.SSL_AVAILABLE:
import ssl
from tests import foreach_cnx, cnx_config
from . import PY2
from . import check_tls_versions_support
from mysql.connector import (connection, cursor, conversion, protocol,
errors, constants, pooling)
from mysql.connector.optionfiles import read_option_files
from mysql.connector.catch23 import STRING_TYPES
import mysql.connector
import cpy_distutils
try:
from mysql.connector.connection_cext import (CMySQLConnection,
MySQLInterfaceError)
except ImportError:
# Test without C Extension
CMySQLConnection = None
MySQLInterfaceError = None
ERR_NO_CEXT = "C Extension not available"
class Bug437972Tests(tests.MySQLConnectorTests):

    def test_windows_tcp_connection(self):
        """lp:437972 TCP connection to Windows

        Opening a TCP connection with the standard test configuration
        must not raise InterfaceError.
        """
        # BUG FIX: removed a dead `if os.name != 'nt': pass` no-op that
        # looked like platform gating but had no effect.
        cnx = None
        try:
            cnx = connection.MySQLConnection(**tests.get_mysql_config())
        except errors.InterfaceError:
            self.fail()
        if cnx:
            cnx.close()
class Bug441430Tests(tests.MySQLConnectorTests):

    @foreach_cnx()
    def test_execute_return(self):
        """lp:441430 cursor.execute*() should return the cursor.rowcount"""
        # BUG FIX: removed unused `res = ...` bindings; the assertions
        # read cur.rowcount, never the return value.
        cur = self.cnx.cursor()
        tbl = "buglp44130"
        cur.execute("DROP TABLE IF EXISTS %s" % tbl)
        cur.execute("CREATE TABLE %s (id INT)" % tbl)
        # Single INSERT with two value tuples affects two rows.
        cur.execute("INSERT INTO %s VALUES (%%s),(%%s)" % tbl, (1, 2,))
        self.assertEqual(2, cur.rowcount)
        stmt = "INSERT INTO %s VALUES (%%s)" % tbl
        # executemany() must report the total affected-row count.
        cur.executemany(stmt, [(3,), (4,), (5,), (6,), (7,), (8,)])
        self.assertEqual(6, cur.rowcount)
        cur.execute("UPDATE %s SET id = id + %%s" % tbl, (10,))
        self.assertEqual(8, cur.rowcount)
        cur.execute("DROP TABLE IF EXISTS {0}".format(tbl))
        cur.close()
        self.cnx.close()
class Bug454782(tests.MySQLConnectorTests):
    """lp:454782 fetchone() does not follow pep-0249"""

    @foreach_cnx()
    def test_fetch_return_values(self):
        """Fetch methods on a fresh cursor must follow PEP 249.

        BUG FIX: method was misspelled `test_fetch_retun_values`.
        """
        cur = self.cnx.cursor()
        # Without an executed statement: fetchone() -> None, fetchmany() -> [].
        self.assertEqual(None, cur.fetchone())
        self.assertEqual([], cur.fetchmany())
        # fetchall() without a result set must raise InterfaceError.
        self.assertRaises(errors.InterfaceError, cur.fetchall)
        cur.close()
        self.cnx.close()
class Bug454790(tests.MySQLConnectorTests):
    """lp:454790 pyformat / other named parameters broken"""

    @foreach_cnx()
    def test_pyformat(self):
        """Named (pyformat) parameters must work for execute/executemany."""
        cursor_obj = self.cnx.cursor()
        single = {'name': 'Geert', 'year': 1977}
        cursor_obj.execute("SELECT %(name)s,%(year)s", single)
        self.assertEqual(('Geert', 1977), cursor_obj.fetchone())
        many = [
            {'name': 'Geert', 'year': 1977},
            {'name': 'Marta', 'year': 1980},
        ]
        cursor_obj.executemany("SELECT %(name)s,%(year)s", many)
        self.assertEqual(2, cursor_obj.rowcount)
        cursor_obj.close()
        self.cnx.close()
class Bug480360(tests.MySQLConnectorTests):
    """lp:480360: fetchall() should return [] when no result"""

    @foreach_cnx()
    def test_fetchall(self):
        """An empty result set must come back from fetchall() as []."""
        cursor_obj = self.cnx.cursor()
        # WHERE 0 = 1 guarantees zero rows without needing any table.
        empty_select = "SELECT * FROM (SELECT 1) AS t WHERE 0 = 1"
        cursor_obj.execute(empty_select)
        rows = cursor_obj.fetchall()
        self.assertEqual([], rows)
        cursor_obj.close()
        self.cnx.close()
@unittest.skipIf(tests.MYSQL_VERSION >= (5, 6, 6),
                 "Bug380528 not tested with MySQL version >= 5.6.6")
class Bug380528(tests.MySQLConnectorTests):
    """lp:380528: we do not support old passwords"""

    @foreach_cnx()
    def test_old_password(self):
        """Accounts using pre-4.1 OLD_PASSWORD hashing must raise
        NotSupportedError when connecting."""
        cur = self.cnx.cursor()
        if self.config['unix_socket'] and os.name != 'nt':
            user = "'myconnpy'@'localhost'"
        else:
            # BUG FIX: referenced the undefined name `config`; the test
            # configuration lives in self.config.
            user = "'myconnpy'@'%s'" % (self.config['host'])
        try:
            cur.execute("GRANT SELECT ON %s.* TO %s" %
                        (self.config['database'], user))
            cur.execute("SET PASSWORD FOR %s = OLD_PASSWORD('fubar')" % (user))
        except mysql.connector.Error:
            # Narrowed from a bare except so unrelated exceptions are
            # not reported as a grant failure.
            self.fail("Failed executing grant.")
        cur.close()
        # Test using the newly created user
        test_config = self.config.copy()
        test_config['user'] = 'myconnpy'
        test_config['password'] = 'fubar'
        self.assertRaises(errors.NotSupportedError,
                          self.cnx.__class__, **test_config)
        self.cnx = self.cnx.__class__(**self.config)
        cur = self.cnx.cursor()
        try:
            cur.execute("REVOKE SELECT ON %s.* FROM %s" %
                        (self.config['database'], user))
            cur.execute("DROP USER %s" % (user))
        except mysql.connector.Error as exc:
            self.fail("Failed cleaning up user {0}: {1}".format(user, exc))
        cur.close()
class Bug499362(tests.MySQLConnectorTests):
    """lp:499362 Setting character set at connection fails"""

    @cnx_config(charset='latin1')
    @foreach_cnx()
    def test_charset(self):
        """Session charset variables must follow the connection charset.

        The connection opens with charset latin1; after
        set_charset_collation('latin2') the session variables must change
        accordingly. Which table/statement exposes the variables depends
        on the server version.
        """
        cur = self.cnx.cursor()
        ver = self.cnx.get_server_version()
        varlst = ['character_set_client', 'character_set_connection',
                  'character_set_results']
        if ver < (5, 1, 12):
            # NOTE(review): these first two expectation lists are dead —
            # they are unconditionally overwritten a few lines below.
            # Confirm the intent before removing.
            exp1 = [('character_set_client', 'latin1'),
                    ('character_set_connection', 'latin1'),
                    ('character_set_database', 'utf8'),
                    ('character_set_filesystem', 'binary'),
                    ('character_set_results', 'latin1'),
                    ('character_set_server', 'utf8'),
                    ('character_set_system', 'utf8')]
            exp2 = [('character_set_client', 'latin2'),
                    ('character_set_connection', 'latin2'),
                    ('character_set_database', 'utf8'),
                    ('character_set_filesystem', 'binary'),
                    ('character_set_results', 'latin2'),
                    ('character_set_server', 'utf8'),
                    ('character_set_system', 'utf8')]
            varlst = []
            stmt = r"SHOW SESSION VARIABLES LIKE 'character\_set\_%%'"
            exp1 = [('CHARACTER_SET_CONNECTION', 'latin1'),
                    ('CHARACTER_SET_CLIENT', 'latin1'),
                    ('CHARACTER_SET_RESULTS', 'latin1')]
            exp2 = [('CHARACTER_SET_CONNECTION', 'latin2'),
                    ('CHARACTER_SET_CLIENT', 'latin2'),
                    ('CHARACTER_SET_RESULTS', 'latin2')]
        elif ver >= (5, 7, 6):
            # INFORMATION_SCHEMA is deprecated
            exp1 = [('character_set_client', 'latin1'),
                    ('character_set_connection', 'latin1'),
                    ('character_set_results', 'latin1')]
            exp2 = [('character_set_client', 'latin2'),
                    ('character_set_connection', 'latin2'),
                    ('character_set_results', 'latin2')]
            stmt = ("SELECT * FROM performance_schema.session_variables "
                    "WHERE VARIABLE_NAME IN (%s,%s,%s)")
        else:
            exp1 = [('CHARACTER_SET_CONNECTION', 'latin1'),
                    ('CHARACTER_SET_CLIENT', 'latin1'),
                    ('CHARACTER_SET_RESULTS', 'latin1')]
            exp2 = [('CHARACTER_SET_CONNECTION', 'latin2'),
                    ('CHARACTER_SET_CLIENT', 'latin2'),
                    ('CHARACTER_SET_RESULTS', 'latin2')]
            stmt = ("SELECT * FROM INFORMATION_SCHEMA.SESSION_VARIABLES "
                    "WHERE VARIABLE_NAME IN (%s,%s,%s)")
        cur.execute(stmt, varlst)
        res1 = cur.fetchall()
        # Switch the session charset and read the variables again.
        self.cnx.set_charset_collation('latin2')
        cur.execute(stmt, varlst)
        res2 = cur.fetchall()
        cur.close()
        self.cnx.close()
        # cmp_result is an order-insensitive comparison from the tests pkg.
        self.assertTrue(tests.cmp_result(exp1, res1))
        self.assertTrue(tests.cmp_result(exp2, res2))
class Bug501290(tests.MySQLConnectorTests):
    """lp:501290 Client flags are set to None when connecting"""

    def _setup(self):
        # Expected flags: the library defaults, plus PLUGIN_AUTH when the
        # server handshake advertises that capability.
        self.capabilities = self.cnx._handshake['capabilities']
        self.default_flags = constants.ClientFlag.get_default()
        if self.capabilities & constants.ClientFlag.PLUGIN_AUTH:
            self.default_flags |= constants.ClientFlag.PLUGIN_AUTH

    @foreach_cnx()
    def test_default(self):
        """Every default client flag must be set on a fresh connection."""
        self._setup()
        flags = constants.ClientFlag.default
        for flag in flags:
            self.assertTrue(self.cnx._client_flags & flag)

    @foreach_cnx()
    def test_set_unset(self):
        """Setting then unsetting COMPRESS must restore the original flags."""
        self._setup()
        orig = self.cnx._client_flags
        # NOTE(review): `exp` is computed but never asserted against —
        # this looks like a missing assertion; confirm before relying on it.
        exp = self.default_flags | constants.ClientFlag.COMPRESS
        if tests.MYSQL_VERSION < (5, 7):
            exp = exp & ~constants.ClientFlag.CONNECT_ARGS
        self.cnx.set_client_flags([constants.ClientFlag.COMPRESS])
        for flag in constants.ClientFlag.default:
            self.assertTrue(self.cnx._client_flags & flag)
        # A negative flag value requests the flag to be unset.
        self.cnx.set_client_flags([-constants.ClientFlag.COMPRESS])
        self.assertEqual(self.cnx._client_flags, orig)

    @foreach_cnx()
    def test_isset_client_flag(self):
        """isset_client_flag() must report a flag that was forced on."""
        self._setup()
        flag = constants.ClientFlag.COMPRESS
        data = self.default_flags | flag
        self.cnx._client_flags = data
        self.assertEqual(True, self.cnx.isset_client_flag(flag))
class Bug507466(tests.MySQLConnectorTests):
    """lp:507466 BIT values are not converted correctly to Python"""

    def tearDown(self):
        # Best-effort cleanup on a fresh connection; drop failures ignored.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        try:
            cur = cnx.cursor()
            cur.execute("DROP TABLE IF EXISTS myconnpy_bits")
        except:
            pass
        cnx.close()

    @foreach_cnx()
    def test_bits(self):
        """BIT(8..64) columns must round-trip Python ints unchanged."""
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS myconnpy_bits")
        cur.execute("""CREATE TABLE `myconnpy_bits` (
          `id` int NOT NULL AUTO_INCREMENT,
          `c1` bit(8) DEFAULT NULL,
          `c2` bit(16) DEFAULT NULL,
          `c3` bit(24) DEFAULT NULL,
          `c4` bit(32) DEFAULT NULL,
          `c5` bit(40) DEFAULT NULL,
          `c6` bit(48) DEFAULT NULL,
          `c7` bit(56) DEFAULT NULL,
          `c8` bit(64) DEFAULT NULL,
          PRIMARY KEY (id)
        )
        """)
        insert = """insert into myconnpy_bits (c1,c2,c3,c4,c5,c6,c7,c8)
        values (%s,%s,%s,%s,%s,%s,%s,%s)"""
        select = "SELECT c1,c2,c3,c4,c5,c6,c7,c8 FROM myconnpy_bits ORDER BY id"
        data = []
        # One all-zero row, plus the top bit of each column width
        # ((1 << 63) - 1 for the 64-bit column).
        data.append((0, 0, 0, 0, 0, 0, 0, 0))
        data.append((
            1 << 7, 1 << 15, 1 << 23, 1 << 31,
            1 << 39, 1 << 47, 1 << 55, (1 << 63)-1,
        ))
        cur.executemany(insert, data)
        cur.execute(select)
        rows = cur.fetchall()
        self.assertEqual(rows, data)
        self.cnx.close()
class Bug519301(tests.MySQLConnectorTests):
    """lp:519301 Temporary connection failures with 2 exceptions"""

    @foreach_cnx()
    def test_auth(self):
        """Repeated failed logins must consistently raise ProgrammingError.

        Connects 99 times with a bogus account; any other connector error
        fails the test.
        """
        config = self.config.copy()
        # Force TCP. BUG FIX: pop with a default so a configuration
        # without 'unix_socket' cannot raise KeyError.
        config.pop('unix_socket', None)
        config['user'] = 'ham'
        config['password'] = 'spam'
        for _ in range(1, 100):
            try:
                cnx = self.cnx.__class__(**config)
            except errors.ProgrammingError:
                # Expected: access denied for the bogus account.
                pass
            except errors.Error as err:
                self.fail("Failing authenticating: {0}".format(str(err)))
            else:
                # Unexpected success; close to avoid leaking the connection.
                cnx.close()
class Bug524668(tests.MySQLConnectorTests):
    """lp:524668 Error in server handshake with latest code"""

    def test_handshake(self):
        """parse_handshake() must accept a captured 5.0.30 handshake packet."""
        handshake = bytearray(
            b'\x47\x00\x00\x00\x0a\x35\x2e\x30\x2e\x33\x30\x2d\x65'
            b'\x6e\x74\x65\x72\x70\x72\x69\x73\x65\x2d\x67\x70\x6c'
            b'\x2d\x6c\x6f'
            b'\x67\x00\x09\x01\x00\x00\x68\x34\x69\x36\x6f\x50\x21\x4f\x00'
            b'\x2c\xa2\x08\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
            b'\x00'
            b'\x00\x00\x4c\x6e\x67\x39\x26\x50\x44\x40\x57\x72\x59\x48\x00'
        )
        prtcl = protocol.MySQLProtocol()
        try:
            prtcl.parse_handshake(handshake)
        except Exception:
            # BUG FIX: narrowed from a bare except so SystemExit and
            # KeyboardInterrupt are not swallowed as a test failure.
            self.fail("Failed handling handshake")
class Bug571201(tests.MySQLConnectorTests):
    """lp:571201 Problem with more than one statement at a time"""

    def setUp(self):
        self.tbl = 'Bug571201'

    def tearDown(self):
        # Best-effort cleanup; failures dropping the table are ignored.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        try:
            cur = cnx.cursor()
            cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        except:
            pass
        cnx.close()

    @foreach_cnx()
    def test_multistmts(self):
        """execute(..., multi=True) must yield one result per statement."""
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.execute(("CREATE TABLE {0} ( "
                     "id INT AUTO_INCREMENT KEY, "
                     "c1 INT)").format(self.tbl))
        stmts = [
            "SELECT * FROM %s" % (self.tbl),
            "INSERT INTO %s (c1) VALUES (10),(20)" % (self.tbl),
            "SELECT * FROM %s" % (self.tbl),
        ]
        result_iter = cur.execute(';'.join(stmts), multi=True)
        # 1st: empty SELECT; 2nd: 2-row INSERT; 3rd: SELECT of both rows;
        # iterating past the last result raises StopIteration.
        self.assertEqual(None, next(result_iter).fetchone())
        self.assertEqual(2, next(result_iter).rowcount)
        exp = [(1, 10), (2, 20)]
        self.assertEqual(exp, next(result_iter).fetchall())
        self.assertRaises(StopIteration, next, result_iter)
        self.cnx.close()
class Bug551533and586003(tests.MySQLConnectorTests):
    """lp: 551533 as 586003: impossible to retrieve big result sets"""

    def setUp(self):
        self.tbl = 'Bug551533'

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP TABLE IF EXISTS {table}".format(table=self.tbl))
        cnx.close()

    @cnx_config(connection_timeout=10)
    @foreach_cnx()
    def test_select(self):
        """Fetching a 20000-row result set must not raise."""
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {table}".format(table=self.tbl))
        cur.execute(
            ("CREATE TABLE {table} (id INT AUTO_INCREMENT KEY, "
             "c1 VARCHAR(100) DEFAULT 'abcabcabcabcabcabcabcabcabcabc') "
             "ENGINE=INNODB").format(table=self.tbl)
        )
        insert = "INSERT INTO {table} (id) VALUES (%s)".format(table=self.tbl)
        exp = 20000
        # NULL ids let AUTO_INCREMENT number the rows.
        cur.executemany(insert, [(None,)] * exp)
        self.cnx.commit()
        cur.execute(
            'SELECT * FROM {table} LIMIT 20000'.format(table=self.tbl))
        try:
            rows = cur.fetchall()
        except errors.Error as err:
            self.fail("Failed retrieving big result set: {0}".format(err))
        else:
            self.assertEqual(exp, cur.rowcount)
            self.assertEqual(exp, len(rows))
class Bug675425(tests.MySQLConnectorTests):
    """lp: 675425: Problems with apostrophe"""

    def setUp(self):
        self.tbl = 'Bug675425'

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cnx.close()

    @foreach_cnx()
    def test_executemany_escape(self):
        """executemany() must escape quotes and backslashes in values."""
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.execute("CREATE TABLE {0} (c1 VARCHAR(30),"
                    " c2 VARCHAR(30))".format(self.tbl))
        # The last row mixes a backslash-escaped and a raw apostrophe.
        data = [
            ("ham", "spam",),
            ("spam", "ham",),
            ("ham \\' spam", "spam ' ham",)
        ]
        sql = "INSERT INTO {0} VALUES (%s, %s)".format(self.tbl)
        try:
            cur.executemany(sql, data)
        except Exception as exc:
            self.fail(str(exc))
        self.cnx.close()
class Bug695514(tests.MySQLConnectorTests):
    """lp: 695514: Infinite recursion when setting connection client_flags"""

    @foreach_cnx()
    def test_client_flags(self):
        """Passing client_flags as a plain integer must not recurse."""
        try:
            config = tests.get_mysql_config()
            config['connection_timeout'] = 2
            config['client_flags'] = constants.ClientFlag.get_default()
            self.cnx = self.cnx.__class__(**config)
        except Exception:
            # BUG FIX: narrowed from a bare except so SystemExit and
            # KeyboardInterrupt propagate instead of failing the test.
            self.fail("Failed setting client_flags using integer")
class Bug809033(tests.MySQLConnectorTests):
    """lp: 809033: Lost connection causes infinite loop"""

    def setUp(self):
        self.table_name = 'Bug809033'

    def _setup(self):
        # 1024 rows of 255-char defaults so the cross join in the test
        # takes long enough to be killed mid-fetch.
        self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.table_name))
        table = (
            "CREATE TABLE {table} ("
            " id INT UNSIGNED NOT NULL AUTO_INCREMENT,"
            " c1 VARCHAR(255) DEFAULT '{default}',"
            " PRIMARY KEY (id)"
            ")"
        ).format(table=self.table_name, default='a' * 255)
        self.cnx.cmd_query(table)
        stmt = "INSERT INTO {table} (id) VALUES {values}".format(
            table=self.table_name,
            values=','.join(['(NULL)'] * 1024)
        )
        self.cnx.cmd_query(stmt)

    def tearDown(self):
        # Best-effort cleanup; all failures are ignored.
        try:
            cnx = connection.MySQLConnection(**tests.get_mysql_config())
            cnx.cmd_query(
                "DROP TABLE IF EXISTS {0}".format(self.table_name))
            cnx.close()
        except:
            pass

    @foreach_cnx()
    def test_lost_connection(self):
        """Killing the connection mid-fetch must raise, not loop forever."""
        self._setup()

        def kill(connection_id):
            """Kill connection using separate connection"""
            killer = connection.MySQLConnection(**tests.get_mysql_config())
            time.sleep(1)
            killer.cmd_query("KILL {0}".format(connection_id))
            killer.close()

        def sleepy_select(cnx):
            """Execute a SELECT statement which takes a while to complete"""
            cur = cnx.cursor()
            # Ugly query ahead!
            stmt = "SELECT x1.*, x2.* from {table} as x1, {table} as x2".format(
                table=self.table_name)
            cur.execute(stmt)
            # Save the error so we can check in the calling thread
            cnx.test_error = None
            try:
                cur.fetchall()
            except errors.Error as err:
                cnx.test_error = err

        # Run the slow SELECT and the killer concurrently, then verify
        # the worker recorded a connection-lost error.
        worker = Thread(target=sleepy_select, args=[self.cnx])
        killer = Thread(target=kill, args=[self.cnx.connection_id])
        worker.start()
        killer.start()
        worker.join()
        killer.join()
        self.assertTrue(
            isinstance(self.cnx.test_error,
                       (errors.InterfaceError, errors.OperationalError))
        )
        self.cnx.close()
class Bug865859(tests.MySQLConnectorTests):
    """lp: 865859: sock.recv fails to return in some cases (infinite wait)"""

    def setUp(self):
        self.table_name = 'Bug865859'

    @cnx_config(connection_timeout=1)
    @foreach_cnx()
    def test_reassign_connection(self):
        """Rebinding self.cnx must close the old connection (no timeout)."""
        cur = self.cnx.cursor()
        drop_stmt = "DROP TABLE IF EXISTS {0}".format(self.table_name)
        cur.execute(drop_stmt)
        cur.execute("CREATE TABLE {0} (c1 INT)".format(self.table_name))
        cur.execute("INSERT INTO {0} (c1) VALUES (1)".format(self.table_name))
        try:
            # We create a new cnx, replacing current
            self.cnx = self.cnx.__class__(**self.config)
            cur = self.cnx.cursor()
            cur.execute(drop_stmt)
        except errors.InterfaceError as err:
            self.fail(
                "Connection was not closed, we got timeout: {0}".format(err))
        else:
            cur.close()
            self.cnx.close()
class BugOra13395083(tests.MySQLConnectorTests):
    """BUG#13395083: Using time zones"""

    def setUp(self):
        self.table_name = 'BugOra13395083'

    def tearDown(self):
        # NOTE(review): the cleanup connection is never closed explicitly —
        # presumably left to garbage collection; confirm this is intended.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.table_name))

    @cnx_config(time_zone="+00:00")
    @foreach_cnx()
    def test_time_zone(self):
        """TIMESTAMP values must follow the session time_zone setting."""
        utc = tests.UTCTimeZone()
        testzone = tests.TestTimeZone(+2)
        # Store a datetime in UTC into a TIMESTAMP column
        now_utc = datetime.utcnow().replace(microsecond=0, tzinfo=utc)
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.table_name))
        cur.execute("CREATE TABLE {0} (c1 TIMESTAMP)".format(self.table_name))
        cur.execute(
            "INSERT INTO {0} (c1) VALUES (%s)".format(self.table_name),
            (now_utc,))
        self.cnx.commit()
        cur.execute("SELECT c1 FROM {0}".format(self.table_name))
        row = cur.fetchone()
        self.assertEqual(now_utc, row[0].replace(tzinfo=utc))
        # Switching the session to +02:00 must shift the returned value.
        self.cnx.time_zone = "+02:00"
        cur.execute("SELECT c1 FROM {0}".format(self.table_name))
        row = cur.fetchone()
        self.assertEqual(now_utc.astimezone(testzone),
                         row[0].replace(tzinfo=testzone))
        self.cnx.close()
class BugOra13392739(tests.MySQLConnectorTests):
    """BUG#13392739: MySQLConnection.ping()"""

    @cnx_config(connection_timeout=2, unix_socket=None)
    @foreach_cnx()
    def test_ping(self):
        """ping() must raise when unconnected and reconnect only on demand."""
        # A never-connected instance must raise on ping().
        cnx = self.cnx.__class__()
        self.assertRaises(errors.InterfaceError, cnx.ping)
        try:
            self.cnx.ping()
        except Exception as e:
            self.fail("Error raised although connection should be "
                      "available (%s)." % e)
        self.cnx.close()
        # Closed connection, reconnect not requested: must raise.
        self.cnx.assertRaises if False else None  # NOTE(review): remove me? no-op guard not present in original
        self.assertRaises(errors.InterfaceError, self.cnx.ping)
        try:
            self.cnx.ping(reconnect=True)
        except Exception as e:
            self.fail("Error raised although ping should reconnect. (%s)" % e)
        # Temper with the host to which we reconnect to simulate the
        # MySQL not being available.
        self.cnx.disconnect()
        self.cnx._host = 'some-unknown-host-somwhere-on.mars'
        self.assertRaises(errors.InterfaceError, self.cnx.ping, reconnect=True)

    @cnx_config(connection_timeout=2, unix_socket=None)
    @foreach_cnx()
    def test_reconnect(self):
        """reconnect() must restore a dropped connection; failed attempts
        must be reported with the attempt count."""
        self.cnx.disconnect()
        self.assertRaises(errors.InterfaceError, self.cnx.ping)
        try:
            self.cnx.reconnect()
        except:
            self.fail("Errors raised although connection should have been "
                      "reconnected.")
        self.cnx.disconnect()
        # Temper with the host to which we reconnect to simulate the
        # MySQL not being available.
        self.cnx._host = 'some-unknown-host-somwhere-on-mars.example.com'
        self.assertRaises(errors.InterfaceError, self.cnx.reconnect)
        try:
            self.cnx.reconnect(attempts=3)
        except errors.InterfaceError as exc:
            # The error message must report how many attempts were made.
            self.assertTrue('3 attempt(s)' in str(exc))
@unittest.skipIf(sys.version_info < (3, 5), "Objects not collected by GC.")
class BugOra13435186(tests.MySQLConnectorTests):
    """BUG#13435186: object counts must stay flat (no per-iteration leak).

    Each test samples len(gc.get_objects()) after every iteration and
    fails when the samples take more distinct values than tolerated.
    """

    def setUp(self):
        self.sample_size = 100
        # Number of distinct object-count values tolerated before we
        # call the growth a leak.
        self.tolerate = 5
        # BUG FIX: setUp initialized self.samples twice (once via
        # _reset_samples() and once directly); keep the single call.
        self._reset_samples()
        gc.collect()

    def _reset_samples(self):
        """(Re)initialize the per-iteration object-count samples."""
        self.samples = [0, ] * self.sample_size

    def _assert_flat_line(self, samples):
        """Fail when samples contain more distinct values than tolerated."""
        counters = {}
        for value in samples:
            try:
                counters[value] = counters[value] + 1
            except KeyError:
                counters[value] = 1
        if len(counters) > self.tolerate:
            self.fail("Counters {} of collected object higher than tolerated."
                      "".format(len(counters)))

    def test_converter(self):
        """Creating converters must not grow the object count."""
        for i in range(0, self.sample_size):
            conversion.MySQLConverter()
            self.samples[i] = len(gc.get_objects())
        self._assert_flat_line(self.samples)

    @foreach_cnx()
    def test_connection(self):
        """Connections must not leak, closed explicitly or via destructor."""
        # Create a connection and close using close()-method
        for i in range(0, self.sample_size):
            cnx = self.cnx.__class__(**self.config)
            cnx.close()
            self.samples[i] = len(gc.get_objects())
        self._assert_flat_line(self.samples)
        self._reset_samples()
        # Create a connection and rely on destructor to close
        for i in range(0, self.sample_size):
            cnx = self.cnx.__class__(**self.config)
            self.samples[i] = len(gc.get_objects())
        self._assert_flat_line(self.samples)

    @foreach_cnx()
    def test_cursor(self):
        """Cursors must not leak, closed explicitly or via destructor."""
        # BUG FIX: the local was named `cursor`, shadowing the
        # mysql.connector.cursor module imported at file level.
        # Create a cursor and close using close()-method
        for i in range(0, self.sample_size):
            cur = self.cnx.cursor()
            cur.close()
            self.samples[i] = len(gc.get_objects())
        self._assert_flat_line(self.samples)
        self._reset_samples()
        # Create a cursor and rely on destructor to close
        for i in range(0, self.sample_size):
            cur = self.cnx.cursor()
            self.samples[i] = len(gc.get_objects())
        self._assert_flat_line(self.samples)
class BugOra14184643(tests.MySQLConnectorTests):
    """BUG#14184643: cmd_query() disregards waiting results"""

    @foreach_cnx()
    def test_cmd_query(self):
        """A second cmd_query() with unread results must raise InternalError."""
        self.cnx.cmd_query('SELECT 1')
        self.assertRaises(errors.InternalError, self.cnx.cmd_query,
                          'SELECT 2')

    @foreach_cnx(connection.MySQLConnection)
    def test_get_rows(self):
        """get_rows()/get_row() past the result set must raise."""
        self.cnx.cmd_query('SELECT 1')
        self.cnx.get_rows()
        self.assertRaises(errors.InternalError, self.cnx.get_rows)
        self.cnx.cmd_query('SELECT 1')
        self.cnx.get_row()
        self.assertEqual(None, self.cnx.get_row()[0])
        self.assertRaises(errors.InternalError, self.cnx.get_row)

    @unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
    @foreach_cnx(CMySQLConnection)
    def test_get_rows_cext(self):
        """C extension variant of the get_rows() exhaustion check.

        BUG FIX: this method was also named test_get_rows, silently
        shadowing the pure-Python variant above so it never ran.
        """
        self.cnx.cmd_query('SELECT 1')
        while True:
            self.cnx.get_rows()
            if not self.cnx.next_result():
                break
            else:
                self.fail("Found multiple results where only 1 was expected")
        self.assertRaises(errors.InternalError, self.cnx.get_rows)

    @foreach_cnx()
    def test_cmd_statistics(self):
        """cmd_statistics() with unread results must raise InternalError."""
        self.cnx.cmd_query('SELECT 1')
        self.assertRaises(errors.InternalError, self.cnx.cmd_statistics)
        self.cnx.get_rows()
class BugOra14208326(tests.MySQLConnectorTests):
    """BUG#14208326: cmd_query() does not handle multiple statements"""

    def setUp(self):
        self.table = "BugOra14208326"

    def _setup(self):
        # (Re)create the working table on the current connection.
        self.cnx.cmd_query("DROP TABLE IF EXISTS %s" % self.table)
        self.cnx.cmd_query("CREATE TABLE %s (id INT)" % self.table)

    def tearDown(self):
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.table))

    @foreach_cnx(connection.MySQLConnection)
    def test_cmd_query(self):
        """cmd_query() must reject multi-statement strings."""
        self._setup()
        self.assertRaises(errors.InterfaceError,
                          self.cnx.cmd_query, 'SELECT 1; SELECT 2')

    @unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
    @foreach_cnx(CMySQLConnection)
    def test_cmd_query_iter(self):
        """cmd_query_iter() must iterate each result of a multi-statement."""
        self._setup()
        stmt = 'SELECT 1; INSERT INTO %s VALUES (1),(2); SELECT 3'
        results = []
        try:
            for result in self.cnx.cmd_query_iter(stmt % self.table):
                results.append(result)
                if 'columns' in result:
                    # A result set follows this header; drain its rows.
                    results.append(self.cnx.get_rows())
        except NotImplementedError:
            # Some cnx are not implementing this
            if not isinstance(self.cnx, CMySQLConnection):
                raise
class BugOra14201459(tests.MySQLConnectorTests):
    """BUG#14201459: Server error 1426 should raise ProgrammingError"""

    def setUp(self):
        self.tbl = 'Bug14201459'

    def tearDown(self):
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self._setup()

    def _setup(self):
        # Drop the test table (shared by setup and cleanup paths).
        self.cnx.cmd_query("DROP TABLE IF EXISTS %s" % (self.tbl))

    @foreach_cnx()
    def test_error1426(self):
        """TIME(7) is invalid; the server error must map to ProgrammingError.

        Before 5.6.4 fractional TIME is a syntax error (1064); from 5.6.4
        on, the precision is out of range (1426).
        """
        cur = self.cnx.cursor()
        self._setup()
        create = "CREATE TABLE %s (c1 TIME(7))" % self.tbl
        try:
            cur.execute(create)
        except errors.ProgrammingError as exception:
            if tests.MYSQL_VERSION < (5, 6, 4) and exception.errno != 1064:
                self.fail("ProgrammingError is not Error 1064")
            elif tests.MYSQL_VERSION >= (5, 6, 4) and exception.errno != 1426:
                self.fail("ProgrammingError is not Error 1426")
        else:
            self.fail("ProgrammingError not raised")
class BugOra14231160(tests.MySQLConnectorTests):
    """BUG#14231160: lastrowid, description and rowcount read-only"""

    @foreach_cnx()
    def test_readonly_properties(self):
        """Assigning to the PEP 249 result attributes must fail."""
        cur = self.cnx.cursor()
        readonly_attrs = ('description', 'rowcount', 'lastrowid')
        for attr in readonly_attrs:
            assigned = True
            try:
                setattr(cur, attr, 'spam')
            except AttributeError:
                # Read-only, exactly as required.
                assigned = False
            if assigned:
                self.fail('Need read-only property: {0}'.format(attr))
class BugOra14259954(tests.MySQLConnectorTests):
    """BUG#14259954: ON DUPLICATE KEY UPDATE VALUE FAILS REGEX"""

    def setUp(self):
        self.tbl = 'Bug14259954'

    def _setup(self):
        # Composite primary key so ON DUPLICATE KEY UPDATE is reachable.
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS %s" % (self.tbl))
        create = ("CREATE TABLE %s ( "
                  "`id` int(11) NOT NULL AUTO_INCREMENT, "
                  "`c1` int(11) NOT NULL DEFAULT '0', "
                  "PRIMARY KEY (`id`,`c1`))" % (self.tbl))
        cur.execute(create)

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))

    @foreach_cnx()
    def test_executemany(self):
        """executemany()'s INSERT-rewriting must accept an
        ON DUPLICATE KEY UPDATE ... VALUES() clause."""
        self._setup()
        cur = self.cnx.cursor()
        query = ("INSERT INTO %s (id,c1) VALUES (%%s,%%s) "
                 "ON DUPLICATE KEY UPDATE c1=VALUES(c1)") % self.tbl
        try:
            cur.executemany(query, [(1, 1), (2, 2)])
        except errors.ProgrammingError as err:
            self.fail("Regular expression fails with executemany(): %s" %
                      err)
class BugOra14548043(tests.MySQLConnectorTests):
    """BUG#14548043: ERROR MESSAGE SHOULD BE IMPROVED TO DIAGNOSE THE PROBLEM
    """

    @foreach_cnx()
    def test_unix_socket(self):
        """An over-long bogus socket path must raise errno 2002."""
        cfg = self.config.copy()
        bogus_socket = os.path.join(
            tempfile.gettempdir(), 'a' * 100 + 'myconnpy_bug14548043.test')
        cfg['unix_socket'] = bogus_socket
        try:
            self.cnx.__class__(**cfg)
        except errors.InterfaceError as exc:
            # 2002: "Can't connect to local MySQL server through socket".
            self.assertEqual(2002, exc.errno)
class BugOra14754894(tests.MySQLConnectorTests):
    """BUG#14754894: executemany() with pyformat (dict) parameters must
    not raise ValueError."""

    def setUp(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        self.tbl = 'BugOra14754894'
        cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cnx.cmd_query("CREATE TABLE {0} (c1 INT)".format(self.tbl))
        # BUG FIX: the setup connection was leaked.
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP TABLE IF EXISTS %s" % (self.tbl))
        # BUG FIX: the cleanup connection was leaked.
        cnx.close()

    @foreach_cnx()
    def test_executemany(self):
        self.cnx.cmd_query("TRUNCATE TABLE {0}".format(self.tbl))
        cur = self.cnx.cursor()
        insert = "INSERT INTO {0} (c1) VALUES (%(c1)s)".format(self.tbl)
        data = [{'c1': 1}]
        try:
            # BUG FIX: pass `data` instead of a duplicated literal, so
            # the assertion below checks what was actually inserted.
            cur.executemany(insert, data)
        except ValueError as err:
            self.fail(err)
        cur.execute("SELECT c1 FROM %s" % self.tbl)
        self.assertEqual(data[0]['c1'], cur.fetchone()[0])
        cur.close()
@unittest.skipIf(not tests.IPV6_AVAILABLE, "IPv6 testing disabled")
class BugOra15876886(tests.MySQLConnectorTests):
    """BUG#15876886: CONNECTOR/PYTHON CAN NOT CONNECT TO MYSQL THROUGH IPV6
    """

    @foreach_cnx()
    def test_ipv6(self):
        """Connecting over the IPv6 loopback address must succeed."""
        ipv6_config = self.config.copy()
        ipv6_config['host'] = '::1'
        ipv6_config['unix_socket'] = None
        try:
            cnx = self.cnx.__class__(**ipv6_config)
        except errors.InterfaceError as err:
            self.fail("Can not connect using IPv6: {0}".format(str(err)))
        else:
            cnx.close()
class BugOra15915243(tests.MySQLConnectorTests):
    """BUG#15915243: PING COMMAND ALWAYS RECONNECTS TO THE DATABASE
    """

    @foreach_cnx()
    def test_ping(self):
        """ping() must only reconnect when explicitly asked to."""
        original_id = self.cnx.connection_id
        self.cnx.ping()
        # Do not reconnect
        self.assertEqual(original_id, self.cnx.connection_id)
        self.cnx.close()
        # Do not reconnect
        self.assertRaises(errors.InterfaceError, self.cnx.ping)
        # Do reconnect
        self.cnx.ping(reconnect=True)
        self.assertNotEqual(original_id, self.cnx.connection_id)
        self.cnx.close()
class BugOra15916486(tests.MySQLConnectorTests):
    """BUG#15916486: RESULTS AFTER STORED PROCEDURE WITH ARGUMENTS ARE NOT KEPT
    """

    def setUp(self):
        # Two procedures: sp1 takes IN/OUT arguments, sp2 takes none;
        # both produce two single-row result sets.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP PROCEDURE IF EXISTS sp1")
        cur.execute("DROP PROCEDURE IF EXISTS sp2")
        sp1 = ("CREATE PROCEDURE sp1(IN pIn INT, OUT pOut INT)"
               " BEGIN SELECT 1; SET pOut := pIn; SELECT 2; END")
        sp2 = ("CREATE PROCEDURE sp2 ()"
               " BEGIN SELECT 1; SELECT 2; END")
        cur.execute(sp1)
        cur.execute(sp2)
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        try:
            cur.execute("DROP PROCEDURE IF EXISTS sp1")
            cur.execute("DROP PROCEDURE IF EXISTS sp2")
        except:
            pass  # Clean up fail is acceptable for this test
        cnx.close()

    @foreach_cnx()
    def test_callproc_with_args(self):
        """callproc() with args must return them and keep both result sets."""
        cur = self.cnx.cursor()
        exp = (5, 5)
        self.assertEqual(exp, cur.callproc('sp1', (5, 0)))
        exp = [[(1,)], [(2,)]]
        results = []
        for result in cur.stored_results():
            results.append(result.fetchall())
        self.assertEqual(exp, results)

    @foreach_cnx()
    def test_callproc_without_args(self):
        """callproc() without args must keep both result sets."""
        cur = self.cnx.cursor()
        exp = ()
        self.assertEqual(exp, cur.callproc('sp2'))
        exp = [[(1,)], [(2,)]]
        results = []
        for result in cur.stored_results():
            results.append(result.fetchall())
        self.assertEqual(exp, results)
@unittest.skipIf(os.name == 'nt',
                 "Cannot test error handling when doing handshake on Windows")
@unittest.skipIf(tests.MYSQL_VERSION > (8, 0, 4),
                 "Revoked users can no more grant")
class BugOra15836979(tests.MySQLConnectorTests):
    """BUG#15836979: UNCLEAR ERROR MESSAGE CONNECTING USING UNALLOWED IP ADDRESS
    """

    def setUp(self):
        # Remove the loopback accounts so a 127.0.0.1 connection is refused.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP USER 'root'@'127.0.0.1'")
        try:
            cnx.cmd_query("DROP USER 'root'@'::1'")
        except errors.DatabaseError:
            # Some MySQL servers have no IPv6 entry
            pass
        cnx.close()

    def tearDown(self):
        # Restore the loopback accounts dropped in setUp().
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query(
            "GRANT ALL PRIVILEGES ON *.* TO 'root'@'127.0.0.1' "
            "WITH GRANT OPTION")
        cnx.cmd_query(
            "GRANT ALL PRIVILEGES ON *.* TO 'root'@'::1' "
            "WITH GRANT OPTION")
        cnx.close()

    @foreach_cnx()
    def test_handshake(self):
        """The refusal must mention access denied / host not allowed."""
        config = self.config.copy()
        config['host'] = '127.0.0.1'
        config['unix_socket'] = None
        try:
            self.cnx.__class__(**config)
        except errors.Error as exc:
            self.assertTrue(
                'Access denied' in str(exc) or 'not allowed' in str(exc),
                'Wrong error message, was: {0}'.format(str(exc)))
class BugOra16217743(tests.MySQLConnectorTests):
    """BUG#16217743: CALLPROC FUNCTION WITH STRING PARAMETERS
    """

    def setUp(self):
        # Table plus a procedure that inserts its two arguments into it.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP TABLE IF EXISTS bug16217743")
        cnx.cmd_query("DROP PROCEDURE IF EXISTS sp_bug16217743")
        cnx.cmd_query("CREATE TABLE bug16217743 (c1 VARCHAR(20), c2 INT)")
        cnx.cmd_query(
            "CREATE PROCEDURE sp_bug16217743 (p1 VARCHAR(20), p2 INT) "
            "BEGIN INSERT INTO bug16217743 (c1, c2) "
            "VALUES (p1, p2); END;")

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP TABLE IF EXISTS bug16217743")
        cnx.cmd_query("DROP PROCEDURE IF EXISTS sp_bug16217743")

    @foreach_cnx()
    def test_procedure(self):
        """callproc() must pass string and int arguments through intact."""
        exp = ('ham', 42)
        cur = self.cnx.cursor()
        cur.callproc('sp_bug16217743', ('ham', 42))
        cur.execute("SELECT c1, c2 FROM bug16217743")
        self.assertEqual(exp, cur.fetchone())
@unittest.skipIf(not tests.SSL_AVAILABLE,
                 "BugOra16217667 test failed. Python lacks SSL support.")
class BugOra16217667(tests.MySQLConnectorTests):
    """BUG#16217667: PYTHON CONNECTOR 3.2 SSL CONNECTION FAILS
    """

    def setUp(self):
        # Create an 'ssluser' account that REQUIREs an X509 client cert.
        # MySQL < 5.7.21 accepts REQUIRE X509 in GRANT; newer servers need
        # a separate ALTER USER statement.
        config = tests.get_mysql_config()
        self.admin_cnx = connection.MySQLConnection(**config)
        self.admin_cnx.cmd_query(
            "CREATE USER 'ssluser'@'{host}'".format(
                db=config['database'], host=tests.get_mysql_config()['host']))
        if tests.MYSQL_VERSION < (5, 7, 21):
            self.admin_cnx.cmd_query(
                "GRANT ALL ON {db}.* TO 'ssluser'@'{host}' REQUIRE X509"
                "".format(db=config['database'],
                          host=tests.get_mysql_config()['host']))
        else:
            self.admin_cnx.cmd_query(
                "GRANT ALL ON {db}.* TO 'ssluser'@'{host}'"
                "".format(db=config['database'],
                          host=tests.get_mysql_config()['host']))
            self.admin_cnx.cmd_query(
                "ALTER USER 'ssluser'@'{host}' REQUIRE X509"
                "".format(db=config['database'],
                          host=tests.get_mysql_config()['host']))

    def tearDown(self):
        self.admin_cnx.cmd_query("DROP USER 'ssluser'@'{0}'".format(
            tests.get_mysql_config()['host']))

    @foreach_cnx()
    def test_sslauth(self):
        """Connecting as ssluser with CA/cert/key must yield an SSL session."""
        config = self.config.copy()
        config['user'] = 'ssluser'
        config['password'] = ''
        config['unix_socket'] = None
        config['ssl_verify_cert'] = True
        # Client certificate material shipped with the test suite.
        config.update({
            'ssl_ca': os.path.abspath(
                os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem')),
            'ssl_cert': os.path.abspath(
                os.path.join(tests.SSL_DIR, 'tests_client_cert.pem')),
            'ssl_key': os.path.abspath(
                os.path.join(tests.SSL_DIR, 'tests_client_key.pem')),
        })
        try:
            self.cnx = self.cnx.__class__(**config)
        except errors.Error as exc:
            # NOTE(review): on failure this only asserts the message
            # mentions SSL and then still queries self.cnx below —
            # confirm the fall-through is intentional.
            self.assertTrue('ssl' in str(exc).lower(), str(exc))
        # A non-empty Ssl_cipher proves the session is encrypted.
        self.cnx.cmd_query("SHOW STATUS LIKE 'Ssl_cipher'")
        self.assertTrue(self.cnx.get_rows()[0][0] != '')
@unittest.skipIf(not tests.SSL_AVAILABLE,
                 "BugOra16316049 test failed. Python lacks SSL support.")
class BugOra16316049(tests.MySQLConnectorTests):
    """ SSL ERROR: [SSL: TLSV1_ALERT_UNKNOWN_CA] AFTER FIX 6217667"""

    def setUp(self):
        # Create an 'ssluser' account that requires an SSL connection (but
        # not an X509 certificate).  Server versions differ in how the
        # REQUIRE clause is applied (GRANT vs. ALTER USER).
        config = tests.get_mysql_config()
        self.host = config['host']
        cnx = connection.MySQLConnection(**config)
        if tests.MYSQL_VERSION < (5, 7, 21):
            cnx.cmd_query(
                "GRANT ALL ON {db}.* TO 'ssluser'@'{host}' REQUIRE SSL".format(
                    db=config['database'], host=tests.get_mysql_config()['host']))
        else:
            cnx.cmd_query(
                "CREATE USER 'ssluser'@'{host}'".format(
                    db=config['database'],
                    host=tests.get_mysql_config()['host']))
            cnx.cmd_query(
                "GRANT ALL ON {db}.* TO 'ssluser'@'{host}'".format(
                    db=config['database'],
                    host=tests.get_mysql_config()['host']))
            cnx.cmd_query(
                "ALTER USER 'ssluser'@'{host}' REQUIRE SSL".format(
                    db=config['database'],
                    host=tests.get_mysql_config()['host']))
        cnx.close()

    def tearDown(self):
        # Remove the ssluser account.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP USER 'ssluser'@'{host}'".format(host=self.host))
        cnx.close()

    @foreach_cnx()
    def test_ssl(self):
        """Wrong CA must raise; a correct CA must yield an SSL session."""
        ssl_ca = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'))
        ssl_cert = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_client_cert.pem'))
        ssl_key = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_client_key.pem'))

        config = self.config.copy()
        config.update({
            'ssl_ca': None,
            'ssl_cert': None,
            'ssl_key': None,
        })

        # Use wrong value for ssl_ca
        config['user'] = 'ssluser'
        config['password'] = ''
        config['unix_socket']= None
        config['ssl_ca'] = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_casdfasdfdsaa_cert.pem'))
        config['ssl_cert'] = ssl_cert
        config['ssl_key'] = ssl_key
        config['ssl_verify_cert'] = True

        # An Exception should be raised
        try:
            self.cnx.__class__(**config)
        except errors.Error as exc:
            exc_str = str(exc).lower()
            self.assertTrue('ssl' in exc_str or 'no such file' in exc_str)

        # Use correct value
        config['ssl_ca'] = ssl_ca
        config['host'] = 'localhost'  # common name must be equal
        try:
            self.cnx = self.cnx.__class__(**config)
        except errors.Error as exc:
            if exc.errno == 1045 and ':' not in self.host:
                # For IPv4
                self.fail("Auth failed:" + str(exc))

        if ':' in self.host:
            # Special case for IPv6: the certificate's common name cannot
            # match, so skip verification and connect to the real host.
            config['ssl_verify_cert'] = False
            config['host'] = self.host
            try:
                self.cnx = self.cnx.__class__(**config)
            except errors.Error as exc:
                if exc.errno == 1045 and not tests.IPV6_AVAILABLE:
                    self.fail("Auth failed:" + str(exc))

        self.cnx.cmd_query("SHOW STATUS LIKE 'Ssl_cipher'")
        self.assertTrue(self.cnx.get_rows()[0][0] != '')
class BugOra16662920(tests.MySQLConnectorTests):
    """BUG#16662920: FETCHALL() IGNORES NEXT_ROW FOR BUFFERED CURSORS
    """
    def setUp(self):
        """Create the test table and insert unordered fixture rows."""
        self.tbl = 'BugOra16662920'
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.execute(
            "CREATE TABLE {0} (id INT AUTO_INCREMENT, c1 VARCHAR(20), "
            "PRIMARY KEY (id)) ENGINE=InnoDB".format(self.tbl)
        )
        # Rows are inserted out of alphabetical order on purpose; the tests
        # select ORDER BY c1 so ids come back shuffled.
        data = [('a',), ('c',), ('e',), ('d',), ('g',), ('f',)]
        cur.executemany("INSERT INTO {0} (c1) VALUES (%s)".format(self.tbl),
                        data)
        cur.close()
        cnx.commit()
        cnx.close()  # was leaked before; release the setup connection

    def tearDown(self):
        """Drop the test table."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cnx.close()

    @foreach_cnx()
    def test_buffered(self):
        """fetchone/fetchmany/fetchall must not skip rows on buffered cursors."""
        cur = self.cnx.cursor(buffered=True)
        cur.execute("SELECT * FROM {0} ORDER BY c1".format(self.tbl))
        self.assertEqual((1, 'a'), cur.fetchone())
        exp = [(2, 'c'), (4, 'd'), (3, 'e')]
        self.assertEqual(exp, cur.fetchmany(3))
        exp = [(6, 'f'), (5, 'g')]
        self.assertEqual(exp, cur.fetchall())
        cur.close()

    @foreach_cnx()
    def test_buffered_raw(self):
        """Same as test_buffered but with a raw cursor (bytes values)."""
        cur = self.cnx.cursor(buffered=True, raw=True)
        cur.execute("SELECT * FROM {0} ORDER BY c1".format(self.tbl))
        exp_one = (b'1', b'a')
        exp_many = [(b'2', b'c'), (b'4', b'd'), (b'3', b'e')]
        exp_all = [(b'6', b'f'), (b'5', b'g')]
        self.assertEqual(exp_one, cur.fetchone())
        self.assertEqual(exp_many, cur.fetchmany(3))
        self.assertEqual(exp_all, cur.fetchall())
        cur.close()
class BugOra17041412(tests.MySQLConnectorTests):
    """BUG#17041412: FETCHALL() DOES NOT RETURN SELF._NEXTROW IF AVAILABLE
    """
    def setUp(self):
        self.table_name = 'BugOra17041412'
        self.data = [(1,), (2,), (3,)]
        self.data_raw = [(b'1',), (b'2',), (b'3',)]

    def _setup(self):
        """(Re)create the test table and load the fixture rows."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS %s" % self.table_name)
        cur.execute("CREATE TABLE %s (c1 INT)" % self.table_name)
        cur.executemany(
            "INSERT INTO %s (c1) VALUES (%%s)" % self.table_name,
            self.data)
        cnx.commit()
        cnx.close()  # was leaked before; release the setup connection

    def tearDown(self):
        """Drop the test table."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS %s" % self.table_name)
        cnx.close()  # was leaked before; release the teardown connection

    @foreach_cnx()
    def test_one_all(self):
        """fetchall() after fetchone() must return the remaining rows."""
        self._setup()
        cur = self.cnx.cursor()
        cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
        self.assertEqual(self.data[0], cur.fetchone())
        self.assertEqual(1, cur.rowcount)
        self.assertEqual(self.data[1:], cur.fetchall())
        self.assertEqual(3, cur.rowcount)

    @foreach_cnx()
    def test_many_all(self):
        """fetchall() after fetchmany() must return the remaining rows."""
        self._setup()
        cur = self.cnx.cursor()
        cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
        self.assertEqual(self.data[0:2], cur.fetchmany(2))
        self.assertEqual(2, cur.rowcount)
        self.assertEqual(self.data[2:], cur.fetchall())
        self.assertEqual(3, cur.rowcount)

    @foreach_cnx()
    def test_many(self):
        """fetchall() must return all rows, and [] for an empty result."""
        self._setup()
        cur = self.cnx.cursor()
        cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
        self.assertEqual(self.data, cur.fetchall())
        self.assertEqual(3, cur.rowcount)
        cur.execute("SELECT * FROM %s WHERE c1 > %%s" % self.table_name,
                    (self.data[-1][0] + 100,))
        self.assertEqual([], cur.fetchall())

    @foreach_cnx()
    def test_raw_one_all(self):
        """Raw-cursor variant of test_one_all (bytes values)."""
        self._setup()
        cur = self.cnx.cursor(raw=True)
        cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
        self.assertEqual(self.data_raw[0], cur.fetchone())
        self.assertEqual(1, cur.rowcount)
        self.assertEqual(self.data_raw[1:], cur.fetchall())
        self.assertEqual(3, cur.rowcount)

    @foreach_cnx()
    def test_raw_many_all(self):
        """Raw-cursor variant of test_many_all (bytes values)."""
        self._setup()
        cur = self.cnx.cursor(raw=True)
        cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
        self.assertEqual(self.data_raw[0:2], cur.fetchmany(2))
        self.assertEqual(2, cur.rowcount)
        self.assertEqual(self.data_raw[2:], cur.fetchall())
        self.assertEqual(3, cur.rowcount)

    @foreach_cnx()
    def test_raw_many(self):
        """Raw-cursor variant of test_many (bytes values)."""
        self._setup()
        cur = self.cnx.cursor(raw=True)
        cur.execute("SELECT * FROM %s ORDER BY c1" % self.table_name)
        self.assertEqual(self.data_raw, cur.fetchall())
        self.assertEqual(3, cur.rowcount)
        cur.execute("SELECT * FROM %s WHERE c1 > 1000" % self.table_name)
        self.assertEqual([], cur.fetchall())
class BugOra16819486(tests.MySQLConnectorTests):
    """BUG#16819486: ERROR 1210 TO BE HANDLED
    """
    def setUp(self):
        """Create the test table and insert the fixture rows."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS BugOra16819486")
        cur.execute("CREATE TABLE BugOra16819486 (c1 INT, c2 INT)")
        cur.executemany("INSERT INTO BugOra16819486 VALUES (%s, %s)",
                        [(1, 10), (2, 20), (3, 30)])
        cnx.commit()
        cnx.close()  # was leaked before; release the setup connection

    def tearDown(self):
        """Drop the test table."""
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS BugOra16819486")
        cnx.close()

    @foreach_cnx()
    def test_error1210(self):
        """Too few parameters for a prepared statement must raise; a correct
        call must still succeed afterwards."""
        cur = self.cnx.cursor(prepared=True)
        prep_stmt = "SELECT * FROM BugOra16819486 WHERE c1 = %s AND c2 = %s"
        self.assertRaises(mysql.connector.ProgrammingError,
                          cur.execute, prep_stmt, (1,))
        prep_stmt = "SELECT * FROM BugOra16819486 WHERE c1 = %s AND c2 = %s"
        exp = [(1, 10)]
        cur.execute(prep_stmt, (1, 10))
        self.assertEqual(exp, cur.fetchall())
class BugOra16656621(tests.MySQLConnectorTests):
    """BUG#16656621: IMPOSSIBLE TO ROLLBACK WITH UNREAD RESULTS
    """
    def setUp(self):
        """Create the InnoDB test table."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS BugOra16656621")
        cur.execute(
            "CREATE TABLE BugOra16656621 "
            "(id INT AUTO_INCREMENT, c1 VARCHAR(20), "
            "PRIMARY KEY (id)) ENGINE=InnoDB")
        cnx.close()  # was leaked before; release the setup connection

    def tearDown(self):
        """Drop the test table."""
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS BugOra16656621")
        cnx.close()  # was leaked before; release the teardown connection

    @foreach_cnx()
    def test_rollback(self):
        """rollback() must succeed even when a result set is unread."""
        cur = self.cnx.cursor()
        cur.execute(
            "INSERT INTO BugOra16656621 (c1) VALUES ('a'),('b'),('c')")
        self.cnx.commit()
        # Leave the SELECT result unread on purpose, then roll back.
        cur.execute("SELECT * FROM BugOra16656621")
        try:
            self.cnx.rollback()
        except mysql.connector.InternalError:
            self.fail("Rollback not possible with unread results")
class BugOra16660356(tests.MySQLConnectorTests):
    """BUG#16660356: USING EXECUTEMANY WITH EMPTY DATA SHOULD DO NOTHING
    """
    def setUp(self):
        """Create the scratch table used by the test."""
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cursor = cnx.cursor()
        cursor.execute("DROP TABLE IF EXISTS bug16660356")
        cursor.execute(
            "CREATE TABLE bug16660356 (id INT AUTO_INCREMENT, c1 VARCHAR(20), "
            "PRIMARY KEY (id)) ENGINE=InnoDB"
        )
        cnx.close()

    def tearDown(self):
        """Drop the scratch table."""
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS bug16660356")
        cnx.close()

    @foreach_cnx()
    def test_executemany(self):
        """executemany() with an empty parameter sequence must be a no-op."""
        cursor = self.cnx.cursor()
        stmt = "INSERT INTO bug16660356 (c1) VALUES (%s)"
        try:
            cursor.executemany(stmt, [])
        except mysql.connector.ProgrammingError:
            self.fail("executemany raise ProgrammingError with empty data")
class BugOra17041240(tests.MySQLConnectorTests):
    """BUG#17041240: UNCLEAR ERROR CLOSING CURSOR WITH UNREAD RESULTS
    """
    def setUp(self):
        self.table_name = 'BugOra17041240'
        self.data = [(1,), (2,), (3,)]

    def _setup(self):
        # (Re)create the test table and load the fixture rows.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {table}".format(
            table=self.table_name))
        cur.execute("CREATE TABLE {table} (c1 INT)".format(
            table=self.table_name))
        cur.executemany(
            "INSERT INTO {table} (c1) VALUES (%s)".format(
                table=self.table_name),
            self.data)
        cnx.commit()
        cnx.close()

    def tearDown(self):
        # Drop the test table.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {table}".format(
            table=self.table_name))
        cnx.close()

    @foreach_cnx()
    def test_cursor_close(self):
        """Closing a cursor with an unread row must raise InternalError and
        leave the remaining row fetchable."""
        self._setup()
        cur = self.cnx.cursor()
        cur.execute("SELECT * FROM {table} ORDER BY c1".format(
            table=self.table_name))
        self.assertEqual(self.data[0], cur.fetchone())
        self.assertEqual(self.data[1], cur.fetchone())
        self.assertRaises(mysql.connector.InternalError, cur.close)
        self.assertEqual(self.data[2], cur.fetchone())

    @foreach_cnx()
    def test_cursor_new(self):
        """Requesting a new cursor with unread results must raise
        InternalError and leave the remaining row fetchable."""
        self._setup()
        cur = self.cnx.cursor()
        cur.execute("SELECT * FROM {table} ORDER BY c1".format(
            table=self.table_name))
        self.assertEqual(self.data[0], cur.fetchone())
        self.assertEqual(self.data[1], cur.fetchone())
        self.assertRaises(mysql.connector.InternalError, self.cnx.cursor)
        self.assertEqual(self.data[2], cur.fetchone())
class BugOra17065366(tests.MySQLConnectorTests):
    """BUG#17065366: EXECUTEMANY FAILS USING MYSQL FUNCTION FOR INSERTS
    """
    def _setup(self):
        # Create the test table.  NOTE(review): self.table_name is assigned
        # here, not in setUp -- tearDown relies on a test having called
        # _setup() first; confirm this ordering always holds.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        self.table_name = 'BugOra17065366'
        cur.execute(
            "DROP TABLE IF EXISTS {table}".format(table=self.table_name))
        cur.execute(
            "CREATE TABLE {table} ( "
            "id INT UNSIGNED NOT NULL AUTO_INCREMENT KEY, "
            "c1 INT, c2 DATETIME) ENGINE=INNODB".format(table=self.table_name))
        cnx.close()

    def tearDown(self):
        # Drop the test table.
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {table}".format(
            table=self.table_name))
        cnx.close()

    @foreach_cnx()
    def test_executemany(self):
        """executemany() must handle INSERT statements whose VALUES clause
        contains a MySQL function call and a trailing comment/clause."""
        self._setup()
        cur = self.cnx.cursor()
        adate = datetime(2012, 9, 30)
        # The DATE() call, the comment, and ON DUPLICATE KEY UPDATE are all
        # part of the regression: the executemany() regex must still find
        # and expand the VALUES (...) portion.
        stmt = (
            "INSERT INTO {table} (id, c1, c2) "
            "VALUES (%s, %s, DATE('{date} 13:07:00'))"
            "/* Using DATE() */ ON DUPLICATE KEY UPDATE c1 = id"
        ).format(table=self.table_name, date=adate.strftime('%Y-%m-%d'))

        exp = [
            (1, 0, datetime(2012, 9, 30, 0, 0)),
            (2, 0, datetime(2012, 9, 30, 0, 0))
        ]
        cur.executemany(stmt, [(None, 0), (None, 0)])
        self.cnx.commit()
        cur.execute("SELECT * FROM {table}".format(table=self.table_name))
        rows = cur.fetchall()
        self.assertEqual(exp, rows)

        # Second run hits the ON DUPLICATE KEY UPDATE branch.
        exp = [
            (1, 1, datetime(2012, 9, 30, 0, 0)),
            (2, 2, datetime(2012, 9, 30, 0, 0))
        ]
        cur.executemany(stmt, [(1, 1), (2, 2)])
        self.cnx.commit()
        cur.execute("SELECT * FROM {table}".format(table=self.table_name))
        rows = cur.fetchall()
        self.assertEqual(exp, rows)
class BugOra16933795(tests.MySQLConnectorTests):
    """BUG#16933795: ERROR.MSG ATTRIBUTE DOES NOT CONTAIN CORRECT VALUE
    """
    def test_error(self):
        """Error.msg must keep an explicit message, or fall back to the
        generic lookup for a known error number."""
        custom_msg = "Some error message"
        err = mysql.connector.Error(msg=custom_msg, errno=-1024)
        self.assertEqual(custom_msg, err.msg)

        generic_msg = "Unknown MySQL error"
        err = mysql.connector.Error(errno=2000)
        self.assertEqual(generic_msg, err.msg)
        self.assertEqual("2000: " + generic_msg, str(err))
class BugOra17022399(tests.MySQLConnectorTests):
    """BUG#17022399: EXECUTING AFTER CONNECTION CLOSED GIVES UNCLEAR ERROR
    """
    @foreach_cnx()
    def test_execute(self):
        """Executing on a closed connection must raise CR 2055."""
        cur = self.cnx.cursor()
        self.cnx.close()
        try:
            cur.execute("SELECT 1")
        except mysql.connector.OperationalError as exc:
            self.assertEqual(2055, exc.errno, 'Was: ' + str(exc))
        else:
            # Previously the test silently passed when no error was raised.
            self.fail("OperationalError was not raised")

    @cnx_config(client_flags=[constants.ClientFlag.COMPRESS])
    @foreach_cnx()
    def test_execute_compressed(self):
        """Same as test_execute, over a compressed connection."""
        cur = self.cnx.cursor()
        self.cnx.close()
        try:
            cur.execute("SELECT 1")
        except mysql.connector.OperationalError as exc:
            self.assertEqual(2055, exc.errno, 'Was: ' + str(exc))
        else:
            # Previously the test silently passed when no error was raised.
            self.fail("OperationalError was not raised")
class BugOra16369511(tests.MySQLConnectorTests):
    """BUG#16369511: LOAD DATA LOCAL INFILE IS MISSING
    """
    def setUp(self):
        self.data_file = os.path.join('tests', 'data', 'local_data.csv')

    def _setup(self):
        # (Re)create the target table for LOAD DATA.
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS local_data")
        cnx.cmd_query(
            "CREATE TABLE local_data (id int, c1 VARCHAR(6), c2 VARCHAR(6))")
        cnx.close()

    def tearDown(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS local_data")
        cnx.close()

    @foreach_cnx(allow_local_infile=True)
    def test_load_csv(self):
        """LOAD DATA LOCAL INFILE must import the CSV fixture."""
        self._setup()
        cur = self.cnx.cursor()
        sql = "LOAD DATA LOCAL INFILE %s INTO TABLE local_data"
        cur.execute(sql, (self.data_file,))
        cur.execute("SELECT * FROM local_data")

        exp = [
            (1, 'c1_1', 'c2_1'), (2, 'c1_2', 'c2_2'),
            (3, 'c1_3', 'c2_3'), (4, 'c1_4', 'c2_4'),
            (5, 'c1_5', 'c2_5'), (6, 'c1_6', 'c2_6')]
        self.assertEqual(exp, cur.fetchall())

    @cnx_config(compress=True, allow_local_infile=True)
    @foreach_cnx()
    def test_load_csv_with_compress(self):
        """Same as test_load_csv, over a compressed connection."""
        self._setup()
        cur = self.cnx.cursor()
        sql = "LOAD DATA LOCAL INFILE %s INTO TABLE local_data"
        cur.execute(sql, (self.data_file,))
        cur.execute("SELECT * FROM local_data")

        exp = [
            (1, 'c1_1', 'c2_1'), (2, 'c1_2', 'c2_2'),
            (3, 'c1_3', 'c2_3'), (4, 'c1_4', 'c2_4'),
            (5, 'c1_5', 'c2_5'), (6, 'c1_6', 'c2_6')]
        self.assertEqual(exp, cur.fetchall())

    @foreach_cnx(allow_local_infile=True)
    def test_filenotfound(self):
        """A missing local file must raise a readable error."""
        self._setup()
        cur = self.cnx.cursor()
        sql = "LOAD DATA LOCAL INFILE %s INTO TABLE local_data"
        try:
            cur.execute(sql, (self.data_file + '_spam',))
        except (errors.InterfaceError, errors.DatabaseError) as exc:
            self.assertTrue(
                'not found' in str(exc) or 'could not be read' in str(exc),
                'Was: ' + str(exc))
        else:
            # Previously the test silently passed when no error was raised.
            self.fail("Expected an error for a non-existing local file")
class BugOra17002411(tests.MySQLConnectorTests):
    """BUG#17002411: LOAD DATA LOCAL INFILE FAILS WITH BIGGER FILES
    """
    def setUp(self):
        """Write a ~33000-row tab-separated data file to import."""
        self.data_file = os.path.join('tests', 'data', 'local_data_big.csv')
        self.exp_rows = 33000
        with open(self.data_file, 'w') as fp:
            i = 0
            while i < self.exp_rows:
                fp.write("{0}\t{1}\n".format('a' * 255, 'b' * 255))
                i += 1

    def _setup(self):
        """(Re)create the target table using the active test connection.

        The previous version also opened a throwaway MySQLConnection that
        was never used (and never closed); the cursor has always come from
        self.cnx supplied by @foreach_cnx.
        """
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS local_data")
        cur.execute(
            "CREATE TABLE local_data ("
            "id INT AUTO_INCREMENT KEY, "
            "c1 VARCHAR(255), c2 VARCHAR(255))"
        )

    def tearDown(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS local_data")
        os.unlink(self.data_file)
        cnx.close()

    @foreach_cnx(allow_local_infile=True)
    def test_load_csv(self):
        """All rows of the big file must be imported."""
        self._setup()
        cur = self.cnx.cursor()
        sql = "LOAD DATA LOCAL INFILE %s INTO TABLE local_data (c1, c2)"
        cur.execute(sql, (self.data_file,))
        cur.execute("SELECT COUNT(*) FROM local_data")
        self.assertEqual(self.exp_rows, cur.fetchone()[0])
@unittest.skipIf(tests.MYSQL_VERSION >= (8, 0, 1),
                 "BugOra17422299 not tested with MySQL version >= 8.0.1")
@unittest.skipIf(tests.MYSQL_VERSION <= (5, 7, 1),
                 "BugOra17422299 not tested with MySQL version 5.6")
class BugOra17422299(tests.MySQLConnectorTests):
    """BUG#17422299: cmd_shutdown fails with malformed connection packet
    """
    def setUp(self):
        self.config = tests.get_mysql_config()
        # First configured test server; the tests shut it down and restart it.
        self.mysql_server = tests.MYSQL_SERVERS[0]

    def tearDown(self):
        self.ensure_up()

    def ensure_up(self):
        # Start the MySQL server again
        if not self.mysql_server.check_running():
            self.mysql_server.start()
            if not self.mysql_server.wait_up():
                self.fail("Failed restarting MySQL server after test")

    def test_shutdown(self):
        """COM_SHUTDOWN without an explicit type must stop the server."""
        # NOTE(review): self.all_cnx_classes is provided by the base class,
        # not visible here.
        for cnx_class in self.all_cnx_classes:
            self.ensure_up()
            cnx = cnx_class(**self.config)
            try:
                cnx.cmd_shutdown()
            except mysql.connector.DatabaseError as err:
                self.fail("COM_SHUTDOWN failed: {0}".format(err))
            if not self.mysql_server.wait_down():
                self.fail("MySQL not shut down after COM_SHUTDOWN")

    def test_shutdown__with_type(self):
        """COM_SHUTDOWN with an explicit shutdown type must stop the server."""
        for cnx_class in self.all_cnx_classes:
            self.ensure_up()
            cnx = cnx_class(**self.config)
            try:
                cnx.cmd_shutdown(
                    constants.ShutdownType.SHUTDOWN_WAIT_ALL_BUFFERS)
            except mysql.connector.DatabaseError as err:
                self.fail("COM_SHUTDOWN failed: {0}".format(err))
            if not self.mysql_server.wait_down():
                self.fail("MySQL not shut down after COM_SHUTDOWN")
class BugOra17215197(tests.MySQLConnectorTests):
    """BUG#17215197: MYSQLCONNECTION.CURSOR(PREPARED=TRUE) NOT POSSIBLE
    """
    def _setup(self):
        """Create the test table and insert fixture rows."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS BugOra17215197")
        cur.execute("CREATE TABLE BugOra17215197 (c1 INT, c2 INT)")
        cur.executemany("INSERT INTO BugOra17215197 VALUES (%s, %s)",
                        [(1, 10), (2, 20), (3, 30)])
        cnx.commit()
        cnx.close()  # was leaked before; release the setup connection

    def tearDown(self):
        """Drop the test table."""
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS BugOra17215197")
        cnx.close()  # was leaked before; release the teardown connection

    @foreach_cnx()
    def test_prepared_argument(self):
        """cursor(prepared=True) must work and execute a prepared SELECT."""
        self._setup()
        cur = self.cnx.cursor(prepared=True)
        prep_stmt = "SELECT * FROM BugOra17215197 WHERE c1 = %s AND c2 = %s"
        exp = [(1, 10)]
        cur.execute(prep_stmt, (1, 10))
        self.assertEqual(exp, cur.fetchall())
class BugOra17414258(tests.MySQLConnectorTests):
    """BUG#17414258: IT IS ALLOWED TO CHANGE SIZE OF ACTIVE POOL
    """
    def setUp(self):
        """Prepare a pooled-connection configuration named 'test'."""
        cfg = tests.get_mysql_config()
        cfg['pool_name'] = 'test'
        cfg['pool_size'] = 3
        if tests.MYSQL_VERSION < (5, 7):
            cfg["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
        self.config = cfg

    def tearDown(self):
        # Remove pools created by test
        pool_name = self.config['pool_name']
        del mysql.connector._CONNECTION_POOLS[pool_name]

    def test_poolsize(self):
        """Re-using a pool name with a different pool_size must fail."""
        mysql.connector.connect(**self.config).close()
        resized = self.config.copy()
        resized['pool_size'] = self.config['pool_size'] + 1
        self.assertRaises(mysql.connector.PoolError,
                          mysql.connector.connect, **resized)
class Bug17578937(tests.MySQLConnectorTests):
    """CONNECTION POOL DOES NOT HANDLE A NOT AVAILABLE MYSQL SERVER"""

    def setUp(self):
        # First configured test server; the test stops and restarts it.
        self.mysql_server = tests.MYSQL_SERVERS[0]

    def tearDown(self):
        # Start the MySQL server again
        if not self.mysql_server.check_running():
            self.mysql_server.start()

            if not self.mysql_server.wait_up():
                self.fail("Failed restarting MySQL server after test")

    def test_get_connection(self):
        """Test reconnect once MySQL server is back

        To make the test case simpler, we create a pool which only has
        one connection in the queue. This way we can similuate getting a
        connection from a pool for which the MySQL server is not running.
        """
        config = tests.get_mysql_config().copy()
        if tests.MYSQL_VERSION < (5, 7):
            config["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
        # Short timeout so the down-server attempt fails quickly.
        config['connection_timeout'] = 2
        cnxpool = pooling.MySQLConnectionPool(
            pool_name='test', pool_size=1, **config)

        pcnx = cnxpool.get_connection()
        self.assertTrue(isinstance(pcnx, pooling.PooledMySQLConnection))
        pcnx.close()

        self.mysql_server.stop()
        if not self.mysql_server.wait_down():
            self.fail("MySQL not shut down; can not continue test")

        # With the server down, getting the pooled connection must raise.
        self.assertRaises(errors.InterfaceError, cnxpool.get_connection)

        self.mysql_server.start()
        if not self.mysql_server.wait_up():
            self.fail("MySQL started; can not continue test")

        # Server is back: the pool must hand out a working connection again.
        pcnx = cnxpool.get_connection()
        pcnx.close()
class BugOra17079344(tests.MySQLConnectorTests):
    """BUG#17079344: ERROR WITH GBK STRING WITH CHARACTERS ENCODED AS BACKSLASH
    """
    def setUp(self):
        # Create one table per multi-byte character set; these encodings can
        # contain 0x5C (backslash) as a trailing byte, which is the point of
        # the regression.
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor()

        for charset in ('gbk', 'sjis', 'big5'):
            tablename = charset + 'test'
            cur.execute("DROP TABLE IF EXISTS {0}".format(tablename))
            table = (
                "CREATE TABLE {table} ("
                "id INT AUTO_INCREMENT KEY, "
                "c1 VARCHAR(40)"
                ") CHARACTER SET '{charset}'"
            ).format(table=tablename, charset=charset)
            cur.execute(table)
        cnx.commit()
        cur.close()
        cnx.close()

    def tearDown(self):
        # Drop the per-charset tables.
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        for charset in ('gbk', 'sjis', 'big5'):
            tablename = charset + 'test'
            cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(tablename))
        cnx.close()

    def _test_charset(self, charset, data):
        # Round-trip each value through a connection using the given charset
        # and verify it comes back unchanged.
        config = tests.get_mysql_config()
        config['charset'] = charset
        config['use_unicode'] = True
        self.cnx = self.cnx.__class__(**config)

        tablename = charset + 'test'
        cur = self.cnx.cursor()

        cur.execute("TRUNCATE {0}".format(tablename))
        self.cnx.commit()

        insert = "INSERT INTO {0} (c1) VALUES (%s)".format(tablename)
        for value in data:
            cur.execute(insert, (value,))
        self.cnx.commit()

        cur.execute("SELECT id, c1 FROM {0} ORDER BY id".format(tablename))
        for row in cur:
            self.assertEqual(data[row[0] - 1], row[1])

        cur.close()
        self.cnx.close()

    @foreach_cnx()
    def test_gbk(self):
        # Second value deliberately contains literal backslashes.
        self._test_charset('gbk', [u'赵孟頫', u'赵\孟\頫\\', u'遜', ])

    @foreach_cnx()
    def test_sjis(self):
        # U+005C is the backslash itself.
        self._test_charset('sjis', ['\u005c'])

    @foreach_cnx()
    def test_big5(self):
        self._test_charset('big5', ['\u5C62'])
class BugOra17780576(tests.MySQLConnectorTests):
    """BUG#17780576: CHARACTER SET 'UTF8MB4' UNSUPPORTED
    """
    def tearDown(self):
        """Drop the utf8mb4 test table."""
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS utf8mb4test")
        cur.close()
        cnx.close()

    @foreach_cnx()
    def test_utf8mb4(self):
        """4-byte UTF-8 data must round-trip through a utf8mb4 table."""
        if tests.MYSQL_VERSION < (5, 5, 0):
            # Test only valid for MySQL 5.5.0 and later.
            return
        # (Removed an unused `config = tests.get_mysql_config()` local.)
        tablename = 'utf8mb4test'

        self.cnx.set_charset_collation('utf8mb4', 'utf8mb4_general_ci')
        cur = self.cnx.cursor()

        cur.execute("DROP TABLE IF EXISTS {0}".format(tablename))
        table = (
            "CREATE TABLE {table} ("
            "id INT AUTO_INCREMENT KEY, "
            "c1 VARCHAR(40) CHARACTER SET 'utf8mb4'"
            ") CHARACTER SET 'utf8mb4'"
        ).format(table=tablename)
        cur.execute(table)

        insert = "INSERT INTO {0} (c1) VALUES (%s)".format(tablename)
        # Emoji are 4-byte UTF-8 sequences, which plain utf8 cannot store.
        data = [u'😉😍', u'😃😊', u'😄😘😚', ]
        for value in data:
            cur.execute(insert, (value,))

        cur.execute("SELECT id, c1 FROM {0} ORDER BY id".format(tablename))
        for row in cur:
            self.assertEqual(data[row[0] - 1], row[1])

        cur.close()
        self.cnx.close()
class BugOra17573172(tests.MySQLConnectorTests):
    """BUG#17573172: MISSING SUPPORT FOR READ-ONLY TRANSACTIONS
    """
    def setUp(self):
        """Create the test table and insert fixture rows."""
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()
        self.cur.execute("DROP TABLE IF EXISTS BugOra17573172")
        self.cur.execute("CREATE TABLE BugOra17573172 (c1 INT, c2 INT)")
        self.cur.executemany("INSERT INTO BugOra17573172 VALUES (%s, %s)",
                             [(1, 10), (2, 20), (3, 30)])
        self.cnx.commit()

    def test_read_only(self):
        """start_transaction(readonly=True) must reject writes on servers
        that support it, and raise ValueError on servers that do not."""
        if self.cnx.get_server_version() < (5, 6, 5):
            self.assertRaises(ValueError, self.cnx.start_transaction,
                              readonly=True)
        else:
            self.cnx.start_transaction(readonly=True)
            self.assertTrue(self.cnx.in_transaction)
            # A second start_transaction while one is active must fail.
            self.assertRaises(errors.ProgrammingError,
                              self.cnx.start_transaction)
            query = "INSERT INTO BugOra17573172 VALUES(4, 40)"
            self.assertRaises(errors.ProgrammingError, self.cur.execute, query)
            self.cnx.rollback()

    def tearDown(self):
        """Drop the test table and release the connection."""
        self.cur.execute("DROP TABLE IF EXISTS BugOra17573172")
        self.cur.close()
        self.cnx.close()  # was leaked before; close the setUp connection
class BugOra17826833(tests.MySQLConnectorTests):
    """BUG#17826833: EXECUTEMANY() FOR INSERTS W/O VALUES
    """
    def setUp(self):
        """Create the employee and city tables used by the test."""
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.cursor = self.cnx.cursor()

        self.emp_tbl = 'Bug17826833_emp'
        self.cursor.execute("DROP TABLE IF EXISTS %s" % (self.emp_tbl))

        self.city_tbl = 'Bug17826833_city'
        self.cursor.execute("DROP TABLE IF EXISTS %s" % (self.city_tbl))

        create = ("CREATE TABLE %s ( "
                  "`id` int(11) NOT NULL, "
                  "`name` varchar(20) NOT NULL , "
                  "`phone` varchar(20), "
                  "PRIMARY KEY (`id`))" % (self.emp_tbl))
        self.cursor.execute(create)

        create = ("CREATE TABLE %s ( "
                  "`id` int(11) NOT NULL, "
                  "`name` varchar(20) NOT NULL, "
                  "PRIMARY KEY (`id`))" % (self.city_tbl))
        self.cursor.execute(create)

    def tearDown(self):
        """Drop both tables and release the connection."""
        self.cursor.execute("DROP TABLE IF EXISTS {0}".format(self.city_tbl))
        self.cursor.execute("DROP TABLE IF EXISTS {0}".format(self.emp_tbl))
        self.cursor.close()
        self.cnx.close()  # was leaked before; close the setUp connection

    def test_executemany(self):
        """executemany() must accept INSERT ... SELECT (no VALUES clause)."""
        stmt = "INSERT INTO {0} (id,name) VALUES (%s,%s)".format(
            self.city_tbl)
        self.cursor.executemany(stmt, [(1, 'ABC'), (2, 'CDE'), (3, 'XYZ')])

        # INSERT ... SELECT with placeholders; the executemany() statement
        # regex must not require a literal VALUES (...) clause.
        query = ("INSERT INTO %s (id, name, phone)"
                 "SELECT id,name,%%s FROM %s WHERE name=%%s") % (self.emp_tbl,
                                                                 self.city_tbl)
        try:
            self.cursor.executemany(query, [('4567', 'CDE'), ('1234', 'XYZ')])
            stmt = "SELECT * FROM {0}".format(self.emp_tbl)
            self.cursor.execute(stmt)
            self.assertEqual([(2, 'CDE', '4567'), (3, 'XYZ', '1234')],
                             self.cursor.fetchall(), "INSERT ... SELECT failed")
        except errors.ProgrammingError as err:
            self.fail("Regular expression fails with executemany(): %s" %
                      err)
class BugOra18040042(tests.MySQLConnectorTests):
    """BUG#18040042: Reset session closing pooled Connection"""

    def test_clear_session(self):
        """By default, returning a connection to the pool resets session
        state, so a user variable must not survive a close/get cycle."""
        pool_config = tests.get_mysql_config()
        if tests.MYSQL_VERSION < (5, 7):
            pool_config["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
        cnxpool = pooling.MySQLConnectionPool(
            pool_name='test', pool_size=1, **pool_config)
        pcnx = cnxpool.get_connection()
        exp_session_id = pcnx.connection_id
        pcnx.cmd_query("SET @ham = 2")
        pcnx.close()

        pcnx = cnxpool.get_connection()
        pcnx.cmd_query("SELECT @ham")
        # Same underlying server session is reused ...
        self.assertEqual(exp_session_id, pcnx.connection_id)
        # ... but the variable must be gone.
        # NOTE(review): compares against the string '2' here but the int 2
        # in test_do_not_clear_session below -- confirm row value types.
        self.assertNotEqual(('2',), pcnx.get_rows()[0][0])

    def test_do_not_clear_session(self):
        """With pool_reset_session=False, session state must survive."""
        cnxpool = pooling.MySQLConnectionPool(
            pool_name='test', pool_size=1, pool_reset_session=False,
            **tests.get_mysql_config())
        pcnx = cnxpool.get_connection()
        exp_session_id = pcnx.connection_id
        pcnx.cmd_query("SET @ham = 2")
        pcnx.close()

        pcnx = cnxpool.get_connection()
        pcnx.cmd_query("SELECT @ham")
        self.assertEqual(exp_session_id, pcnx.connection_id)
        self.assertEqual((2,), pcnx.get_rows()[0][0])
class BugOra17965619(tests.MySQLConnectorTests):
    """BUG#17965619: CALLPROC FUNCTION WITH BYTES PARAMETERS
    """
    def setUp(self):
        """Create an empty stored procedure taking a VARBINARY argument."""
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cnx.cmd_query("DROP PROCEDURE IF EXISTS `proce_with_binary`")
        self.cnx.cmd_query("CREATE PROCEDURE `proce_with_binary` "
                           "(data VARBINARY(512)) BEGIN END;")

    def tearDown(self):
        """Drop the procedure and close the connection."""
        self.cnx.cmd_query("DROP PROCEDURE IF EXISTS `proce_with_binary`")
        self.cnx.close()

    def test_callproc(self):
        """Bytes passed with an explicit BINARY type must round-trip."""
        cursor = self.cnx.cursor()
        payload = b'\xf0\xf1\xf2'
        result = cursor.callproc('proce_with_binary', ((payload, 'BINARY'),))
        self.assertEqual((payload,), result)
        cursor.close()
class BugOra17054848(tests.MySQLConnectorTests):
    """BUG#17054848: USE OF SSL SHOULD NOT REQUIRE SSL_CERT AND SSL_KEY
    """
    def setUp(self):
        """Create an 'ssluser' account that requires SSL (no client cert)."""
        config = tests.get_mysql_config()
        self.admin_cnx = connection.MySQLConnection(**config)
        if tests.MYSQL_VERSION < (5, 7, 21):
            self.admin_cnx.cmd_query(
                "GRANT ALL ON %s.* TO 'ssluser'@'%s' REQUIRE SSL" % (
                    config['database'], config['host']))
        else:
            self.admin_cnx.cmd_query(
                "CREATE USER 'ssluser'@'{host}'".format(
                    db=config['database'],
                    host=tests.get_mysql_config()['host']))
            self.admin_cnx.cmd_query(
                "GRANT ALL ON {db}.* TO 'ssluser'@'{host}'".format(
                    db=config['database'],
                    host=tests.get_mysql_config()['host']))
            self.admin_cnx.cmd_query(
                "ALTER USER 'ssluser'@'{host}' REQUIRE SSL".format(
                    db=config['database'],
                    host=tests.get_mysql_config()['host']))

    def tearDown(self):
        """Remove the ssluser account."""
        config = tests.get_mysql_config()
        self.admin_cnx.cmd_query("DROP USER 'ssluser'@'%s'" % (
            config['host']))

    def test_ssl(self):
        """Connecting with only ssl_ca (no cert/key) must succeed and use SSL."""
        if not tests.SSL_AVAILABLE:
            tests.MESSAGES['WARNINGS'].append(
                # Fixed copy-paste: the warning referred to BugOra16217667.
                "BugOra17054848 test failed. Python lacks SSL support.")
            return

        ssl_ca = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'))
        ssl_key = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_client_key.pem'))

        config = tests.get_mysql_config()
        config['user'] = 'ssluser'
        config['password'] = ''
        config['unix_socket'] = None
        config['ssl_verify_cert'] = False
        config.update({
            'ssl_ca': ssl_ca,
            'ssl_cipher': 'AES256-SHA',
        })

        try:
            cnx = connection.MySQLConnection(**config)
        except errors.ProgrammingError:
            self.fail("Failed authentication with SSL")

        cnx.cmd_query("SHOW STATUS LIKE 'Ssl_cipher'")
        res = cnx.get_rows()[0][0]
        self.assertTrue(res != '')
        cnx.close()  # was leaked before; release the test connection
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0),
                 "BugOra16217765 not tested with MySQL version < 5.6.7. "
                 "Not working with cross version MySQL lib< 8.0.")
class BugOra16217765(tests.MySQLConnectorTests):
    """BUG#16217765: Fix authentication plugin support
    """

    # Test accounts keyed by username; each entry names the authentication
    # plugin the account is created with.  The *_np entries use an empty
    # password.
    users = {
        'sha256user': {
            'username': 'sha256user',
            'password': 'sha256P@ss',
            'auth_plugin': 'sha256_password',
        },
        'nativeuser': {
            'username': 'nativeuser',
            'password': 'nativeP@ss',
            'auth_plugin': 'mysql_native_password',
        },
        'sha256user_np': {
            'username': 'sha256user_np',
            'password': '',
            'auth_plugin': 'sha256_password',
        },
        'nativeuser_np': {
            'username': 'nativeuser_np',
            'password': '',
            'auth_plugin': 'mysql_native_password',
        },
    }

    def _create_user(self, cnx, user, password, host, database,
                     plugin):
        """(Re)create ``user``@``host`` identified with ``plugin``.

        Drops any stale account first, sets the password using the syntax
        appropriate for the server version, and grants all privileges on
        ``database``.
        """
        self._drop_user(cnx, user, host)
        create_user = ("CREATE USER '{user}'@'{host}' "
                       "IDENTIFIED WITH {plugin}")
        cnx.cmd_query(create_user.format(user=user, host=host, plugin=plugin))

        # Before 8.0.5 the password hashing scheme used by SET PASSWORD /
        # ALTER USER is selected via the old_passwords session variable
        # (2 = SHA-256, 0 = native).
        if tests.MYSQL_VERSION[0:3] < (8, 0, 5):
            if plugin == 'sha256_password':
                cnx.cmd_query("SET old_passwords = 2")
            else:
                cnx.cmd_query("SET old_passwords = 0")

        # SET PASSWORD ... = PASSWORD('...') is not available on newer
        # servers; 5.7.5+ uses ALTER USER ... IDENTIFIED BY instead.
        if tests.MYSQL_VERSION < (5, 7, 5):
            passwd = ("SET PASSWORD FOR '{user}'@'{host}' = "
                      "PASSWORD('{password}')").format(user=user, host=host,
                                                       password=password)
        else:
            passwd = ("ALTER USER '{user}'@'{host}' IDENTIFIED BY "
                      "'{password}'").format(user=user, host=host,
                                             password=password)
        cnx.cmd_query(passwd)

        grant = "GRANT ALL ON {database}.* TO '{user}'@'{host}'"
        cnx.cmd_query(grant.format(database=database, user=user, host=host))

    def _drop_user(self, cnx, user, host):
        """Drop ``user``@``host``; a missing user is silently ignored.

        NOTE(review): the ``cnx`` argument is unused — the statement runs on
        ``self.admin_cnx``.  Confirm whether that is intentional.
        """
        try:
            self.admin_cnx.cmd_query("DROP USER '{user}'@'{host}'".format(
                host=host,
                user=user))
        except errors.DatabaseError:
            # It's OK when drop fails
            pass

    def setUp(self):
        """Open an admin connection and create all test accounts."""
        self.errmsg = "AuthPlugin {0} failed: {1}"
        config = tests.get_mysql_config()
        self.host = config['host']
        self.admin_cnx = connection.MySQLConnection(**config)

        for key, user in self.users.items():
            self._create_user(self.admin_cnx, user['username'],
                              user['password'],
                              self.host,
                              config['database'],
                              plugin=user['auth_plugin'])

    def tearDown(self):
        """Remove the accounts created in setUp."""
        for key, user in self.users.items():
            self._drop_user(self.admin_cnx, user['username'], self.host)

    @unittest.skipIf(tests.MYSQL_VERSION < (5, 6, 6),
                     "MySQL {0} does not support sha256_password auth".format(
                         tests.MYSQL_VERSION_TXT))
    @unittest.skipIf(
        not tests.SSL_AVAILABLE,
        "BugOra16217765.test_sha256 test skipped: SSL support not available")
    def test_sha256(self):
        """sha256_password auth must work over SSL, including change-user."""
        config = tests.get_mysql_config()
        config['unix_socket'] = None
        config.update({
            'ssl_ca': tests.SSL_CA,
            'ssl_cert': tests.SSL_CERT,
            'ssl_key': tests.SSL_KEY,
            'ssl_cipher': 'AES256-SHA',
        })

        user = self.users['sha256user']
        config['user'] = user['username']
        config['password'] = user['password']
        # A negative flag value disables that capability flag.
        config['client_flags'] = [constants.ClientFlag.PLUGIN_AUTH,
                                  -constants.ClientFlag.CONNECT_ARGS]
        config['auth_plugin'] = user['auth_plugin']
        try:
            cnx = connection.MySQLConnection(**config)
        except Exception as exc:
            import traceback
            traceback.print_exc()
            self.fail(self.errmsg.format(config['auth_plugin'], exc))

        try:
            cnx.cmd_change_user(config['user'], config['password'])
        except:
            self.fail("Changing user using sha256_password auth failed "
                      "with pure Python connector. \nflags on cnx: {} \n"
                      "".format(config['client_flags']))

        # Repeat the same checks against the C extension when available.
        if CMySQLConnection:
            try:
                cnx = CMySQLConnection(**config)
            except Exception as exc:
                import traceback
                traceback.print_exc()
                self.fail(self.errmsg.format(config['auth_plugin'], exc))
            try:
                cnx.cmd_change_user(config['user'], config['password'])
            except:
                self.fail("Changing user using sha256_password auth failed "
                          "with CExtension")

    @unittest.skipIf(tests.MYSQL_VERSION < (5, 6, 6),
                     "MySQL {0} does not support sha256_password auth".format(
                         tests.MYSQL_VERSION_TXT))
    def test_sha256_nonssl(self):
        """sha256_password over a non-SSL connection must be rejected."""
        config = tests.get_mysql_config()
        config['unix_socket'] = None
        config['ssl_disabled'] = True
        config['client_flags'] = [constants.ClientFlag.PLUGIN_AUTH]

        user = self.users['sha256user']
        config['user'] = user['username']
        config['password'] = user['password']
        config['auth_plugin'] = user['auth_plugin']
        self.assertRaises(errors.InterfaceError, connection.MySQLConnection,
                          **config)
        if CMySQLConnection:
            self.assertRaises(errors.InterfaceError, CMySQLConnection, **config)

    @unittest.skipIf(tests.MYSQL_VERSION < (5, 5, 7),
                     "MySQL {0} does not support authentication plugins".format(
                         tests.MYSQL_VERSION_TXT))
    def test_native(self):
        """mysql_native_password auth must succeed on both implementations."""
        config = tests.get_mysql_config()
        config['unix_socket'] = None

        user = self.users['nativeuser']
        config['user'] = user['username']
        config['password'] = user['password']
        config['client_flags'] = [constants.ClientFlag.PLUGIN_AUTH]
        config['auth_plugin'] = user['auth_plugin']
        try:
            cnx = connection.MySQLConnection(**config)
        except Exception as exc:
            self.fail(self.errmsg.format(config['auth_plugin'], exc))

        if CMySQLConnection:
            try:
                cnx = CMySQLConnection(**config)
            except Exception as exc:
                self.fail(self.errmsg.format(config['auth_plugin'], exc))
class BugOra18144971(tests.MySQLConnectorTests):
    """BUG#18144971 ERROR WHEN USING UNICODE ARGUMENTS IN PREPARED STATEMENT"""

    def setUp(self):
        # Only table names here; the tables themselves are created lazily
        # by _setup() from inside the test.
        self.table = 'Bug18144971'
        self.table_cp1251 = 'Bug18144971_cp1251'

    def _setup(self):
        """Create one utf8 and one cp1251 test table."""
        config = tests.get_mysql_config()
        config['use_unicode'] = True
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.table))
        create = ("CREATE TABLE {0} ( "
                  "`id` int(11) NOT NULL, "
                  "`name` varchar(40) NOT NULL , "
                  "`phone` varchar(40), "
                  "PRIMARY KEY (`id`))"
                  " CHARACTER SET 'utf8'".format(self.table))
        cur.execute(create)
        cur.execute(
            "DROP TABLE IF EXISTS {0}".format(self.table_cp1251)
        )
        create = ("CREATE TABLE {0} ( "
                  "`id` int(11) NOT NULL, "
                  "`name` varchar(40) NOT NULL , "
                  "`phone` varchar(40), "
                  "PRIMARY KEY (`id`))"
                  " CHARACTER SET 'cp1251'".format(self.table_cp1251))
        cur.execute(create)
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        config['use_unicode'] = True
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.table))
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.table_cp1251))

    @cnx_config(use_unicode=True)
    @foreach_cnx()
    def test_prepared_statement(self):
        """Prepared statements must accept bytes and non-ASCII unicode args.

        First inserts bytes into the utf8 table, then reconnects with the
        cp1251 charset and inserts cyrillic text into the cp1251 table.
        """
        self._setup()
        cur = self.cnx.cursor(prepared=True)
        stmt = "INSERT INTO {0} VALUES (?,?,?)".format(
            self.table)
        data = [(1, b'bytes', '1234'), (2, u'aaaаффф', '1111')]
        # Bytes input is expected back as str when use_unicode is enabled.
        exp = [(1, 'bytes', '1234'), (2, u'aaaаффф', '1111')]
        cur.execute(stmt, data[0])
        self.cnx.commit()
        cur.execute("SELECT * FROM {0}".format(self.table))
        self.assertEqual(cur.fetchall(), [exp[0]])

        # Reconnect with the cp1251 charset for the second table.
        config = tests.get_mysql_config()
        config['charset'] = 'cp1251'
        self.cnx = self.cnx.__class__(**config)
        cur = self.cnx.cursor(prepared=True)
        stmt = "INSERT INTO {0} VALUES (?,?,?)".format(
            self.table_cp1251)
        cur.execute(stmt, data[1])
        self.cnx.commit()
        cur.execute("SELECT * FROM {0}".format(self.table_cp1251))
        self.assertEqual(cur.fetchall(), [exp[1]])
class BugOra18389196(tests.MySQLConnectorTests):
    """BUG#18389196: INSERTING PARAMETER MULTIPLE TIMES IN STATEMENT
    """

    def setUp(self):
        """Create the scratch table used by the test."""
        self.tbl = 'Bug18389196'
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cursor = cnx.cursor()
        cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)
        cursor.execute("CREATE TABLE %s ( "
                       "`id` int(11) NOT NULL, "
                       "`col1` varchar(20) NOT NULL, "
                       "`col2` varchar(20) NOT NULL, "
                       "PRIMARY KEY (`id`))" % self.tbl)
        cnx.close()

    def tearDown(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS %s" % self.tbl)
        cnx.close()

    @foreach_cnx()
    def test_parameters(self):
        """The same named parameter may appear more than once in one query."""
        self.cnx.cmd_query("TRUNCATE {0}".format(self.tbl))
        cursor = self.cnx.cursor()
        stmt = ("INSERT INTO {0} (id,col1,col2) VALUES "
                "(%(id)s,%(name)s,%(name)s)".format(
                    self.tbl))
        try:
            cursor.execute(stmt, {'id': 1, 'name': 'ABC'})
        except errors.ProgrammingError as err:
            self.fail("Inserting parameter multiple times in a statement "
                      "failed: %s" % err)
        cursor.close()
@unittest.skipIf(tests.MYSQL_VERSION >= (5, 7, 5),
                 "MySQL {0} does not support old password auth".format(
                     tests.MYSQL_VERSION_TXT))
class BugOra18415927(tests.MySQLConnectorTests):
    """BUG#18415927: AUTH_RESPONSE VARIABLE INCREMENTED WITHOUT BEING DEFINED
    """

    # Single test account used by this case.
    user = {
        'username': 'nativeuser',
        'password': 'nativeP@ss',
    }

    def setUp(self):
        """Create the test account, dropping any stale copy first."""
        config = tests.get_mysql_config()
        host = config['host']
        database = config['database']
        cnx = connection.MySQLConnection(**config)
        try:
            cnx.cmd_query("DROP USER '{user}'@'{host}'".format(
                host=host,
                user=self.user['username']))
        except errors.Error:
            # Narrowed from a bare except: only ignore connector/server
            # errors (user did not exist); anything else still propagates.
            pass

        create_user = "CREATE USER '{user}'@'{host}' "
        cnx.cmd_query(create_user.format(user=self.user['username'],
                                         host=host))

        # SET PASSWORD ... = PASSWORD() syntax is valid here because the
        # whole class is skipped on servers >= 5.7.5.
        passwd = ("SET PASSWORD FOR '{user}'@'{host}' = "
                  "PASSWORD('{password}')").format(
            user=self.user['username'], host=host,
            password=self.user['password'])
        cnx.cmd_query(passwd)

        grant = "GRANT ALL ON {database}.* TO '{user}'@'{host}'"
        cnx.cmd_query(grant.format(database=database,
                                   user=self.user['username'],
                                   host=host))

    def tearDown(self):
        """Remove the account created in setUp."""
        config = tests.get_mysql_config()
        host = config['host']
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP USER '{user}'@'{host}'".format(
            host=host,
            user=self.user['username']))

    def test_auth_response(self):
        """Connecting with SECURE_CONNECTION and CONNECT_WITH_DB disabled
        must not raise (auth_response used to be undefined on this path)."""
        config = tests.get_mysql_config()
        config['unix_socket'] = None
        config['user'] = self.user['username']
        config['password'] = self.user['password']
        # Negative flag values disable those capability flags.
        config['client_flags'] = [-constants.ClientFlag.SECURE_CONNECTION,
                                  -constants.ClientFlag.CONNECT_WITH_DB]
        try:
            cnx = connection.MySQLConnection(**config)
        except Exception as exc:
            self.fail("Connection failed: {0}".format(exc))
class BugOra18527437(tests.MySQLConnectorTests):
    """BUG#18527437: UNITTESTS FAILING WHEN --host=::1 IS PASSED AS ARGUMENT
    """

    def test_poolname(self):
        """Pool name generation must work with an IPv6 host ('::1')."""
        config = tests.get_mysql_config()
        config['host'] = '::1'
        config['pool_size'] = 3
        expected = '_'.join([str(config[key]) for key in
                             ('host', 'port', 'user', 'database')])
        self.assertEqual(expected, pooling.generate_pool_name(**config))

    def test_custom_poolname(self):
        """Custom pool names may contain characters such as ':'."""
        pool = pooling.MySQLConnectionPool(pool_name='ham:spam',
                                           **tests.get_mysql_config())
        self.assertEqual('ham:spam', pool._pool_name)
        pool._remove_connections()
class BugOra18694096(tests.MySQLConnectorTests):
    """
    BUG#18694096: INCORRECT CONVERSION OF NEGATIVE TIMEDELTA
    """

    # (timedelta value, human-readable MySQL TIME equivalent) pairs.
    cases = [
        (timedelta(hours=0, minutes=0, seconds=1, microseconds=0),
         '00:00:01',),
        (timedelta(hours=0, minutes=0, seconds=-1, microseconds=0),
         '-00:00:01'),
        (timedelta(hours=0, minutes=1, seconds=1, microseconds=0),
         '00:01:01'),
        (timedelta(hours=0, minutes=-1, seconds=-1, microseconds=0),
         '-00:01:01'),
        (timedelta(hours=1, minutes=1, seconds=1, microseconds=0),
         '01:01:01'),
        (timedelta(hours=-1, minutes=-1, seconds=-1, microseconds=0),
         '-01:01:01'),
        (timedelta(days=3, seconds=86401),
         '96:00:01'),
        (timedelta(days=-3, seconds=86401),
         '-47:59:59'),
    ]

    # Cases for MySQL 5.6.4 and higher (fractional seconds support)
    cases_564 = [
        (timedelta(hours=0, minutes=0, seconds=0, microseconds=1),
         '00:00:00.000001'),
        (timedelta(hours=0, minutes=0, seconds=0, microseconds=-1),
         '-00:00:00.000001'),
        (timedelta(days=2, hours=0, microseconds=1),
         '48:00:00.000001'),
        (timedelta(days=-3, seconds=86399, microseconds=999999),
         '-48:00:00.000001'),
    ]

    def setUp(self):
        """Create the TIME table; include fractional cases on 5.6.4+."""
        config = tests.get_mysql_config()
        self.cnx = mysql.connector.connect(**config)
        self.tbl = 'times'
        self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))

        if tests.MYSQL_VERSION >= (5, 6, 4):
            create = "CREATE TABLE {0} (c1 TIME(6))".format(self.tbl)
            # Rebind instead of using '+=': augmented assignment would
            # mutate the class-level 'cases' list in place, making
            # cases_564 accumulate on every setUp() call.  Concatenation
            # creates a fresh instance attribute and leaves the class
            # attribute untouched.
            self.cases = self.cases + self.cases_564
        else:
            create = "CREATE TABLE {0} (c1 TIME)".format(self.tbl)
        self.cnx.cmd_query(create)

    def tearDown(self):
        if self.cnx:
            self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))

    def test_timedelta(self):
        """Round-trip each timedelta through the TIME column.

        Note that both _timedelta_to_mysql and _TIME_to_python are tested.
        """
        cur = self.cnx.cursor()

        # Following uses _timedelta_to_mysql to insert data
        data = [(case[0],) for case in self.cases]
        cur.executemany("INSERT INTO {0} (c1) VALUES (%s)".format(self.tbl),
                        data)
        self.cnx.commit()

        # We use _TIME_to_python to convert back to Python
        cur.execute("SELECT c1 FROM {0}".format(self.tbl))
        for i, row in enumerate(cur.fetchall()):
            self.assertEqual(self.cases[i][0], row[0],
                             "Incorrect timedelta for {0}, was {1!r}".format(
                                 self.cases[i][1], row[0]))
class BugOra18220593(tests.MySQLConnectorTests):
    """BUG#18220593 MYSQLCURSOR.EXECUTEMANY() DOESN'T LIKE UNICODE OPERATIONS
    """

    def setUp(self):
        """Create a utf8 table whose name itself is non-ASCII."""
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = self.cnx.cursor()
        self.table = u"⽃⽄⽅⽆⽇⽈⽉⽊"
        self.cur.execute(u"DROP TABLE IF EXISTS {0}".format(self.table))
        self.cur.execute(u"CREATE TABLE {0} (c1 VARCHAR(100)) "
                         u"CHARACTER SET 'utf8'".format(self.table))

    def test_unicode_operation(self):
        """executemany() must handle a unicode statement and unicode rows."""
        rows = [('database',), (u'データベース',), (u'데이터베이스',)]
        self.cur.executemany(u"INSERT INTO {0} VALUES (%s)".format(
            self.table), rows)
        self.cnx.commit()
        self.cur.execute(u"SELECT c1 FROM {0}".format(self.table))
        self.assertEqual(self.cur.fetchall(), rows)

    def tearDown(self):
        self.cur.execute(u"DROP TABLE IF EXISTS {0}".format(self.table))
        self.cur.close()
        self.cnx.close()
class BugOra14843456(tests.MySQLConnectorTests):
    """BUG#14843456: UNICODE USERNAME AND/OR PASSWORD FAILS
    """

    def setUp(self):
        """Create accounts with ASCII and non-ASCII credentials."""
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.cursor = self.cnx.cursor()

        # Over a Unix socket the server sees the client as 'localhost',
        # so the accounts must be created for that host.
        if config['unix_socket'] and os.name != 'nt':
            self.host = 'localhost'
        else:
            self.host = config['host']

        grant = u"CREATE USER '{user}'@'{host}' IDENTIFIED BY '{password}'"

        # (username, password) pairs; the second pair starts with U+0141.
        self._credentials = [
            (u'Herne', u'Herne'),
            (u'\u0141owicz', u'\u0141owicz'),
        ]
        for user, password in self._credentials:
            self.cursor.execute(grant.format(
                user=user, host=self.host, password=password))

    def tearDown(self):
        """Drop the accounts created in setUp."""
        for user, password in self._credentials:
            self.cursor.execute(u"DROP USER '{user}'@'{host}'".format(
                user=user, host=self.host))

    def test_unicode_credentials(self):
        """Connecting with unicode username/password must not fail."""
        config = tests.get_mysql_config()
        for user, password in self._credentials:
            config['user'] = user
            config['password'] = password
            config['database'] = None
            try:
                cnx = connection.MySQLConnection(**config)
            except (UnicodeDecodeError, errors.InterfaceError):
                self.fail('Failed using unicode username or password')
            else:
                cnx.close()
class Bug499410(tests.MySQLConnectorTests):
    """lp:499410 Disabling unicode does not work"""

    def test_use_unicode(self):
        """use_unicode=False must be reflected on the connection object."""
        config = tests.get_mysql_config()
        config['use_unicode'] = False
        cnx = connection.MySQLConnection(**config)
        self.assertEqual(False, cnx._use_unicode)
        cnx.close()

    @cnx_config(use_unicode=False, charset='greek')
    @foreach_cnx()
    def test_charset(self):
        """With unicode disabled rows come back as raw bytes; after
        re-enabling unicode the same rows decode using the 'greek' charset."""
        charset = 'greek'
        cur = self.cnx.cursor()
        data = [b'\xe1\xed\xf4\xdf\xef']  # Bye in Greek
        exp_unicode = [(u'\u03b1\u03bd\u03c4\u03af\u03bf',), ]
        exp_nonunicode = [(data[0],)]

        tbl = '{0}test'.format(charset)

        try:
            cur.execute("DROP TABLE IF EXISTS {0}".format(tbl))
            cur.execute(
                "CREATE TABLE {0} (c1 VARCHAR(60)) charset={1}".format(
                    tbl, charset))
        except Exception:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed; any database error still fails the test.
            self.fail("Failed creating test table.")

        stmt = u'INSERT INTO {0} VALUES (%s)'.format(tbl)
        try:
            for line in data:
                cur.execute(stmt, (line,))
        except Exception as exc:
            self.fail("Failed populating test table: {0}".format(str(exc)))

        cur.execute("SELECT * FROM {0}".format(tbl))
        res_nonunicode = cur.fetchall()
        self.cnx.set_unicode(True)
        cur.execute("SELECT * FROM {0}".format(tbl))
        res_unicode = cur.fetchall()

        try:
            cur.execute('DROP TABLE IF EXISTS {0}'.format(tbl))
        except Exception:
            # Narrowed from a bare except (see above).
            self.fail("Failed cleaning up test table.")

        self.assertEqual(exp_nonunicode, res_nonunicode)
        self.assertEqual(exp_unicode, res_unicode)
class BugOra18742429(tests.MySQLConnectorTests):
    """BUG#18742429: CPY FAILS WHEN QUERYING LARGE NUMBER OF COLUMNS
    """

    def setUp(self):
        """Create a table with 1000 INT columns (col0 .. col999)."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        self.tbl = 'Bug18742429'
        cnx.cmd_query("DROP TABLE IF EXISTS %s" % self.tbl)
        create = 'CREATE TABLE {0}({1})'.format(self.tbl, ','.join(
            ['col'+str(i)+' INT(10)' for i in range(1000)]))
        cnx.cmd_query(create)
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cnx.close()

    @foreach_cnx(connection.MySQLConnection)
    def test_columns(self):
        """Select a 1000-column row via a prepared statement.

        Even-indexed columns hold their index, odd-indexed columns NULL.
        """
        cur = self.cnx.cursor()
        cur.execute('TRUNCATE TABLE {0}'.format(self.tbl))
        stmt = "INSERT INTO {0} VALUES({1})".format(self.tbl, ','.join(
            [str(i) if i%2==0 else 'NULL' for i in range(1000)]
        ))
        exp = tuple(i if i%2==0 else None for i in range(1000))
        cur.execute(stmt)
        cur = self.cnx.cursor(prepared=True)
        stmt = 'SELECT * FROM {0} WHERE col0=?'.format(self.tbl)
        cur.execute(stmt, (0,))
        self.assertEqual(exp, cur.fetchone())
class BugOra19164627(tests.MySQLConnectorTests):
    """BUG#19164627: Cursor tries to decode LINESTRING data as utf-8
    """

    def setUp(self):
        """Create a table with a LINESTRING column in an ascii charset."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        self.tbl = 'BugOra19164627'
        cnx.cmd_query("DROP TABLE IF EXISTS %s" % self.tbl)
        cnx.cmd_query("CREATE TABLE {0} ( "
                      "id SERIAL PRIMARY KEY AUTO_INCREMENT NOT NULL, "
                      "line LINESTRING NOT NULL "
                      ") DEFAULT CHARSET=ascii".format(self.tbl))
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cnx.close()

    @foreach_cnx()
    def test_linestring(self):
        """Geometry data must be returned as raw bytes, never decoded."""
        cur = self.cnx.cursor()
        cur.execute('TRUNCATE TABLE {0}'.format(self.tbl))
        cur.execute('INSERT IGNORE INTO {0} (id, line) '
                    'VALUES (0,LINESTRING(POINT(0, 0), POINT(0, 1)))'.format(
                        self.tbl
                    ))
        cur.execute("SELECT * FROM {0} LIMIT 1".format(self.tbl))
        # Expected value is the server's binary geometry encoding of the
        # inserted linestring, byte for byte.
        self.assertEqual(cur.fetchone(), (1, b'\x00\x00\x00\x00\x01\x02\x00\x00'
                                             b'\x00\x02\x00\x00\x00\x00\x00\x00'
                                             b'\x00\x00\x00\x00\x00\x00\x00\x00'
                                             b'\x00\x00\x00\x00\x00\x00\x00\x00'
                                             b'\x00\x00\x00\x00\x00\x00\x00\x00'
                                             b'\x00\x00\x00\xf0?', ))
        cur.close()
class BugOra19225481(tests.MySQLConnectorTests):
    """BUG#19225481: FLOATING POINT INACCURACY WITH PYTHON v2
    """

    def setUp(self):
        """Create a scratch table with a single DOUBLE column."""
        self.tbl = 'Bug19225481'
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cnx.cmd_query('CREATE TABLE {0} (col1 DOUBLE)'.format(self.tbl))
        cnx.close()

    def tearDown(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cnx.close()

    @foreach_cnx()
    def test_columns(self):
        """DOUBLE values must round-trip without losing precision."""
        self.cnx.cmd_query("TRUNCATE {0}".format(self.tbl))
        cursor = self.cnx.cursor()
        rows = [(number,) for number in (123.123456789987,
                                         234.234,
                                         12.12,
                                         111.331,
                                         0.0,
                                         -99.99999900099)]
        cursor.executemany("INSERT INTO {0} VALUES(%s)".format(self.tbl),
                           rows)
        cursor.execute("SELECT * FROM {0}".format(self.tbl))
        self.assertEqual(rows, cursor.fetchall())
class BugOra19169990(tests.MySQLConnectorTests):
    """BUG#19169143: Issue with compressed cnx using Python 2
    """

    @cnx_config(compress=True)
    @foreach_cnx()
    def test_compress(self):
        """Send/receive a large payload over a compressed connection for
        several character sets.

        self.config is supplied by the cnx_config/foreach_cnx decorators.
        """
        for charset in ('utf8', 'latin1', 'latin7'):
            self.config['charset'] = charset
            try:
                # Reconnect with the same connection class but new charset.
                self.cnx = self.cnx.__class__(**self.config)
                cur = self.cnx.cursor()
                # Payload large enough to force compressed packets.
                cur.execute("SELECT %s", ('mysql'*10000,))
            except TypeError:
                traceback.print_exc()
                self.fail("Failed setting up compressed cnx using {0}".format(
                    charset
                ))
            except errors.Error:
                self.fail("Failed sending/retrieving compressed data")
        self.cnx.close()
class BugOra19184025(tests.MySQLConnectorTests):
    """BUG#19184025: FIRST NULL IN ROW RETURNS REST OF ROW AS NONE
    """

    def setUp(self):
        """Create a table whose second column has a non-NULL default."""
        self.tbl = 'Bug19184025'
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cnx.cmd_query(
            "CREATE TABLE {0} (c1 INT, c2 INT NOT NULL DEFAULT 2)".format(
                self.tbl))

    def tearDown(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cnx.close()

    @foreach_cnx()
    def test_row_to_python(self):
        """A leading NULL must not blank out the remaining columns."""
        self.cnx.cmd_query("TRUNCATE {0}".format(self.tbl))
        cursor = self.cnx.cursor()
        cursor.execute("INSERT INTO {0} (c1) VALUES (NULL)".format(self.tbl))
        cursor.execute("SELECT * FROM {0}".format(self.tbl))
        self.assertEqual((None, 2), cursor.fetchone())
        cursor.close()
class BugOra19170287(tests.MySQLConnectorTests):
    """BUG#19170287: DUPLICATE OPTION_GROUPS RAISING ERROR WITH PYTHON 3
    """

    def test_duplicate_groups(self):
        """An option file repeating the same group must merge, not raise."""
        opt_file = os.path.join('tests', 'data', 'option_files',
                                'dup_groups.cnf')
        expected = {
            u'password': u'mypass',
            u'user': u'mysql',
            u'database': u'duplicate_data',
            u'port': 10000
        }
        self.assertEqual(expected, read_option_files(option_files=opt_file))
class BugOra19169143(tests.MySQLConnectorTests):
    """BUG#19169143: FAILURE IN RAISING ERROR WITH DUPLICATE OPTION_FILES
    """

    def test_duplicate_optionfiles(self):
        """Passing the same option file twice must raise ValueError."""
        include_dir = os.path.join('tests', 'data', 'option_files',
                                   'include_files')
        files = [os.path.join(include_dir, name)
                 for name in ('1.cnf', '2.cnf', '1.cnf')]
        self.assertRaises(ValueError, mysql.connector.connect,
                          option_files=files)
class BugOra19282158(tests.MySQLConnectorTests):
    """BUG#19282158: NULL values with prepared statements
    """

    def setUp(self):
        """Create a table mixing NOT NULL and nullable columns."""
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.cursor = self.cnx.cursor()

        self.tbl = 'Bug19282158'
        self.cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)

        create = ('CREATE TABLE {0}(col1 INT NOT NULL, col2 INT NULL, '
                  'col3 VARCHAR(10), col4 DECIMAL(4,2) NULL, '
                  'col5 DATETIME NULL, col6 INT NOT NULL, col7 VARCHAR(10), '
                  'PRIMARY KEY(col1))'.format(self.tbl))
        self.cursor.execute(create)

    def tearDown(self):
        self.cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)
        self.cursor.close()
        self.cnx.close()

    def test_null(self):
        """None parameters interleaved with values must round-trip as NULL."""
        cur = self.cnx.cursor(prepared=True)
        sql = ("INSERT INTO {0}(col1, col2, col3, col4, col5, col6, col7) "
               "VALUES (?, ?, ?, ?, ?, ?, ?)".format(self.tbl))
        params = (100, None, 'foo', None, datetime(2014, 8, 4, 9, 11, 14),
                  10, 'bar')
        exp = (100, None, 'foo', None,
               datetime(2014, 8, 4, 9, 11, 14), 10, 'bar')
        cur.execute(sql, params)

        sql = "SELECT * FROM {0}".format(self.tbl)
        cur.execute(sql)
        self.assertEqual(exp, cur.fetchone())
        cur.close()
class BugOra19168737(tests.MySQLConnectorTests):
    """BUG#19168737: UNSUPPORTED CONNECTION ARGUMENTS WHILE USING OPTION_FILES
    """

    def test_unsupported_arguments(self):
        """Unsupported/extra groups from an option file must be exposed
        verbatim instead of breaking the connection setup."""
        option_file_dir = os.path.join('tests', 'data', 'option_files')
        opt_file = os.path.join(option_file_dir, 'pool.cnf')
        config = tests.get_mysql_config()
        if tests.MYSQL_VERSION < (5, 7):
            config["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
        conn = mysql.connector.connect(option_files=opt_file,
                                       option_groups=['pooling'], **config)
        self.assertEqual('my_pool', conn.pool_name)
        # Reset the module-level pool registry so other tests start clean.
        mysql.connector._CONNECTION_POOLS = {}
        conn.close()

        # The 'failover' group is not a supported connection argument and
        # must be passed through untouched as a tuple of dicts.
        new_config = read_option_files(option_files=opt_file,
                                       option_groups=['failover'], **config)
        exp = {
            'failover': ({'pool_name': 'failA', 'port': 3306},
                         {'pool_name': 'failB', 'port': 3307})
        }
        exp.update(config)
        self.assertEqual(exp, new_config)
class BugOra21530100(tests.MySQLConnectorTests):
    """BUG#21530100: CONNECT FAILS WHEN USING MULTIPLE OPTION_GROUPS WITH
    PYTHON 3.3
    """

    def test_option_files_with_option_groups(self):
        """Connecting with several option_groups spread over an option file
        and an !include'd file must succeed."""
        temp_cnf_file = os.path.join(os.getcwd(), 'temp.cnf')
        temp_include_file = os.path.join(os.getcwd(), 'include.cnf')
        try:
            config = tests.get_mysql_config()

            cnf = "[group32]\n"
            cnf += '\n'.join(['{0} = {1}'.format(key, value)
                              for key, value in config.items()])
            cnf += "\n[group31]\n"
            cnf += "!include {0}\n".format(temp_include_file)

            include_cnf = "[group41]\n"
            include_cnf += "charset=utf8\n"

            # 'with' guarantees the handles are closed (and the content is
            # flushed) even if a write raises; the originals leaked the
            # handles on error.
            with open(temp_cnf_file, "w+") as cnf_file:
                cnf_file.write(cnf)
            with open(temp_include_file, "w+") as include_file:
                include_file.write(include_cnf)

            conn = mysql.connector.connect(option_files=temp_cnf_file,
                        option_groups=['group31','group32','group41'])
        except Exception as exc:
            self.fail("Connection failed with option_files argument: {0}"
                      "".format(exc))
        finally:
            os.remove(temp_cnf_file)
            os.remove(temp_include_file)
class BugOra19481761(tests.MySQLConnectorTests):
    """BUG#19481761: OPTION_FILES + !INCLUDE FAILS WITH TRAILING NEWLINE
    """

    def test_option_files_with_include(self):
        """An option file consisting solely of an !include directive with a
        trailing newline must be read correctly."""
        temp_cnf_file = os.path.join(os.getcwd(), 'temp.cnf')
        temp_include_file = os.path.join(os.getcwd(), 'include.cnf')

        config = tests.get_mysql_config()
        cnf = "[connector_python]\n"
        cnf += '\n'.join(['{0} = {1}'.format(key, value)
                          for key, value in config.items()])

        # 'with' closes the handles even on error; try/finally guarantees
        # the temp files are removed even when an assertion fails (the
        # original left them behind on failure).
        try:
            with open(temp_include_file, "w+") as include_file:
                include_file.write(cnf)
            with open(temp_cnf_file, "w+") as cnf_file:
                cnf_file.write("!include {0}\n".format(temp_include_file))

            try:
                conn = mysql.connector.connect(option_files=temp_cnf_file)
            except Exception:
                # Narrowed from a bare except so interrupts still propagate.
                self.fail("Connection failed with option_files argument.")

            self.assertEqual(config,
                             read_option_files(option_files=temp_cnf_file))
        finally:
            os.remove(temp_cnf_file)
            os.remove(temp_include_file)
class BugOra19584051(tests.MySQLConnectorTests):
    """BUG#19584051: TYPE_CODE DOES NOT COMPARE EQUAL
    """

    def setUp(self):
        """Create a table covering the DB-API type-code categories."""
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.cursor = self.cnx.cursor()

        self.tbl = 'Bug19584051'
        self.cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)

        create = ('CREATE TABLE {0}(col1 INT NOT NULL, col2 BLOB, '
                  'col3 VARCHAR(10), col4 DECIMAL(4,2), '
                  'col5 DATETIME , col6 YEAR, '
                  'PRIMARY KEY(col1))'.format(self.tbl))
        self.cursor.execute(create)

    def tearDown(self):
        self.cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)
        self.cursor.close()
        self.cnx.close()

    def test_dbapi(self):
        """cursor.description type codes must equal the DB-API singletons."""
        cur = self.cnx.cursor()
        sql = ("INSERT INTO {0}(col1, col2, col3, col4, col5, col6) "
               "VALUES (%s, %s, %s, %s, %s, %s)".format(self.tbl))
        params = (100, 'blob-data', 'foo', 1.2, datetime(2014, 8, 4, 9, 11, 14),
                  2014)
        exp = [
            mysql.connector.NUMBER,
            mysql.connector.BINARY,
            mysql.connector.STRING,
            mysql.connector.NUMBER,
            mysql.connector.DATETIME,
            mysql.connector.NUMBER,
        ]
        cur.execute(sql, params)

        sql = "SELECT * FROM {0}".format(self.tbl)
        cur.execute(sql)
        # Consume the row so the cursor has no pending result before close;
        # the value itself is not asserted.
        temp = cur.fetchone()
        type_codes = [row[1] for row in cur.description]

        self.assertEqual(exp, type_codes)
        cur.close()
class BugOra19522948(tests.MySQLConnectorTests):
    """BUG#19522948: DATA CORRUPTION WITH TEXT FIELDS
    """

    def setUp(self):
        """Create a single-LONGTEXT-column scratch table."""
        self.tbl = 'Bug19522948'
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cur = self.cnx.cursor()
        self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cur.execute(
            "CREATE TABLE {0} (c1 LONGTEXT NOT NULL)".format(self.tbl))

    def tearDown(self):
        self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cur.close()
        self.cnx.close()

    def test_row_to_python(self):
        """LONGTEXT payloads of increasing size must round-trip intact."""
        prep_cur = self.cnx.cursor(prepared=True)
        insert = "INSERT INTO {0} (c1) VALUES (?)".format(self.tbl)
        select = "SELECT * FROM {0}".format(self.tbl)
        for repeat in (10, 1000, 10000):
            payload = "test_data" * repeat
            prep_cur.execute(insert, (payload,))
            self.cur.execute(select)
            self.assertEqual((payload,), self.cur.fetchone())
            # Empty the table between rounds (not needed after the last).
            if repeat != 10000:
                self.cur.execute("TRUNCATE TABLE {0}".format(self.tbl))
class BugOra19500097(tests.MySQLConnectorTests):
    """BUG#19500097: BETTER SUPPORT FOR RAW/BINARY DATA
    """

    def setUp(self):
        """Create a latin1 table with a VARCHAR and an INT column."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        self.tbl = 'Bug19500097'
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        create = ("CREATE TABLE {0} (col1 VARCHAR(10), col2 INT) "
                  "DEFAULT CHARSET latin1".format(self.tbl))
        cur.execute(create)
        cur.close()
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.close()
        cnx.close()

    @foreach_cnx()
    def test_binary_charset(self):
        """Rows written before/after switching to the 'binary' charset must
        come back as raw bytearrays.

        Rows inserted before the switch are encoded in latin1 (one byte per
        character); rows inserted afterwards keep their client-side utf-8
        encoding, hence the two-byte sequences in the expected values.
        """
        sql = "INSERT INTO {0} VALUES(%s, %s)".format(self.tbl)
        cur = self.cnx.cursor()
        cur.execute(sql, ('foo', 1))
        cur.execute(sql, ('ëëë', 2))
        cur.execute(sql, (u'ááá', 5))

        self.cnx.set_charset_collation('binary')
        cur.execute(sql, ('bar', 3))
        cur.execute(sql, ('ëëë', 4))
        cur.execute(sql, (u'ááá', 6))

        exp = [
            (bytearray(b'foo'), 1),
            (bytearray(b'\xeb\xeb\xeb'), 2),
            (bytearray(b'\xe1\xe1\xe1'), 5),
            (bytearray(b'bar'), 3),
            (bytearray(b'\xc3\xab\xc3\xab\xc3\xab'), 4),
            (bytearray(b'\xc3\xa1\xc3\xa1\xc3\xa1'), 6)
        ]

        cur.execute("SELECT * FROM {0}".format(self.tbl))
        self.assertEqual(exp, cur.fetchall())
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 3),
                 "MySQL {0} does not support COM_RESET_CONNECTION".format(
                     tests.MYSQL_VERSION_TXT))
class BugOra19549363(tests.MySQLConnectorTests):
    """BUG#19549363: Compression does not work with Change User
    """

    def test_compress_reset_connection(self):
        """Returning a compressed pooled connection (which resets the
        session) must not raise."""
        config = tests.get_mysql_config()
        config['compress'] = True

        # Start from an empty module-level pool registry.
        mysql.connector._CONNECTION_POOLS = {}
        config['pool_name'] = 'mypool'
        config['pool_size'] = 3
        config['pool_reset_session'] = True

        cnx1 = mysql.connector.connect(**config)
        try:
            # close() returns the connection to the pool, triggering the
            # session reset that used to fail on compressed connections.
            cnx1.close()
        except Exception:
            # Narrowed from a bare except so interrupts are not swallowed.
            self.fail("Reset session with compression test failed.")
        finally:
            mysql.connector._CONNECTION_POOLS = {}
class BugOra19803702(tests.MySQLConnectorTests):
    """BUG#19803702: CAN'T REPORT ERRORS THAT HAVE NON-ASCII CHARACTERS
    """

    def test_errors(self):
        """A server error mentioning a non-ASCII table name must be raised
        as a DatabaseError instead of crashing on decoding.

        NOTE(review): this test also performs the setup (cnx/cur/table) that
        tearDown relies on; tearDown will fail if the test never ran.
        """
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()

        # Table name with non-ASCII characters so the duplicate-table error
        # message contains them.
        self.tbl = 'áááëëëááá'
        self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        create = ("CREATE TABLE {0} (col1 VARCHAR(10), col2 INT) "
                  "DEFAULT CHARSET latin1".format(self.tbl))
        self.cur.execute(create)
        # Creating the same table again must raise cleanly.
        self.assertRaises(errors.DatabaseError, self.cur.execute, create)

    def tearDown(self):
        self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cur.close()
        self.cnx.close()
class BugOra19777815(tests.MySQLConnectorTests):
    """BUG#19777815: CALLPROC() DOES NOT SUPPORT WARNINGS
    """

    def setUp(self):
        """Create two procedures that raise a SQL warning, one of which
        also returns a result set."""
        config = tests.get_mysql_config()
        config['get_warnings'] = True
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        self.sp1 = 'BUG19777815'
        self.sp2 = 'BUG19777815_with_result'
        create1 = (
            "CREATE PROCEDURE {0}() BEGIN SIGNAL SQLSTATE '01000' "
            "SET MESSAGE_TEXT = 'TEST WARNING'; END;".format(self.sp1)
        )
        create2 = (
            "CREATE PROCEDURE {0}() BEGIN SELECT 1; SIGNAL SQLSTATE '01000' "
            "SET MESSAGE_TEXT = 'TEST WARNING'; END;".format(self.sp2)
        )
        cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.sp1))
        cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.sp2))
        cur.execute(create1)
        cur.execute(create2)
        cur.close()
        cnx.close()

    def tearDown(self):
        """Drop the procedures created in setUp."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.sp1))
        cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.sp2))
        cur.close()
        cnx.close()

    @foreach_cnx(get_warnings=True)
    def test_warning(self):
        """callproc() must expose warnings via fetchwarnings()."""
        cur = self.cnx.cursor()
        cur.callproc(self.sp1)
        exp = [(u'Warning', 1642, u'TEST WARNING')]
        self.assertEqual(exp, cur.fetchwarnings())

    @foreach_cnx(get_warnings=True)
    def test_warning_with_rows(self):
        """Warnings must also be available when the procedure returns rows."""
        cur = self.cnx.cursor()
        cur.callproc(self.sp2)

        exp = [(1,)]
        # Python 2 generators have .next(); Python 3 uses next().
        if PY2:
            self.assertEqual(exp, cur.stored_results().next().fetchall())
        else:
            self.assertEqual(exp, next(cur.stored_results()).fetchall())
        exp = [(u'Warning', 1642, u'TEST WARNING')]
        self.assertEqual(exp, cur.fetchwarnings())
class BugOra20407036(tests.MySQLConnectorTests):
    """BUG#20407036: INCORRECT ARGUMENTS TO MYSQLD_STMT_EXECUTE ERROR
    """

    def setUp(self):
        """Create a utf8 table with a very wide VARCHAR column."""
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()

        self.tbl = 'Bug20407036'
        self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        create = ("CREATE TABLE {0} ( id int(10) unsigned NOT NULL, "
                  "text VARCHAR(70000) CHARACTER SET utf8 NOT NULL, "
                  "rooms tinyint(3) unsigned NOT NULL) "
                  "ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8 "
                  "COLLATE=utf8_unicode_ci".format(self.tbl))
        self.cur.execute(create)

    def tearDown(self):
        self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cur.close()
        self.cnx.close()

    def test_binary_charset(self):
        """Prepared-statement inserts around specific string lengths must
        succeed and round-trip.

        The chosen lengths (251/252/253/255 and 65535) presumably straddle
        the boundaries of the protocol's length-encoded string prefixes —
        TODO confirm against the wire-format spec.
        """
        cur = self.cnx.cursor(prepared=True)
        sql = "INSERT INTO {0}(text, rooms) VALUES(%s, %s)".format(self.tbl)
        cur.execute(sql, ('a'*252, 1))
        cur.execute(sql, ('a'*253, 2))
        cur.execute(sql, ('a'*255, 3))
        cur.execute(sql, ('a'*251, 4))
        cur.execute(sql, ('a'*65535, 5))

        exp = [
            (0, 'a'*252, 1),
            (0, 'a'*253, 2),
            (0, 'a'*255, 3),
            (0, 'a'*251, 4),
            (0, 'a'*65535, 5),
        ]

        self.cur.execute("SELECT * FROM {0}".format(self.tbl))
        self.assertEqual(exp, self.cur.fetchall())
class BugOra20301989(tests.MySQLConnectorTests):
    """BUG#20301989: SET DATA TYPE NOT TRANSLATED CORRECTLY WHEN EMPTY
    """
    def setUp(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor()
        self.tbl = 'Bug20301989'
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.execute("CREATE TABLE {0} (col1 SET('val1', 'val2')) "
                    "DEFAULT CHARSET latin1".format(self.tbl))
        cur.close()
        cnx.close()

    def tearDown(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.close()
        cnx.close()

    @foreach_cnx()
    def test_set(self):
        """Full, partial, empty and NULL SET values must convert correctly."""
        cur = self.cnx.cursor()
        insert = "INSERT INTO {0} VALUES(%s)".format(self.tbl)
        for value in ('val1,val2', 'val1', '', None):
            cur.execute(insert, (value,))
        expected = [
            (set([u'val1', u'val2']),),
            (set([u'val1']),),
            (set([]),),
            (None,)
        ]
        cur.execute("SELECT * FROM {0}".format(self.tbl))
        self.assertEqual(expected, cur.fetchall())
class BugOra20462427(tests.MySQLConnectorTests):
    """BUG#20462427: BYTEARRAY INDEX OUT OF RANGE

    Reads a LONGTEXT value whose length crosses the 16MB packet boundary,
    with and without protocol compression.
    """
    def setUp(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        self.tbl = 'BugOra20462427'
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        create = ("CREATE TABLE {0} ("
                  "id INT PRIMARY KEY, "
                  "a LONGTEXT "
                  ") ENGINE=Innodb DEFAULT CHARSET utf8".format(self.tbl))
        cur.execute(create)
        # Close what we opened; the previous version leaked both handles.
        cur.close()
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.close()
        cnx.close()

    def _test_bigdata(self):
        """Grow a LONGTEXT value across the 16MB packet boundary and
        verify every intermediate length is read back intact."""
        cur = self.cnx.cursor()
        cur.execute("INSERT INTO {0} (a) VALUES ('{1}')".format(
            self.tbl, 'a' * 16777210))
        select = "SELECT a FROM {0}".format(self.tbl)
        cur.execute(select)
        self.assertEqual(16777210, len(cur.fetchall()[0][0]))
        # Step past the boundary one byte at a time, then jump to 16MB.
        for suffix, expected in (('a', 16777211), ('a', 16777212),
                                 ('a', 16777213), ('aaa', 16777216)):
            cur.execute("UPDATE {0} SET a = concat(a, '{1}')".format(
                self.tbl, suffix))
            cur.execute(select)
            self.assertEqual(expected, len(cur.fetchall()[0][0]))
        cur.close()

    # BUG: this test previously used compress=False, making it an exact
    # duplicate of test_bigdata_nocompress and leaving the compressed
    # protocol path (the subject of this bug) untested.
    @cnx_config(compress=True, connection_timeout=100)
    @foreach_cnx()
    def test_bigdata_compress(self):
        self._test_bigdata()

    @cnx_config(connection_timeout=100)
    @foreach_cnx()
    def test_bigdata_nocompress(self):
        self._test_bigdata()
class BugOra20811802(tests.MySQLConnectorTests):
    """BUG#20811802: ISSUES WHILE USING BUFFERED=TRUE OPTION WITH CPY CEXT
    """
    def setUp(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        self.tbl = 'Bug20811802'
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        create = ("CREATE TABLE {0} (id INT, name VARCHAR(5), dept VARCHAR(5)) "
                  "DEFAULT CHARSET latin1".format(self.tbl))
        cur.execute(create)
        cur.close()
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.close()
        cnx.close()

    @foreach_cnx()
    def test_set(self):
        """Named-tuple and dictionary cursors must return the same rows,
        both buffered and unbuffered."""
        cur = self.cnx.cursor()
        sql = "INSERT INTO {0} VALUES(%s, %s, %s)".format(self.tbl)
        data = [
            (1, 'abc', 'cs'),
            (2, 'def', 'is'),
            (3, 'ghi', 'cs'),
            (4, 'jkl', 'it'),
        ]
        cur.executemany(sql, data)
        cur.close()

        select = "SELECT * FROM {0}".format(self.tbl)
        for buffered in (True, False):
            cur = self.cnx.cursor(named_tuple=True, buffered=buffered)
            cur.execute(select)
            for i, row in enumerate(cur):
                self.assertEqual((row.id, row.name, row.dept), data[i])
            cur.close()

            cur = self.cnx.cursor(dictionary=True, buffered=buffered)
            cur.execute(select)
            for i, row in enumerate(cur):
                self.assertEqual(row,
                                 dict(zip(('id', 'name', 'dept'), data[i])))
            # The dictionary cursors were previously never closed (leak).
            cur.close()
class BugOra20834643(tests.MySQLConnectorTests):
    """BUG#20834643: ATTRIBUTE ERROR NOTICED WHILE TRYING TO PROMOTE SERVERS
    """
    def setUp(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor()
        self.tbl = 'Bug20834643'
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.execute("CREATE TABLE {0} (id INT, name VARCHAR(5), dept VARCHAR(5)) "
                    "DEFAULT CHARSET latin1".format(self.tbl))
        cur.close()
        cnx.close()

    def tearDown(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.close()
        cnx.close()

    def _verify_named_tuple(self, cursor, data):
        # fetchone() followed by fetchall() must both yield rows whose
        # columns are reachable as named-tuple attributes.
        cursor.execute("SELECT * FROM {0}".format(self.tbl))
        first = cursor.fetchone()
        self.assertEqual(data[0], (first.id, first.name, first.dept))
        rest = [(row.id, row.name, row.dept) for row in cursor.fetchall()]
        self.assertEqual(rest, data[1:])
        cursor.close()

    @foreach_cnx()
    def test_set(self):
        cur = self.cnx.cursor()
        sql = "INSERT INTO {0} VALUES(%s, %s, %s)".format(self.tbl)
        data = [
            (1, 'abc', 'cs'),
            (2, 'def', 'is'),
            (3, 'ghi', 'cs'),
            (4, 'jkl', 'it'),
        ]
        cur.executemany(sql, data)
        cur.close()

        # Default, buffered and explicitly unbuffered named-tuple cursors.
        self._verify_named_tuple(self.cnx.cursor(named_tuple=True), data)
        self._verify_named_tuple(
            self.cnx.cursor(named_tuple=True, buffered=True), data)
        self._verify_named_tuple(
            self.cnx.cursor(named_tuple=True, buffered=False), data)
class BugOra20653441(tests.MySQLConnectorTests):
    """BUG#20653441: PYTHON CONNECTOR HANGS IF A QUERY IS KILLED (ERROR 1317)"""
    def setUp(self):
        self.table_name = 'Bug20653441'
        self._setup()
    def _setup(self):
        # Create and populate a 1024-row table; the self-join in the test
        # below then produces 1024*1024 rows, keeping the SELECT busy long
        # enough for the killer thread to interrupt it.
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.table_name))
        table = (
            "CREATE TABLE {table} ("
            " id INT UNSIGNED NOT NULL AUTO_INCREMENT,"
            " c1 VARCHAR(255) DEFAULT '{default}',"
            " PRIMARY KEY (id)"
            ")"
        ).format(table=self.table_name, default='a' * 255)
        cnx.cmd_query(table)
        stmt = "INSERT INTO {table} (id) VALUES {values}".format(
            table=self.table_name,
            values=','.join(['(NULL)'] * 1024)
        )
        cnx.cmd_query(stmt)
        cnx.commit()
        cnx.close()
    def tearDown(self):
        # Best-effort cleanup: the connection state after a killed query is
        # not guaranteed, so any failure here is deliberately ignored.
        try:
            cnx = connection.MySQLConnection(**tests.get_mysql_config())
            cnx.cmd_query(
                "DROP TABLE IF EXISTS {0}".format(self.table_name))
            cnx.close()
        except:
            pass
    @foreach_cnx()
    def test_kill_query(self):
        """Killing a running query must raise an error, not hang fetchall()."""
        def kill(connection_id):
            """Kill query using separate connection"""
            killer_cnx = connection.MySQLConnection(**tests.get_mysql_config())
            time.sleep(1)
            killer_cnx.cmd_query("KILL QUERY {0}".format(connection_id))
            killer_cnx.close()
        def sleepy_select(cnx):
            """Execute a SELECT statement which takes a while to complete"""
            cur = cnx.cursor()
            # Ugly query ahead!
            stmt = "SELECT x1.*, x2.* from {table} as x1, {table} as x2".format(
                table=self.table_name)
            cur.execute(stmt)
            # Save the error so we can check in the calling thread
            cnx.test_error = None
            try:
                cur.fetchall()
            except errors.Error as err:
                cnx.test_error = err
            cur.close()
        worker = Thread(target=sleepy_select, args=[self.cnx])
        killer = Thread(target=kill, args=[self.cnx.connection_id])
        worker.start()
        killer.start()
        worker.join()
        killer.join()
        self.cnx.close()
        # The fetching thread must have observed ER_QUERY_INTERRUPTED (1317).
        self.assertTrue(isinstance(self.cnx.test_error, errors.DatabaseError))
        self.assertEqual(str(self.cnx.test_error),
                         "1317 (70100): Query execution was interrupted")
class BugOra21535573(tests.MySQLConnectorTests):
    """BUG#21535573: SEGFAULT WHEN TRY TO SELECT GBK DATA WITH C-EXTENSION
    """
    def tearDown(self):
        # Drop the per-charset tables created by _test_charset().
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        for charset in ('gbk', 'sjis', 'big5'):
            tablename = charset + 'test'
            cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(tablename))
        cnx.close()
    def _test_charset(self, charset, data):
        """Round-trip *data* through a table created with *charset*.

        Reconnects with the charset under test using the same connection
        class that @foreach_cnx supplied, so both the pure-Python and the
        C extension implementations are exercised.
        """
        config = tests.get_mysql_config()
        config['charset'] = charset
        config['use_unicode'] = True
        self.cnx = self.cnx.__class__(**config)
        tablename = charset + 'test'
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(tablename))
        # The multi-byte string doubles as a column identifier in the DDL;
        # on Python 2 it is encoded to the target charset first.
        if PY2:
            column = data.encode(charset)
        else:
            column = data
        table = (
            "CREATE TABLE {table} ("
            " {col} INT AUTO_INCREMENT KEY, "
            "c1 VARCHAR(40)"
            ") CHARACTER SET '{charset}'"
        ).format(table=tablename, charset=charset, col=column)
        cur.execute(table)
        self.cnx.commit()
        cur.execute("TRUNCATE {0}".format(tablename))
        self.cnx.commit()
        insert = "INSERT INTO {0} (c1) VALUES (%s)".format(tablename)
        cur.execute(insert, (data,))
        self.cnx.commit()
        cur.execute("SELECT * FROM {0}".format(tablename))
        for row in cur:
            self.assertEqual(data, row[1])
        cur.close()
        self.cnx.close()
    @foreach_cnx()
    def test_gbk(self):
        self._test_charset('gbk', u'海豚')
    @foreach_cnx()
    def test_sjis(self):
        self._test_charset('sjis', u'シイラ')
    @foreach_cnx()
    def test_big5(self):
        self._test_charset('big5', u'皿')
class BugOra21536507(tests.MySQLConnectorTests):
    """BUG#21536507:C/PYTHON BEHAVIOR NOT PROPER WHEN RAISE_ON_WARNINGS=TRUE
    """
    @cnx_config(raw=False, get_warnings=True, raise_on_warnings=True)
    @foreach_cnx()
    def test_with_raw(self):
        """With raise_on_warnings=True a warning must raise DatabaseError,
        fetchwarnings() must still expose it, and closing the cursor
        afterwards must not fail."""
        cur = self.cnx.cursor()
        drop_stmt = "DROP TABLE IF EXISTS unknown"
        self.assertRaises(errors.DatabaseError, cur.execute, drop_stmt)
        # The schema name in the message depends on the test database, so
        # check the warning components instead of one exact tuple.
        # (The previous version also built an unused 'exp' list here.)
        res = cur.fetchwarnings()
        self.assertEqual('Note', res[0][0])
        self.assertEqual(1051, res[0][1])
        self.assertTrue(res[0][2].startswith("Unknown table"))
        select_stmt = "SELECT 'a'+'b'"
        cur.execute(select_stmt)
        self.assertRaises(errors.DatabaseError, cur.fetchall)
        # The server reports the truncation warnings in a platform
        # dependent order.
        if os.name != 'nt':
            exp = [
                ('Warning', 1292, "Truncated incorrect DOUBLE value: 'a'"),
                ('Warning', 1292, "Truncated incorrect DOUBLE value: 'b'"),
            ]
        else:
            exp = [
                ('Warning', 1292, "Truncated incorrect DOUBLE value: 'b'"),
                ('Warning', 1292, "Truncated incorrect DOUBLE value: 'a'"),
            ]
        self.assertEqual(exp, cur.fetchwarnings())
        try:
            cur.close()
        except errors.InternalError as exc:
            self.fail("Closing cursor failed with: %s" % str(exc))
class BugOra21420633(tests.MySQLConnectorTests):
    """BUG#21420633: CEXTENSION CRASHES WHILE FETCHING LOTS OF NULL VALUES
    """
    def setUp(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        self.tbl = 'Bug21420633'
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        create = ("CREATE TABLE {0} (id INT, dept VARCHAR(5)) "
                  "DEFAULT CHARSET latin1".format(self.tbl))
        cur.execute(create)
        cur.close()
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        cur.close()
        cnx.close()

    @foreach_cnx()
    def test_null(self):
        """Fetching 10000 rows of NULL values must not crash and must
        return every row."""
        cur = self.cnx.cursor()
        sql = "INSERT INTO {0} VALUES(%s, %s)".format(self.tbl)
        data = [(i, None) for i in range(10000)]
        cur.executemany(sql, data)
        cur.close()
        cur = self.cnx.cursor(named_tuple=True)
        cur.execute("SELECT * FROM {0}".format(self.tbl))
        res = cur.fetchall()
        # The previous version discarded the result; verify all rows arrived.
        self.assertEqual(10000, len(res))
        cur.close()
class BugOra21492428(tests.MySQLConnectorTests):
    """BUG#21492428: CONNECT FAILS WHEN PASSWORD STARTS OR ENDS WITH SPACES
    """
    def setUp(self):
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.cursor = self.cnx.cursor()
        # Account host: 'localhost' when connecting through the Unix
        # socket, otherwise the configured TCP host.
        if config['unix_socket'] and os.name != 'nt':
            self.host = 'localhost'
        else:
            self.host = config['host']
        grant = u"CREATE USER '{user}'@'{host}' IDENTIFIED BY '{password}'"
        self._credentials = [
            ('ABCD', ' XYZ'),
            ('PQRS', ' 1 2 3 '),
            ('XYZ1', 'XYZ123 '),
            ('A B C D', ' ppppp '),
        ]
        # User names with leading/trailing spaces are only created on
        # servers newer than 5.6.
        if self.cnx.get_server_version() > (5, 6):
            self._credentials += [
                (' PQRSWITHSPACE', ' 1 2 3 '),
                ('XYZ1WITHSPACE ', 'XYZ123 '),
                (' S P A C E D ', ' ppppp '),
            ]
        for user, password in self._credentials:
            self.cursor.execute(grant.format(
                user=user, host=self.host, password=password))

    def tearDown(self):
        for user, password in self._credentials:
            self.cursor.execute(u"DROP USER '{user}'@'{host}'".format(
                user=user, host=self.host))
        # Close the handles opened in setUp (previously leaked).
        self.cursor.close()
        self.cnx.close()

    def test_password_with_spaces(self):
        """Connecting must succeed for every created user/password pair."""
        config = tests.get_mysql_config()
        for user, password in self._credentials:
            config['user'] = user
            config['password'] = password
            config['database'] = None
            try:
                cnx = connection.MySQLConnection(**config)
            except errors.ProgrammingError:
                self.fail('Failed using password with spaces for user %s' % user)
            else:
                cnx.close()
class BugOra21476495(tests.MySQLConnectorTests):
    """Bug 21476495 - CHARSET VALUE REMAINS INVALID AFTER FAILED
    SET_CHARSET_COLLATION() CALL
    """
    def setUp(self):
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)

    def tearDown(self):
        # The connection opened in setUp was previously leaked.
        self.cnx.close()

    def test_bad_set_charset_number(self):
        """A failed set_charset_collation() call must leave the cached
        charset id unchanged and keep the session charset intact."""
        old_val = self.cnx._charset_id
        self.assertRaises(mysql.connector.Error,
                          self.cnx.set_charset_collation, 19999)
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        # Pass real booleans; the previous version passed the strings
        # "true", which only worked because any non-empty string is truthy.
        cursor = cnx.cursor(raw=True, buffered=True)
        cursor.execute("SHOW VARIABLES LIKE 'character_set_connection'")
        row = cursor.fetchone()
        self.assertEqual(row[1], u"utf8mb4")
        cursor.close()
        cnx.close()
        self.assertEqual(self.cnx._charset_id, old_val)
class BugOra21477493(tests.MySQLConnectorTests):
    """Bug 21477493 - EXECUTEMANY() API WITH INSERT INTO .. SELECT STATEMENT
    RETURNS INTERFACEERROR
    """
    def setUp(self):
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        cursor = self.cnx.cursor()
        cursor.execute("DROP TABLE IF EXISTS fun1")
        cursor.execute("CREATE TABLE fun1(a CHAR(50), b INT)")
        data = [('A', 1), ('B', 2)]
        cursor.executemany("INSERT INTO fun1 (a, b) VALUES (%s, %s)", data)
        cursor.close()

    def tearDown(self):
        cursor = self.cnx.cursor()
        cursor.execute("DROP TABLE IF EXISTS fun1")
        cursor.close()
        # Close the connection opened in setUp (previously leaked).
        self.cnx.close()

    def test_insert_into_select_type1(self):
        """executemany() must not mis-parse 'VALUES' inside a SELECT."""
        data = [('A', 1), ('B', 2)]
        cursor = self.cnx.cursor()
        cursor.executemany("INSERT INTO fun1 SELECT CONCAT('VALUES', %s), "
                           "b + %s FROM fun1", data)
        cursor.close()
        cursor = self.cnx.cursor()
        cursor.execute("SELECT * FROM fun1")
        self.assertEqual(8, len(cursor.fetchall()))

    def test_insert_into_select_type2(self):
        """Same as type1 but with a 'VALUES(...)' literal inside CONCAT."""
        data = [('A', 1), ('B', 2)]
        cursor = self.cnx.cursor()
        cursor.executemany("INSERT INTO fun1 SELECT CONCAT('VALUES(ab, cd)',"
                           "%s), b + %s FROM fun1", data)
        cursor.close()
        cursor = self.cnx.cursor()
        cursor.execute("SELECT * FROM fun1")
        self.assertEqual(8, len(cursor.fetchall()))

    def test_insert_into_select_type3(self):
        """Same as type2 but with a schema-qualified table name."""
        config = tests.get_mysql_config()
        data = [('A', 1), ('B', 2)]
        cursor = self.cnx.cursor()
        cursor.executemany("INSERT INTO `{0}`.`fun1` SELECT CONCAT('"
                           "VALUES(ab, cd)', %s), b + %s FROM fun1"
                           "".format(config["database"]), data)
        cursor.close()
        cursor = self.cnx.cursor()
        cursor.execute("SELECT * FROM fun1")
        self.assertEqual(8, len(cursor.fetchall()))
class BugOra21492815(tests.MySQLConnectorTests):
    """BUG#21492815: CALLPROC() HANGS WHEN CONSUME_RESULTS=TRUE
    """
    def setUp(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        self.proc1 = 'Bug20834643'
        self.proc2 = 'Bug20834643_1'
        cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.proc1))
        create = ("CREATE PROCEDURE {0}() BEGIN SELECT 1234; "
                  "END".format(self.proc1))
        cur.execute(create)
        cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.proc2))
        create = ("CREATE PROCEDURE {0}() BEGIN SELECT 9876; "
                  "SELECT CONCAT('','abcd'); END".format(self.proc2))
        cur.execute(create)
        cur.close()
        cnx.close()

    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.proc1))
        cur.execute("DROP PROCEDURE IF EXISTS {0}".format(self.proc2))
        cur.close()
        cnx.close()

    @cnx_config(consume_results=True, raw=True)
    @foreach_cnx()
    def test_set_raw(self):
        """callproc() with consume_results=True and a raw cursor.

        BUG: this method was previously also named ``test_set`` and was
        silently shadowed by the non-raw variant below, so it never ran.
        """
        cur = self.cnx.cursor()
        cur.callproc(self.proc1)
        self.assertEqual((bytearray(b'1234'),),
                         next(cur.stored_results()).fetchone())
        cur.callproc(self.proc2)
        exp = [[(bytearray(b'9876'),)], [(bytearray(b'abcd'),)]]
        results = []
        for result in cur.stored_results():
            results.append(result.fetchall())
        self.assertEqual(exp, results)
        cur.close()

    @cnx_config(consume_results=True, raw=False)
    @foreach_cnx()
    def test_set(self):
        """callproc() with consume_results=True and a converting cursor."""
        cur = self.cnx.cursor()
        cur.callproc(self.proc1)
        self.assertEqual((1234,),
                         next(cur.stored_results()).fetchone())
        cur.callproc(self.proc2)
        exp = [[(9876,)], [('abcd',)]]
        results = []
        for result in cur.stored_results():
            results.append(result.fetchall())
        self.assertEqual(exp, results)
        cur.close()
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
class BugOra21656282(tests.MySQLConnectorTests):
    """BUG#21656282: CONNECT FAILURE WITH C-EXT WHEN PASSWORD CONTAINS UNICODE
    CHARACTER
    """
    def setUp(self):
        config = tests.get_mysql_config()
        self.cnx = CMySQLConnection(**config)
        # Account host: loopback when the config would use the Unix socket,
        # otherwise the configured TCP host.
        self.host = '127.0.0.1' if config['unix_socket'] and os.name != 'nt' \
            else config['host']
        self.user = 'unicode_user'
        self.password = u'步'
        # Use utf8mb4 character set
        self.cnx.cmd_query("SET character_set_server='utf8mb4'")
        # Drop user if exists
        self._drop_user(self.host, self.user)
        # Create the user with unicode password
        create_user = (u"CREATE USER '{user}'@'{host}' IDENTIFIED BY "
                       u"'{password}'")
        self.cnx.cmd_query(create_user.format(user=self.user, host=self.host,
                                              password=self.password))
        # Grant all to new user on database
        grant = "GRANT ALL ON {database}.* TO '{user}'@'{host}'"
        self.cnx.cmd_query(grant.format(database=config['database'],
                                        user=self.user, host=self.host))
    def tearDown(self):
        self._drop_user(self.host, self.user)
    def _drop_user(self, host, user):
        """Best-effort DROP USER; a missing user is not an error here."""
        try:
            drop_user = "DROP USER '{user}'@'{host}'"
            self.cnx.cmd_query(drop_user.format(user=user, host=host))
        except errors.DatabaseError:
            # It's OK when drop user fails
            pass
    def test_unicode_password(self):
        """Connecting over TCP with the unicode password must succeed."""
        config = tests.get_mysql_config()
        config.pop('unix_socket')
        config['user'] = self.user
        config['password'] = self.password
        try:
            cnx = CMySQLConnection(**config)
        except Exception as err:
            self.fail('Failed using password with unicode characters: '
                      'e->{} t->{}'.format(err, type(err)))
        else:
            cnx.close()
class BugOra21530841(tests.MySQLConnectorTests):
    """BUG#21530841: SELECT FAILS WITH ILLEGAL RESULT SET ERROR WHEN COLUMN
    COUNT IN RESULT > 4096
    """
    def setUp(self):
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.tbl = "Bug21530841"
        self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))

    def tearDown(self):
        self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cnx.close()

    def test_big_column_count(self):
        """A result set with more than 4096 columns must be readable."""
        cur = self.cnx.cursor(raw=False, buffered=False)
        # Create table with 512 Columns
        columns = ", ".join("c{0} INT".format(idx) for idx in range(512))
        cur.execute("CREATE TABLE {0} ({1})".format(self.tbl, columns))
        # Insert 1 record
        cur.execute("INSERT INTO {0}(c1) values (1) ".format(self.tbl))
        self.cnx.commit()
        # Self-join the table 10 times: 5120 columns in the result set.
        aliases = ", ".join("{0} a{1}".format(self.tbl, idx)
                            for idx in range(10))
        cur.execute("SELECT * FROM {0} WHERE a1.c1 > 0".format(aliases))
        cur.fetchone()
        cur.close()
@unittest.skipIf(sys.version_info < (2, 7, 9),
                 "Python 2.7.9+ is required for SSL")
class BugOra25397650(tests.MySQLConnectorTests):
    """BUG#25397650: CERTIFICATE VALIDITY NOT VERIFIED
    """
    def setUp(self):
        self.config = tests.get_mysql_config().copy()
        self.config.pop('unix_socket')
        self.config['host'] = 'localhost'
        self.ca = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'))
        # A second CA certificate that did not sign the server certificate.
        self.ca_1 = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_CA_cert_1.pem'))

    def _verify_cert(self, config):
        """Exercise ssl_verify_cert with a wrong and the correct CA.

        Every successful connection is closed; the previous version
        discarded the connection objects and leaked them.
        """
        # Test with a bad CA
        config['ssl_ca'] = self.ca_1
        config['ssl_verify_cert'] = True
        self.assertRaises(errors.InterfaceError,
                          mysql.connector.connect, **config)
        config['ssl_verify_cert'] = False
        mysql.connector.connect(**config).close()
        # Test with the correct CA
        config['ssl_ca'] = self.ca
        for verify in (True, False):
            config['ssl_verify_cert'] = verify
            mysql.connector.connect(**config).close()

    def test_pure_verify_server_certifcate(self):
        config = self.config.copy()
        config['use_pure'] = True
        self._verify_cert(config)

    @unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
    def test_cext_verify_server_certifcate(self):
        config = self.config.copy()
        config['use_pure'] = False
        self._verify_cert(config)
@unittest.skipIf(tests.MYSQL_VERSION < (5, 6, 39), "skip in older server")
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
class Bug28133321(tests.MySQLConnectorTests):
    """BUG#28133321: FIX INCORRECT COLUMNS NAMES REPRESENTING AGGREGATE
    FUNCTIONS
    """
    tbl = "BUG28133321"
    def setUp(self):
        # Sample menu table, later queried with many aggregate functions.
        create_table = ("CREATE TABLE {} ("
                        " dish_id INT(11) UNSIGNED AUTO_INCREMENT UNIQUE KEY,"
                        " category TEXT,"
                        " dish_name TEXT,"
                        " price FLOAT,"
                        " servings INT,"
                        " order_time TIME) CHARACTER SET utf8"
                        " COLLATE utf8_general_ci")
        config = tests.get_mysql_config()
        cnx = CMySQLConnection(**config)
        try:
            cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        except:
            pass
        cnx.cmd_query(create_table.format(self.tbl))
        cur = cnx.cursor(dictionary=True)
        # The value braces are escaped ({{}}) so the first .format() only
        # fills in the table name, leaving placeholders for the per-row
        # .format(*value) below.
        insert_stmt = ('INSERT INTO {} ('
                       ' category, dish_name, price, servings, order_time'
                       ') VALUES ("{{}}", "{{}}", {{}}, {{}}, "{{}}")'
                       ).format(self.tbl)
        values = [("dinner", "lassanya", 10.53, "2", "00:10"),
                  ("dinner", "hamburger", 9.35, "1", "00:15"),
                  ("dinner", "hamburger whit fries", 10.99, "2", "00:20"),
                  ("dinner", "Pizza", 9.99, "4", "00:30"),
                  ("dessert", "cheescake", 4.95, "1", "00:05"),
                  ("dessert", "cheescake special", 5.95, "2", "00:05")]
        for value in values:
            cur.execute(insert_stmt.format(*value))
        cnx.close()
    def tearDown(self):
        config = tests.get_mysql_config()
        cnx = CMySQLConnection(**config)
        try:
            cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        except:
            pass
        cnx.close()
    def test_columns_name_are_not_bytearray(self):
        """Column names of aggregate expressions and the returned values
        must be strings, never bytearray."""
        sql_statement = ["SELECT",
                         " dish_id,",
                         " category,",
                         " JSON_OBJECTAGG(category, dish_name) as special,",
                         " JSON_ARRAYAGG(dish_name) as dishes,",
                         " GROUP_CONCAT(dish_name) as dishes2,",
                         " price,",
                         " servings,",
                         " ROUND(AVG(price)) AS round_avg_price,",
                         " AVG(price) AS avg_price,",
                         " MIN(price) AS min_price,",
                         " MAX(price) AS max_price,",
                         " MAX(order_time) AS preparation_time,",
                         " STD(servings) as deviation,",
                         " SUM(price) AS sum,",
                         " VARIANCE(price) AS var,",
                         " COUNT(DISTINCT servings) AS cd_servings,",
                         " COUNT(servings) AS c_servings ",
                         "FROM {} ",
                         "GROUP BY category"]
        # Remove JSON functions when testing againsts server version < 5.7.22
        # JSON_OBJECTAGG JSON_ARRAYAGG were introduced on 5.7.22
        if tests.MYSQL_VERSION < (5, 7, 22):
            sql_statement.pop(3)
            sql_statement.pop(3)
        sql_statement = "".join(sql_statement)
        config = tests.get_mysql_config()
        cnx = CMySQLConnection(**config)
        cur = cnx.cursor(dictionary=True)
        cur.execute(sql_statement.format(self.tbl))
        rows = cur.fetchall()
        col_names = [x[0] for x in cur.description]
        # Check the keys/values of the dictionary rows...
        for row in rows:
            for col, val in row.items():
                self.assertTrue(isinstance(col, STRING_TYPES),
                                "The columns name {} is not a string type"
                                "".format(col))
                self.assertFalse(isinstance(col, (bytearray)),
                                 "The columns name {} is a bytearray type"
                                 "".format(col))
                self.assertFalse(isinstance(val, (bytearray)),
                                 "The value {} of column {} is a bytearray type"
                                 "".format(val, col))
        # ...and the names reported in cursor.description.
        for col_name in col_names:
            self.assertTrue(isinstance(col_name, STRING_TYPES),
                            "The columns name {} is not a string type"
                            "".format(col_name))
            self.assertFalse(isinstance(col_name, (bytearray)),
                             "The columns name {} is a bytearray type"
                             "".format(col_name))
class BugOra21947091(tests.MySQLConnectorTests):
    """BUG#21947091: """
    def setUp(self):
        self.config = tests.get_mysql_config()
        self.config.pop('unix_socket')
        self.server = tests.MYSQL_SERVERS[0]
    def _disable_ssl(self):
        # Restart the bootstrapped test server with SSL switched off.
        self.server.stop()
        self.server.wait_down()
        self.server.start(ssl_ca='', ssl_cert='', ssl_key='', ssl=0)
        self.server.wait_up()
        time.sleep(1)
    def _enable_ssl(self):
        # Restart the test server with its default (SSL-enabled) options.
        self.server.stop()
        self.server.wait_down()
        self.server.start()
        self.server.wait_up()
        time.sleep(1)
    def _verify_ssl(self, cnx, available=True):
        # Ssl_version is the empty string when the session is unencrypted.
        cur = cnx.cursor()
        cur.execute("SHOW STATUS LIKE 'Ssl_version'")
        result = cur.fetchall()[0]
        if available:
            self.assertNotEqual(result[1], '')
        else:
            self.assertEqual(result[1], '')
    def test_ssl_disabled_pure(self):
        self.config['use_pure'] = True
        self._test_ssl_modes()
    @unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
    def test_ssl_disabled_cext(self):
        self.config['use_pure'] = False
        self._test_ssl_modes()
    def _test_ssl_modes(self):
        """Check the ssl_disabled option with and without SSL on the server."""
        config = self.config.copy()
        # With SSL on server
        # default
        cnx = mysql.connector.connect(**config)
        self._verify_ssl(cnx)
        # disabled
        config['ssl_disabled'] = True
        cnx = mysql.connector.connect(**config)
        self._verify_ssl(cnx, False)
        self._disable_ssl()
        config = self.config.copy()
        config['ssl_ca'] = tests.SSL_CA
        # Without SSL on server
        try:
            # default
            cnx = mysql.connector.connect(**config)
            self._verify_ssl(cnx, False)
            # disabled
            config['ssl_disabled'] = True
            cnx = mysql.connector.connect(**config)
            self._verify_ssl(cnx, False)
        finally:
            # Always restore SSL so later tests see the default server.
            self._enable_ssl()
class BugOra25589496(tests.MySQLConnectorTests):
    """BUG#25589496: COMMITS RELATED TO "BUG22529828" BROKE BINARY DATA
    HANDLING FOR PYTHON 2.7
    """
    def setUp(self):
        config = tests.get_mysql_config()
        self.cnx = connection.MySQLConnection(**config)
        self.tbl = "Bug25589496"
        self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))

    def tearDown(self):
        self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cnx.close()

    def test_insert_binary(self):
        """A pickled (binary) value must survive a parameterized INSERT."""
        table = """
            CREATE TABLE {0} (
              `id` int(10) unsigned NOT NULL AUTO_INCREMENT PRIMARY KEY,
              `section` VARCHAR(50) NOT NULL,
              `pickled` LONGBLOB NOT NULL
            )
        """.format(self.tbl)
        cursor = self.cnx.cursor()
        cursor.execute(table)
        pickled = pickle.dumps({'a': 'b'}, pickle.HIGHEST_PROTOCOL)
        add_row_q = "INSERT INTO {0} (section, pickled) " \
                    "VALUES (%(section)s, %(pickled)s)".format(self.tbl)
        # cursor.execute() returns None; the previous version bound it to a
        # misleading 'new_row' name.
        cursor.execute(add_row_q, {'section': 'foo', 'pickled': pickled})
        self.cnx.commit()
        self.assertEqual(1, cursor.lastrowid)
        cursor.close()
class BugOra25383644(tests.MySQLConnectorTests):
    """BUG#25383644: LOST SERVER CONNECTION LEAKS POOLED CONNECTIONS
    """
    def setUp(self):
        config = tests.get_mysql_config()
        config["pool_size"] = 3
        self.cnxpool = pooling.MySQLConnectionPool(**config)
        self.mysql_server = tests.MYSQL_SERVERS[0]
    def test_pool_exhaustion(self):
        """Dead connections must go back to the pool.

        The pool holds 3 connections but the loop takes one 4 times while
        the server is stopped underneath it; if failed connections leaked,
        get_connection() would exhaust the pool before the loop finishes.
        """
        sql = "SELECT * FROM dummy"
        i = 4
        while i > 0:
            cnx = self.cnxpool.get_connection()
            cur = cnx.cursor()
            try:
                # Stop the server mid-use so the execute fails.
                self.mysql_server.stop()
                self.mysql_server.wait_down()
                cur.execute(sql)
            except (mysql.connector.errors.OperationalError,
                    mysql.connector.errors.ProgrammingError):
                try:
                    cur.close()
                    cnx.close()
                except mysql.connector.errors.OperationalError:
                    pass
            finally:
                i -= 1
                # Bring the server back so the next iteration can connect.
                if not self.mysql_server.check_running():
                    self.mysql_server.start()
                    self.mysql_server.wait_up()
class BugOra25558885(tests.MySQLConnectorTests):
    """BUG#25558885: ERROR 2013 (LOST CONNECTION TO MYSQL SERVER) USING C
    EXTENSIONS
    """
    def setUp(self):
        pass

    def _long_query(self, config, cursor_class):
        """Run a query that sleeps well past the default timeout."""
        cnx = mysql.connector.connect(**config)
        cursor = cnx.cursor(cursor_class=cursor_class)
        cursor.execute("select sleep(15)")
        cursor.close()
        cnx.disconnect()

    @unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
    def test_cext_cnx(self):
        config = tests.get_mysql_config()
        config["use_pure"] = False
        del config["connection_timeout"]
        self._long_query(
            config, mysql.connector.cursor_cext.CMySQLCursorBufferedRaw)

    def test_pure_cnx(self):
        config = tests.get_mysql_config()
        config["use_pure"] = True
        del config["connection_timeout"]
        self._long_query(config, mysql.connector.cursor.MySQLCursorBufferedRaw)
class BugOra22564149(tests.MySQLConnectorTests):
    """BUG#22564149: CMD_QUERY_ITER ERRONEOUSLY CALLS ".ENCODE('UTF8')" ON
    BYTESTRINGS
    """
    def setUp(self):
        self.tbl = "BugOra22564149"
        self.cnx = connection.MySQLConnection(**tests.get_mysql_config())
        self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cnx.cmd_query("CREATE TABLE {0} (id INT, name VARCHAR(50))"
                           "".format(self.tbl))

    def tearDown(self):
        self.cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cnx.close()

    def test_cmd_query_iter(self):
        """cmd_query_iter() must accept an already UTF-8 encoded bytestring."""
        stmt = (u"SELECT 1; INSERT INTO {0} VALUES (1, 'João'),(2, 'André'); "
                u"SELECT 3")
        encoded = stmt.format(self.tbl).encode("utf-8")
        results = []
        for result in self.cnx.cmd_query_iter(encoded):
            results.append(result)
            if "columns" in result:
                results.append(self.cnx.get_rows())
class BugOra24659561(tests.MySQLConnectorTests):
    """BUG#24659561: LOOKUPERROR: UNKNOWN ENCODING: UTF8MB4
    """
    def setUp(self):
        config = tests.get_mysql_config()
        config["charset"] = "utf8mb4"
        config["collation"] = "utf8mb4_general_ci"
        self.tbl = "BugOra24659561"
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()
        self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cur.execute("CREATE TABLE {0} (id INT, name VARCHAR(100))"
                         "".format(self.tbl))

    def tearDown(self):
        self.cur.execute("DROP TABLE IF EXISTS {0}".format(self.tbl))
        self.cur.close()
        # Close the connection opened in setUp (previously leaked).
        self.cnx.close()

    def test_executemany_utf8mb4(self):
        """executemany() must work with the utf8mb4 charset/collation."""
        self.cur.executemany(
            "INSERT INTO {0} VALUES (%s, %s)".format(self.tbl),
            [(1, "Nuno"), (2, "Amitabh"), (3, "Rafael")]
        )
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
class BugOra27991948(tests.MySQLConnectorTests):
    """BUG#27991948: UNREAD_RESULT IS NOT UNSET AFTER INVOKE GET_ROWS ON C-EXT
    """
    test_sql_single_result = "show variables like '%port%'"
    cnx_cext = None
    cnx_cext_raw = None

    def setUp(self):
        config_cext = tests.get_mysql_config()
        config_cext["use_pure"] = False
        self.cnx_cext = mysql.connector.connect(**config_cext)

    def tearDown(self):
        self.cnx_cext.close()

    def test_automatically_set_of_unread_rows(self):
        """Test unread_rows is automatically set after fetchall()"""
        cnx = self.cnx_cext
        query = self.test_sql_single_result
        # Consuming the rows via get_rows() must clear unread_result on
        # its own, without an explicit free_result call.
        cnx.cmd_query(query)
        self.assertTrue(cnx.unread_result,
                        "unread_rows is expected to be True")
        _ = cnx.get_rows()
        self.assertFalse(cnx.unread_result,
                         "unread_rows was not set to False")
        # Query execution must not raise InternalError: Unread result found
        cnx.cmd_query(query)
        _ = cnx.get_rows()
        # The same must hold when rows are consumed via cursor.fetchall().
        cursor = cnx.cursor()
        cursor.execute(query)
        self.assertTrue(cnx.unread_result,
                        "unread_rows is expected to be True")
        _ = cursor.fetchall()
        self.assertFalse(cnx.unread_result,
                         "unread_rows was not set to False")
        # Query execution must not raise InternalError: Unread result found
        cursor.execute(query)
        _ = cursor.fetchall()
        cursor.close()
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 1),
                 "Collation utf8mb4_0900_ai_ci not available on 5.7.x")
class BugOra27277964(tests.MySQLConnectorTests):
    """BUG#27277964: NEW UTF8MB4 COLLATIONS NOT SUPPORTED
    """

    def setUp(self):
        """Connect with the utf8mb4_0900_ai_ci collation and build a table."""
        self.tbl = "BugOra27277964"
        config = tests.get_mysql_config()
        config["charset"] = "utf8mb4"
        config["collation"] = "utf8mb4_0900_ai_ci"
        self.cnx = connection.MySQLConnection(**config)
        self.cur = self.cnx.cursor()
        self.cur.execute("DROP TABLE IF EXISTS %s" % self.tbl)
        self.cur.execute(
            "CREATE TABLE %s (id INT, name VARCHAR(100))" % self.tbl)

    def tearDown(self):
        self.cur.execute("DROP TABLE IF EXISTS %s" % self.tbl)
        self.cur.close()

    def test_execute_utf8mb4_collation(self):
        """A plain INSERT must succeed under the new collation."""
        self.cur.execute("INSERT INTO %s VALUES (1, 'Nuno')" % self.tbl)
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 11),
                 "Not support for TLSv1.2 or not available by default")
class Bug26484601(tests.MySQLConnectorTests):
    """UNABLE TO CONNECT TO A MYSQL SERVER USING TLSV1.2"""

    def try_connect(self, tls_version, expected_ssl_version):
        """Connect forcing ``tls_versions`` and verify the negotiated version.

        tls_version: list of TLS version names, or None to let the server
            pick during the handshake.
        expected_ssl_version: exact ``Ssl_version`` string to expect, or a
            tuple such as (1, 2) meaning "this version or greater".
        """
        config = tests.get_mysql_config().copy()
        config['tls_versions'] = tls_version
        config['ssl_ca'] = ''
        cnx = connection.MySQLConnection(**config)
        # Ssl_version reports the TLS protocol used by this session.
        query = "SHOW STATUS LIKE 'ssl_version%'"
        cur = cnx.cursor()
        cur.execute(query)
        res = cur.fetchall()
        if isinstance(expected_ssl_version, tuple):
            msg = ("Not using the expected or greater TLS version: {}, instead"
                   " the connection used: {}.")
            # Get the version as tuple, e.g. "TLSv1.2" -> (1, 2)
            server_tls = tuple([int(d) for d in
                                (res[0][1].split('v')[1].split("."))])
            self.assertGreaterEqual(server_tls, expected_ssl_version,
                                    msg.format(expected_ssl_version, res))
        else:
            msg = ("Not using the expected TLS version: {}, instead the "
                   "connection used: {}.")
            self.assertEqual(res[0][1], expected_ssl_version,
                             msg.format(expected_ssl_version, res))

    def test_get_connection_using_given_TLS_version(self):
        """Test connect using the given TLS version
        The system variable tls_version determines which protocols the
        server is permitted to use from those that are available (note#3).
        +---------------+-----------------------+
        | Variable_name | Value                 |
        +---------------+-----------------------+
        | tls_version   | TLSv1,TLSv1.1,TLSv1.2 |
        +---------------+-----------------------+
        To restrict and permit only connections with a specific version, the
        variable can be set with those specific versions that will be allowed,
        changing the configuration file.
        [mysqld]
        tls_version=TLSv1.1,TLSv1.2
        This test will take advantage of the fact that the connector can
        request to use a defined version of TLS to test that the connector can
        connect to the server using such version instead of changing the
        configuration of the server, which would imply stopping and restarting
        the server, increasing the time to run the test. In addition the
        test relies on the default value of the 'tls_version' variable being
        set to 'TLSv1,TLSv1.1,TLSv1.2' (note#2).
        On this test a connection will be
        attempted forcing to use a determined version of TLS (all of them
        must be successful), finally making sure that the connection was done
        using the given TLS_version using the ssl.version() method (note#3).
        Notes:
        1.- tls_version is only available on MySQL 5.7
        2.- 5.6.39 does not support TLSv1.2 so this test will be skipped.
            Currently in 5.7.21 it is set to the default values
            TLSv1,TLSv1.1,TLSv1.2, same as in 8.0.11+. This test will only be
            run in such versions and above.
        3.- The ssl.version() method returns the version of TLS used during
            the connection; however the version returned using ssl.cipher() is
            not correct on Windows, it only indicates the newest version
            supported.
        """
        test_tls_versions = check_tls_versions_support(["TLSv1.1", "TLSv1.2"])
        if not test_tls_versions:
            self.fail("No TLS version to test: {}".format(test_tls_versions))
        for tls_v_name in test_tls_versions:
            self.try_connect([tls_v_name], tls_v_name)

    def test_get_connection_using_servers_TLS_version(self):
        """Test connect using the server's default TLS version
        The TLS version used during the secured connection is chosen by the
        server at the time the SSL handshake is made if the connector does not
        specify any particular version to use. The default value of the
        ssl_version option is None; this only means that no specific version
        is requested, so the server chooses one during the handshake.
        """
        # The default value for the connector 'ssl_version' is None
        # For the expected version, the server will use the latest version of
        # TLS available "TLSv1.2" or newer.
        tls_version = None
        self.try_connect(tls_version, (1, 2))
@unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
class BugOra27650437(tests.MySQLConnectorTests):
    """BUG#27650437: DIFFERENCES PYTHON AND C-EXT FOR GET_ROW()/GET_ROWS()

    Runs the same query through a pure-Python and a C-extension connection
    (plain and raw) and asserts both implementations return identical
    results for every fetch API.
    """
    # Small, always-available result set queried by every check.
    test_sql_single_result = "show variables like '%port%'"
    # Connection handles, created in setUp / closed in tearDown.
    cnx_pure = None
    cnx_cext = None
    cnx_pure_raw = None
    cnx_cext_raw = None

    def setUp(self):
        """Open the four connection flavors: pure/c-ext x normal/raw."""
        config_pure = tests.get_mysql_config()
        config_pure["use_pure"] = True
        self.cnx_pure = mysql.connector.connect(**config_pure)
        config_cext = tests.get_mysql_config()
        config_cext["use_pure"] = False
        self.cnx_cext = mysql.connector.connect(**config_cext)
        config_pure_raw = tests.get_mysql_config()
        config_pure_raw["use_pure"] = True
        config_pure_raw["raw"] = True
        self.cnx_pure_raw = mysql.connector.connect(**config_pure_raw)
        config_cext_raw = tests.get_mysql_config()
        config_cext_raw["use_pure"] = False
        config_cext_raw["raw"] = True
        self.cnx_cext_raw = mysql.connector.connect(**config_cext_raw)

    def tearDown(self):
        self.cnx_pure.close()
        self.cnx_cext.close()
        self.cnx_pure_raw.close()
        self.cnx_cext_raw.close()

    def test_get_row(self):
        """Test result from get_row is the same in pure and using c-ext"""
        self.cnx_pure.cmd_query(self.test_sql_single_result)
        res_pure = self.cnx_pure.get_row()
        self.cnx_cext.cmd_query(self.test_sql_single_result)
        res_cext = self.cnx_cext.get_row()
        self.assertEqual(res_pure, res_cext, "Result using pure: {} differs"
                         "from c-ext result {}".format(res_pure, res_cext))

    def test_get_rows(self):
        """Test results from get_rows are the same in pure and using c-ext"""
        self.cnx_pure.cmd_query(self.test_sql_single_result)
        res_pure = self.cnx_pure.get_rows()
        self.cnx_cext.cmd_query(self.test_sql_single_result)
        res_cext = self.cnx_cext.get_rows()
        self.assertEqual(res_pure, res_cext, "Result using pure: {} differs"
                         "from c-ext result {}".format(res_pure, res_cext))

    def test_get_row_raw(self):
        """Test result from get_row is the same in pure and using c-ext"""
        self.cnx_pure_raw.cmd_query(self.test_sql_single_result)
        res_pure = self.cnx_pure_raw.get_row()
        self.cnx_cext_raw.cmd_query(self.test_sql_single_result)
        res_cext = self.cnx_cext_raw.get_row()
        self.assertEqual(res_pure, res_cext, "Result using pure: {} differs"
                         "from c-ext result {}".format(res_pure, res_cext))

    def test_get_rows_raw(self):
        """Test results from get_rows are the same in pure and using c-ext"""
        self.cnx_pure_raw.cmd_query(self.test_sql_single_result)
        res_pure = self.cnx_pure_raw.get_rows()
        self.cnx_cext_raw.cmd_query(self.test_sql_single_result)
        res_cext = self.cnx_cext_raw.get_rows()
        self.assertEqual(res_pure, res_cext, "Result using pure: {} differs"
                         "from c-ext result {}".format(res_pure, res_cext))

    def _test_fetchone(self, cur_pure, cur_cext):
        """Test result from fetchone is the same in pure and using c-ext"""
        cur_pure.execute(self.test_sql_single_result)
        res_pure = cur_pure.fetchone()
        # Drain the remaining rows so the connection is reusable.
        _ = cur_pure.fetchall()
        cur_cext.execute(self.test_sql_single_result)
        res_cext = cur_cext.fetchone()
        _ = cur_cext.fetchall()
        self.cnx_cext.free_result()
        self.assertEqual(res_pure, res_cext, "Result using pure: {} differs"
                         "from c-ext result {}".format(res_pure, res_cext))

    def _test_fetchmany(self, cur_pure, cur_cext):
        """Test results from fetchmany are the same in pure and using c-ext"""
        cur_pure.execute(self.test_sql_single_result)
        res_pure = cur_pure.fetchmany()
        cur_cext.execute(self.test_sql_single_result)
        res_cext = cur_cext.fetchmany()
        self.assertEqual(res_pure, res_cext, "Result using pure: {} differs"
                         "from c-ext result {}".format(res_pure, res_cext))
        res_pure = cur_pure.fetchmany(2)
        res_cext = cur_cext.fetchmany(2)
        _ = cur_pure.fetchall()
        _ = cur_cext.fetchall()
        self.cnx_cext.free_result()
        self.assertEqual(res_pure, res_cext, "Result using pure: {} differs"
                         "from c-ext result {}".format(res_pure, res_cext))

    def _test_fetch_fetchall(self, cur_pure, cur_cext):
        """Test results from fetchall are the same in pure and using c-ext"""
        cur_pure.execute(self.test_sql_single_result)
        res_pure = cur_pure.fetchall()
        cur_cext.execute(self.test_sql_single_result)
        res_cext = cur_cext.fetchall()
        self.cnx_cext.free_result()
        self.assertEqual(res_pure, res_cext, "Result using pure: {} differs"
                         "from c-ext result {}".format(res_pure, res_cext))

    def test_cursor(self):
        """Test results from cursor are the same in pure and using c-ext"""
        cur_pure = self.cnx_pure.cursor()
        cur_cext = self.cnx_cext.cursor()
        self._test_fetchone(cur_pure, cur_cext)
        self._test_fetchmany(cur_pure, cur_cext)
        self._test_fetch_fetchall(cur_pure, cur_cext)
        cur_pure.close()
        cur_cext.close()

    def test_cursor_raw(self):
        """Test results from cursor raw are the same in pure and using c-ext"""
        raw = True
        cur_pure_raw = self.cnx_pure.cursor(raw=raw)
        cur_cext_raw = self.cnx_cext.cursor(raw=raw)
        self._test_fetchone(cur_pure_raw, cur_cext_raw)
        self._test_fetchmany(cur_pure_raw, cur_cext_raw)
        self._test_fetch_fetchall(cur_pure_raw, cur_cext_raw)
        cur_pure_raw.close()
        cur_cext_raw.close()

    def test_cursor_buffered(self):
        """Test results from cursor buffered are the same in pure or c-ext"""
        buffered = True
        cur_pure_buffered = self.cnx_pure.cursor(buffered=buffered)
        cur_cext_buffered = self.cnx_cext.cursor(buffered=buffered)
        self._test_fetchone(cur_pure_buffered, cur_cext_buffered)
        self._test_fetchmany(cur_pure_buffered, cur_cext_buffered)
        self._test_fetch_fetchall(cur_pure_buffered, cur_cext_buffered)
        cur_pure_buffered.close()
        cur_cext_buffered.close()

    def test_cursor_dictionary(self):
        """Test results from cursor dictionary are the same in pure or c-ext"""
        cur_pure_dictionary = self.cnx_pure.cursor(dictionary=True)
        cur_cext_dictionary = self.cnx_cext.cursor(dictionary=True)
        self._test_fetchone(cur_pure_dictionary, cur_cext_dictionary)
        self._test_fetchmany(cur_pure_dictionary, cur_cext_dictionary)
        self._test_fetch_fetchall(cur_pure_dictionary, cur_cext_dictionary)
        cur_pure_dictionary.close()
        cur_cext_dictionary.close()

    def test_cursor_dictionary_buf(self):
        """Test results from buffered dictionary cursor in pure and c-ext"""
        cur_pure = self.cnx_pure.cursor(dictionary=True,
                                        buffered=True)
        cur_cext = self.cnx_cext.cursor(dictionary=True,
                                        buffered=True)
        self._test_fetchone(cur_pure, cur_cext)
        self._test_fetchmany(cur_pure, cur_cext)
        self._test_fetch_fetchall(cur_pure, cur_cext)
        cur_pure.close()
        cur_cext.close()
class BugOra28239074(tests.MySQLConnectorTests):
    """BUG#28239074: CURSOR DICTIONARY DOES NOT RETURN DICTIONARY TYPE RESULTS
    """
    table = "bug28239074"

    def setUp(self):
        """Create the table and insert rows containing control/ASCII chars."""
        config_pure = tests.get_mysql_config()
        config_pure["use_pure"] = True
        self.cnx = mysql.connector.connect(**config_pure)
        cur = self.cnx.cursor(dictionary=True)
        cur.execute("DROP TABLE IF EXISTS {0}".format(self.table))
        cur.execute("CREATE TABLE {0}(a char(50) ,b int) "
                    "DEFAULT CHARSET utf8".format(self.table))
        data = [(chr(1), 1), ('s', 2), (chr(120), 3), (chr(121), 4),
                (chr(127), 5)]
        cur.executemany("INSERT INTO {0} (a, b) VALUES "
                        "(%s, %s)".format(self.table), data)

    def tearDown(self):
        self.cnx.cmd_query("DROP TABLE IF EXISTS {}".format(self.table))
        self.cnx.close()

    def test_cursor_dict(self):
        """fetchone(), fetchall() and iteration must all yield dict rows."""
        exp = [
            {u'a': u'\x01', u'b': 1},
            {u'a': u's', u'b': 2},
            {u'a': u'\x78', u'b': 3},
            {u'a': u'\x79', u'b': 4},
            {u'a': u'\x7f', u'b': 5}
        ]
        cur = self.cnx.cursor(dictionary=True)
        # Test fetchone
        cur.execute("SELECT * FROM {}".format(self.table))
        i = 0
        row = cur.fetchone()
        while row is not None:
            self.assertTrue(isinstance(row, dict))
            self.assertEqual(exp[i], row, "row {} is not equal to expected row"
                             " {}".format(row, exp[i]))
            row = cur.fetchone()
            i += 1
        # Test fetchall
        cur.execute("SELECT * FROM {}".format(self.table))
        rows = cur.fetchall()
        # Fix: the failure message previously contained a bare "{}" that was
        # never formatted; include the actual and expected rows.
        self.assertEqual(exp, rows, "rows {} are not equal to expected rows"
                         " {}".format(rows, exp))
        # Test for each in cursor
        cur.execute("SELECT * FROM {}".format(self.table))
        i = 0
        for row in cur:
            self.assertTrue(isinstance(row, dict))
            self.assertEqual(exp[i], row, "row {} is not equal to expected row"
                             " {}".format(row, exp[i]))
            i += 1
        # Release the cursor (was previously left open).
        cur.close()
class BugOra27364914(tests.MySQLConnectorTests):
    """BUG#27364914: CURSOR PREPARED STATEMENTS DO NOT CONVERT STRINGS
    """
    # Character sets exercised; one helper table per charset.
    charsets_list = ('gbk', 'sjis', 'big5', 'utf8', 'utf8mb4', 'latin1')

    def setUp(self):
        """Create one test table per character set."""
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        cur = cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
        for charset in self.charsets_list:
            tablename = '{0}_ps_test'.format(charset)
            cur.execute("DROP TABLE IF EXISTS {0}".format(tablename))
            table = (
                "CREATE TABLE {table} ("
                " id INT AUTO_INCREMENT KEY,"
                " c1 VARCHAR(40),"
                " val2 datetime"
                ") CHARACTER SET '{charset}'"
            ).format(table=tablename, charset=charset)
            cur.execute(table)
        cnx.commit()
        cur.close()
        cnx.close()

    def tearDown(self):
        cnx = connection.MySQLConnection(**tests.get_mysql_config())
        for charset in self.charsets_list:
            tablename = '{0}_ps_test'.format(charset)
            cnx.cmd_query("DROP TABLE IF EXISTS {0}".format(tablename))
        cnx.close()

    def _test_charset(self, charset, data):
        """Round-trip ``data`` through the ``charset`` table and verify
        prepared-statement results come back as (unicode) strings."""
        config = tests.get_mysql_config()
        config['charset'] = charset
        config['use_unicode'] = True
        # Fix: connect with the per-charset config built above; previously
        # tests.get_mysql_config() was passed again, silently discarding the
        # charset/use_unicode settings this test exists to exercise.
        self.cnx = connection.MySQLConnection(**config)
        cur = self.cnx.cursor(cursor_class=cursor.MySQLCursorPrepared)
        tablename = '{0}_ps_test'.format(charset)
        cur.execute("TRUNCATE {0}".format(tablename))
        self.cnx.commit()
        insert = "INSERT INTO {0} (c1) VALUES (%s)".format(tablename)
        for value in data:
            cur.execute(insert, (value,))
        self.cnx.commit()
        cur.execute("SELECT id, c1 FROM {0} ORDER BY id".format(tablename))
        for row in cur:
            self.assertTrue(isinstance(row[1], STRING_TYPES),
                            "The value is expected to be a string")
            # id is 1-based AUTO_INCREMENT, so row id N maps to data[N-1].
            self.assertEqual(data[row[0] - 1], row[1])
        cur.close()
        self.cnx.close()

    @foreach_cnx()
    def test_cursor_prepared_statement_with_charset_gbk(self):
        self._test_charset('gbk', [u'赵孟頫', u'赵\孟\頫\\', u'遜'])

    @foreach_cnx()
    def test_cursor_prepared_statement_with_charset_sjis(self):
        self._test_charset('sjis', ['\u005c'])

    @foreach_cnx()
    def test_cursor_prepared_statement_with_charset_big5(self):
        self._test_charset('big5', ['\u5C62'])

    @foreach_cnx()
    def test_cursor_prepared_statement_with_charset_utf8mb4(self):
        self._test_charset('utf8mb4', ['\u5C62'])

    @foreach_cnx()
    def test_cursor_prepared_statement_with_charset_utf8(self):
        self._test_charset('utf8', [u'データベース', u'데이터베이스'])

    @foreach_cnx()
    def test_cursor_prepared_statement_with_charset_latin1(self):
        self._test_charset('latin1', [u'ñ', u'Ñ'])
class BugOra27802700(tests.MySQLConnectorTests):
    """BUG#27802700: A BYTEARRAY IS RETURNED FROM USING get_rows METHOD
    """
    table_name = "BugOra27802700"
    # The outer {} pairs are filled with table/column names up front; the
    # doubled {{value}} survives as a literal {value} placeholder for the
    # per-row value formatted later.
    insert_stmt = u"INSERT INTO {} ({}) values ({{value}})"

    def setUp(self):
        """Create the helper table with one column per tested type."""
        config = tests.get_mysql_config()
        config['charset'] = "utf8"
        config['use_unicode'] = True
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {}".format(self.table_name))
        cur.execute("CREATE TABLE IF NOT EXISTS {} ("
                    " id INT(11) UNSIGNED AUTO_INCREMENT UNIQUE KEY,"
                    " int_long INT,"
                    " time TIME,"
                    " date DATE,"
                    " datetime DATETIME,"
                    " var_char VARCHAR(50),"
                    " long_blob LONGBLOB,"
                    " str TEXT) CHARACTER SET utf8"
                    " COLLATE utf8_general_ci".format(self.table_name))
        # Fix: release the setup connection; it was previously leaked.
        cur.close()
        cnx.close()

    def tearDown(self):
        """Best-effort drop of the helper table."""
        config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        try:
            cur.execute("DROP TABLE IF EXISTS {}".format(self.table_name))
        except errors.Error:
            # Cleanup is best-effort and must never fail the run, but the
            # previous bare ``except: pass`` also hid KeyboardInterrupt etc.
            pass
        finally:
            # Fix: close cursor and connection (previously leaked).
            cur.close()
            cnx.close()

    def run_test_retrieve_stored_type(self, stm, test_values, expected_values,
                                      column, expected_type):
        """Insert ``test_values`` via ``stm`` then verify the last rows of
        ``column`` equal ``expected_values`` and have ``expected_type``."""
        config = tests.get_mysql_config()
        config['charset'] = "utf8"
        config['use_unicode'] = True
        config['autocommit'] = True
        cnx = connection.MySQLConnection(**config)
        cur = cnx.cursor()
        for test_value in test_values:
            cnx.cmd_query(stm.format(value=test_value))
        qry = "SELECT {column} FROM {table} ORDER BY id"
        cur.execute(qry.format(column=column, table=self.table_name))
        # Keep only the rows inserted by this invocation (the tail).
        rows = cnx.get_rows()[0][len(test_values) * (-1):]
        for returned_val, expected_value in zip(rows, expected_values):
            self.assertEqual(returned_val[0], expected_value)
            self.assertTrue(isinstance(returned_val[0], expected_type))
        cur.close()
        cnx.close()

    @foreach_cnx()
    def test_retrieve_stored_int_long(self):
        column = "int_long"
        stm = self.insert_stmt.format(self.table_name, column)
        test_values = ["-12345", "0", "12345"]
        expected_values = [-12345, 0, 12345]
        if PY2:
            expected_type = (int, long)
        else:
            expected_type = (int)
        self.run_test_retrieve_stored_type(stm, test_values, expected_values,
                                           column, expected_type)

    @foreach_cnx()
    def test_retrieve_stored_str(self):
        column = "str"
        stm = self.insert_stmt.format(self.table_name, column)
        test_values = ['\' \'', '\'some text\'', u'\'データベース\'',
                       '\'"12345"\'']
        expected_values = [' ', 'some text', u'データベース', '"12345"']
        expected_type = STRING_TYPES
        self.run_test_retrieve_stored_type(stm, test_values, expected_values,
                                           column, expected_type)

    @foreach_cnx()
    def test_retrieve_stored_blob(self):
        column = "long_blob"
        stm = self.insert_stmt.format(self.table_name, column)
        test_values = ['\' \'', '\'some text\'', u'\'データベース\'',
                       "\"'12345'\""]
        expected_values = [b' ', b'some text', b'\xe3\x83\x87\xe3\x83\xbc\xe3'
                           b'\x82\xbf\xe3\x83\x99\xe3\x83\xbc\xe3\x82\xb9'
                           if PY2 else u'データベース'.encode("utf-8"),
                           b"'12345'"]
        expected_type = bytes
        self.run_test_retrieve_stored_type(stm, test_values, expected_values,
                                           column, expected_type)

    @foreach_cnx()
    def test_retrieve_stored_varchar(self):
        column = "var_char"
        stm = self.insert_stmt.format(self.table_name, column)
        test_values = ['\' \'', '\'some text\'', u'\'データベース\'',
                       "'12345'"]
        expected_values = [' ', 'some text', u'データベース', "12345"]
        expected_type = STRING_TYPES
        self.run_test_retrieve_stored_type(stm, test_values, expected_values,
                                           column, expected_type)

    @foreach_cnx()
    def test_retrieve_stored_datetime_types(self):
        column = "datetime"
        stm = self.insert_stmt.format(self.table_name, column)
        test_values = ["cast('1972-01-01 00:42:49.000000' as DATETIME)",
                       "cast('2018-01-01 23:59:59.000000' as DATETIME)"]
        expected_values = [datetime(1972, 1, 1, 0, 42, 49),
                           datetime(2018, 1, 1, 23, 59, 59)]
        expected_type = datetime
        self.run_test_retrieve_stored_type(stm, test_values, expected_values,
                                           column, expected_type)

    @foreach_cnx()
    def test_retrieve_stored_date_types(self):
        column = "date"
        stm = self.insert_stmt.format(self.table_name, column)
        test_values = ["DATE('1972-01-01')",
                       "DATE('2018-12-31')"]
        expected_values = [date(1972, 1, 1),
                           date(2018, 12, 31)]
        expected_type = date
        self.run_test_retrieve_stored_type(stm, test_values, expected_values,
                                           column, expected_type)

    @foreach_cnx()
    def test_retrieve_stored_time_types(self):
        column = "time"
        stm = self.insert_stmt.format(self.table_name, column)
        test_values = ["TIME('00:42:49.00000')",
                       "TIME('23:59:59.00000')"]
        expected_values = [timedelta(hours=0, minutes=42, seconds=49),
                           timedelta(hours=23, minutes=59, seconds=59)]
        expected_type = timedelta
        self.run_test_retrieve_stored_type(stm, test_values, expected_values,
                                           column, expected_type)
class BugOra27277937(tests.MySQLConnectorTests):
    """BUG#27277937: CONFUSING ERROR MESSAGE WHEN SPECIFYING UNSUPPORTED
    COLLATION
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_invalid_collation(self):
        """An unknown collation must raise ProgrammingError with a clear
        message."""
        config = tests.get_mysql_config()
        config["charset"] = "utf8"
        config["collation"] = "foobar"
        self.cnx = connection.MySQLConnection()
        raised = False
        try:
            self.cnx.connect(**config)
        except errors.ProgrammingError as err:
            raised = True
            self.assertEqual(err.msg, "Collation 'foobar' unknown.")
        if not raised:
            self.fail("A ProgrammingError was expected")
class BugOra28188883(tests.MySQLConnectorTests):
    """BUG#28188883: DEPRECATED UTF8 IS THE DEFAULT CHARACTER SET IN 8.0

    (The docstring previously cited BUG#27277937, which belongs to the
    collation-error test above.)
    """

    def setUp(self):
        # Remove charset from the connection configuration if is set, so the
        # default charset 'utf8mb4' is used for each connection
        self.config = tests.get_mysql_config().copy()
        if "charset" in self.config:
            # Fix: remove only the "charset" key; ``del self.config`` deleted
            # the whole attribute, breaking every later use of self.config.
            del self.config["charset"]

    @foreach_cnx()
    def test_utf8mb4_default_charset(self):
        """With no explicit charset, connections must default to utf8mb4 and
        round-trip 4-byte characters (emoji)."""
        self.assertEqual(self.cnx.charset, "utf8mb4")
        data = [(1, u'🐬'), (2, u'🐍'), (3, u'🐶')]
        tbl = "BugOra28188883"
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {0}".format(tbl))
        cur.execute("CREATE TABLE {0} (id INT, name VARCHAR(100)) "
                    "DEFAULT CHARSET utf8mb4".format(tbl))
        stmt = "INSERT INTO {0} (id, name) VALUES (%s, %s)".format(tbl)
        cur.executemany(stmt, data)
        cur.execute("SELECT id, name FROM {0}".format(tbl))
        self.assertEqual(data, cur.fetchall())
        cur.execute("DROP TABLE IF EXISTS {0}".format(tbl))
        cur.close()
        self.cnx.close()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 23),
                 "MySQL 5.7.23+ is required for VERIFY_IDENTITY")
@unittest.skipIf(sys.version_info < (2, 7, 9),
                 "Python 2.7.9+ is required for SSL")
class BugOra27434751(tests.MySQLConnectorTests):
    """BUG#27434751: MYSQL.CONNECTOR HAS NO TLS/SSL OPTION TO VERIFY SERVER NAME
    """

    def setUp(self):
        """Build an SSL connection config from the test certificates."""
        ssl_ca = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_CA_cert.pem'))
        ssl_cert = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_client_cert.pem'))
        ssl_key = os.path.abspath(
            os.path.join(tests.SSL_DIR, 'tests_client_key.pem'))
        self.config = tests.get_mysql_config()
        # Force a TCP connection so the certificate host name matters.
        self.config.pop("unix_socket")
        self.config["ssl_ca"] = ssl_ca
        self.config["ssl_cert"] = ssl_cert
        self.config["ssl_key"] = ssl_key
        self.config["ssl_verify_cert"] = True

    def _verify_server_name_cnx(self, use_pure=True):
        """Exercise ssl_verify_identity with matching/mismatching host names.

        Assumes the server certificate is issued for "localhost", so
        "127.0.0.1" does not match its subject name.
        """
        config = self.config.copy()
        config["use_pure"] = use_pure
        # Setting an invalid host name against a server certificate
        config["host"] = "127.0.0.1"
        # Should connect with ssl_verify_identity=False
        config["ssl_verify_identity"] = False
        cnx = mysql.connector.connect(**config)
        cnx.close()
        # Should fail to connect with ssl_verify_identity=True
        config["ssl_verify_identity"] = True
        self.assertRaises(errors.InterfaceError, mysql.connector.connect,
                          **config)
        # Should connect with the correct host name and ssl_verify_identity=True
        config["host"] = "localhost"
        cnx = mysql.connector.connect(**config)
        cnx.close()

    @unittest.skipIf(not CMySQLConnection, ERR_NO_CEXT)
    def test_verify_server_name_cext_cnx(self):
        self._verify_server_name_cnx(use_pure=False)

    def test_verify_server_name_pure_cnx(self):
        self._verify_server_name_cnx(use_pure=True)
@unittest.skipIf(CMySQLConnection, "Test only available without C Extension")
class BugOra27794178(tests.MySQLConnectorTests):
    """BUG#27794178: USING USE_PURE=FALSE SHOULD RAISE AN ERROR WHEN CEXT IS NOT
    AVAILABLE
    """

    def test_connection_use_pure(self):
        """Without use_pure the connection succeeds; forcing use_pure=False
        must raise ImportError when the C extension is absent."""
        config = tests.get_mysql_config().copy()
        config.pop("use_pure", None)
        cnx = mysql.connector.connect(**config)
        cnx.close()
        # Force using C Extension should fail if not available
        config["use_pure"] = False
        self.assertRaises(ImportError, mysql.connector.connect, **config)
class Bug27897881(tests.MySQLConnectorTests):
    """BUG#27897881: Fix typo in BLOB data conversion
    """

    def setUp(self):
        """Create a table with a LONGBLOB column."""
        self.config = tests.get_mysql_config()
        cnx = connection.MySQLConnection(**self.config)
        cursor = cnx.cursor()
        self.tbl = 'Bug27897881'
        cursor.execute("DROP TABLE IF EXISTS %s" % self.tbl)
        create = ('CREATE TABLE {0}(col1 INT NOT NULL, col2 LONGBLOB, '
                  'PRIMARY KEY(col1))'.format(self.tbl))
        cursor.execute(create)
        cursor.close()
        cnx.close()

    def tearDown(self):
        cnx = connection.MySQLConnection(**self.config)
        cursor = cnx.cursor()
        cursor.execute("DROP TABLE IF EXISTS {}".format(self.tbl))
        cursor.close()
        cnx.close()

    @foreach_cnx()
    def test_retrieve_from_LONGBLOB(self):
        """LONGBLOB values must come back as bytes, unmangled."""
        cnx_config = self.config.copy()
        cnx_config['charset'] = "utf8"
        cnx_config['use_unicode'] = True
        cnx = connection.MySQLConnection(**cnx_config)
        cur = cnx.cursor()
        # Empty blob produces index error.
        # "12345" handle as datetime in JSON produced index error.
        # LONGBLOB can store big data
        test_values = ["", "12345", '"54321"', "A"*(2**20)]
        expected_values = [b"", b"12345", b'"54321"', b"A"*(2**20)]
        stm = "INSERT INTO {} (col1, col2) VALUES ('{}', '{}')"
        for num, test_value in zip(range(len(test_values)), test_values):
            cur.execute(stm.format(self.tbl, num, test_value))
        stm = "SELECT * FROM {} WHERE col1 like '{}'"
        for num, expected_value in zip(range(len(test_values)), expected_values):
            cur.execute(stm.format(self.tbl, num))
            row = cur.fetchall()[0]
            self.assertEqual(row[1], expected_value, "value {} is not "
                             "the expected {}".format(row[1], expected_value))
        cur.close()
        cnx.close()
class BugOra29324966(tests.MySQLConnectorTests):
    """BUG#29324966: ADD MISSING USERNAME CONNECTION ARGUMENT FOR DRIVER
    COMPATIBILITY.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @foreach_cnx()
    def test_connection_args_compatibility(self):
        """The alias argument names must work in place of the canonical ones."""
        config = self.config.copy()
        # Replace each canonical key with its driver-compatibility alias.
        config["username"] = config.pop("user")
        config["passwd"] = config.pop("password")
        config["db"] = config.pop("database")
        config["connect_timeout"] = config.pop("connection_timeout")
        cnx = self.cnx.__class__(**config)
        cnx.close()
@unittest.skipIf(tests.MYSQL_VERSION < (8, 0, 17),
                 "MySQL 8.0.17+ is required for utf8mb4_0900_bin collation")
class BugOra29855733(tests.MySQLConnectorTests):
    """BUG#29855733: ERROR DURING THE CLASSIC CONNECTION WITH CHARSET AND
    COLLATION SPECIFIED.
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @foreach_cnx()
    def test_connection_collation_utf8mb4_0900_bin(self):
        """Connecting with charset utf8mb4 and the 0900_bin collation must
        succeed."""
        config = dict(self.config)
        config.update(
            username=config["user"],
            passwd=config["password"],
            charset="utf8mb4",
            collation="utf8mb4_0900_bin",
        )
        cnx = self.cnx.__class__(**config)
        cnx.close()
class BugOra25349794(tests.MySQLConnectorTests):
    """BUG#25349794: ADD READ_DEFAULT_FILE ARGUMENT FOR CONNECT().
    """

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @foreach_cnx()
    def test_read_default_file_alias(self):
        """read_default_file must be honored as an option-file alias."""
        option_file = os.path.join("tests", "data", "option_files", "pool.cnf")
        config = tests.get_mysql_config()
        if tests.MYSQL_VERSION < (5, 7):
            config["client_flags"] = [-constants.ClientFlag.CONNECT_ARGS]
        cnx = mysql.connector.connect(
            read_default_file=option_file, option_groups=["pooling"], **config)
        self.assertEqual("my_pool", cnx.pool_name)
        # Reset the module-level pool registry so other tests start clean.
        mysql.connector._CONNECTION_POOLS = {}
        cnx.close()
@unittest.skipIf(tests.MYSQL_VERSION < (5, 7, 8), "No JSON support")
class BugOra29808262(tests.MySQLConnectorTests):
    """BUG#29808262: TEXT COLUMN WITH ONLY DIGITS READS IN AS INT.
    """
    table_name = "BugOra29808262"

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @foreach_cnx()
    def test_blob_fields(self):
        """BLOB columns must come back as bytes; JSON/TEXT as str."""
        cur = self.cnx.cursor()
        cur.execute("DROP TABLE IF EXISTS {}".format(self.table_name))
        cur.execute("CREATE TABLE {} ("
                    " my_blob BLOB,"
                    " my_longblob LONGBLOB,"
                    " my_json JSON,"
                    " my_text TEXT) CHARACTER SET utf8"
                    " COLLATE utf8_general_ci".format(self.table_name))
        test_values = (
            "BLOB" * (2**10),
            "LONG_BLOB" * (2**20),
            '{"lat": "41.14961", "lon": "-8.61099", "name": "Porto"}',
            "My TEXT",
        )
        # Same values but with the expected Python type per column.
        expected_values = (
            b"BLOB" * (2**10),
            b"LONG_BLOB" * (2**20),
            '{"lat": "41.14961", "lon": "-8.61099", "name": "Porto"}',
            "My TEXT",
        )
        cur = self.cnx.cursor()
        cur.execute("INSERT INTO {} VALUES ('{}')"
                    "".format(self.table_name, "', '".join(test_values)))
        cur.execute("SELECT my_blob, my_longblob, my_json, my_text FROM {}"
                    "".format(self.table_name))
        res = cur.fetchall()
        self.assertEqual(res[0], expected_values)
        cur.execute("DROP TABLE IF EXISTS {}".format(self.table_name))
        cur.close()
|
pruebas.py
|
import threading
# global variable x
x = 0
def increment():
    """Increment the module-level counter ``x`` by one.

    ``x += 1`` is a read-modify-write, so concurrent calls from several
    threads without a lock can lose updates (that race is what this demo
    is built to show).
    """
    global x
    x += 1
def thread_task(lock=None):
    """Call increment() 100000 times.

    Args:
        lock: optional threading.Lock. It is accepted for compatibility with
            the (commented-out) synchronized variant of this demo but is not
            used, so the lost-update race stays observable.

    Fix: ``lock`` was a required positional argument, but main_task starts
    the threads with no args, so each thread died immediately with a
    TypeError and x stayed 0. Making it optional restores the demo.
    """
    for _ in range(100000):
        # lock.acquire()
        increment()
        # lock.release()
def main_task():
    """Run two threads that each increment the shared counter 100000 times.

    Because the increments are unsynchronized, the final value of ``x`` is
    usually below 200000 — demonstrating the race condition.
    """
    global x
    # setting global variable x as 0
    x = 0
    # creating a lock and passing it to each worker; previously the threads
    # were started with no args although thread_task requires a lock
    # parameter, so both threads crashed with TypeError.
    lock = threading.Lock()
    # creating threads
    t1 = threading.Thread(target=thread_task, args=(lock,))
    t2 = threading.Thread(target=thread_task, args=(lock,))
    # start threads
    t1.start()
    t2.start()
    # wait until threads finish their job
    t1.join()
    t2.join()
# Run the demonstration only when executed directly, not when imported.
if __name__ == "__main__":
    main_task()
|
PPO - Copy.py
|
import multiprocessing
import multiprocessing.connection
import time
from collections import deque
from typing import Dict, List
import cv2
import gym
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.distributions import Categorical
from torch.nn import functional as F
# Select the compute device once at import time: the first CUDA GPU when
# available, otherwise the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Game:
    """Wrapper around a Breakout Atari environment.

    Applies the standard Atari preprocessing: repeat each action for 4
    frames, max-pool the last two frames, convert to 84x84 grayscale, and
    stack the last 4 processed frames as the observation.
    """

    def __init__(self, seed: int):
        self.env = gym.make('BreakoutNoFrameskip-v4')
        self.env.seed(seed)
        # buffer to take the max of last 2 frames for each action
        self.obs_2_max = np.zeros((2, 84, 84, 1), np.uint8)
        # tensor for a stack of 4 frames
        self.obs_4 = np.zeros((84, 84, 4))
        # episode rewards
        self.rewards = []
        # Number of lives left
        self.lives = 0

    def step(self, action):
        """Repeat ``action`` for up to 4 frames and return
        (stacked_obs, summed_reward, done, episode_info)."""
        reward = 0
        done = None
        for i in range(4):
            obs, r, done, info = self.env.step(action)
            # Only the last two of the four frames feed the max-pool buffer.
            if i >= 2:
                self.obs_2_max[i % 2] = self._process_obs(obs)
            reward += r
            lives = self.env.unwrapped.ale.lives()
            # Treat losing a life as an episode end (common Atari trick).
            if lives < self.lives:
                done = True
            self.lives = lives
            if done:
                break
        self.rewards.append(reward)
        if done:
            episode_info = {"reward": sum(self.rewards),
                            "length": len(self.rewards)}
            self.reset()
        else:
            episode_info = None
        # Max over the last two frames removes Atari sprite flicker.
        obs = self.obs_2_max.max(axis=0)
        # Shift the 4-frame stack left and append the newest frame.
        self.obs_4 = np.roll(self.obs_4, shift=-1, axis=-1)
        self.obs_4[..., -1:] = obs
        return self.obs_4, reward, done, episode_info

    def reset(self):
        """Reset the environment and fill all 4 stack slots with the first
        processed frame."""
        obs = self.env.reset()
        obs = self._process_obs(obs)
        # NOTE(review): the first assignment (0:) already fills every slot;
        # the following three are redundant but harmless.
        self.obs_4[..., 0:] = obs
        self.obs_4[..., 1:] = obs
        self.obs_4[..., 2:] = obs
        self.obs_4[..., 3:] = obs
        self.rewards = []
        self.lives = self.env.unwrapped.ale.lives()
        return self.obs_4

    @staticmethod
    def _process_obs(obs):
        """Convert an RGB frame to an 84x84 grayscale image."""
        obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
        obs = cv2.resize(obs, (84, 84), interpolation=cv2.INTER_AREA)
        return obs[:, :, None]  # Shape (84, 84, 1)
def worker_process(remote: multiprocessing.connection.Connection, seed: int):
    """Child-process entry point: serve (command, payload) requests over
    ``remote`` for one Game instance until a "close" command arrives."""
    game = Game(seed)
    running = True
    while running:
        command, payload = remote.recv()
        if command == "step":
            # Reply with (obs, reward, done, episode_info).
            remote.send(game.step(payload))
        elif command == "reset":
            remote.send(game.reset())
        elif command == "close":
            remote.close()
            running = False
        else:
            raise NotImplementedError
class Worker:
    """Handle to one environment worker running in a child process."""
    child: multiprocessing.connection.Connection
    process: multiprocessing.Process

    def __init__(self, seed):
        # One pipe end stays here; the other is handed to the child.
        self.child, remote_end = multiprocessing.Pipe()
        self.process = multiprocessing.Process(
            target=worker_process, args=(remote_end, seed))
        self.process.start()
class Model(nn.Module):
    """Actor-critic network for 84x84x4 Atari observations.

    Three conv layers + one linear layer feed two heads: a 4-way policy
    (Categorical logits) and a scalar value estimate.

    NOTE: the orthogonal_ init calls consume the global torch RNG in this
    exact order; reordering them changes the initialization.
    """

    def __init__(self):
        super().__init__()
        # 84x84x4 -> 20x20x32
        self.conv1 = nn.Conv2d(in_channels=4,
                               out_channels=32,
                               kernel_size=8,
                               stride=4,
                               padding=0)
        nn.init.orthogonal_(self.conv1.weight, np.sqrt(2))
        # 20x20x32 -> 9x9x64
        self.conv2 = nn.Conv2d(in_channels=32,
                               out_channels=64,
                               kernel_size=4,
                               stride=2,
                               padding=0)
        nn.init.orthogonal_(self.conv2.weight, np.sqrt(2))
        # 9x9x64 -> 7x7x64
        self.conv3 = nn.Conv2d(in_channels=64,
                               out_channels=64,
                               kernel_size=3,
                               stride=1,
                               padding=0)
        nn.init.orthogonal_(self.conv3.weight, np.sqrt(2))
        self.lin = nn.Linear(in_features=7 * 7 * 64,
                             out_features=512)
        nn.init.orthogonal_(self.lin.weight, np.sqrt(2))
        # Actor head: logits over the 4 Breakout actions.
        self.pi_logits = nn.Linear(in_features=512,
                                   out_features=4)
        nn.init.orthogonal_(self.pi_logits.weight, np.sqrt(0.01))
        # Critic
        self.value = nn.Linear(in_features=512,
                               out_features=1)
        nn.init.orthogonal_(self.value.weight, 1)

    def forward(self, obs: np.ndarray):
        """Return (policy distribution, value) for a [N, 4, 84, 84] batch.

        NOTE(review): the annotation says np.ndarray, but the conv layers
        require a torch.Tensor — callers pass obs_to_torch() output.
        """
        h: torch.Tensor
        h = F.relu(self.conv1(obs))
        h = F.relu(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = h.reshape((-1, 7 * 7 * 64))
        h = F.relu(self.lin(h))
        pi = Categorical(logits=self.pi_logits(h))
        value = self.value(h).reshape(-1)
        return pi, value
def obs_to_torch(obs: np.ndarray) -> torch.Tensor:
    """Convert a uint8 [N, H, W, C] observation batch into a float32
    [N, C, H, W] tensor on the module-level device, scaled to [0, 1]."""
    # One transpose equivalent to the former pair of swapaxes calls:
    # [N, H, W, C] -> [N, C, H, W].
    reordered = np.transpose(obs, (0, 3, 1, 2))
    return torch.tensor(reordered, dtype=torch.float32, device=device) / 255.
class Trainer:
    """Runs PPO (clipped surrogate objective) updates on a Model."""

    def __init__(self, model: Model):
        self.model = model
        self.optimizer = optim.Adam(self.model.parameters(), lr=2.5e-4)

    def train(self,
              samples: Dict[str, np.ndarray],
              learning_rate: float,
              clip_range: float):
        """Run one PPO gradient step on a sampled mini-batch.

        Returns [policy_reward, vf_loss, entropy_bonus,
        approx_kl_divergence, clip_fraction] as torch scalars.

        NOTE(review): the annotation says Dict[str, np.ndarray], but the
        values are used directly in torch ops and fed to the model, so they
        are presumably torch.Tensors — confirm against the caller.
        """
        sampled_obs = samples['obs']
        sampled_action = samples['actions']
        # Empirical return target: V(s) + advantage.
        sampled_return = samples['values'] + samples['advantages']
        sampled_normalized_advantage = Trainer._normalize(samples['advantages'])
        sampled_neg_log_pi = samples['neg_log_pis']
        sampled_value = samples['values']
        pi, value = self.model(sampled_obs)
        neg_log_pi = -pi.log_prob(sampled_action)
        # Importance ratio pi_new / pi_old, computed from log-probs.
        ratio: torch.Tensor = torch.exp(sampled_neg_log_pi - neg_log_pi)
        clipped_ratio = ratio.clamp(min=1.0 - clip_range,
                                    max=1.0 + clip_range)
        # PPO pessimistic (clipped) surrogate objective.
        policy_reward = torch.min(ratio * sampled_normalized_advantage,
                                  clipped_ratio * sampled_normalized_advantage)
        policy_reward = policy_reward.mean()
        entropy_bonus = pi.entropy()
        entropy_bonus = entropy_bonus.mean()
        # Clip the value prediction around the old value estimate and take
        # the worse of the clipped/unclipped squared errors.
        clipped_value = sampled_value + (value - sampled_value).clamp(min=-clip_range,
                                                                      max=clip_range)
        vf_loss = torch.max((value - sampled_return) ** 2, (clipped_value - sampled_return) ** 2)
        vf_loss = 0.5 * vf_loss.mean()
        # Maximize reward + entropy, minimize value loss.
        loss: torch.Tensor = -(policy_reward - 0.5 * vf_loss + 0.01 * entropy_bonus)
        # Apply the (possibly annealed) learning rate before stepping.
        for pg in self.optimizer.param_groups:
            pg['lr'] = learning_rate
        self.optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=0.5)
        self.optimizer.step()
        # Diagnostics: quadratic KL approximation and fraction of clipped
        # ratios.
        approx_kl_divergence = .5 * ((neg_log_pi - sampled_neg_log_pi) ** 2).mean()
        clip_fraction = (abs((ratio - 1.0)) > clip_range).type(torch.FloatTensor).mean()
        return [policy_reward,
                vf_loss,
                entropy_bonus,
                approx_kl_divergence,
                clip_fraction]

    @staticmethod
    def _normalize(adv: np.ndarray):
        """Standardize advantages to zero mean / unit std (eps for safety)."""
        return (adv - adv.mean()) / (adv.std() + 1e-8)
class Main():
    """PPO training driver: samples rollouts from worker processes, computes
    GAE advantages, and updates the model via Trainer."""
    def __init__(self):
        # Discount factor and GAE lambda for advantage calculation.
        self.gamma = 0.99
        self.lamda = 0.95
        # Number of updates
        self.updates = 10000
        # Number of epochs to train the model with sampled data
        self.epochs = 4
        # Number of worker processes
        self.n_workers = 2
        # Number of steps to run on each process for a single update
        self.worker_steps = 128
        # number of mini batches
        self.n_mini_batch = 4
        # total number of samples for a single update
        self.batch_size = self.n_workers * self.worker_steps
        # size of mini batch
        self.mini_batch_size = self.batch_size // self.n_mini_batch
        assert (self.batch_size % self.n_mini_batch == 0)
        # Workers.  NOTE(review): every worker gets the same seed (47); if the
        # environments are deterministic, all rollouts start identically.
        self.workers = [Worker(47) for _ in range(self.n_workers)]
        # latest observation per worker: [N, H, W, C] with 4 stacked frames
        self.obs = np.zeros((self.n_workers, 84, 84, 4), dtype=np.uint8)
        # Reset game states
        for worker in self.workers:
            worker.child.send(("reset", None))
        for i, worker in enumerate(self.workers):
            self.obs[i] = worker.child.recv()
        self.model = Model()  # model for sampling
        self.model.to(device)
        self.trainer = Trainer(self.model)

    def sample(self) -> (Dict[str, np.ndarray], List):
        """Run the current policy for worker_steps on every worker.

        Returns:
            samples_flat: dict of flattened [batch_size, ...] tensors
                ('obs', 'actions', 'values', 'neg_log_pis', 'advantages').
            episode_infos: episode-info dicts reported by workers this round.
        """
        rewards = np.zeros((self.n_workers, self.worker_steps), dtype=np.float32)
        actions = np.zeros((self.n_workers, self.worker_steps), dtype=np.int32)
        # np.bool was deprecated and removed in NumPy 1.24; builtin bool is
        # the exact equivalent.
        dones = np.zeros((self.n_workers, self.worker_steps), dtype=bool)
        obs = np.zeros((self.n_workers, self.worker_steps, 84, 84, 4), dtype=np.uint8)
        neg_log_pis = np.zeros((self.n_workers, self.worker_steps), dtype=np.float32)
        values = np.zeros((self.n_workers, self.worker_steps), dtype=np.float32)
        episode_infos = []
        for t in range(self.worker_steps):
            obs[:, t] = self.obs
            # get actions from old policy for each worker
            pi, v = self.model(obs_to_torch(self.obs))
            values[:, t] = v.cpu().data.numpy()
            a = pi.sample()
            actions[:, t] = a.cpu().data.numpy()
            neg_log_pis[:, t] = -pi.log_prob(a).cpu().data.numpy()
            # run sampled actions on workers
            for w, worker in enumerate(self.workers):
                worker.child.send(("step", actions[w, t]))
            for w, worker in enumerate(self.workers):
                self.obs[w], rewards[w, t], dones[w, t], info = worker.child.recv()
                if info:
                    # keep the most recent frame for debugging/visualization
                    info['obs'] = obs[w, t, :, :, 3]
                    episode_infos.append(info)
        advantages = self._calc_advantages(dones, rewards, values)
        samples = {
            'obs': obs,
            'actions': actions,
            'values': values,
            'neg_log_pis': neg_log_pis,
            'advantages': advantages
        }
        # flatten [workers, steps, ...] -> [workers * steps, ...]
        samples_flat = {}
        for k, v in samples.items():
            v = v.reshape(v.shape[0] * v.shape[1], *v.shape[2:])
            if k == 'obs':
                samples_flat[k] = obs_to_torch(v)
            else:
                samples_flat[k] = torch.tensor(v, device=device)
        return samples_flat, episode_infos

    def _calc_advantages(self, dones: np.ndarray, rewards: np.ndarray,
                         values: np.ndarray) -> np.ndarray:
        """Generalized Advantage Estimation over the collected rollout."""
        advantages = np.zeros((self.n_workers, self.worker_steps), dtype=np.float32)
        last_advantage = 0
        # Bootstrap from the value of the observation after the final step.
        _, last_value = self.model(obs_to_torch(self.obs))
        last_value = last_value.cpu().data.numpy()
        for t in reversed(range(self.worker_steps)):
            # Zero out value/advantage carried across episode boundaries.
            mask = 1.0 - dones[:, t]
            last_value = last_value * mask
            last_advantage = last_advantage * mask
            delta = rewards[:, t] + self.gamma * last_value - values[:, t]
            last_advantage = delta + self.gamma * self.lamda * last_advantage
            advantages[:, t] = last_advantage
            last_value = values[:, t]
        return advantages

    def train(self, samples: Dict[str, np.ndarray], learning_rate: float, clip_range: float):
        """Run self.epochs passes of shuffled mini-batch PPO updates.

        Returns the element-wise mean of the per-mini-batch stats lists.
        """
        train_info = []
        for _ in range(self.epochs):
            # reshuffle the batch each epoch
            indexes = torch.randperm(self.batch_size)
            for start in range(0, self.batch_size, self.mini_batch_size):
                end = start + self.mini_batch_size
                mini_batch_indexes = indexes[start: end]
                mini_batch = {k: v[mini_batch_indexes] for k, v in samples.items()}
                res = self.trainer.train(learning_rate=learning_rate,
                                         clip_range=clip_range,
                                         samples=mini_batch)
                train_info.append(res)
        return np.mean(train_info, axis=0)

    def run_training_loop(self):
        """Main loop: anneal lr/clip-range, sample, train, and print progress."""
        # rolling window of the last 100 episodes' info
        episode_info = deque(maxlen=100)
        for update in range(self.updates):
            time_start = time.time()
            progress = update / self.updates
            # linear annealing of learning rate and clip range (epsilon)
            learning_rate = 2.5e-4 * (1 - progress)
            clip_range = 0.1 * (1 - progress)
            samples, sample_episode_info = self.sample()
            self.train(samples, learning_rate, clip_range)
            time_end = time.time()
            fps = int(self.batch_size / (time_end - time_start))
            episode_info.extend(sample_episode_info)
            reward_mean, length_mean = Main._get_mean_episode_info(episode_info)
            print(f"{update:4}: fps={fps:3} reward={reward_mean:.2f} length={length_mean:.3f}")

    @staticmethod
    def _get_mean_episode_info(episode_info):
        """Mean reward/length over the recent-episode window (NaN when empty)."""
        if len(episode_info) > 0:
            return (np.mean([info["reward"] for info in episode_info]),
                    np.mean([info["length"] for info in episode_info]))
        else:
            return np.nan, np.nan

    def destroy(self):
        """Ask every worker process to shut down."""
        for worker in self.workers:
            worker.child.send(("close", None))
if __name__ == "__main__":
    # Time the whole run so the final report covers setup + training + teardown.
    start_time = time.time()
    trainer_app = Main()
    trainer_app.run_training_loop()
    trainer_app.destroy()
    print(f'500 pipes reached consistently time: {time.time() - start_time}')
|
http_service.py
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Core\sims4\gsi\http_service.py
# Compiled at: 2018-07-31 23:22:52
# Size of source mod 2**32: 9525 bytes
import socket
# Prefer real threads; fall back to the cooperative dummy implementation when
# the interpreter was built without thread support.
try:
    import threading
    _threading_enabled = True
except ImportError:
    _threading_enabled = False
    import dummy_threading as threading
import time, sims4.gsi.dispatcher, sims4.log, sims4.core_services
# urllib.parse may be missing from stripped-down runtimes.
# NOTE(review): the bare except also swallows unrelated import errors, and
# do_GET will then fail with a NameError — confirm this is intentional.
try:
    import urllib.parse
except:
    pass
logger = sims4.log.Logger('GSI')
try:
    import http.server
except ImportError:
    # Stub hierarchy so the class definitions below can still reference
    # http.server.BaseHTTPRequestHandler / HTTPServer when http is unavailable.
    class http:
        class server:
            class BaseHTTPRequestHandler:
                def __init__(self):
                    pass
            class HTTPServer:
                def __init__(self):
                    pass
# Query-string key used to request a JSONP-wrapped response.
JSONP_CALLBACK = 'callback'
LOCAL_HOST = 'localhost'
# Seconds a single handle_request() may block; keeps the polling loops responsive.
HTTP_SERVER_TIMEOUT = 0.001
class GameHttpHandler(http.server.BaseHTTPRequestHandler):
    """Serves GSI dispatcher requests over HTTP, with JSONP and CORS support."""
    def log_message(self, log_format, *args):
        """Suppress the default per-request stderr logging."""
        pass
    def do_HEAD(self):
        """Answer HEAD probes with a bare 200."""
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
    def do_OPTIONS(self):
        """Answer CORS preflight requests, allowing GET/POST from any origin."""
        self.send_response(200)
        self.send_header('Access-Control-Allow-Origin', '*')
        self.send_header('Access-Control-Allow-Methods', 'GET, POST, OPTIONS')
        self.send_header('Access-Control-Allow-Headers', 'content-type')
        self.send_header('Content-Length', '0')
        self.send_header('Content-Type', 'text/html')
        self.end_headers()
    def do_GET(self):
        """Parse the request path/query, dispatch to the GSI handler, and write
        the JSON response (JSONP-wrapped when a 'callback' param is present)."""
        try:
            parsed_url = urllib.parse.urlparse(self.path)
            clean_path = parsed_url.path.strip('/')
            try:
                if parsed_url.query:
                    params = urllib.parse.parse_qs(parsed_url.query)
                    # parse_qs yields lists; keep only the first value per key
                    # and coerce the strings 'true'/'false' to booleans.
                    for key, value in params.items():
                        if value[0] == 'true':
                            params[key] = True
                        elif value[0] == 'false':
                            params[key] = False
                        else:
                            params[key] = value[0]
                else:
                    params = None
            except Exception:
                logger.exception('Unable to parse kwargs from query string:\n{}', parsed_url.query)
                params = None
            if params is None:
                callback_string = None
                response = sims4.gsi.dispatcher.handle_request(clean_path, params)
            else:
                # Pull the JSONP callback out so it is not passed to the dispatcher.
                callback_string = params.pop(JSONP_CALLBACK, None)
                response = sims4.gsi.dispatcher.handle_request(clean_path, params)
            if response is None:
                # No handler registered for this path.
                self.send_response(404)
                return
            self.send_response(200)
            self.send_header('Access-Control-Allow-Origin', '*')
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            if callback_string:
                # JSONP: wrap the JSON payload in the requested callback call.
                response = callback_string + '(' + response + ')'
            self.write_string(response)
            return
        except ConnectionAbortedError:
            # Client disconnected mid-response; nothing useful to do.
            pass
    def write_string(self, string):
        """Encode and write a text response body."""
        self.wfile.write(bytes(string, 'UTF-8'))
def _get_host_address():
    """Resolve this machine's address, falling back to localhost on DNS failure."""
    try:
        return socket.gethostbyname(socket.gethostname())
    except socket.gaierror:
        return socket.gethostbyname(LOCAL_HOST)
def _try_create_http_server(host_address, port, http_handler):
    """Bind an HTTPServer to host_address, retrying on localhost if binding fails."""
    try:
        return http.server.HTTPServer((host_address, port), http_handler)
    except OSError:
        fallback_address = socket.gethostbyname(LOCAL_HOST)
        return http.server.HTTPServer((fallback_address, port), http_handler)
if _threading_enabled:
    class HttpService(sims4.service_manager.Service):
        """GSI HTTP service that polls its server from a dedicated thread."""
        def __init__(self):
            self._server_thread = None
            # Guards _http_server between the service thread and stop_server().
            self._server_lock = threading.Lock()
            self._http_server = None
        def on_tick(self):
            # The server loop runs on its own thread; nothing to do per tick.
            pass
        def stop(self):
            self.stop_server()
        def start_server(self, callback):
            """Start the server thread, or invoke callback immediately if running."""
            if self._server_thread is None:
                self._server_thread = threading.Thread(target=(self._http_server_loop), args=(callback,), name='HTTP Server')
                self._server_thread.start()
            else:
                callback(self._http_server)
        def stop_server(self):
            """Close the server socket; the loop thread exits on its next check."""
            if self._server_thread is not None:
                with self._server_lock:
                    if self._http_server is not None:
                        self._http_server.socket.close()
                    self._http_server = None
                self._server_thread = None
        def _http_server_loop(self, callback=None):
            """Thread body: create the server (port 0 = OS-assigned) and poll it."""
            host_address = _get_host_address()
            port = 0
            if self._http_server is None:
                with self._server_lock:
                    self._http_server = _try_create_http_server(host_address, port, GameHttpHandler)
                    # Short timeout so handle_request() never blocks the loop long.
                    self._http_server.timeout = HTTP_SERVER_TIMEOUT
                if callback is not None:
                    callback(self._http_server)
            # stop_server() sets _http_server to None to end this loop.
            while self._http_server is not None:
                with self._server_lock:
                    self._http_server.handle_request()
                time.sleep(0.1)
else:
    class HttpService(sims4.service_manager.Service):
        """Thread-less GSI HTTP service; the server is polled from on_tick()."""
        def __init__(self):
            self._http_server = None
        def on_tick(self):
            # Pump at most one pending request per game tick.
            if self._http_server is None:
                return
            self._http_server.handle_request()
        def stop(self):
            self.stop_server()
        def start_server(self, callback):
            """Create the server (port 0 = OS-assigned) and notify the callback."""
            if self._http_server is None:
                host_address = _get_host_address()
                port = 0
                self._http_server = _try_create_http_server(host_address, port, GameHttpHandler)
                # Short timeout so on_tick() never blocks the game loop long.
                self._http_server.timeout = HTTP_SERVER_TIMEOUT
                if callback is not None:
                    callback(self._http_server)
        def stop_server(self):
            if self._http_server is not None:
                self._http_server.socket.close()
                self._http_server = None
def start_http_server(callback):
    """Ask the global HTTP service (when available) to start its server."""
    service = sims4.core_services.http_service()
    if service is None:
        return
    service.start_server(callback)
def stop_http_server():
    """Ask the global HTTP service (when available) to shut its server down."""
    service = sims4.core_services.http_service()
    if service is None:
        return
    service.stop_server()
|
build_localization_tfrecords.py
|
# coding: utf-8
# Copyright 2017 challenger.ai
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Build tfrecord data."""
# python2.7
# __author__ = 'WANG, Heda'
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import Counter
from collections import namedtuple
from datetime import datetime
import json
import os.path
import random
import sys
import base64
# Python 2 only: restore sys.setdefaultencoding (deleted by site.py) so that
# implicit str<->unicode conversions of the Chinese captions use UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
import threading
import jieba
import numpy as np
import tensorflow as tf
# input data
tf.flags.DEFINE_string("train_image_dir", "data/ai_challenger_caption_train_20170902/caption_train_images_20170902",
                       "Training image directory.")
tf.flags.DEFINE_string("train_captions_file", "data/ai_challenger_caption_train_20170902/caption_train_annotations_20170902.json",
                       "Training captions JSON file.")
tf.flags.DEFINE_string("train_localizations_file", "data/bottom_up_attention/aichallenger_train.tsv.small",
                       "Training captions TSV file.")
tf.flags.DEFINE_string("validate_image_dir", "data/ai_challenger_caption_validation_20170910/caption_validation_images_20170910",
                       "Validation image directory.")
tf.flags.DEFINE_string("validate_localizations_file", "data/bottom_up_attention/aichallenger_validate.tsv.small",
                       "Validating captions TSV file.")
tf.flags.DEFINE_string("test1_image_dir", "data/ai_challenger_caption_test1_20170923/caption_test1_images_20170923",
                       "Test image directory.")
tf.flags.DEFINE_string("test1_localizations_file", "data/bottom_up_attention/aichallenger_test1.tsv.small",
                       "Test captions TSV file.")
tf.flags.DEFINE_string("test2_image_dir", "data/ai_challenger_caption_test_b_20171120/caption_test_b_images_20171120",
                       "Test image directory.")
tf.flags.DEFINE_string("test2_localizations_file", "data/bottom_up_attention/aichallenger_test2.tsv.small",
                       "Test captions TSV file.")
# use existing word counts file
tf.flags.DEFINE_string("word_counts_input_file",
                       "",
                       "If defined, use existing word_counts_file.")
# output files
tf.flags.DEFINE_string("output_dir", "data/Loc_TFRecord_data", "Output directory for tfrecords.")
tf.flags.DEFINE_string("word_counts_output_file",
                       "data/word_counts.txt",
                       "Output vocabulary file of word counts.")
# sentence-boundary / unknown-word marker tokens
tf.flags.DEFINE_string("start_word", "<S>",
                       "Special word added to the beginning of each sentence.")
tf.flags.DEFINE_string("end_word", "</S>",
                       "Special word added to the end of each sentence.")
tf.flags.DEFINE_string("unknown_word", "<UNK>",
                       "Special word meaning 'unknown'.")
# the minimum word count
tf.flags.DEFINE_integer("min_word_count", 4,
                        "The minimum number of occurrences of each word in the "
                        "training set for inclusion in the vocabulary.")
# threads
tf.flags.DEFINE_integer("num_threads", 8,
                        "Number of threads to preprocess the images.")
# sharding parameters (each must be < num_threads or divisible by it)
tf.flags.DEFINE_integer("train_shards", 280,
                        "Number of shards in training TFRecord files.")
tf.flags.DEFINE_integer("validate_shards", 8,
                        "Number of shards in validation TFRecord files.")
tf.flags.DEFINE_integer("test1_shards", 8,
                        "Number of shards in testing TFRecord files.")
tf.flags.DEFINE_integer("test2_shards", 8,
                        "Number of shards in testing TFRecord files.")
tf.flags.DEFINE_boolean("build_flip_caption", False,
                        "Whether to generate flip caption. If True, only build train set,"
                        "If set False, build train and dev set")
tf.flags.DEFINE_integer("max_ref_length", 30,
                        "Maximum caption length.")
tf.flags.DEFINE_integer("num_refs", 5,
                        "Number of references per image.")
tf.flags.DEFINE_string("task", "train",
                       "Options are train/validate/test1/test2.")
FLAGS = tf.flags.FLAGS
# Per-image record: captions/flip_captions are None for validate/test tasks.
ImageMetadata = namedtuple("ImageMetadata",
                           ["id", "filename", "base_filename", "localization", "captions", "flip_captions"])
# functions to flip caption
def find_all(string, query):
    """Return the start indices of all non-overlapping occurrences of query.

    Args:
      string: the text to search.
      query: the substring to look for.
    Returns:
      A list of 0-based start positions, in ascending order.
    """
    # BUG FIX: an empty query made str.find return 0 forever (beg never
    # advanced), so the original looped infinitely.  Define the result as [].
    if not query:
        return []
    query_len = len(query)
    positions = []
    beg = 0
    pos = string.find(query, beg)
    while pos != -1:
        positions.append(pos)
        # Skip past the match so occurrences never overlap.
        beg = pos + query_len
        pos = string.find(query, beg)
    return positions
def func_flip_caption(caption):
    """Swap the characters 左 ('left') and 右 ('right') in a caption, except when
    they occur inside the fixed compound 左右, which is left untouched."""
    # Positions belonging to the compound 左右 must not be flipped.
    protected = set()
    for start in find_all(caption, u"左右"):
        protected.add(start)
        protected.add(start + 1)
    left_positions = [p for p in find_all(caption, u"左") if p not in protected]
    right_positions = [p for p in find_all(caption, u"右") if p not in protected]
    if not left_positions and not right_positions:
        return caption
    flipped_chars = []
    for index, char in enumerate(caption):
        if index in left_positions:
            flipped_chars.append(u"右")
        elif index in right_positions:
            flipped_chars.append(u"左")
        else:
            flipped_chars.append(char)
    return "".join(flipped_chars)
class Vocabulary(object):
    """Simple vocabulary wrapper mapping word strings to integer ids."""

    def __init__(self, vocab, unk_id):
        """Initializes the vocabulary.

        Args:
          vocab: A dictionary of word to word_id.
          unk_id: Id of the special 'unknown' word, returned for any word
            missing from the dictionary.
        """
        self._vocab = vocab
        self._unk_id = unk_id

    def word_to_id(self, word):
        """Returns the integer id of a word string (unk_id for unknown words)."""
        return self._vocab.get(word, self._unk_id)
def load_vocab(vocab_file):
    """Load a Vocabulary from a word-counts file (one "word count" pair per line).

    The id of each word is its 0-based line number; unknown words map to an id
    one past the last known word.
    """
    if not tf.gfile.Exists(vocab_file):
        # BUG FIX: the filename was passed as a second print argument, which
        # printed a tuple instead of the formatted message.
        print("Vocab file %s not found." % vocab_file)
        exit()
    print("Initializing vocabulary from file: %s" % vocab_file)
    with tf.gfile.GFile(vocab_file, mode="r") as f:
        reverse_vocab = list(f.readlines())
    # NOTE: .decode('utf-8') on the split str is Python 2 only.
    reverse_vocab = [line.split()[0].decode('utf-8') for line in reverse_vocab]
    # The special boundary tokens must already be in the file; <UNK> must not.
    assert FLAGS.start_word in reverse_vocab
    assert FLAGS.end_word in reverse_vocab
    assert FLAGS.unknown_word not in reverse_vocab
    unk_id = len(reverse_vocab)
    vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
    vocab = Vocabulary(vocab_dict, unk_id)
    return vocab
class ImageDecoder(object):
    """Helper class for decoding images in TensorFlow."""
    def __init__(self):
        # Create a single TensorFlow Session for all image decoding calls.
        self._sess = tf.Session()
        # TensorFlow ops for JPEG decoding.
        self._encoded_jpeg = tf.placeholder(dtype=tf.string)
        self._decode_jpeg = tf.image.decode_jpeg(self._encoded_jpeg, channels=3)
    def decode_jpeg(self, encoded_jpeg):
        """Decode raw JPEG bytes; asserts the result is a 3-channel HxWx3 image.

        Raises tf.errors.InvalidArgumentError (or AssertionError) for invalid
        data, which callers use to skip corrupt files.
        """
        image = self._sess.run(self._decode_jpeg,
                               feed_dict={self._encoded_jpeg: encoded_jpeg})
        assert len(image.shape) == 3
        assert image.shape[2] == 3
        return image
def _int64_feature(value):
    """Build a tf.train.Feature wrapping a single int64 value."""
    int64_list = tf.train.Int64List(value=[value])
    return tf.train.Feature(int64_list=int64_list)
def _bytes_feature(value):
    """Build a tf.train.Feature wrapping a single bytes value.

    NOTE(review): str(value) only yields raw bytes on Python 2 — confirm
    before running under Python 3.
    """
    bytes_list = tf.train.BytesList(value=[str(value)])
    return tf.train.Feature(bytes_list=bytes_list)
def _int64_list(value):
    """Wrapper for inserting an int64 list Feature into an Example proto."""
    # Docstring fixed: this wraps int64 values, not bytes (copy-paste error).
    return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _float_list(value):
    """Wrapper for inserting a float list Feature into an Example proto."""
    # Docstring fixed: this wraps float values, not bytes (copy-paste error).
    return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def _bytes_list(value):
    """Build a tf.train.Feature wrapping a list of bytes values."""
    bytes_list = tf.train.BytesList(value=value)
    return tf.train.Feature(bytes_list=bytes_list)
def _int64_feature_list(values):
    """Build a tf.train.FeatureList with one int64 Feature per element."""
    features = [_int64_feature(v) for v in values]
    return tf.train.FeatureList(feature=features)
def _bytes_feature_list(values):
    """Build a tf.train.FeatureList with one bytes Feature per element."""
    features = [_bytes_feature(v) for v in values]
    return tf.train.FeatureList(feature=features)
def pad_or_truncate(captions, lengths):
    """Truncate/zero-pad captions to max_ref_length tokens and exactly num_refs refs.

    Args:
      captions: list of token-id lists.
      lengths: list of corresponding caption lengths.
    Returns:
      (flat_captions, lengths): a flattened num_refs * max_length id list and
      the num_refs (possibly clipped, possibly zero-padded) lengths.
    """
    max_length = FLAGS.max_ref_length
    num_refs = FLAGS.num_refs
    # Clip each length, then truncate/zero-pad every caption to max_length.
    lengths = [min(l, max_length) for l in lengths]
    captions = [c[:l] + [0] * (max_length - l) for c, l in zip(captions, lengths)]
    # BUG FIX: the missing-reference count must be taken before extending
    # `captions`.  The original extended `captions` to num_refs first, so the
    # subsequent lengths padding used num_refs - len(captions) == 0 and the
    # final `len(lengths) == num_refs` assertion failed for images with fewer
    # than num_refs captions.
    missing = num_refs - len(captions)
    if missing > 0:
        captions = captions + [[0] * max_length for _ in range(missing)]
        lengths = lengths + [0] * missing
    flat_captions = []
    for c in captions:
        flat_captions.extend(c)
    assert len(flat_captions) == num_refs * max_length
    assert len(lengths) == num_refs
    return flat_captions, lengths
def _to_sequence_example(image, decoder, vocab):
    """Builds a SequenceExample proto for an image-caption pair.
    Args:
      image: An ImageMetadata object.
      decoder: An ImageDecoder object.
      vocab: A Vocabulary object.
    Returns:
      A SequenceExample proto, or None if the image's JPEG data is invalid.
    """
    with tf.gfile.FastGFile(image.filename, "r") as f:
        encoded_image = f.read()
    # Sanity-check the JPEG bytes; corrupt images are skipped by the caller.
    try:
        decoder.decode_jpeg(encoded_image)
    except (tf.errors.InvalidArgumentError, AssertionError):
        print("Skipping file with invalid JPEG data: %s" % image.filename)
        return
    base_filename = image.base_filename
    localization = image.localization
    # Common features for all tasks (id, name, bounding boxes, raw JPEG bytes).
    feature_list = {
        "image/id": _int64_feature(image.id),
        "image/filename": _bytes_feature(base_filename),
        "image/localization": _float_list(localization),
        "image/data": _bytes_feature(encoded_image),
    }
    # Caption features exist only for the training set (captions is None otherwise).
    if image.captions is not None:
        caption_ids = [[vocab.word_to_id(word) for word in caption] for caption in image.captions]
        caption_lengths = [len(caption) for caption in caption_ids]
        flip_caption_ids = [[vocab.word_to_id(word) for word in caption] for caption in image.flip_captions]
        flip_caption_lengths = [len(caption) for caption in flip_caption_ids]
        # Normalize to num_refs references of max_ref_length tokens each.
        caption_ids, caption_lengths = pad_or_truncate(caption_ids, caption_lengths)
        flip_caption_ids, flip_caption_lengths = pad_or_truncate(flip_caption_ids, flip_caption_lengths)
        feature_list.update({
            "image/ref_words": _int64_list(caption_ids),
            "image/ref_lengths": _int64_list(caption_lengths),
            "image/flipped_ref_words": _int64_list(flip_caption_ids),
            "image/flipped_ref_lengths": _int64_list(flip_caption_lengths),
        })
    features = tf.train.Features(feature=feature_list)
    example = tf.train.Example(features=features)
    return example
def _process_image_files(thread_index, ranges, name, images, decoder, vocab,
                         num_shards):
    """Processes and saves a subset of images as TFRecord files in one thread.
    Args:
      thread_index: Integer thread identifier within [0, len(ranges)].
      ranges: A list of pairs of integers specifying the ranges of the dataset to
        process in parallel.
      name: Unique identifier specifying the dataset.
      images: List of ImageMetadata.
      decoder: An ImageDecoder object.
      vocab: A Vocabulary object.
      num_shards: Integer number of shards for the output files.
    """
    # Each thread produces N shards where N = num_shards / num_threads. For
    # instance, if num_shards = 128, and num_threads = 2, then the first thread
    # would produce shards [0, 64).
    num_threads = len(ranges)
    assert not num_shards % num_threads
    num_shards_per_batch = int(num_shards / num_threads)
    # Evenly split this thread's image range across its shards.
    shard_ranges = np.linspace(ranges[thread_index][0], ranges[thread_index][1],
                               num_shards_per_batch + 1).astype(int)
    num_images_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
    counter = 0
    for s in range(num_shards_per_batch):
        # Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
        shard = thread_index * num_shards_per_batch + s
        output_filename = "%s-%.5d-of-%.5d.tfrecord" % (name, shard, num_shards)
        output_file = os.path.join(FLAGS.output_dir, output_filename)
        writer = tf.python_io.TFRecordWriter(output_file)
        shard_counter = 0
        images_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
        for i in images_in_shard:
            image = images[i]
            sequence_example = _to_sequence_example(image, decoder, vocab)
            # _to_sequence_example returns None for undecodable images; skip them.
            if sequence_example is not None:
                writer.write(sequence_example.SerializeToString())
                shard_counter += 1
                counter += 1
            # Periodic progress logging.
            if not counter % 1000:
                print("%s [thread %d]: Processed %d of %d items in thread batch." %
                      (datetime.now(), thread_index, counter, num_images_in_thread))
                sys.stdout.flush()
        writer.close()
        print("%s [thread %d]: Wrote %d image-caption pairs to %s" %
              (datetime.now(), thread_index, shard_counter, output_file))
        sys.stdout.flush()
        shard_counter = 0
    print("%s [thread %d]: Wrote %d image-caption pairs to %d shards." %
          (datetime.now(), thread_index, counter, num_shards_per_batch))
    sys.stdout.flush()
def _process_dataset(name, images, vocab, num_shards):
    """Processes a complete data set and saves it as a TFRecord.
    Args:
      name: Unique identifier specifying the dataset.
      images: List of ImageMetadata.
      vocab: A Vocabulary object.
      num_shards: Integer number of shards for the output files.
    """
    # Copy so the in-place shuffle below does not mutate the caller's list.
    # (The original rebuilt identical ImageMetadata tuples here, a leftover
    # from code that expanded one entry per caption; a plain copy suffices.)
    images = list(images)
    # Shuffle the ordering of images. Make the randomization repeatable.
    random.seed(12345)
    random.shuffle(images)
    # Break the images into num_threads batches. Batch i is defined as
    # images[ranges[i][0]:ranges[i][1]].
    num_threads = min(num_shards, FLAGS.num_threads)
    # np.int was removed in NumPy 1.24; the builtin int is the intended dtype.
    spacing = np.linspace(0, len(images), num_threads + 1).astype(int)
    ranges = []
    threads = []
    for i in range(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i + 1]])
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    # Create a utility for decoding JPEG images to run sanity checks.
    decoder = ImageDecoder()
    # Launch a thread for each batch.
    print("Launching %d threads for spacings: %s" % (num_threads, ranges))
    for thread_index in range(len(ranges)):
        args = (thread_index, ranges, name, images, decoder, vocab, num_shards)
        t = threading.Thread(target=_process_image_files, args=args)
        t.start()
        threads.append(t)
    # Wait for all the threads to terminate.
    coord.join(threads)
    print("%s: Finished processing all %d image-caption pairs in data set '%s'." %
          (datetime.now(), len(images), name))
def _create_vocab(captions):
    """Creates the vocabulary of word to word_id.
    The vocabulary is saved to disk in a text file of word counts. The id of each
    word in the file is its corresponding 0-based line number.
    Args:
      captions: A list of lists of strings.
    Returns:
      A Vocabulary object.
    """
    print("Creating vocabulary.")
    counter = Counter()
    for c in captions:
        counter.update(c)
    print("Total words:", len(counter))
    # Filter uncommon words and sort by descending count.
    word_counts = [x for x in counter.items() if x[1] >= FLAGS.min_word_count]
    word_counts.sort(key=lambda x: x[1], reverse=True)
    print("Words in vocabulary:", len(word_counts))
    # Write out the word counts file.
    with tf.gfile.FastGFile(FLAGS.word_counts_output_file, "w") as f:
        f.write("\n".join(["%s %d" % (w, c) for w, c in word_counts]))
    print("Wrote vocabulary file:", FLAGS.word_counts_output_file)
    # Create the vocabulary dictionary.  Words filtered out above will map to
    # unk_id (one past the last known word).
    reverse_vocab = [x[0] for x in word_counts]
    unk_id = len(reverse_vocab)
    vocab_dict = dict([(x, y) for (y, x) in enumerate(reverse_vocab)])
    vocab = Vocabulary(vocab_dict, unk_id)
    return vocab
def _process_caption_jieba(caption):
    """Tokenize a Chinese caption with jieba and add start/end markers.

    Args:
      caption: A string caption.
    Returns:
      A list of strings; the tokenized caption.
    """
    tokens = [FLAGS.start_word]
    for token in jieba.cut(caption, cut_all=False):
        tokens.append(token)
    tokens.append(FLAGS.end_word)
    return tokens
def _load_localization_file(localizations_file):
    """Load per-image bounding boxes from a TSV of base64-encoded float32 blobs.

    Each line is: filename width height num_boxes base64_blob, where the blob
    decodes to 36 boxes x 4 float32 coordinates.
    Returns a dict: filename -> flat float32 array of length 144.
    NOTE: base64.decodestring and xrange make this Python 2 only.
    """
    loc_dict = {}
    with open(localizations_file) as F:
        for line in F:
            filename, width, height, num_boxes, box_str = line.strip().split()
            num_boxes = int(num_boxes)
            assert num_boxes == 36
            box_blob = base64.decodestring(box_str)
            box_array = np.frombuffer(box_blob, dtype=np.float32)
            assert len(box_array) == 4*num_boxes
            # Sanity-check each box: first corner strictly less than second.
            for i in xrange(0, len(box_array), 4):
                l1, u1, l2, u2 = box_array[i:i+4]
                assert l1 < l2
                assert u1 < u2
            loc_dict[filename] = box_array
    return loc_dict
def _load_and_process_metadata(captions_file, localizations_file, image_dir):
    """Loads image metadata from a JSON file and processes the captions.
    Args:
      captions_file: Json file containing caption annotations, or None for
        datasets without captions (validate/test); then every .jpg in
        image_dir is included with captions set to None.
      localizations_file: TSV file with per-image bounding boxes.
      image_dir: Directory containing the image files.
    Returns:
      A list of ImageMetadata.
    """
    loc_dict = _load_localization_file(localizations_file)
    image_id = set([])
    if captions_file is not None:
        id_to_captions = {}
        with open(captions_file, 'r') as f:
            caption_data = json.load(f)
        for data in caption_data:
            # image_id field is 'name.jpg'; key on the bare name.
            image_name = data['image_id'].split('.')[0]
            descriptions = data['caption']
            if image_name not in image_id:
                id_to_captions.setdefault(image_name, [])
                image_id.add(image_name)
            caption_num = len(descriptions)
            for i in range(caption_num):
                # Strip the trailing Chinese full stop and embedded newlines.
                caption_temp = descriptions[i].strip().strip("。").replace('\n', '')
                if caption_temp != '':
                    id_to_captions[image_name].append(caption_temp)
        print("Loaded caption metadata for %d images from %s and image_id num is %s" %
              (len(id_to_captions), captions_file, len(image_id)))
    else:
        id_to_captions = None
        # No annotations: include every .jpg found in the image directory.
        for filename in os.listdir(image_dir):
            if filename.endswith(".jpg"):
                image_name = filename.split(".")[0]
                if image_name not in image_id:
                    image_id.add(image_name)
    # Process the captions and combine the data into a list of ImageMetadata.
    print("Proccessing captions.")
    image_metadata = []
    num_captions = 0
    id = 0
    for base_filename in image_id:
        localization = loc_dict[base_filename]
        filename = os.path.join(image_dir, base_filename + '.jpg')
        if id_to_captions is not None:
            # Tokenize each caption, plus a left/right-flipped variant for
            # horizontal-flip data augmentation.
            captions = [_process_caption_jieba(c) for c in id_to_captions[base_filename]]
            flip_captions = [_process_caption_jieba(func_flip_caption(c)) for c in id_to_captions[base_filename]]
            num_captions += len(captions)
        else:
            captions = None
            flip_captions = None
        image_metadata.append(ImageMetadata(id, filename, base_filename, localization, captions, flip_captions))
        id = id + 1
    print("Finished processing %d captions for %d images in %s" %
          (num_captions, len(image_id), captions_file))
    return image_metadata
def main(unused_argv):
    """Builds TFRecords for the dataset selected by FLAGS.task."""
    def _is_valid_num_shards(num_shards):
        """Returns True if num_shards is compatible with FLAGS.num_threads."""
        return num_shards < FLAGS.num_threads or not num_shards % FLAGS.num_threads
    assert _is_valid_num_shards(FLAGS.train_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.train_shards")
    assert _is_valid_num_shards(FLAGS.validate_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.validate_shards")
    assert _is_valid_num_shards(FLAGS.test1_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.test1_shards")
    assert _is_valid_num_shards(FLAGS.test2_shards), (
        "Please make the FLAGS.num_threads commensurate with FLAGS.test2_shards")
    if not tf.gfile.IsDirectory(FLAGS.output_dir):
        tf.gfile.MakeDirs(FLAGS.output_dir)
    # Per-task configuration: (captions_file, localizations_file, image_dir,
    # num_shards).  Only the training set has caption annotations; the other
    # tasks pass None and are built from the images alone.  This table replaces
    # four near-identical if/elif branches.
    task_configs = {
        "train": (FLAGS.train_captions_file, FLAGS.train_localizations_file,
                  FLAGS.train_image_dir, FLAGS.train_shards),
        "validate": (None, FLAGS.validate_localizations_file,
                     FLAGS.validate_image_dir, FLAGS.validate_shards),
        "test1": (None, FLAGS.test1_localizations_file,
                  FLAGS.test1_image_dir, FLAGS.test1_shards),
        "test2": (None, FLAGS.test2_localizations_file,
                  FLAGS.test2_image_dir, FLAGS.test2_shards),
    }
    # Unknown task names are ignored, matching the original behavior.
    if FLAGS.task in task_configs:
        captions_file, localizations_file, image_dir, num_shards = task_configs[FLAGS.task]
        # Load image metadata (and captions for the training set).
        dataset = _load_and_process_metadata(captions_file, localizations_file, image_dir)
        # The vocabulary is always loaded from the pre-built word-counts file.
        vocab = load_vocab(FLAGS.word_counts_input_file)
        _process_dataset(FLAGS.task, dataset, vocab, num_shards)
if __name__ == "__main__":
    # Parse flags and dispatch to main().
    tf.app.run()
|
test_ringbuffer.py
|
#!/usr/bin/env python3
import ctypes
import gc
import logging
import multiprocessing
import queue
import threading
import time
import unittest
import ringbuffer
class TestException(Exception):
    """Raised by the tests below to simulate failures inside critical sections."""
class ReadersWriterLockTest(unittest.TestCase):
    """Tests for ringbuffer.ReadersWriterLock.

    The assertion helpers inspect the lock's shared counters
    (``readers``/``writer`` multiprocessing values) directly. Operations that
    must block are executed in child processes via run_async() so their
    blocking can be observed and sequenced from the main test process using
    events and barriers.
    """
    def setUp(self):
        self.lock = ringbuffer.ReadersWriterLock()
        self.assert_unlocked()
        # Maps each child Process to the Queue carrying its return value.
        self.result_queues = {}
    def assert_unlocked(self):
        """Assert that no readers and no writer hold the lock."""
        self.assertEqual(0, self.lock.readers.value)
        self.assertFalse(self.lock.writer.value)
    def assert_readers(self, count):
        """Assert that exactly ``count`` readers and no writer hold the lock."""
        self.assertEqual(count, self.lock.readers.value)
        self.assertFalse(self.lock.writer.value)
    def assert_writer(self):
        """Assert that the writer, and no readers, hold the lock."""
        self.assertEqual(0, self.lock.readers.value)
        self.assertTrue(self.lock.writer.value)
    def reader_count(self):
        # Raw reader counter; used by child processes to report what they saw.
        return self.lock.readers.value
    def run_async(self, func):
        """Run func() in a child process; its return value is captured in a queue."""
        def wrapper(result_queue):
            result = func()
            result_queue.put(result)
        result_queue = multiprocessing.Queue()
        process = multiprocessing.Process(
            target=wrapper, args=(result_queue, ))
        self.result_queues[process] = result_queue
        process.start()
        return process
    def get_result(self, process):
        """Join the child process and return the value its func() produced."""
        process.join()
        return self.result_queues[process].get()
    def test_read_then_write(self):
        with self.lock.for_read():
            self.assert_readers(1)
        self.assert_unlocked()
        with self.lock.for_write():
            self.assert_writer()
        self.assert_unlocked()
    def test_reentrant_readers(self):
        with self.lock.for_read():
            self.assert_readers(1)
            with self.lock.for_read():
                self.assert_readers(2)
                with self.lock.for_read():
                    self.assert_readers(3)
                self.assert_readers(2)
            self.assert_readers(1)
        self.assert_unlocked()
    def test_writer_blocks_reader(self):
        with self.lock.for_write():
            event = multiprocessing.Event()
            def test():
                self.assert_writer()
                # Caller will block until this event is released.
                event.set()
                with self.lock.for_read():
                    self.assert_readers(1)
                    return 'read'
            r = self.run_async(test)
            # Wait until we can confirm that the reader is locked out.
            event.wait()
            self.assert_writer()
        self.assertEqual('read', self.get_result(r))
        self.assert_unlocked()
    def test_writer_blocks_multiple_readers(self):
        with self.lock.for_write():
            before_read = multiprocessing.Barrier(3)
            during_read = multiprocessing.Barrier(2)
            after_read = multiprocessing.Barrier(2)
            def test():
                self.assert_writer()
                before_read.wait()
                with self.lock.for_read():
                    during_read.wait()
                    value = self.reader_count()
                    after_read.wait()
                    return value
            r1 = self.run_async(test)
            r2 = self.run_async(test)
            # Wait until we can confirm that all readers are locked out
            before_read.wait()
            self.assert_writer()
        self.assertEqual(2, self.get_result(r1))
        self.assertEqual(2, self.get_result(r2))
        self.assert_unlocked()
    def test_reader_blocks_writer(self):
        with self.lock.for_read():
            before_write = multiprocessing.Barrier(2)
            def test():
                self.assert_readers(1)
                before_write.wait()
                with self.lock.for_write():
                    self.assert_writer()
                    return 'written'
            writer = self.run_async(test)
            # Wait until we can confirm that all writers are locked out.
            before_write.wait()
            self.assert_readers(1)
        self.assertEqual('written', self.get_result(writer))
        self.assert_unlocked()
    def test_multiple_readers_block_writer(self):
        with self.lock.for_read():
            before_read = multiprocessing.Barrier(3)
            after_read = multiprocessing.Barrier(2)
            def test_reader():
                self.assert_readers(1)
                with self.lock.for_read():
                    before_read.wait()
                    value = self.reader_count()
                    after_read.wait()
                    return value
            def test_writer():
                before_read.wait()
                with self.lock.for_write():
                    self.assert_writer()
                    return 'written'
            reader = self.run_async(test_reader)
            writer = self.run_async(test_writer)
            # Wait for the write to be blocked by multiple readers.
            before_read.wait()
            self.assert_readers(2)
            after_read.wait()
        self.assertEqual(2, self.get_result(reader))
        self.assertEqual('written', self.get_result(writer))
        self.assert_unlocked()
    def test_multiple_writers_block_each_other(self):
        with self.lock.for_write():
            before_write = multiprocessing.Barrier(2)
            def test():
                before_write.wait()
                with self.lock.for_write():
                    self.assert_writer()
                    return 'written'
            writer = self.run_async(test)
            before_write.wait()
            self.assert_writer()
        self.assertEqual('written', self.get_result(writer))
        self.assert_unlocked()
    def test_wait_for_write(self):
        event = multiprocessing.Event()
        wait_count = 0
        with self.lock.for_read():
            def test():
                with self.lock.for_write():
                    self.assert_writer()
                    event.set()
                    return 'written'
            writer = self.run_async(test)
            # Spin on wait_for_write() until the child actually wrote;
            # wait_count bounds how many wakeups that took.
            while not event.is_set():
                self.assert_readers(1)
                wait_count += 1
                self.lock.wait_for_write()
            self.assert_readers(1)
        self.assertEqual('written', self.get_result(writer))
        self.assert_unlocked()
        self.assertLessEqual(wait_count, 2)
    def test_wait_for_write__writer_already_waiting_for_reader(self):
        event = multiprocessing.Event()
        with self.lock.for_read():
            def test():
                event.set()
                with self.lock.for_write():
                    self.assert_writer()
                    event.set()
                    return 'written'
            writer = self.run_async(test)
            event.wait()
            # Force a context switch so the writer is waiting
            time.sleep(0.1)
            self.lock.wait_for_write()
            self.assert_readers(1)
        self.assertEqual('written', self.get_result(writer))
        self.assert_unlocked()
    def test_wait_for_write_without_lock(self):
        # wait_for_write() is only legal while holding a reader lock.
        self.assert_unlocked()
        self.assertRaises(ringbuffer.InternalLockingError,
                          self.lock.wait_for_write)
    def test_unlock_readers_on_exception(self):
        try:
            with self.lock.for_read():
                self.assert_readers(1)
                raise TestException
        except TestException:
            self.assert_unlocked()
        else:
            self.fail()
    def test_unlock_writer_on_exception(self):
        try:
            with self.lock.for_write():
                self.assert_writer()
                raise TestException
        except TestException:
            self.assert_unlocked()
        else:
            self.fail()
class Expecter:
    """Synchronous assertion helper bound to one ring + one reader pointer.

    Each method performs a ring-buffer operation (or asserts its failure
    mode) against ``self.ring`` and reports through the owning ``testcase``.
    Instances are normally driven indirectly through AsyncProxy.
    """
    def __init__(self, ring, pointer, testcase):
        self.ring = ring
        self.pointer = pointer
        self.testcase = testcase
    def expect_index(self, i):
        """Assert the pointer's current slot index equals ``i``."""
        self.testcase.assertEqual(i, self.pointer.get().index)
    def write(self, data):
        self.ring.try_write(data)
    def write_multiple(self, data):
        self.ring.try_write_multiple(data)
    def _get_read_func(self, blocking):
        # Chooses between the blocking and non-blocking read entry points.
        if blocking:
            return self.ring.blocking_read
        else:
            return self.ring.try_read
    def expect_read(self, expected_data, blocking=False):
        """Read one record and assert each field in ``expected_data`` matches."""
        read = self._get_read_func(blocking)
        data = read(self.pointer)
        item = data[0]
        for k, v in expected_data.items():
            value = getattr(item, k)
            self.testcase.assertEqual(v, value, 'Data field {} was: {}'.format(
                k, value))
    def expect_multi_read(self, expected_data_list, length=1, blocking=False):
        """Read ``length`` records and field-compare them to the expected list."""
        read = self._get_read_func(blocking)
        data = read(self.pointer, length=length)
        self.testcase.assertEqual(
            len(expected_data_list), len(data), 'Data length is not correct')
        for i, expected_data in enumerate(expected_data_list):
            for k, v in expected_data.items():
                value = getattr(data[i], k)
                self.testcase.assertEqual(v, value,
                                          'Data field {} was: {}'.format(
                                              k, value))
    def expect_read_all(self, expected_data_list):
        """Drain everything available to this reader and field-compare it."""
        data = self.ring.try_read_all(self.pointer)
        self.testcase.assertEqual(
            len(expected_data_list), len(data), 'Data length is not correct')
        for i, expected_data in enumerate(expected_data_list):
            for k, v in expected_data.items():
                value = getattr(data[i], k)
                self.testcase.assertEqual(v, value,
                                          'Data field {}: was {}'.format(
                                              k, value))
    def expect_waiting_for_writer(self):
        # There's no blocking version of this because the WaitingForWriterError
        # is what's used to determine when to block on the condition variable.
        self.testcase.assertRaises(ringbuffer.WaitingForWriterError,
                                   self.ring.try_read, self.pointer)
    def expect_waiting_for_reader(self):
        """Assert a write would overrun the slowest reader."""
        self.testcase.assertRaises(ringbuffer.WaitingForReaderError,
                                   self.ring.try_write, TStruct())
    def writer_done(self):
        self.ring.writer_done()
    def expect_writer_finished(self, blocking=False):
        """Assert reads now fail because every writer has closed."""
        read = self._get_read_func(blocking)
        self.testcase.assertRaises(ringbuffer.WriterFinishedError, read,
                                   self.pointer)
    def expect_already_closed(self):
        """Assert writes fail after writer_done() has been called."""
        self.testcase.assertRaises(ringbuffer.AlreadyClosedError,
                                   self.ring.try_write, TStruct())
    def force_reader_sync(self):
        self.ring.force_reader_sync()
    def expect_try_read_type(self, type_or_class):
        data = self.ring.try_read(self.pointer)
        self.testcase.assertTrue(isinstance(data, type_or_class))
class AsyncProxy:
    """Forwards Expecter method calls to a worker thread/process via a queue.

    Attribute access returns a proxy function that enqueues
    ``(name, args, kwargs)``; run() (executing in the worker set on
    ``self.runner``) pops items and invokes the matching Expecter method,
    routing any exception to ``error_queue`` for the main test to re-raise.
    """
    def __init__(self, expecter, in_queue, error_queue):
        self.expecter = expecter
        self.in_queue = in_queue
        self.error_queue = error_queue
        # Set by the test base's run_proxy(); the Thread/Process draining in_queue.
        self.runner = None
    def run(self):
        """Worker loop: execute queued Expecter calls until 'done' arrives."""
        while True:
            item = self.in_queue.get()
            try:
                if item == 'done':
                    logging.debug('Exiting %r', self.runner)
                    return
                name, args, kwargs = item
                logging.debug('Running %s(%r, %r)', name, args, kwargs)
                try:
                    getattr(self.expecter, name)(*args, **kwargs)
                except Exception as e:
                    logging.exception('Problem running %s(*%r, **%r)', name,
                                      args, kwargs)
                    self.error_queue.put(e)
            finally:
                # task_done() even for 'done'/failures so in_queue.join() returns.
                self.in_queue.task_done()
    def shutdown(self):
        """Ask the worker loop to exit after draining earlier items."""
        self.in_queue.put('done')
    def __getattr__(self, name):
        def proxy(*args, **kwargs):
            self.expecter.testcase.assertTrue(
                self.runner,
                'Must call start_proxies() before setting test expectations')
            # This queue is used to sequence operations between functions
            # that are running asynchronously (threads or processes).
            self.in_queue.put((name, args, kwargs))
            # If this test function is running in blocking mode, that means
            # the locking and sequencing is built into the semantics of the
            # function call itself. That means we can skip waiting for the
            # asynchronous function to consume the queue before letting
            # subsequent test methods run.
            if kwargs.get('blocking'):
                # Allow a context switch so the asynchronous function has
                # a chance to actually start the function call.
                time.sleep(0.1)
            else:
                self.in_queue.join()
        return proxy
class TStruct(ctypes.Structure):
    """Test payload passed through the ring: a 32-bit int and a 32-bit float."""
    _fields_ = [('i', ctypes.c_int32), ('f', ctypes.c_float)]
class RingBufferTestBase:
    """Shared ring-buffer test suite, parameterized over the concurrency model.

    Subclasses supply new_queue() and run_proxy() to run the AsyncProxy
    workers as threads or as processes; every test method here is inherited
    and executed under both models. tearDown() re-raises the first exception
    any worker reported.
    """
    def setUp(self):
        self.ring = ringbuffer.RingBuffer(c_type=TStruct, slot_count=10)
        self.proxies = []
        self.error_queue = self.new_queue()
    def tearDown(self):
        # Drain and stop every started worker before checking for errors.
        for proxy in self.proxies:
            if proxy.runner:
                proxy.shutdown()
        for proxy in self.proxies:
            if proxy.runner:
                proxy.in_queue.join()
        if not self.error_queue.empty():
            raise self.error_queue.get()
        # Force child processes and pipes to be garbage collected, otherwise
        # we'll run out of file descriptors.
        gc.collect()
    def new_queue(self):
        """Return a joinable queue suitable for the concurrency model."""
        raise NotImplementedError
    def run_proxy(self, proxy):
        """Start ``proxy.run`` in a worker and record it on ``proxy.runner``."""
        raise NotImplementedError
    def start_proxies(self):
        for proxy in self.proxies:
            self.run_proxy(proxy)
    def new_reader(self):
        """Register a new reader on the ring and wrap it in an AsyncProxy."""
        expecter = Expecter(self.ring, self.ring.new_reader(), self)
        proxy = AsyncProxy(expecter, self.new_queue(), self.error_queue)
        self.proxies.append(proxy)
        return proxy
    def new_writer(self):
        """Register the writer on the ring and wrap it in an AsyncProxy."""
        self.ring.new_writer()
        expecter = Expecter(self.ring, self.ring.writer, self)
        proxy = AsyncProxy(expecter, self.new_queue(), self.error_queue)
        self.proxies.append(proxy)
        return proxy
    def test_write_simple(self):
        writer = self.new_writer()
        self.start_proxies()
        o = TStruct(i=22, f=2.2)
        writer.write(o)
    def test_write_string(self):
        # Only TStruct instances are valid payloads; a str must raise.
        writer = self.new_writer()
        self.start_proxies()
        self.assertTrue(self.error_queue.empty())
        writer.write('this does not work')
        error = self.error_queue.get()
        self.assertTrue(isinstance(error, TypeError))
    def _do_read_single_write(self, blocking):
        reader = self.new_reader()
        writer = self.new_writer()
        self.start_proxies()
        writer.expect_index(0)
        o = TStruct(i=22, f=2.2)
        writer.write(o)
        writer.expect_index(1)
        reader.expect_index(0)
        reader.expect_read({'i': 22}, blocking=blocking)
        reader.expect_index(1)
    def test_read_single_write_blocking(self):
        self._do_read_single_write(True)
    def test_read_single_write_non_blocking(self):
        self._do_read_single_write(False)
    def _do_read_ahead_of_writes(self, blocking):
        reader = self.new_reader()
        writer = self.new_writer()
        self.start_proxies()
        reader.expect_waiting_for_writer()
        o = TStruct(i=22, f=2.2)
        writer.write(o)
        reader.expect_read({'i': 22}, blocking=blocking)
    def test_read_ahead_of_writes_blocking(self):
        self._do_read_ahead_of_writes(True)
    def test_read_ahead_of_writes_non_blocking(self):
        self._do_read_ahead_of_writes(False)
    def _do_two_reads_one_behind_one_ahead(self, blocking):
        r1 = self.new_reader()
        r2 = self.new_reader()
        writer = self.new_writer()
        self.start_proxies()
        o = TStruct(i=22, f=2.2)
        writer.write(o)
        r1.expect_read({'i': 22}, blocking=blocking)
        r1.expect_waiting_for_writer()
        r2.expect_read({'i': 22}, blocking=blocking)
        r2.expect_waiting_for_writer()
    def test_two_reads_one_behind_one_ahead_blocking(self):
        self._do_two_reads_one_behind_one_ahead(True)
    def test_two_reads_one_behind_one_ahead_non_blocking(self):
        self._do_two_reads_one_behind_one_ahead(False)
    def test_write_conflict_first_slot(self):
        reader = self.new_reader()
        writer = self.new_writer()
        self.start_proxies()
        for i in range(self.ring.slot_count):
            o = TStruct(i=i, f=2.2)
            writer.write(o)
        # The writer has wrapped around and is now waiting for the reader
        # to free up a slot. They have the same index, but are different
        # generations.
        reader.expect_index(0)
        writer.expect_index(0)
        writer.expect_waiting_for_reader()
        reader.expect_read({'i': 0})
        o = TStruct(i=1111, f=2.2)
        writer.write(o)
        for i in range(1, self.ring.slot_count):
            reader.expect_read({'i': i})
        reader.expect_index(0)
        reader.expect_read({'i': 1111})
    def test_write_conflict_last_slot(self):
        reader = self.new_reader()
        writer = self.new_writer()
        self.start_proxies()
        last_slot = self.ring.slot_count - 1
        self.assertGreater(last_slot, 0)
        for i in range(last_slot):
            writer.write(TStruct(i=i))
            reader.expect_read({'i': i})
        writer.expect_index(last_slot)
        reader.expect_index(last_slot)
        # The reader's pointed at the last slot, now wrap around the writer
        # to catch up. They'll have the same index, but different generation
        # numbers.
        for i in range(self.ring.slot_count):
            writer.write(TStruct(i=self.ring.slot_count + i))
        reader.expect_index(last_slot)
        writer.expect_index(last_slot)
        writer.expect_waiting_for_reader()
        reader.expect_read({'i': self.ring.slot_count})
        writer.write(TStruct())
        writer.expect_index(0)
        reader.expect_index(0)
    def test_create_reader_after_writing(self):
        writer = self.new_writer()
        self.start_proxies()
        self.new_reader()  # No error because no writes happened yet.
        writer.write(TStruct())
        self.assertRaises(ringbuffer.MustCreatedReadersBeforeWritingError,
                          self.new_reader)
    def _do_read_after_close_beginning(self, blocking):
        reader = self.new_reader()
        writer = self.new_writer()
        self.start_proxies()
        writer.writer_done()
        reader.expect_writer_finished(blocking=blocking)
    def test_read_after_close_beginning_blocking(self):
        self._do_read_after_close_beginning(True)
    def test_read_after_close_beginning_non_blocking(self):
        self._do_read_after_close_beginning(False)
    def _do_close_before_read(self, blocking):
        reader = self.new_reader()
        writer = self.new_writer()
        self.start_proxies()
        writer.write(TStruct(i=4545))
        writer.writer_done()
        writer.expect_index(1)
        reader.expect_read({'i': 4545})
        reader.expect_writer_finished(blocking=blocking)
        reader.expect_index(1)
    def test_close_before_read_blocking(self):
        self._do_close_before_read(True)
    def test_close_before_read_non_blocking(self):
        self._do_close_before_read(False)
    def _do_close_after_read(self, blocking):
        reader = self.new_reader()
        writer = self.new_writer()
        self.start_proxies()
        writer.write(TStruct(i=3434))
        reader.expect_read({'i': 3434})
        reader.expect_waiting_for_writer()
        reader.expect_index(1)
        writer.writer_done()
        writer.expect_index(1)
        reader.expect_writer_finished(blocking=blocking)
    def test_close_after_read_blocking(self):
        self._do_close_after_read(True)
    def test_close_after_read_non_blocking(self):
        self._do_close_after_read(False)
    def test_close_then_write(self):
        writer = self.new_writer()
        self.start_proxies()
        writer.write(TStruct())
        writer.writer_done()
        writer.expect_already_closed()
    def test_blocking_readers_wake_up_after_write(self):
        writer = self.new_writer()
        r1 = self.new_reader()
        r2 = self.new_reader()
        self.start_proxies()
        r1.expect_read({'i': 11}, blocking=True)
        r2.expect_read({'i': 11}, blocking=True)
        writer.write(TStruct(i=11))
    def test_blocking_readers_wake_up_after_close(self):
        writer = self.new_writer()
        r1 = self.new_reader()
        r2 = self.new_reader()
        self.start_proxies()
        r1.expect_writer_finished(blocking=True)
        r2.expect_writer_finished(blocking=True)
        writer.writer_done()
    def test_force_reader_sync(self):
        writer = self.new_writer()
        r1 = self.new_reader()
        r2 = self.new_reader()
        self.start_proxies()
        writer.write(TStruct(i=1))
        writer.write(TStruct(i=2))
        writer.write(TStruct(i=3))
        writer.expect_index(3)
        r1.expect_index(0)
        r2.expect_index(0)
        writer.force_reader_sync()
        r1.expect_index(3)
        r2.expect_index(3)
    def _do_multiple_writers(self, blocking):
        # Both writer proxies share the single ring writer pointer, so each
        # write advances the index seen by both.
        w1 = self.new_writer()
        w2 = self.new_writer()
        reader = self.new_reader()
        self.start_proxies()
        w1.write(TStruct(i=11))
        w1.expect_index(1)
        w2.expect_index(1)
        w2.write(TStruct(i=22))
        w1.expect_index(2)
        w2.expect_index(2)
        w2.write(TStruct(i=33))
        w1.expect_index(3)
        w2.expect_index(3)
        w1.write(TStruct(i=44))
        w1.expect_index(4)
        w2.expect_index(4)
        reader.expect_read({'i': 11}, blocking=blocking)
        reader.expect_read({'i': 22}, blocking=blocking)
        reader.expect_read({'i': 33}, blocking=blocking)
        reader.expect_read({'i': 44}, blocking=blocking)
    def test_multiple_writers_blocking(self):
        self._do_multiple_writers(True)
    def test_multiple_writers_non_blocking(self):
        self._do_multiple_writers(False)
    def _do_test_multiple_writers_close(self, blocking):
        # WriterFinishedError must only surface once *all* writers closed.
        w1 = self.new_writer()
        w2 = self.new_writer()
        reader = self.new_reader()
        self.start_proxies()
        w1.write(TStruct(i=11))
        w1.writer_done()
        w2.write(TStruct(i=22))
        w2.writer_done()
        reader.expect_read({'i': 11}, blocking=blocking)
        reader.expect_read({'i': 22}, blocking=blocking)
        reader.expect_writer_finished(blocking=blocking)
    def test_multiple_writers_close_blocking(self):
        self._do_test_multiple_writers_close(True)
    def test_multiple_writers_close_non_blocking(self):
        self._do_test_multiple_writers_close(False)
    def _do_start_read_before_writer_setup(self, blocking):
        reader = self.new_reader()
        self.start_proxies()
        reader.expect_writer_finished(blocking=blocking)
    def test_start_read_before_writer_setup_blocking(self):
        self._do_start_read_before_writer_setup(True)
    def test_start_read_before_writer_setup_non_blocking(self):
        self._do_start_read_before_writer_setup(False)
    def test_read_older_gen(self):
        # Multi-reads may span the wrap point into the next generation.
        w = self.new_writer()
        reader = self.new_reader()
        self.start_proxies()
        for i in range(0, 10):
            w.write(TStruct(i=i))
        reader.expect_multi_read(
            [{
                'i': 0
            }, {
                'i': 1
            }, {
                'i': 2
            }, {
                'i': 3
            }, {
                'i': 4
            }], length=5)
        for i in range(10, 15):
            w.write(TStruct(i=i))
        reader.expect_multi_read(
            [{
                'i': 5
            }, {
                'i': 6
            }, {
                'i': 7
            }, {
                'i': 8
            }, {
                'i': 9
            }, {
                'i': 10
            }, {
                'i': 11
            }],
            length=7)
        w.writer_done()
    def test_read_all(self):
        w = self.new_writer()
        reader = self.new_reader()
        self.start_proxies()
        for i in range(0, 10):
            w.write(TStruct(i=i))
        expected_data = [{'i': index} for index in range(0, 10)]
        reader.expect_read_all(expected_data)
        for i in range(10, 15):
            w.write(TStruct(i=i))
        expected_data = [{'i': index} for index in range(10, 15)]
        reader.expect_read_all(expected_data)
        w.writer_done()
class ThreadingTest(RingBufferTestBase, unittest.TestCase):
    """Runs the shared ring-buffer suite with thread-backed proxies."""
    def new_queue(self):
        return queue.Queue()
    def run_proxy(self, proxy):
        worker = threading.Thread(target=proxy.run, daemon=True)
        proxy.runner = worker
        worker.start()
class MultiprocessingTest(RingBufferTestBase, unittest.TestCase):
    """Runs the shared ring-buffer suite with process-backed proxies."""
    def new_queue(self):
        return multiprocessing.JoinableQueue()
    def run_proxy(self, proxy):
        worker = multiprocessing.Process(target=proxy.run, daemon=True)
        proxy.runner = worker
        worker.start()
# Enable debug logging for the proxy call traces, then run all test cases.
if __name__ == '__main__':
    logging.getLogger().setLevel(logging.DEBUG)
    unittest.main()
|
scanning.py
|
import numpy as np
import os
import serial
import time
import sys
import cv2
import cv2.cv as cv
import cPickle as pickle
def get_filename(coin_id, image_id):
    """Return the image path for (coin_id, image_id).

    Images are bucketed into directories of 100 coins each; the file name is
    the zero-padded coin id (5 digits) followed by the image id (2 digits).
    """
    # '//' makes the Python 2 integer-division bucketing explicit and keeps it
    # correct under Python 3; 'dir' renamed to avoid shadowing the builtin.
    directory = '/home/pkrush/cents-test/' + str(coin_id // 100) + '/'
    filename = directory + str(coin_id).zfill(5) + str(image_id).zfill(2) + '.png'
    return filename
def read_from_cameras(top_camera, bottom_camera):
    """Grab one frame from each camera.

    Returns (top_frame, bottom_frame); raises ValueError if either grab
    produced no frame.
    """
    ret, top = top_camera.read()
    ret, bottom = bottom_camera.read()
    # Use 'is None' rather than '== None': frames are numpy arrays, for which
    # '==' is an elementwise comparison, not a None check.
    if top is None:
        raise ValueError('A frame from the top camera came up None')
    if bottom is None:
        raise ValueError('A frame from the bottom camera came up None')
    return top, bottom
def deskew(src, pixel_shift):
    """Shear the image horizontally by moving its bottom edge ``pixel_shift``
    pixels, warping ``src`` in place and returning it.

    NOTE(review): warpAffine is given ``src`` as its destination, so the input
    array is overwritten — callers must not rely on the original pixels.
    Assumes ``src`` is an H x W (x C) uint8 numpy image — TODO confirm.
    """
    src_tri = np.zeros((3, 2), dtype=np.float32)
    dst_tri = np.zeros((3, 2), dtype=np.float32)
    rows = src.shape[0]
    cols = src.shape[1]
    # Set your 3 points to calculate the Affine Transform
    # (point 0 stays at the implicit origin (0, 0)).
    src_tri[1] = [cols - 1, 0]
    src_tri[2] = [0, rows - 1]
    # dstTri is the same except the bottom is moved over shiftpixels:
    dst_tri[1] = src_tri[1]
    dst_tri[2] = [pixel_shift, rows - 1]
    # Get the Affine Transform
    warp_mat = cv2.getAffineTransform(src_tri, dst_tri)
    ## Apply the Affine Transform just found to the src image
    cv2.warpAffine(src, warp_mat, (cols, rows), src, cv2.INTER_CUBIC)
    return src
def scan(top_camera, bottom_camera, ser):
    """Capture an LED-sweep of frames from both cameras.

    Reads 62 frame pairs, keeping all but the first five (warm-up frames),
    and advances the LED index over the serial port every other frame.
    Returns (top_captures, bottom_captures).
    """
    top_captures = []
    bottom_captures = []
    for count in range(0, 62):
        top, bottom = read_from_cameras(top_camera, bottom_camera)
        if count > 4:
            top_captures.append(top)
            bottom_captures.append(bottom)
        # Two frames are captured per LED position; '//' keeps this an int
        # under Python 3 as well (was Python 2 integer '/').
        led = count // 2
        if led < 29:
            ser.write(str(led) + "\n")
        cv.WaitKey(1)
    return top_captures, bottom_captures
def save(captures, coin_id):
    """Locate the coin in each capture, average its center, and write crops.

    Every frame is downscaled (even coin ids are the top camera, odd the
    bottom, hence the two ratios), padded with a black border, and run
    through a Hough circle detector. The detected centers are averaged and
    one 448x448 crop per frame is written under the coin's bucket directory.
    Returns False when no circle was found in any frame, True otherwise.
    Python 2 module: relies on integer '/' and print statements.
    """
    count = 0
    crop_radius = 224
    border_expansion = 30
    center_list = []
    resized = []
    start_time = time.time()
    for frame in captures:
        # Even/odd coin id distinguishes the two cameras, which need
        # slightly different scale factors to normalize the coin radius.
        if coin_id % 2 == 0:
            ratio = .41
        else:
            ratio = .46
        frame_width = int(1920 * ratio)
        frame_height = int(1080 * ratio)
        frame = cv2.resize(frame, (frame_width, frame_height), interpolation=cv2.INTER_AREA)
        blank_image = np.zeros((frame_height + border_expansion * 2, frame_width + border_expansion * 2, 3), np.uint8)
        blank_image[border_expansion:frame_height + border_expansion,
                    border_expansion:frame_width + border_expansion] = frame
        frame = blank_image
        resized.append(frame)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # NOTE(review): both branches pass identical HoughCircles parameters;
        # presumably they were meant to differ per camera — confirm.
        if coin_id % 2 == 0:
            circles = cv2.HoughCircles(gray, cv.CV_HOUGH_GRADIENT, 1, 2000, param1=45, param2=25, minRadius=222,
                                       maxRadius=226)
        else:
            circles = cv2.HoughCircles(gray, cv.CV_HOUGH_GRADIENT, 1, 2000, param1=45, param2=25, minRadius=222,
                                       maxRadius=226)
        if circles is None:
            continue
        circles = np.uint16(np.around(circles))
        for i in circles[0, :]:
            center_x = i[0]
            center_y = i[1]
            coin_radius = i[2]
            # Drawn on the throwaway gray image, only useful when debugging.
            cv2.circle(gray, (center_x, center_y), 2, (0, 0, 255), 1)
            cv2.circle(gray, (center_x, center_y), coin_radius, (0, 0, 255), 1)
            center_list.append([center_x, center_y, coin_radius])
    total_center_x = 0
    total_center_y = 0
    total_radius = 0
    # print '1 In %s seconds' % (time.time() - start_time,)
    for center_x, center_y, coin_radius in center_list:
        # print center_x, center_y, coin_radius
        total_center_x += center_x
        total_center_y += center_y
        total_radius += coin_radius
    #print '2 In %s seconds' % (time.time() - start_time,)
    if len(center_list) == 0:
        return False
        # raise ValueError(str(coin_id) + 'had no detected circles')
    #print '3 In %s seconds' % (time.time() - start_time,)
    # One fixed crop window, centered on the average of all detections.
    average_center_x = float(total_center_x) / len(center_list)
    average_center_y = float(total_center_y) / len(center_list)
    average_radius = float(total_radius) / len(center_list)
    resized_height,resized_width,channels = frame.shape
    # NOTE(review): these bounds are floats used later as slice indices —
    # accepted (with a warning) by the numpy this Python 2 code ran on.
    crop_top = average_center_y - crop_radius
    crop_bottom = average_center_y + crop_radius
    crop_left = average_center_x - crop_radius
    crop_right = average_center_x + crop_radius
    bad_crop = ' is Bad. X&Y:' + str(average_center_x) + "," + str(average_center_y) + ' Frame Width:' + str(resized_width) + ' Frame Height:' + str(resized_height)
    # Out-of-bounds crops are only logged, not rejected (returns were
    # deliberately commented out).
    if crop_left < 0:
        print str(crop_left) + ' crop_left' + bad_crop + '\n\n\n'
        #return False
    if crop_right > resized_width:
        print str(crop_right) + ' crop_right' + bad_crop + '\n\n\n'
        #return False
    if crop_top < 0:
        print str(crop_top) + ' crop_top' + bad_crop + '\n\n\n'
        #return False
    if crop_bottom > resized_height:
        print str(crop_bottom) + ' crop_bottom' + bad_crop + '\n\n\n'
        #return False
    # dir = '/media/pkrush/Seagate Backup Plus Drive/cents_2/' + str(coin_id/100) + '/'
    dir = '/home/pkrush/cents-test/' + str(coin_id / 100) + '/'
    if not os.path.exists(dir):
        os.mkdir(dir)
    #print '5 In %s seconds' % (time.time() - start_time,)
    for frame in resized:
        crop = frame[crop_top:crop_bottom, crop_left:crop_right]
        cv2.imwrite(dir + str(coin_id).zfill(5) + str(count).zfill(2) + '.png', crop)
        count += 1
    #print '6 In %s seconds' % (time.time() - start_time,)
    return True
def get_moving_center_x(frame, ratio, deskew_pixels, frame_name, frame_id):
    """Detect a moving coin in a belt frame and return its x position.

    The frame is downscaled by ``ratio``, vertically padded, desked by
    ``deskew_pixels``, and run through a Hough circle detector tuned to the
    downscaled coin radius (52-58 px). Returns 0 when no circle is found,
    otherwise the detected center x scaled back to full-resolution pixels.
    Also shows the annotated frame and writes it to the debug directory.
    NOTE(review): with several detections only the last circle's x is
    returned; 0 is also a legitimate (if unlikely) coordinate — confirm
    callers treat 0 purely as "not found".
    """
    frame_width = int(1920 * ratio)
    frame_height = int(1080 * ratio)
    # print '3 In %s seconds' % (time.time() - start_time,)
    frame = cv2.resize(frame, (frame_width, frame_height), interpolation=cv2.INTER_AREA)
    # print '4 In %s seconds' % (time.time() - start_time,)
    height_expansion_amount = 40
    blank_image = np.zeros((frame_height + height_expansion_amount, frame_width, 3), np.uint8)
    # Python 2 integer division in the slice bounds (40 / 2 == 20).
    blank_image[height_expansion_amount / 2:frame_height + height_expansion_amount / 2, 0:frame_width] = frame
    frame = blank_image
    # frame = frame[460:,40:1040]
    deskewed = deskew(frame, deskew_pixels)
    gray = cv2.cvtColor(deskewed, cv2.COLOR_BGR2GRAY)
    circles = cv2.HoughCircles(gray, cv.CV_HOUGH_GRADIENT, 1, 300, param1=45, param2=25, minRadius=52, maxRadius=58)
    if circles is None:
        cv2.imshow(frame_name, frame)
        return 0
    circles = np.uint16(np.around(circles))
    for i in circles[0, :]:
        center_x = i[0]
        center_y = i[1]
        crop_radius = i[2]
        # Annotate the detection for the on-screen preview / debug snapshot.
        cv2.circle(frame, (center_x, center_y), 2, (0, 0, 255), 1)
        cv2.circle(frame, (center_x, center_y), crop_radius, (0, 0, 255), 1)
    # print circles
    cv2.imshow(frame_name, frame)
    cv2.imwrite('/home/pkrush/cents-circle-detect/' + str(frame_id).zfill(6) + frame_name + '.png', frame)
    return center_x * (1 / ratio)
def get_cameras():
    """Probe video devices 0-3 and return (top_camera, bottom_camera).

    A device counts as one of the scanner cameras if it accepts a 1920x1080
    mode (property 3 = width, 4 = height). The first match is provisionally
    the top camera, the second the bottom.
    NOTE(review): if fewer than two devices qualify, None is handed to
    read_from_cameras and this will fail — confirm that's acceptable here.
    """
    top_camera = None
    bottom_camera = None
    for camera_id in range(0, 4):
        cap = cv2.VideoCapture(camera_id)
        cap.set(3, 1920)
        cap.set(4, 1080)
        if cap.get(3) == 1920:
            if top_camera is None:
                top_camera = cap
            else:
                bottom_camera = cap
    top, bottom = read_from_cameras(top_camera, bottom_camera)
    # Heuristic to disambiguate the two cameras: if the bottom frame's probe
    # pixel is gray (equal B, G, R), the assignment is swapped — presumably
    # one camera views a scene known not to be gray there; confirm.
    if bottom[170, 170, 0] == bottom[170, 170, 1] == bottom[170, 170, 2]:
        temp_camera = top_camera
        top_camera = bottom_camera
        bottom_camera = temp_camera
    return top_camera, bottom_camera
#this is a one time function as the init scanning had issues.
#237 sets of 2 were bad 2500 were good. I have 5000 good sets of 57 images for 2500 coins.
def save_good_coin_ids():
    """One-time audit of the scanned images; pickles the set of good coin ids.

    A coin (even id) is good when, for both sides (id offsets 0 and 3), all
    56 images exist, are non-empty, decode with cv2, and are 448x448. Ids
    adjacent to scanning restart boundaries are also discarded. The
    surviving ids plus their back sides (id + 3) are written to the seed and
    test pickle files.
    """
    good_coin_ids = set()
    bad_coin_ids = set()
    for coin_id in range(0, 5458, 2):
        good_coin_ids.add(coin_id)
        for side in [0, 3]:
            for image_id in range(0, 56):
                filename = get_filename(coin_id + side, image_id)
                # 'continue' (not 'break') keeps checking the remaining
                # images even after the coin is already known bad.
                if not os.path.isfile(filename):
                    bad_coin_ids.add(coin_id)
                    continue
                if os.path.getsize(filename) == 0:
                    bad_coin_ids.add(coin_id)
                    continue
                test_image = cv2.imread(filename)
                if test_image is None:
                    bad_coin_ids.add(coin_id)
                    continue
                width, height, channels = test_image.shape
                if not width == height == 448:
                    bad_coin_ids.add(coin_id)
                    continue
    good_coin_ids = good_coin_ids - bad_coin_ids
    for start_id in coin_id_starts:
        if start_id != 0:
            #-2 is bad: Why;
            #-2 bad the for top coin_id is good,
            #-1 good the bottom of -4 good,
            #0 good top coin_id is good,
            #1 bad bottom will never be read as it's the back of -2
            #2 good top is new the back of 0
            #3 good bottom is the back of #0
            bad_coin_ids.add(start_id - 2)
    print(len(bad_coin_ids))
    print(len(good_coin_ids))
    # BUG FIX: set.difference() returns a new set; the original discarded the
    # result, so the boundary ids added just above were never removed.
    good_coin_ids = good_coin_ids.difference(bad_coin_ids)
    home_dir = '/home/pkrush/cent-models/'
    data_dir = home_dir + 'metadata/'
    back_sides = set()
    for coin_id in good_coin_ids:
        back_sides.add(coin_id + 3)
    good_coin_ids = good_coin_ids.union(back_sides)
    print(len(good_coin_ids))
    pickle.dump(good_coin_ids, open(data_dir + 'seed_image_ids.pickle', "wb"))
    pickle.dump(good_coin_ids, open(data_dir + 'test_image_ids.pickle', "wb"))
# Coin ids at which scanning sessions were (re)started, in ascending order.
coin_id_starts = [0, 380, 1152, 1972, 2674, 2780, 2846, 2946, 3330, 5448]
def get_start_coin_id():
    """Return the most recent scanning checkpoint (last entry of coin_id_starts)."""
    return coin_id_starts[-1]
# Main scanner loop (Python 2 script body): feed coins on two belts, stop
# each belt when its camera sees a coin arrive, run an LED-sweep scan of both
# sides, save the crops, then restart the belts. Serial writes of small
# integers are commands to the belt/LED controller (102/104 = belts on,
# 103/105 = belts off, 106/107 presumably hopper reset — confirm against the
# controller firmware).
coin_id = get_start_coin_id()
top_camera, bottom_camera = get_cameras()
# files = glob.glob('/home/pkrush/cents-circle-detect/*')
# for f in files:
# os.remove(f)
# files = glob.glob('/home/pkrush/cents-test/*')
# for f in files:
# os.remove(f)
start_time = time.time()
ser = serial.Serial(port='/dev/ttyUSB0', baudrate=115200)
ser.write(str(102) + "\n")
cv.WaitKey(2)
ser.write(str(104) + "\n")
cv.WaitKey(2)
frame_count = 0
# Start far in the past so the first frames are not skipped by the
# post-scan cool-down below.
last_scan_frame_count = -100
found_coin = False
top_belt_on = True
bottom_belt_on = True
while (True):
    status = ''
    if top_belt_on and bottom_belt_on:
        # This might be overkill to keep turning them on:
        ser.write(str(102) + "\n")
        cv.WaitKey(1)
        ser.write(str(104) + "\n")
        cv.WaitKey(1)
    top, bottom = read_from_cameras(top_camera, bottom_camera)
    # Skip detection for a few frames right after a scan so stale frames
    # don't trigger another stop.
    after_scan_frame_delay = 30
    if frame_count - last_scan_frame_count < after_scan_frame_delay:
        frame_count += 1
        continue
    if top_belt_on:
        center_x = get_moving_center_x(top, .1, 8, 'Top', frame_count)
        if center_x != 0:
            status += 'top' + ' ' + str(center_x) + '-'
        # NOTE(review): center_x == 0 (no coin found) also satisfies
        # '< 1691', so this appears to stop the top belt even without a
        # detection — confirm intended.
        if top_belt_on and center_x < 1691:
            top_belt_on = False
            status += str(top_belt_on) + ' ' + str(bottom_belt_on) + '-'
            ser.write(str(105) + "\n")
            cv.WaitKey(1)
            ser.write(str(106) + "\n")
            cv.WaitKey(10)
            ser.write(str(107) + "\n")
            cv.WaitKey(1)
            status += 'Top belt off, reset hopper'
    if bottom_belt_on:
        center_x = get_moving_center_x(bottom, .11, -8, 'Bot', frame_count)
        if center_x != 0:
            status += 'bottom' + ' ' + str(center_x) + '-'
        if bottom_belt_on and center_x > 0:
            bottom_belt_on = False
            status += str(top_belt_on) + ' ' + str(bottom_belt_on) + '-'
            ser.write(str(103) + "\n")
            cv.WaitKey(1)
            status += 'Bottom belt off-'
    # Both belts stopped: a coin is in position under each camera.
    if top_belt_on == False and bottom_belt_on == False:
        # if first_top_scanned == True:
        status += 'Scanning ' + str(coin_id) + ' with the LED lights-'
        last_scan_frame_count = frame_count
        top_captures, bottom_captures = scan(top_camera, bottom_camera, ser)
        # t = threading.Thread(target=save, args=(top_captures, coin_id))
        # t.start()
        # t = threading.Thread(target=save, args=(bottom_captures, coin_id + 1))
        # t.start()
        # print 'pre save In %s seconds' % (time.time() - start_time,)
        top_save = save(top_captures, coin_id)
        bottom_save = save(bottom_captures, coin_id + 1)
        # print 'save In %s seconds' % (time.time() - start_time,)
        # Only advance the id when both sides saved; otherwise the slot is
        # retried with the same id.
        if top_save and bottom_save:
            coin_id += 2
        status += 'Cycle In %s seconds' % (time.time() - start_time,)
        start_time = time.time()
        status += 'Both belts on-'
        top_belt_on = True
        bottom_belt_on = True
        status += str(top_belt_on) + ' ' + str(bottom_belt_on) + '-'
    if status != '':
        print frame_count, status
    frame_count +=1
    #ser.write(str(102) + "\n")
    # cv.WaitKey(3500)
    cv.WaitKey(35)
#ser.write(str(100) + "\n")
cv.WaitKey(100)
#ser.write(str(101) + "\n")
# Cleanup below is unreachable while the loop above runs forever; kept for
# manual interruption/debugging sessions.
top_camera.release()
bottom_camera.release()
cv2.destroyAllWindows()
|
locators.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)
# Matches 'algorithm=hexdigest' fragments, e.g. 'sha256=deadbeef...'.
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
# Extracts the charset parameter from a Content-Type header value.
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
def get_all_distribution_names(url=None):
    """
    Return all distribution names known by an index.

    :param url: The URL of the index (``DEFAULT_INDEX`` when ``None``).
    :return: A list of all known distribution names.
    """
    index_url = DEFAULT_INDEX if url is None else url
    client = ServerProxy(index_url, timeout=3.0)
    try:
        return client.list_packages()
    finally:
        # Always release the XML-RPC transport, even if listing fails.
        client('close')()
class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        """Resolve a scheme-less redirect target against the request URL,
        rewrite it into the headers, then defer to the base handler."""
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use first header.
        newurl = None
        for key in ('location', 'uri'):
            if key in headers:
                newurl = headers[key]
                break
        if newurl is None:  # pragma: no cover
            return
        urlparts = urlparse(newurl)
        if urlparts.scheme == '':
            # Relative redirect: make it absolute using the request's URL.
            newurl = urljoin(req.get_full_url(), newurl)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(key, newurl)
            else:
                headers[key] = newurl
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)
    # The same fix applies to every redirect status the base class handles.
    http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
    """
    A base class for locators - things that locate distributions.
    """
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    binary_extensions = ('.egg', '.exe', '.whl')
    excluded_extensions = ('.pdf',)

    # A list of tags indicating which wheels you want to match. The default
    # value of None matches against the tags compatible with the running
    # Python. If you want to match other values, set wheel_tags on a locator
    # instance to a list of tuples (pyver, abi, arch) which you want to match.
    wheel_tags = None

    downloadable_extensions = source_extensions + ('.whl',)

    def __init__(self, scheme='default'):
        """
        Initialise an instance.
        :param scheme: Because locators look for most recent versions, they
                       need to know the version scheme to use. This specifies
                       the current PEP-recommended scheme - use ``'legacy'``
                       if you need to support existing distributions on PyPI.
        """
        self._cache = {}
        self.scheme = scheme
        # Because of bugs in some of the handlers on some of the platforms,
        # we use our own opener rather than just using urlopen.
        self.opener = build_opener(RedirectHandler())
        # If get_project() is called from locate(), the matcher instance
        # is set from the requirement passed to locate(). See issue #18 for
        # why this can be useful to know.
        self.matcher = None
        self.errors = queue.Queue()

    def get_errors(self):
        """
        Return any errors which have occurred, draining the error queue.
        """
        result = []
        while not self.errors.empty():  # pragma: no cover
            try:
                e = self.errors.get(False)
                result.append(e)
            except queue.Empty:
                # Fix: the original caught ``self.errors.Empty``, but Queue
                # *instances* have no ``Empty`` attribute - the exception
                # class lives on the ``queue`` module, so the old handler
                # would itself have raised AttributeError if ever reached.
                continue
            self.errors.task_done()
        return result

    def clear_errors(self):
        """
        Clear any errors which may have been logged.
        """
        # Just get the errors and throw them away
        self.get_errors()

    def clear_cache(self):
        """Discard all cached per-project results."""
        self._cache.clear()

    def _get_scheme(self):
        return self._scheme

    def _set_scheme(self, value):
        self._scheme = value

    scheme = property(_get_scheme, _set_scheme)

    def _get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This should be implemented in subclasses.

        If called from a locate() request, self.matcher will be set to a
        matcher for the requirement to satisfy, otherwise it will be None.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This calls _get_project to do all the work, and just implements a caching layer on top.
        """
        if self._cache is None:  # pragma: no cover
            result = self._get_project(name)
        elif name in self._cache:
            result = self._cache[name]
        else:
            self.clear_errors()
            result = self._get_project(name)
            self._cache[name] = result
        return result

    def score_url(self, url):
        """
        Give an url a score which can be used to choose preferred URLs
        for a given project release.  Higher tuples compare greater:
        https scheme, hosted on PyPI, downloadable extension, wheel,
        compatible wheel, then basename as a tie-breaker.
        """
        t = urlparse(url)
        basename = posixpath.basename(t.path)
        compatible = True
        is_wheel = basename.endswith('.whl')
        is_downloadable = basename.endswith(self.downloadable_extensions)
        if is_wheel:
            compatible = is_compatible(Wheel(basename), self.wheel_tags)
        return (t.scheme == 'https', 'pypi.python.org' in t.netloc,
                is_downloadable, is_wheel, compatible, basename)

    def prefer_url(self, url1, url2):
        """
        Choose one of two URLs where both are candidates for distribution
        archives for the same version of a distribution (for example,
        .tar.gz vs. zip).

        The current implementation favours https:// URLs over http://, archives
        from PyPI over those from other locations, wheel compatibility (if a
        wheel) and then the archive name.
        """
        result = url2
        if url1:
            s1 = self.score_url(url1)
            s2 = self.score_url(url2)
            if s1 > s2:
                result = url1
            if result != url2:
                logger.debug('Not replacing %r with %r', url1, url2)
            else:
                logger.debug('Replacing %r with %r', url1, url2)
        return result

    def split_filename(self, filename, project_name):
        """
        Attempt to split a filename in project name, version and Python version.
        """
        return split_filename(filename, project_name)

    def convert_url_to_download_info(self, url, project_name):
        """
        See if a URL is a candidate for a download URL for a project (the URL
        has typically been scraped from an HTML page).

        If it is, a dictionary is returned with keys "name", "version",
        "filename" and "url"; otherwise, None is returned.
        """
        def same_project(name1, name2):
            return normalize_name(name1) == normalize_name(name2)

        result = None
        scheme, netloc, path, params, query, frag = urlparse(url)
        if frag.lower().startswith('egg='):  # pragma: no cover
            logger.debug('%s: version hint in fragment: %r',
                         project_name, frag)
        m = HASHER_HASH.match(frag)
        if m:
            algo, digest = m.groups()
        else:
            algo, digest = None, None
        origpath = path
        if path and path[-1] == '/':  # pragma: no cover
            path = path[:-1]
        if path.endswith('.whl'):
            try:
                wheel = Wheel(path)
                if is_compatible(wheel, self.wheel_tags):
                    if project_name is None:
                        include = True
                    else:
                        include = same_project(wheel.name, project_name)
                    if include:
                        result = {
                            'name': wheel.name,
                            'version': wheel.version,
                            'filename': wheel.filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            'python-version': ', '.join(
                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
                        }
            except Exception as e:  # pragma: no cover
                logger.warning('invalid path for wheel: %s', path)
        elif not path.endswith(self.downloadable_extensions):  # pragma: no cover
            logger.debug('Not downloadable: %s', path)
        else:  # downloadable extension
            path = filename = posixpath.basename(path)
            for ext in self.downloadable_extensions:
                if path.endswith(ext):
                    path = path[:-len(ext)]
                    t = self.split_filename(path, project_name)
                    if not t:  # pragma: no cover
                        logger.debug('No match for project/version: %s', path)
                    else:
                        name, version, pyver = t
                        if not project_name or same_project(project_name, name):
                            result = {
                                'name': name,
                                'version': version,
                                'filename': filename,
                                'url': urlunparse((scheme, netloc, origpath,
                                                   params, query, '')),
                                # 'packagetype': 'sdist',
                            }
                            if pyver:  # pragma: no cover
                                result['python-version'] = pyver
                    break
        if result and algo:
            result['%s_digest' % algo] = digest
        return result

    def _get_digest(self, info):
        """
        Get a digest from a dictionary by looking at keys of the form
        'algo_digest'.

        Returns a 2-tuple (algo, digest) if found, else None. Currently
        looks only for SHA256, then MD5.
        """
        result = None
        for algo in ('sha256', 'md5'):
            key = '%s_digest' % algo
            if key in info:
                result = (algo, info[key])
                break
        return result

    def _update_version_data(self, result, info):
        """
        Update a result dictionary (the final result from _get_project) with a
        dictionary for a specific version, which typically holds information
        gleaned from a filename or URL for an archive for the distribution.
        """
        name = info.pop('name')
        version = info.pop('version')
        if version in result:
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        if md.source_url != info['url']:
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist

    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.

        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions
                            to be located. Otherwise, pre-release versions
                            are not returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None:  # pragma: no cover
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if len(versions) > 2:   # urls and digests keys are present
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            for k in versions:
                if k in ('urls', 'digests'):
                    continue
                try:
                    if not matcher.match(k):
                        logger.debug('%s did not match %r', matcher, k)
                    else:
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        else:
                            logger.debug('skipping pre-release '
                                         'version %s of %s', k, matcher.name)
                except Exception:  # pragma: no cover
                    logger.warning('error matching %s with %r', matcher, k)
                    pass  # slist.append(k)
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                version = slist[-1]
                result = versions[version]
        if result:
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if url in sd:  # pragma: no cover
                    d[url] = sd[url]
            result.digests = d
        self.matcher = None
        return result
class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.
        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        return set(self.client.list_packages())

    def _get_project(self, name):
        # Result maps version -> Distribution, plus the 'urls' and
        # 'digests' bookkeeping keys shared by all locators.
        result = {'urls': {}, 'digests': {}}
        # Second argument True asks the index for hidden releases too.
        versions = self.client.package_releases(name, True)
        for v in versions:
            urls = self.client.release_urls(name, v)
            data = self.client.release_data(name, v)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = data['name']
            metadata.version = data['version']
            metadata.license = data.get('license')
            metadata.keywords = data.get('keywords', [])
            metadata.summary = data.get('summary')
            dist = Distribution(metadata)
            if urls:
                # Use the first listed download as the canonical source URL.
                info = urls[0]
                metadata.source_url = info['url']
                dist.digest = self._get_digest(info)
            dist.locator = self
            result[v] = dist
            # Record every download URL and its digest for this version.
            for info in urls:
                url = info['url']
                digest = self._get_digest(info)
                result['urls'].setdefault(v, set()).add(url)
                result['digests'][url] = digest
        return result
class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in functionality
    and probably not worth using.
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.
        :param url: The base URL of the JSON API; a trailing slash is added
                    if missing.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        """
        Fetch ``<base_url>/<name>/json`` and build the version ->
        Distribution mapping from the returned metadata.  Any failure is
        recorded on the error queue and an empty result returned.
        """
        result = {'urls': {}, 'digests': {}}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            resp = self.opener.open(url)
            data = resp.read().decode()  # for now
            d = json.loads(data)
            md = Metadata(scheme=self.scheme)
            data = d['info']
            md.name = data['name']
            md.version = data['version']
            md.license = data.get('license')
            md.keywords = data.get('keywords', [])
            md.summary = data.get('summary')
            dist = Distribution(md)
            dist.locator = self
            result[md.version] = dist
            # Downloads for the latest (current) release.
            for info in d['urls']:
                url = info['url']
                dist.download_urls.add(url)
                dist.digests[url] = self._get_digest(info)
                result['urls'].setdefault(md.version, set()).add(url)
                result['digests'][url] = self._get_digest(info)
            # Now get other releases
            for version, infos in d['releases'].items():
                if version == md.version:
                    continue    # already done
                omd = Metadata(scheme=self.scheme)
                omd.name = md.name
                omd.version = version
                odist = Distribution(omd)
                odist.locator = self
                result[version] = odist
                for info in infos:
                    url = info['url']
                    odist.download_urls.add(url)
                    odist.digests[url] = self._get_digest(info)
                    result['urls'].setdefault(version, set()).add(url)
                    result['digests'][url] = self._get_digest(info)
        except Exception as e:
            self.errors.put(text_type(e))
            logger.exception('JSON fetch failed: %s', e)
        return result
class Page(object):
    """
    This class represents a scraped HTML page.
    """
    # The following slightly hairy-looking regex just looks for the contents of
    # an anchor link, which has an attribute "href" either immediately preceded
    # or immediately followed by a "rel" attribute. The attribute values can be
    # declared with double quotes, single quotes or no quotes - which leads to
    # the length of the expression.
    _href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
    # Picks up a <base href="..."> element, which overrides the page URL
    # for resolving relative links.
    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)

    def __init__(self, data, url):
        """
        Initialise an instance with the Unicode page contents and the URL they
        came from.
        """
        self.data = data
        self.base_url = self.url = url
        m = self._base.search(self.data)
        if m:
            self.base_url = m.group(1)

    # Characters which must be percent-encoded when they appear in a link.
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    @cached_property
    def links(self):
        """
        Return the URLs of all the links on a page together with information
        about their "rel" attribute, for determining which ones to treat as
        downloads and which ones to queue for further scraping.
        """
        # NOTE(review): this nested helper appears to be unused - nothing
        # below calls clean(). Verify before removing.
        def clean(url):
            "Tidy up an URL."
            scheme, netloc, path, params, query, frag = urlparse(url)
            return urlunparse((scheme, netloc, quote(path),
                               params, query, frag))
        result = set()
        for match in self._href.finditer(self.data):
            d = match.groupdict('')
            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
                   d['rel4'] or d['rel5'] or d['rel6'])
            url = d['url1'] or d['url2'] or d['url3']
            url = urljoin(self.base_url, url)
            url = unescape(url)
            # NOTE(review): '%%%2x' space-pads code points below 0x10
            # (e.g. '% a' for '\n') instead of zero-padding; '%%%02x'
            # looks intended - confirm before changing.
            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
            result.add((url, rel))
        # We sort the result, hoping to bring the most recent versions
        # to the front
        result = sorted(result, key=lambda t: t[0], reverse=True)
        return result
class SimpleScrapingLocator(Locator):
    """
    A locator which scrapes HTML pages to locate downloads for a distribution.
    This runs multiple threads to do the I/O; performance is at least as good
    as pip's PackageFinder, which works in an analogous fashion.
    """

    # These are used to deal with various Content-Encoding schemes.
    decoders = {
        'deflate': zlib.decompress,
        # Fix: the original lambda took ``b`` but referenced an undefined
        # name ``d`` (``BytesIO(d)``), raising NameError for any
        # gzip-encoded response.
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
        'none': lambda b: b,
    }

    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """
        Initialise an instance.
        :param url: The root URL to use for scraping.
        :param timeout: The timeout, in seconds, to be applied to requests.
                        This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O,
                            This defaults to 10.
        :param kwargs: Passed to the superclass.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}
        self._seen = set()
        self._to_fetch = queue.Queue()
        self._bad_hosts = set()
        self.skip_externals = False
        self.num_workers = num_workers
        self._lock = threading.RLock()
        # See issue #45: we need to be resilient when the locator is used
        # in a thread, e.g. with concurrent.futures. We can't use self._lock
        # as it is for coordinating our internal threads - the ones created
        # in _prepare_threads.
        self._gplock = threading.RLock()

    def _prepare_threads(self):
        """
        Threads are created only when get_project is called, and terminate
        before it returns. They are there primarily to parallelise I/O (i.e.
        fetching web pages).
        """
        self._threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self._fetch)
            # daemon workers never block interpreter exit; the ``daemon``
            # attribute replaces the deprecated setDaemon() call.
            t.daemon = True
            t.start()
            self._threads.append(t)

    def _wait_threads(self):
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)    # sentinel
        for t in self._threads:
            t.join()
        self._threads = []

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        with self._gplock:
            # self.result / self.project_name are shared with the worker
            # threads for the duration of this call.
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, '%s/' % quote(name))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                self._to_fetch.join()
            finally:
                self._wait_threads()
            del self.result
        return result

    platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
                                    r'win(32|-amd64)|macosx-?\d+)\b', re.I)

    def _is_platform_dependent(self, url):
        """
        Does an URL refer to a platform-specific download?
        """
        return self.platform_dependent.search(url)

    def _process_download(self, url):
        """
        See if an URL is a suitable download for a project.

        If it is, register information in the result dictionary (for
        _get_project) about the specific version it's for.

        Note that the return value isn't actually used other than as a boolean
        value.
        """
        if self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            with self._lock:    # needed because self.result is shared
                self._update_version_data(self.result, info)
        return info

    def _should_queue(self, link, referrer, rel):
        """
        Determine whether a link URL from a referring page and with a
        particular "rel" attribute should be queued for scraping.
        """
        scheme, netloc, path, _, _, _ = urlparse(link)
        if path.endswith(self.source_extensions + self.binary_extensions +
                         self.excluded_extensions):
            result = False
        elif self.skip_externals and not link.startswith(self.base_url):
            result = False
        elif not referrer.startswith(self.base_url):
            result = False
        elif rel not in ('homepage', 'download'):
            result = False
        elif scheme not in ('http', 'https', 'ftp'):
            result = False
        elif self._is_platform_dependent(link):
            result = False
        else:
            host = netloc.split(':', 1)[0]
            if host.lower() == 'localhost':
                result = False
            else:
                result = True
        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
                     referrer, result)
        return result

    def _fetch(self):
        """
        Get a URL to fetch from the work queue, get the HTML page, examine its
        links for download candidates and candidates for further scraping.

        This is a handy method to run in a thread.
        """
        while True:
            url = self._to_fetch.get()
            try:
                if url:
                    page = self.get_page(url)
                    if page is None:    # e.g. after an error
                        continue
                    for link, rel in page.links:
                        if link not in self._seen:
                            try:
                                self._seen.add(link)
                                if (not self._process_download(link) and
                                        self._should_queue(link, url, rel)):
                                    logger.debug('Queueing %s from %s', link, url)
                                    self._to_fetch.put(link)
                            except MetadataInvalidError:  # e.g. invalid versions
                                pass
            except Exception as e:  # pragma: no cover
                self.errors.put(text_type(e))
            finally:
                # always do this, to avoid hangs :-)
                self._to_fetch.task_done()
            if not url:
                # logger.debug('Sentinel seen, quitting.')
                break

    def get_page(self, url):
        """
        Get the HTML for an URL, possibly from an in-memory cache.

        XXX TODO Note: this cache is never actually cleared. It's assumed that
        the data won't get stale over the lifetime of a locator instance (not
        necessarily true for the default_locator).
        """
        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
        scheme, netloc, path, _, _, _ = urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            url = urljoin(ensure_slash(url), 'index.html')
        if url in self._page_cache:
            result = self._page_cache[url]
            logger.debug('Returning %s from cache: %s', url, result)
        else:
            host = netloc.split(':', 1)[0]
            result = None
            if host in self._bad_hosts:
                logger.debug('Skipping %s due to bad host %s', url, host)
            else:
                req = Request(url, headers={'Accept-encoding': 'identity'})
                try:
                    logger.debug('Fetching %s', url)
                    resp = self.opener.open(req, timeout=self.timeout)
                    logger.debug('Fetched %s', url)
                    headers = resp.info()
                    content_type = headers.get('Content-Type', '')
                    if HTML_CONTENT_TYPE.match(content_type):
                        final_url = resp.geturl()
                        data = resp.read()
                        encoding = headers.get('Content-Encoding')
                        if encoding:
                            decoder = self.decoders[encoding]   # fail if not found
                            data = decoder(data)
                        encoding = 'utf-8'
                        m = CHARSET.search(content_type)
                        if m:
                            encoding = m.group(1)
                        try:
                            data = data.decode(encoding)
                        except UnicodeError:  # pragma: no cover
                            data = data.decode('latin-1')    # fallback
                        result = Page(data, final_url)
                        self._page_cache[final_url] = result
                except HTTPError as e:
                    if e.code != 404:
                        logger.exception('Fetch failed: %s: %s', url, e)
                except URLError as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                    with self._lock:
                        self._bad_hosts.add(host)
                except Exception as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                finally:
                    self._page_cache[url] = result  # even if None (failure)
        return result

    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        page = self.get_page(self.base_url)
        if not page:
            raise DistlibException('Unable to get %s' % self.base_url)
        for match in self._distname_re.finditer(page.data):
            result.add(match.group(1))
        return result
class DirectoryLocator(Locator):
    """
    This class locates distributions in a directory tree.
    """

    def __init__(self, path, **kwargs):
        """
        Initialise an instance.
        :param path: The root of the directory tree to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * recursive - if True (the default), subdirectories are
                         recursed into. If False, only the top-level directory
                         is searched,
        """
        # Pull our own option out before handing the rest to the base class.
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        path = os.path.abspath(path)
        if not os.path.isdir(path):  # pragma: no cover
            raise DistlibException('Not a directory: %r' % path)
        self.base_dir = path

    def should_include(self, filename, parent):
        """
        Should a filename be considered as a candidate for a distribution
        archive? As well as the filename, the directory which contains it
        is provided, though not used by the current implementation.
        """
        return filename.endswith(self.downloadable_extensions)

    def _file_url(self, root, fn):
        """Build a file:// URL for an archive found during the walk."""
        full = os.path.abspath(os.path.join(root, fn))
        return urlunparse(('file', '', pathname2url(full), '', '', ''))

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        for root, dirs, files in os.walk(self.base_dir):
            candidates = (fn for fn in files if self.should_include(fn, root))
            for fn in candidates:
                info = self.convert_url_to_download_info(
                    self._file_url(root, fn), name)
                if info:
                    self._update_version_data(result, info)
            if not self.recursive:
                # Only the top-level directory was wanted.
                break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for root, dirs, files in os.walk(self.base_dir):
            candidates = (fn for fn in files if self.should_include(fn, root))
            for fn in candidates:
                info = self.convert_url_to_download_info(
                    self._file_url(root, fn), None)
                if info:
                    result.add(info['name'])
            if not self.recursive:
                break
        return result
class JSONLocator(Locator):
    """
    This locator uses special extended metadata (not available on PyPI) and is
    the basis of performant dependency resolution in distlib. Other locators
    require archive downloads before dependencies can be determined! As you
    might imagine, that can be slow.
    """

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        data = get_project_data(name)
        files = data.get('files', []) if data else []
        for info in files:
            # Only source distributions carry the extended metadata needed
            # for dependency resolution.
            if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
                continue
            # We don't store summary in project metadata as it makes
            # the data bigger for no benefit during dependency
            # resolution
            dist = make_dist(data['name'], info['version'],
                             summary=data.get('summary',
                                              'Placeholder for summary'),
                             scheme=self.scheme)
            md = dist.metadata
            md.source_url = info['url']
            # TODO SHA256 digest
            if info.get('digest'):
                dist.digest = ('md5', info['digest'])
            md.dependencies = info.get('requirements', {})
            dist.exports = info.get('exports', {})
            result[dist.version] = dist
            result['urls'].setdefault(dist.version, set()).add(info['url'])
        return result
class DistPathLocator(Locator):
    """
    This locator finds installed distributions in a path. It can be useful for
    adding to an :class:`AggregatingLocator`.
    """

    def __init__(self, distpath, **kwargs):
        """
        Initialise an instance.
        :param distpath: A :class:`DistributionPath` instance to search.
        """
        super(DistPathLocator, self).__init__(**kwargs)
        assert isinstance(distpath, DistributionPath)
        self.distpath = distpath

    def _get_project(self, name):
        dist = self.distpath.get_distribution(name)
        if dist is None:
            return {'urls': {}, 'digests': {}}
        version = dist.version
        # An installed distribution has at most one source URL and no
        # recorded digest for it.
        return {
            version: dist,
            'urls': {version: set([dist.source_url])},
            'digests': {version: set([None])},
        }
class AggregatingLocator(Locator):
    """
    This class allows you to chain and/or merge a list of locators.
    """
    def __init__(self, *locators, **kwargs):
        """
        Initialise an instance.
        :param locators: The list of locators to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * merge - if False (the default), the first successful
                         search from any of the locators is returned. If True,
                         the results from all locators are merged (this can be
                         slow).
        """
        self.merge = kwargs.pop('merge', False)
        self.locators = locators
        super(AggregatingLocator, self).__init__(**kwargs)

    def clear_cache(self):
        # Clear our own cache as well as every child locator's.
        super(AggregatingLocator, self).clear_cache()
        for locator in self.locators:
            locator.clear_cache()

    def _set_scheme(self, value):
        # Setting the scheme must propagate to all child locators.
        self._scheme = value
        for locator in self.locators:
            locator.scheme = value

    # Reuse the base class getter; only the setter needs overriding.
    scheme = property(Locator.scheme.fget, _set_scheme)

    def _get_project(self, name):
        result = {}
        for locator in self.locators:
            d = locator.get_project(name)
            if d:
                if self.merge:
                    # Save the bookkeeping entries before update() so they
                    # can be merged back in rather than overwritten.
                    files = result.get('urls', {})
                    digests = result.get('digests', {})
                    # next line could overwrite result['urls'], result['digests']
                    result.update(d)
                    df = result.get('urls')
                    if files and df:
                        # Union per-version URL sets from both sides.
                        for k, v in files.items():
                            if k in df:
                                df[k] |= v
                            else:
                                df[k] = v
                    dd = result.get('digests')
                    if digests and dd:
                        dd.update(digests)
                else:
                    # See issue #18. If any dists are found and we're looking
                    # for specific constraints, we only return something if
                    # a match is found. For example, if a DirectoryLocator
                    # returns just foo (1.0) while we're looking for
                    # foo (>= 2.0), we'll pretend there was nothing there so
                    # that subsequent locators can be queried. Otherwise we
                    # would just return foo (1.0) which would then lead to a
                    # failure to find foo (>= 2.0), because other locators
                    # weren't searched. Note that this only matters when
                    # merge=False.
                    if self.matcher is None:
                        found = True
                    else:
                        found = False
                        for k in d:
                            if self.matcher.match(k):
                                found = True
                                break
                    if found:
                        result = d
                        break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for locator in self.locators:
            try:
                result |= locator.get_distribution_names()
            except NotImplementedError:
                # Some child locators (e.g. JSON-based ones) can't
                # enumerate; just skip them.
                pass
        return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
                    JSONLocator(),
                    SimpleScrapingLocator('https://pypi.python.org/simple/',
                                          timeout=3.0),
                    scheme='legacy')

# Module-level convenience: locate(requirement) on the default chain.
locate = default_locator.locate

# Parses requirements of the form "name (== version)" or "name (version)".
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
"""
Locate dependencies for distributions.
"""
def __init__(self, locator=None):
    """
    Initialise an instance, using the specified locator
    to locate distributions.
    """
    chosen = locator or default_locator
    self.locator = chosen
    self.scheme = get_scheme(chosen.scheme)
def add_distribution(self, dist):
    """
    Add a distribution to the finder. This will update internal information
    about who provides what.
    :param dist: The distribution to add.
    """
    logger.debug('adding distribution %s', dist)
    key = dist.key
    self.dists_by_name[key] = dist
    self.dists[(key, dist.version)] = dist
    for entry in dist.provides:
        pname, pver = parse_name_and_version(entry)
        logger.debug('Add to provided: %s, %s, %s', pname, pver, dist)
        self.provided.setdefault(pname, set()).add((pver, dist))
def remove_distribution(self, dist):
    """
    Remove a distribution from the finder. This will update internal
    information about who provides what.
    :param dist: The distribution to remove.
    """
    logger.debug('removing distribution %s', dist)
    key = dist.key
    del self.dists_by_name[key]
    del self.dists[(key, dist.version)]
    for entry in dist.provides:
        pname, pver = parse_name_and_version(entry)
        logger.debug('Remove from provided: %s, %s, %s', pname, pver, dist)
        providers = self.provided[pname]
        providers.remove((pver, dist))
        if not providers:
            # Drop the key entirely once nothing provides this name.
            del self.provided[pname]
def get_matcher(self, reqt):
"""
Get a version matcher for a requirement.
:param reqt: The requirement
:type reqt: str
:return: A version matcher (an instance of
:class:`distlib.version.Matcher`).
"""
try:
matcher = self.scheme.matcher(reqt)
except UnsupportedVersionError: # pragma: no cover
# XXX compat-mode if cannot read the version
name = reqt.split()[0]
matcher = self.scheme.matcher(name)
return matcher
def find_providers(self, reqt):
"""
Find the distributions which can fulfill a requirement.
:param reqt: The requirement.
:type reqt: str
:return: A set of distribution which can fulfill the requirement.
"""
matcher = self.get_matcher(reqt)
name = matcher.key # case-insensitive
result = set()
provided = self.provided
if name in provided:
for version, provider in provided[name]:
try:
match = matcher.match(version)
except UnsupportedVersionError:
match = False
if match:
result.add(provider)
break
return result
def try_to_replace(self, provider, other, problems):
"""
Attempt to replace one provider with another. This is typically used
when resolving dependencies from multiple sources, e.g. A requires
(B >= 1.0) while C requires (B >= 1.1).
For successful replacement, ``provider`` must meet all the requirements
which ``other`` fulfills.
:param provider: The provider we are trying to replace with.
:param other: The provider we're trying to replace.
:param problems: If False is returned, this will contain what
problems prevented replacement. This is currently
a tuple of the literal string 'cantreplace',
``provider``, ``other`` and the set of requirements
that ``provider`` couldn't fulfill.
:return: True if we can replace ``other`` with ``provider``, else
False.
"""
rlist = self.reqts[other]
unmatched = set()
for s in rlist:
matcher = self.get_matcher(s)
if not matcher.match(provider.version):
unmatched.add(s)
if unmatched:
# can't replace other with provider
problems.add(('cantreplace', provider, other,
frozenset(unmatched)))
result = False
else:
# can replace other with provider
self.remove_distribution(other)
del self.reqts[other]
for s in rlist:
self.reqts.setdefault(provider, set()).add(s)
self.add_distribution(provider)
result = True
return result
def find(self, requirement, meta_extras=None, prereleases=False):
    """
    Find a distribution and all distributions it depends on.

    :param requirement: The requirement specifying the distribution to
                        find, or a Distribution instance.
    :param meta_extras: A list of meta extras such as :test:, :build: and
                        so on.
    :param prereleases: If ``True``, allow pre-release versions to be
                        returned - otherwise, don't return prereleases
                        unless they're all that's available.

    Return a set of :class:`Distribution` instances and a set of
    problems.

    The distributions returned should be such that they have the
    :attr:`required` attribute set to ``True`` if they were
    from the ``requirement`` passed to ``find()``, and they have the
    :attr:`build_time_dependency` attribute set to ``True`` unless they
    are post-installation dependencies of the ``requirement``.

    The problems should be a tuple consisting of the string
    ``'unsatisfied'`` and the requirement which couldn't be satisfied
    by any distribution known to the locator.
    """
    # Per-call state: these bookkeeping maps are rebuilt from scratch
    # on every find() invocation.
    self.provided = {}
    self.dists = {}
    self.dists_by_name = {}
    self.reqts = {}
    meta_extras = set(meta_extras or [])
    if ':*:' in meta_extras:
        # ':*:' is shorthand for "all meta extras".
        meta_extras.remove(':*:')
        # :meta: and :run: are implicitly included
        meta_extras |= set([':test:', ':build:', ':dev:'])
    if isinstance(requirement, Distribution):
        dist = odist = requirement
        logger.debug('passed %s as requirement', odist)
    else:
        dist = odist = self.locator.locate(requirement,
                                           prereleases=prereleases)
        if dist is None:
            raise DistlibException('Unable to locate %r' % requirement)
        logger.debug('located %s', odist)
    dist.requested = True
    problems = set()
    # Worklist traversal of the dependency graph, starting from the
    # originally requested distribution.
    todo = set([dist])
    # Distributions reachable through install-time requirements.
    install_dists = set([odist])
    while todo:
        dist = todo.pop()
        name = dist.key  # case-insensitive
        if name not in self.dists_by_name:
            self.add_distribution(dist)
        else:
            # A distribution of the same name is already present; see if
            # the new one can replace it.
            other = self.dists_by_name[name]
            if other != dist:
                self.try_to_replace(dist, other, problems)
        # Gather install-time (run/meta), build-time, and any requested
        # meta-extra requirements for this distribution.
        ireqts = dist.run_requires | dist.meta_requires
        sreqts = dist.build_requires
        ereqts = set()
        if meta_extras and dist in install_dists:
            for key in ('test', 'build', 'dev'):
                e = ':%s:' % key
                if e in meta_extras:
                    ereqts |= getattr(dist, '%s_requires' % key)
        all_reqts = ireqts | sreqts | ereqts
        for r in all_reqts:
            providers = self.find_providers(r)
            if not providers:
                logger.debug('No providers found for %r', r)
                provider = self.locator.locate(r, prereleases=prereleases)
                # If no provider is found and we didn't consider
                # prereleases, consider them now.
                if provider is None and not prereleases:
                    provider = self.locator.locate(r, prereleases=True)
                if provider is None:
                    logger.debug('Cannot satisfy %r', r)
                    problems.add(('unsatisfied', r))
                else:
                    n, v = provider.key, provider.version
                    if (n, v) not in self.dists:
                        todo.add(provider)
                    providers.add(provider)
                    if r in ireqts and dist in install_dists:
                        install_dists.add(provider)
                        logger.debug('Adding %s to install_dists',
                                     provider.name_and_version)
            for p in providers:
                name = p.key
                if name not in self.dists_by_name:
                    # Not processed yet: remember which requirement led
                    # to this provider so try_to_replace can check it.
                    self.reqts.setdefault(p, set()).add(r)
                else:
                    other = self.dists_by_name[name]
                    if other != p:
                        # see if other can be replaced by p
                        self.try_to_replace(p, other, problems)
    dists = set(self.dists.values())
    for dist in dists:
        # Anything not reachable via install-time requirements is a
        # build-time-only dependency.
        dist.build_time_dependency = dist not in install_dists
        if dist.build_time_dependency:
            logger.debug('%s is a build-time dependency only.',
                         dist.name_and_version)
    logger.debug('find done for %s', odist)
    return dists, problems
|
test_threading_local.py
|
import unittest
from doctest import DocTestSuite
from test import test_support
import weakref
import gc
# Modules under test
_thread = test_support.import_module('thread')
threading = test_support.import_module('threading')
import _threading_local
class Weak(object):
    """Trivial object used as a weak-reference target in these tests."""
    pass
def target(local, weaklist):
    """Thread body: attach a fresh Weak instance to the thread-local and
    record a weak reference to it so callers can observe its collection."""
    obj = Weak()
    local.weak = obj
    weaklist.append(weakref.ref(obj))
class BaseLocalTest:
    # Mixin holding the tests shared by both thread-local implementations.
    # Concrete subclasses must define ``_local`` as the class under test
    # (either the C ``thread._local`` or the pure-Python
    # ``_threading_local.local``).

    def test_local_refs(self):
        self._local_refs(20)
        self._local_refs(50)
        self._local_refs(100)

    def _local_refs(self, n):
        # Spawn n threads, each storing a fresh object on the shared
        # thread-local; after the threads exit, those objects should
        # become garbage-collectable.
        local = self._local()
        weaklist = []
        for i in range(n):
            t = threading.Thread(target=target, args=(local, weaklist))
            t.start()
            t.join()
        del t
        gc.collect()
        self.assertEqual(len(weaklist), n)
        # XXX _threading_local keeps the local of the last stopped thread alive.
        deadlist = [weak for weak in weaklist if weak() is None]
        self.assertIn(len(deadlist), (n-1, n))
        # Assignment to the same thread local frees it sometimes (!)
        local.someothervar = None
        gc.collect()
        deadlist = [weak for weak in weaklist if weak() is None]
        self.assertIn(len(deadlist), (n-1, n), (n, len(deadlist)))

    def test_derived(self):
        # Issue 3088: if there is a threads switch inside the __init__
        # of a threading.local derived class, the per-thread dictionary
        # is created but not correctly set on the object.
        # The first member set may be bogus.
        import time
        class Local(self._local):
            def __init__(self):
                # Sleep long enough to force a thread switch mid-__init__.
                time.sleep(0.01)
        local = Local()
        def f(i):
            local.x = i
            # Simply check that the variable is correctly set
            self.assertEqual(local.x, i)
        threads = []
        for i in range(10):
            t = threading.Thread(target=f, args=(i,))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()

    def test_derived_cycle_dealloc(self):
        # http://bugs.python.org/issue6990
        class Local(self._local):
            pass
        locals = None
        passed = [False]
        e1 = threading.Event()
        e2 = threading.Event()
        def f():
            # 1) Involve Local in a cycle
            cycle = [Local()]
            cycle.append(cycle)
            cycle[0].foo = 'bar'
            # 2) GC the cycle (triggers threadmodule.c::local_clear
            # before local_dealloc)
            del cycle
            gc.collect()
            e1.set()
            e2.wait()
            # 4) New Locals should be empty
            passed[0] = all(not hasattr(local, 'foo') for local in locals)
        t = threading.Thread(target=f)
        t.start()
        e1.wait()
        # 3) New Locals should recycle the original's address. Creating
        # them in the thread overwrites the thread state and avoids the
        # bug
        locals = [Local() for i in range(10)]
        e2.set()
        t.join()
        self.assertTrue(passed[0])

    def test_arguments(self):
        # Issue 1522237: local() itself must reject constructor arguments
        # unless a subclass overrides __init__.
        from thread import _local as local
        from _threading_local import local as py_local
        for cls in (local, py_local):
            class MyLocal(cls):
                def __init__(self, *args, **kwargs):
                    pass
            MyLocal(a=1)
            MyLocal(1)
            self.assertRaises(TypeError, cls, a=1)
            self.assertRaises(TypeError, cls, 1)

    def _test_one_class(self, c):
        # Verify that attributes set in one thread are invisible in another.
        self._failed = "No error message set or cleared."
        obj = c()
        e1 = threading.Event()
        e2 = threading.Event()
        def f1():
            obj.x = 'foo'
            obj.y = 'bar'
            del obj.y
            e1.set()
            e2.wait()
        def f2():
            try:
                foo = obj.x
            except AttributeError:
                # This is expected -- we haven't set obj.x in this thread yet!
                self._failed = ""  # passed
            else:
                self._failed = ('Incorrectly got value %r from class %r\n' %
                                (foo, c))
                # NOTE(review): `sys` is not among this module's visible
                # imports -- confirm it is imported somewhere, otherwise
                # this failure path would raise NameError.
                sys.stderr.write(self._failed)
        t1 = threading.Thread(target=f1)
        t1.start()
        e1.wait()
        t2 = threading.Thread(target=f2)
        t2.start()
        t2.join()
        # The test is done; just let t1 know it can exit, and wait for it.
        e2.set()
        t1.join()
        self.assertFalse(self._failed, self._failed)

    def test_threading_local(self):
        self._test_one_class(self._local)

    def test_threading_local_subclass(self):
        class LocalSubclass(self._local):
            """To test that subclasses behave properly."""
        self._test_one_class(LocalSubclass)

    def _test_dict_attribute(self, cls):
        # __dict__ must be readable but neither replaceable nor deletable.
        obj = cls()
        obj.x = 5
        self.assertEqual(obj.__dict__, {'x': 5})
        with self.assertRaises(AttributeError):
            obj.__dict__ = {}
        with self.assertRaises(AttributeError):
            del obj.__dict__

    def test_dict_attribute(self):
        self._test_dict_attribute(self._local)

    def test_dict_attribute_subclass(self):
        class LocalSubclass(self._local):
            """To test that subclasses behave properly."""
        self._test_dict_attribute(LocalSubclass)
class ThreadLocalTest(unittest.TestCase, BaseLocalTest):
    # Runs the shared tests against the C implementation (thread._local).
    _local = _thread._local

    # Fails for the pure Python implementation
    def test_cycle_collection(self):
        # A cycle through a thread-local (x -> x.local -> x.local.x -> x)
        # must still be collectable by the cyclic GC.
        class X:
            pass
        x = X()
        x.local = self._local()
        x.local.x = x
        wr = weakref.ref(x)
        del x
        gc.collect()
        self.assertIs(wr(), None)
class PyThreadingLocalTest(unittest.TestCase, BaseLocalTest):
    # Runs the shared tests against the pure-Python implementation.
    _local = _threading_local.local
def test_main():
    """Build and run the full suite: the _threading_local doctests plus
    both TestCase classes, and (when the C implementation is available)
    the doctests re-run against it."""
    suite = unittest.TestSuite()
    suite.addTest(DocTestSuite('_threading_local'))
    suite.addTest(unittest.makeSuite(ThreadLocalTest))
    suite.addTest(unittest.makeSuite(PyThreadingLocalTest))
    try:
        from thread import _local
    except ImportError:
        pass
    else:
        # Temporarily patch _threading_local.local with the C version so
        # the doctests exercise it too, restoring the original afterwards.
        import _threading_local
        saved_local = _threading_local.local

        def setUp(test):
            _threading_local.local = _local

        def tearDown(test):
            _threading_local.local = saved_local

        suite.addTest(DocTestSuite('_threading_local',
                                   setUp=setUp, tearDown=tearDown))
    test_support.run_unittest(suite)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    test_main()
|
Remixatron.py
|
""" Classes for remixing audio files.
(c) 2017 - Dave Rensin - dave@rensin.com
This module contains classes for remixing audio files. It started
as an attempt to re-create the amazing Infinite Jukebox (http://www.infinitejuke.com)
created by Paul Lamere of Echo Nest.
The InfiniteJukebox class can do it's processing in a background thread and
reports progress via the progress_callback arg. To run in a thread, pass do_async=True
to the constructor. In that case, it exposes an Event named play_ready -- which will
be signaled when the processing is complete. The default mode is to run synchronously.
Async example:
def MyCallback(percentage_complete_as_float, string_message):
print "I am now %f percent complete with message: %s" % (percentage_complete_as_float * 100, string_message)
jukebox = InfiniteJukebox(filename='some_file.mp3', progress_callback=MyCallback, do_async=True)
jukebox.play_ready.wait()
<some work here...>
Non-async example:
def MyCallback(percentage_complete_as_float, string_message):
print "I am now %f percent complete with message: %s" % (percentage_complete_as_float * 100, string_message)
jukebox = InfiniteJukebox(filename='some_file.mp3', progress_callback=MyCallback, do_async=False)
<blocks until completion... some work here...>
"""
import collections
import librosa
import math
import random
import scipy
import threading
import numpy as np
import sklearn.cluster
import sklearn.metrics
class InfiniteJukebox(object):
""" Class to "infinitely" remix a song.
This class will take an audio file (wav, mp3, ogg, etc) and
(a) decompose it into individual beats, (b) find the tempo
of the track, and (c) create a play path that you can use
to play the song approx infinitely.
The idea is that it will find and cluster beats that are
musically similar and return them to you so you can automatically
'remix' the song.
Attributes:
play_ready: an Event that triggers when the processing/clustering is complete and
playback can begin. This is only defined if you pass do_async=True in the
constructor.
start_index: the start index of the original track before trimming (i.e. leading silence is before this start index)
duration: the duration (in seconds) of the track after the leading and trailing silences
have been removed.
raw_audio: an array of numpy.Int16 that is suitable for using for playback via pygame
or similar modules. If the audio is mono then the shape of the array will
be (bytes,). If it's stereo, then the shape will be (2,bytes).
sample_rate: the sample rate from the audio file. Usually 44100 or 48000
clusters: the number of clusters used to group the beats. If you pass in a value, then
this will be reflected here. If you let the algorithm decide, then auto-generated
value will be reflected here.
beats: a dictionary containing the individual beats of the song in normal order. Each
beat will have the following keys:
id: the ordinal position of the beat in the song
start: the time (in seconds) in the song where this beat occurs
duration: the duration (in seconds) of the beat
buffer: an array of audio bytes for this beat. it is just raw_audio[start:start+duration]
cluster: the cluster that this beat most closely belongs. Beats in the same cluster
have similar harmonic (timbre) and chromatic (pitch) characteristics. They
will "sound similar"
segment: the segment to which this beat belongs. A 'segment' is a contiguous block of
beats that belong to the same cluster.
amplitude: the loudness of the beat
next: the next beat to play after this one, if playing sequentially
jump_candidates: a list of the other beats in the song to which it is reasonable to jump. Those beats
(a) are in the same cluster as the NEXT oridnal beat, (b) are of the same segment position
as the next ordinal beat, (c) are in the same place in the measure as the NEXT beat,
(d) but AREN'T the next beat.
An example of playing the first 32 beats of a song:
from Remixatron import InfiniteJukebox
from pygame import mixer
import time
jukebox = InfiniteJukebox('some_file.mp3')
pygame.mixer.init(frequency=jukebox.sample_rate)
channel = pygame.mixer.Channel(0)
for beat in jukebox.beats[0:32]:
snd = pygame.Sound(buffer=beat['buffer'])
channel.queue(snd)
time.sleep(beat['duration'])
play_vector: a beat play list of 1024^2 items. This represents a pre-computed
remix of this song that will last beat['duration'] * 1024 * 1024
seconds long. A song that is 120bpm will have a beat duration of .5 sec,
so this playlist will last .5 * 1024 * 1024 seconds -- or 145.67 hours.
Each item contains:
beat: an index into the beats array of the beat to play
seq_len: the length of the musical sequence being played
in this part of play_vector.
seq_pos: this beat's position in seq_len. When
seq_len - seq_pos == 0 the song will "jump"
"""
def __init__(self, filename, start_beat=1, clusters=0, progress_callback=None,
             do_async=False, use_v1=False):
    """ The constructor for the class. Also starts the processing thread.

        Args:
            filename: the path to the audio file to process
            start_beat: the first beat to play in the file. Should almost always be 1,
                        but you can override it to skip into a specific part of the song.
            clusters: the number of similarity clusters to compute. The DEFAULT value
                      of 0 means that the code will try to automatically find an optimal
                      cluster. If you specify your own value, it MUST be non-negative. Lower
                      values will create more promiscuous jumps. Larger values will create
                      higher quality matches, but run the risk of jumps->0 -- which will
                      just loop the audio sequentially ~forever.
            progress_callback: a callback function that will get periodic status updates as
                               the audio file is processed. MUST be a function that takes 2 args:
                                   percent_complete: FLOAT between 0.0 and 1.0
                                   message: STRING with the progress message
            do_async: if True, process in a background thread and signal the
                      ``play_ready`` event when finished; if False (default),
                      block until processing is complete.
            use_v1: set to True if you want to use the original auto clustering algorithm.
                    Otherwise, it will use the newer silhouette-based scheme.
    """
    self.__progress_callback = progress_callback
    self.__filename = filename
    self.__start_beat = start_beat
    self.clusters = clusters
    self._extra_diag = ""
    self._use_v1 = use_v1

    # Fixed: was `if do_async == True:` -- test truthiness directly.
    if do_async:
        self.play_ready = threading.Event()
        self.__thread = threading.Thread(target=self.__process_audio)
        self.__thread.start()
    else:
        self.play_ready = None
        self.__process_audio()
def __process_audio(self):
    """ The main audio processing routine for the thread.

    This routine uses Laplacian Segmentation to find and
    group similar beats in the song.

    This code has been adapted from the sample created by Brian McFee at
    https://librosa.github.io/librosa_gallery/auto_examples/plot_segmentation.html#sphx-glr-auto-examples-plot-segmentation-py
    and is based on his 2014 paper published at http://bmcfee.github.io/papers/ismir2014_spectral.pdf

    I have made some performance improvements, but the basic parts remain (mostly) unchanged
    """
    self.__report_progress(.1, "loading file and extracting raw audio")

    # load the file as stereo with a high sample rate and
    # trim the silences from each end
    y, sr = librosa.core.load(self.__filename, mono=False, sr=None)
    y, index = librosa.effects.trim(y)

    self.start_index = index[0]
    # keyword args: newer librosa versions make y/sr keyword-only
    self.duration = librosa.core.get_duration(y=y, sr=sr)
    self.raw_audio = (y * np.iinfo(np.int16).max).astype(np.int16).T.copy(order='C')
    self.sample_rate = sr

    # after the raw audio bytes are saved, convert the samples to mono
    # because the beat detection algorithm in librosa requires it.
    y = librosa.core.to_mono(y)

    self.__report_progress(.2, "computing pitch data...")

    # Compute the constant-q chromagram for the samples.
    BINS_PER_OCTAVE = 12 * 3
    N_OCTAVES = 7
    cqt = librosa.cqt(y=y, sr=sr, bins_per_octave=BINS_PER_OCTAVE,
                      n_bins=N_OCTAVES * BINS_PER_OCTAVE)
    C = librosa.amplitude_to_db(np.abs(cqt), ref=np.max)

    self.__report_progress(.3, "Finding beats...")

    # To reduce dimensionality, we'll beat-synchronize the CQT
    tempo, btz = librosa.beat.beat_track(y=y, sr=sr, trim=False)
    Csync = librosa.util.sync(C, btz, aggregate=np.median)
    self.tempo = tempo

    # For alignment purposes, we'll need the timing of the beats.
    # We fix_frames to include non-beat frames 0 and C.shape[1] (final frame).
    beat_times = librosa.frames_to_time(librosa.util.fix_frames(btz,
                                                                x_min=0,
                                                                x_max=C.shape[1]),
                                        sr=sr)

    self.__report_progress(.4, "building recurrence matrix...")

    # Build a weighted recurrence matrix using beat-synchronous CQT (Eq. 1).
    # width=3 prevents links within the same bar;
    # mode='affinity' here implements S_rep (after Eq. 8).
    R = librosa.segment.recurrence_matrix(Csync, width=3, mode='affinity',
                                          sym=True)

    # Enhance diagonals with a median filter (Equation 2)
    df = librosa.segment.timelag_filter(scipy.ndimage.median_filter)
    Rf = df(R, size=(1, 7))

    # Now build the sequence matrix (S_loc) using mfcc-similarity:
    #   R_path[i, i+-1] = exp(-||C_i - C_{i+-1}||^2 / sigma^2)
    # where sigma is the median distance between successive beats.
    mfcc = librosa.feature.mfcc(y=y, sr=sr)
    Msync = librosa.util.sync(mfcc, btz)
    path_distance = np.sum(np.diff(Msync, axis=1)**2, axis=0)
    sigma = np.median(path_distance)
    path_sim = np.exp(-path_distance / sigma)
    R_path = np.diag(path_sim, k=1) + np.diag(path_sim, k=-1)

    # And compute the balanced combination (Equations 6, 7, 9)
    deg_path = np.sum(R_path, axis=1)
    deg_rec = np.sum(Rf, axis=1)
    mu = deg_path.dot(deg_path + deg_rec) / np.sum((deg_path + deg_rec)**2)
    A = mu * Rf + (1 - mu) * R_path

    # Now compute the normalized Laplacian (Eq. 10) and its spectral
    # decomposition.
    L = scipy.sparse.csgraph.laplacian(A, normed=True)
    _, evecs = scipy.linalg.eigh(L)

    # Clean up with a median filter to smooth over small discontinuities.
    evecs = scipy.ndimage.median_filter(evecs, size=(9, 1))

    # cumulative normalization is needed for symmetric normalized
    # laplacian eigenvectors
    Cnorm = np.cumsum(evecs**2, axis=1)**0.5

    self.__report_progress(.5, "clustering...")

    # if a value for clusters wasn't passed in, then we need to auto-cluster
    if self.clusters == 0:
        # use the original auto clustering algorithm if asked, otherwise
        # use the newer one that accounts for silhouette scores.
        if self._use_v1:
            self.clusters, seg_ids = self.__compute_best_cluster(evecs, Cnorm)
        else:
            self.clusters, seg_ids = self.__compute_best_cluster_with_sil(evecs, Cnorm)
    else:  # otherwise, just use the cluster value passed in
        k = self.clusters
        self.__report_progress(.51, "using %d clusters" % self.clusters)
        X = evecs[:, :k] / Cnorm[:, k-1:k]
        seg_ids = sklearn.cluster.KMeans(n_clusters=k, max_iter=1000,
                                         random_state=0, n_init=1000).fit_predict(X)

    # Get the amplitudes and beat-align them
    self.__report_progress(.6, "getting amplitudes")

    # newer versions of librosa have renamed the rmse function
    if hasattr(librosa.feature, 'rms'):
        amplitudes = librosa.feature.rms(y=y)
    else:
        amplitudes = librosa.feature.rmse(y=y)
    ampSync = librosa.util.sync(amplitudes, btz)

    # create a list of tuples that include the ordinal position, the start
    # time of the beat, the cluster to which the beat belongs and the mean
    # amplitude of the beat
    zbeat_tuples = zip(range(0, len(btz)), beat_times, seg_ids, ampSync[0].tolist())
    beat_tuples = tuple(zbeat_tuples)

    info = []
    bytes_per_second = int(round(len(self.raw_audio) / self.duration))

    last_cluster = -1
    current_segment = -1
    segment_beat = 0

    for i in range(0, len(beat_tuples)):
        final_beat = {}
        final_beat['start'] = float(beat_tuples[i][1])
        final_beat['cluster'] = int(beat_tuples[i][2])
        final_beat['amplitude'] = float(beat_tuples[i][3])

        # a new cluster value means a new contiguous segment begins
        if final_beat['cluster'] != last_cluster:
            current_segment += 1
            segment_beat = 0
        else:
            segment_beat += 1

        final_beat['segment'] = current_segment
        final_beat['is'] = segment_beat  # position within the segment
        last_cluster = final_beat['cluster']

        if i == len(beat_tuples) - 1:
            final_beat['duration'] = self.duration - final_beat['start']
        else:
            final_beat['duration'] = beat_tuples[i+1][1] - beat_tuples[i][1]

        # round the start byte offset to an even sample boundary
        if ((final_beat['start'] * bytes_per_second) % 2 > 1.5):
            final_beat['start_index'] = int(math.ceil(final_beat['start'] * bytes_per_second))
        else:
            final_beat['start_index'] = int(final_beat['start'] * bytes_per_second)

        final_beat['stop_index'] = int(math.ceil((final_beat['start'] + final_beat['duration']) * bytes_per_second))

        # save pointers to the raw bytes for each beat with each beat.
        final_beat['buffer'] = self.raw_audio[final_beat['start_index']:final_beat['stop_index']]

        info.append(final_beat)

    self.__report_progress(.7, "truncating to fade point...")

    # get the mean amplitude of the beats
    max_amplitude = sum([float(b['amplitude']) for b in info]) / len(info)
    self.max_amplitude = max_amplitude

    # assume that the fade point of the song is the last beat of the song
    # that is >= 75% of the max amplitude.
    # (index loop instead of reversed() + list.index(): avoids an O(n)
    # search per candidate)
    fade = len(info) - 1
    for idx in range(len(info) - 1, -1, -1):
        if info[idx]['amplitude'] >= (.75 * max_amplitude):
            fade = idx
            break

    # truncate the beats to [start:fade + 1]
    beats = info[self.__start_beat:fade + 1]
    loop_bounds_begin = self.__start_beat

    self.__report_progress(.8, "computing final beat array...")

    # assign final beat ids (enumerate instead of beats.index(beat):
    # the original was O(n^2))
    for beat_id, beat in enumerate(beats):
        beat['id'] = beat_id
        beat['quartile'] = beat_id // (len(beats) / 4.0)

    # compute a coherent 'next' beat to play. This is always just the next
    # ordinal beat unless we're at the end of the song. Then it gets a
    # little trickier.
    for beat in beats:
        if beat == beats[-1]:
            # at the last beat, find a reasonable 'next' beat: it should
            # (a) share the same cluster, (b) be in a logical place in its
            # measure, (c) be after loop_bounds_begin, and (d) be in the
            # first half of the song. Failing that, fall back to the beat
            # at loop_bounds_begin.
            beat['next'] = next((b['id'] for b in beats if b['cluster'] == beat['cluster'] and
                                 b['id'] % 4 == (beat['id'] + 1) % 4 and
                                 b['id'] <= (.5 * len(beats)) and
                                 b['id'] >= loop_bounds_begin), loop_bounds_begin)
        else:
            beat['next'] = beat['id'] + 1

        # find all the beats that (a) are in the same cluster as the NEXT
        # ordinal beat, (b) are of the same cluster position as the next
        # ordinal beat, (c) are in the same place in the measure as the
        # NEXT beat, (d) but AREN'T the next beat, and (e) AREN'T in the
        # same segment as the current beat.
        #
        # THAT collection of beats contains our jump candidates
        jump_candidates = [bx['id'] for bx in beats[loop_bounds_begin:] if
                           (bx['cluster'] == beats[beat['next']]['cluster']) and
                           (bx['is'] == beats[beat['next']]['is']) and
                           (bx['id'] % 4 == beats[beat['next']]['id'] % 4) and
                           (bx['segment'] != beat['segment']) and
                           (bx['id'] != beat['next'])]

        if jump_candidates:
            beat['jump_candidates'] = jump_candidates
        else:
            beat['jump_candidates'] = []

    # save off the segment count
    self.segments = max([b['segment'] for b in beats]) + 1

    # we don't want to ever play past the point where it's impossible to
    # loop, so find the latest beat that still has jump candidates.
    # (index loop instead of reversed() + list.index(), as above)
    last_chance = len(beats) - 1
    for idx in range(len(beats) - 1, -1, -1):
        if len(beats[idx]['jump_candidates']) > 0:
            last_chance = idx
            break

    # if we play our way to the last beat that has jump candidates, then
    # just skip to the earliest jump candidate rather than enter a section
    # from which no jumping is possible.
    beats[last_chance]['next'] = min(beats[last_chance]['jump_candidates'])

    # store the beats that start after the last jumpable point -- the
    # outro of the song. We can use these beats to create a sane ending
    # for a fixed-length remix.
    outro_start = last_chance + 1 + self.__start_beat
    if outro_start >= len(info):
        self.outro = []
    else:
        self.outro = info[outro_start:]

    #
    # This section of the code computes the play_vector -- a 1024*1024
    # beat length remix of the current song.
    #
    random.seed()

    # how long should our longest contiguous playback blocks be? Scale by
    # distance from 120bpm (the canonical bpm for popular music) and round
    # down to the nearest multiple of 4 (4 beats per measure is typical in
    # Western music).
    max_sequence_len = int(round((self.tempo / 120.0) * 48.0))
    max_sequence_len = max_sequence_len - (max_sequence_len % 4)

    min_sequence = max(random.randrange(16, max_sequence_len, 4), loop_bounds_begin)
    current_sequence = 0
    beat = beats[0]

    self.__report_progress(.9, "creating play vector")

    play_vector = []
    play_vector.append({'beat': 0, 'seq_len': min_sequence, 'seq_pos': current_sequence})

    # keep a list of recently played segments so we don't accidentally wind
    # up in a local loop. Depth is 25% of the total number of segments,
    # with a floor of 1.
    recent_depth = int(round(self.segments * .25))
    recent_depth = max(recent_depth, 1)
    recent = collections.deque(maxlen=recent_depth)

    # keep track of the time since the last successful jump. If we go more
    # than 10% of the song length since our last jump, prioritize an
    # immediate jump to a not recently played segment -- this also busts
    # out of local loops.
    max_beats_between_jumps = int(round(len(beats) * .1))
    beats_since_jump = 0
    failed_jumps = 0

    for i in range(0, 1024 * 1024):
        if beat['segment'] not in recent:
            recent.append(beat['segment'])

        current_sequence += 1

        # it's time to attempt a jump if we've played all the beats we
        # wanted in the current sequence, or if we've gone more than 10%
        # of the song without jumping.
        will_jump = (current_sequence == min_sequence) or (beats_since_jump >= max_beats_between_jumps)

        if (will_jump):
            # find the jump candidates that haven't been recently played
            non_recent_candidates = [c for c in beat['jump_candidates'] if beats[c]['segment'] not in recent]

            # if there aren't any good jump candidates, fall back to
            # another selection scheme.
            if len(non_recent_candidates) == 0:
                beats_since_jump += 1
                failed_jumps += 1

                # if we've been failing for >= 10% of the song length,
                # relax the criteria: jump to the furthest candidate that
                # is in a different quartile of the song, maximizing our
                # chance of escaping a long local loop (e.g. the section
                # preceding the outro).
                non_quartile_candidates = [c for c in beat['jump_candidates'] if beats[c]['quartile'] != beat['quartile']]

                if (failed_jumps >= (.1 * len(beats))) and (len(non_quartile_candidates) > 0):
                    furthest_distance = max([abs(beat['id'] - c) for c in non_quartile_candidates])
                    jump_to = next(c for c in non_quartile_candidates
                                   if abs(beat['id'] - c) == furthest_distance)
                    beat = beats[jump_to]
                    beats_since_jump = 0
                    failed_jumps = 0

                # uh oh! That fallback hasn't worked for yet ANOTHER 10%
                # of the song length. Punt and start again from the first
                # loopable beat.
                elif failed_jumps >= (.2 * len(beats)):
                    beats_since_jump = 0
                    failed_jumps = 0
                    beat = beats[loop_bounds_begin]

                # otherwise just play the next beat in the sequence
                else:
                    beat = beats[beat['next']]
            else:
                # we have at least one good non-recent candidate: pick
                # randomly and go there
                beats_since_jump = 0
                failed_jumps = 0
                beat = beats[random.choice(non_recent_candidates)]

            # reset our sequence position counter and pick a new target
            # length between 16 and max_sequence_len, evenly divisible by
            # 4 beats
            current_sequence = 0
            min_sequence = random.randrange(16, max_sequence_len, 4)

            # if we want to jump but can't find good candidates, set
            # current_sequence equal to min_sequence. During playback this
            # shows up as 0 beats remaining until the next jump -- the
            # signal that we'll jump as soon as we possibly can. Code that
            # reads play_vector can choose to visualize this.
            if beats_since_jump >= max_beats_between_jumps:
                current_sequence = min_sequence

            play_vector.append({'beat': beat['id'], 'seq_len': min_sequence, 'seq_pos': current_sequence})
        else:
            # not trying to jump: just add the next item to the play_vector
            play_vector.append({'beat': beat['next'], 'seq_len': min_sequence, 'seq_pos': current_sequence})
            beat = beats[beat['next']]
            beats_since_jump += 1

    # save off the beats array and play_vector. Signal the play_ready
    # event (if it's been set)
    self.beats = beats
    self.play_vector = play_vector

    self.__report_progress(1.0, "finished processing")

    if self.play_ready:
        self.play_ready.set()
def __report_progress(self, pct_done, message):
    """ If a reporting callback was passed, call it in order
        to mark progress.
    """
    callback = self.__progress_callback
    if callback:
        callback(pct_done, message)
def __compute_best_cluster_with_sil(self, evecs, Cnorm):
    ''' Attempts to compute optimum clustering

        Uses the silhouette score to pick the best number of clusters.
        See: https://en.wikipedia.org/wiki/Silhouette_(clustering)

        PARAMETERS:
            evecs: Eigen-vectors computed from the segmentation algorithm
            Cnorm: Cumulative normalization of evecs. Easier to pass it in than
                   compute it from scratch here.

        KEY DEFINITIONS:
            Clusters: buckets of musical similarity
            Segments: contiguous blocks of beats belonging to the same cluster
            Silhouette: A score given to a cluster that measures how well the cluster
                        members fit together. The value is from -1 to +1. Higher values
                        indicate higher quality.
            Orphans: Segments with only one beat. The presence of orphans is a potential
                     sign of overfitting.

        SUMMARY:
            There are lots of things that might indicate one cluster count is better than another.
            High silhouette scores for the candidate clusters mean that the jumps will be higher
            quality.

            On the other hand, we could easily choose so many clusters that everyone has a great
            silhouette score but none of the beats have other segments into which they can jump.
            That will be a pretty boring result!

            So, the cluster/segment ratio matters, too. The higher the number, the more places (on average)
            a beat can jump. However, if the beats aren't very similar (low silhouette scores) then
            the jumps won't make any musical sense.

            So, we can't just choose the cluster count with the highest average silhouette score or the
            highest cluster/segment ratio.

            Instead, we compute a simple fitness score of:
                cluster_count * ratio * average_silhouette

            Finally, segments with only one beat are a potential (but not definite) sign of overfitting.
            We call these one-beat segments 'orphans'. We want to keep an eye out for those and slightly
            penalize any candidate cluster count that contains orphans.

            If we find an orphan, we scale the fitness score by .8 (ie. penalize it 20%). That's
            enough to push any candidate cluster count down the stack rank if orphans aren't
            otherwise very common across most of the other cluster count choices.

        RETURNS:
            tuple of (best cluster count, cluster labels for each beat)
    '''
    self._clusters_list = []

    best_cluster_size = 0
    best_labels = None
    best_cluster_score = 0

    # we need at least 3 clusters for any song and shouldn't need to calculate more than
    # 48 clusters for even a really complicated piece of music.
    for n_clusters in range(48, 2, -1):
        self.__report_progress(.51, "Testing a cluster value of %d..." % n_clusters)

        # compute a matrix of the Eigen-vectors / their normalized values
        X = evecs[:, :n_clusters] / Cnorm[:, n_clusters-1:n_clusters]

        # create the candidate clusters and fit them
        clusterer = sklearn.cluster.KMeans(n_clusters=n_clusters, max_iter=300,
                                           random_state=0, n_init=20)
        cluster_labels = clusterer.fit_predict(X)

        # get some key statistics, including how well each beat in the cluster resemble
        # each other (the silhouette average), the ratio of segments to clusters, and the
        # length of the smallest segment in this cluster configuration
        silhouette_avg = sklearn.metrics.silhouette_score(X, cluster_labels)

        ratio, min_segment_len = self.__segment_stats_from_labels(cluster_labels.tolist())

        # We need to grade each cluster according to how likely it is to produce a good
        # result. There are a few factors to look at.
        #
        # First, we can look at how similar the beats in each cluster (on average) are for
        # this candidate cluster size. This is known as the silhouette score. It ranges
        # from -1 (very bad) to 1 (very good).
        #
        # Another thing we can look at is the ratio of clusters to segments. Higher ratios
        # are preferred because they afford each beat in a cluster the opportunity to jump
        # around to meaningful places in the song.
        #
        # All other things being equal, we prefer a higher cluster count to a lower one
        # because it will tend to make the jumps more selective -- and therefore higher
        # quality.
        #
        # Lastly, if we see that we have segments equal to just one beat, that might be
        # a sign of overfitting. We call these one beat segments 'orphans'. Some songs,
        # however, will have orphans no matter what cluster count you use. So, we don't
        # want to throw out a cluster count just because it has orphans. Instead, we
        # just de-rate its fitness score. If most of the cluster candidates have orphans
        # then this won't matter in the overall scheme because everyone will be de-rated
        # by the same scaler.
        #
        # Putting this all together, we multiply the cluster count * the average
        # silhouette score for the clusters in this candidate * the ratio of clusters to
        # segments. Then we scale (or de-rate) the fitness score by whether or not is has
        # orphans in it.
        orphan_scaler = .8 if min_segment_len == 1 else 1

        cluster_score = n_clusters * silhouette_avg * ratio * orphan_scaler
        #cluster_score = ((n_clusters/48.0) * silhouette_avg * (ratio/10.0)) * orphan_scaler

        # if this cluster count has a score that's better than the best score so far, store
        # it for later. NOTE: '>=' means ties are won by the *smaller* cluster
        # count, since the loop iterates from 48 downward.
        if cluster_score >= best_cluster_score:
            best_cluster_score = cluster_score
            best_cluster_size = n_clusters
            best_labels = cluster_labels

    # return the best results
    return (best_cluster_size, best_labels)
@staticmethod
def __segment_count_from_labels(labels):
    ''' Count the segments in an ordered list of cluster labels. A segment
        is a maximal run of consecutive beats sharing the same label. '''
    segments = 0
    previous = -1
    for current in labels:
        if current != previous:
            previous = current
            segments += 1
    return segments
def __segment_stats_from_labels(self, labels):
    ''' Computes the segment/cluster ratio and min segment size value given an array
        of labels. A segment is a maximal run of consecutive identical labels.

        Returns:
            tuple of (segment_count / cluster_count, length of the shortest segment)
    '''
    segment_count = 0.0
    segment_length = 0
    clusters = max(labels) + 1
    previous_label = -1
    segment_lengths = []
    for label in labels:
        if label != previous_label:
            previous_label = label
            segment_count += 1.0
            # close out the previous segment (skipped on the very first beat)
            if segment_length > 0:
                segment_lengths.append(segment_length)
            segment_length = 1
        else:
            segment_length += 1
    # BUGFIX: the final segment was never recorded, so a trailing one-beat
    # "orphan" went undetected and a single-segment label list crashed min()
    # on an empty sequence.
    if segment_length > 0:
        segment_lengths.append(segment_length)
    # self.__report_progress( .52, "clusters: %d, ratio: %f, min_seg: %d" % (clusters, segment_count/len(labels), segment_length) )
    return float(segment_count) / float(clusters), min(segment_lengths)
def __compute_best_cluster(self, evecs, Cnorm):
    ''' Attempts to compute optimum clustering from a set of simplified
        heuristics. This method has been deprecated in favor of code above that takes into
        account the average silhouette score of each cluster. You can force the code to use
        this method by passing in use_v1=True in the constructor.

        PARAMETERS:
            evecs: Eigen-vectors computed from the segmentation algorithm
            Cnorm: Cumulative normalization of evecs. Easier to pass it in than
                   compute it from scratch here.

        KEY DEFINITIONS:
            Clusters: buckets of musical similarity
            Segments: contiguous blocks of beats belonging to the same cluster
            Orphans: clusters that only belong to one segment
            Stub: a cluster with less than N beats. Stubs are a sign of
                  overfitting.

        SUMMARY:
            Group the beats in [8..64] clusters. The key metric is the segment:cluster ratio.
            This value gives the avg number of different segments to which a cluster
            might belong. The higher the value, the more diverse the playback because
            the track can jump more freely. There is a balance, however, between this
            ratio and the number of clusters. In general, we want to find the highest
            numeric cluster that has a ratio of segments:clusters nearest 4.
            That ratio produces the most musically pleasing results.

            Basically, we're looking for the highest possible cluster # that doesn't
            obviously overfit.

            Someday I'll implement a proper RMSE algorithm...

        RETURNS:
            tuple of (winning cluster size, array of cluster labels for the beats)
    '''
    self._clusters_list = []

    # We compute the clusters between 4 and 64. Owing to the inherent
    # symmetry of Western popular music (including Jazz and Classical), the most
    # pleasing musical results will often, though not always, come from even cluster values.
    for ki in range(4,64, 2):
        # compute a matrix of the Eigen-vectors / their normalized values
        X = evecs[:, :ki] / Cnorm[:, ki-1:ki]

        # cluster with candidate ki
        labels = sklearn.cluster.KMeans(n_clusters=ki, max_iter=1000,
                                        random_state=0, n_init=20).fit_predict(X)

        entry = {'clusters':ki, 'labels':labels}

        # create an array of dictionary entries containing (a) the cluster label,
        # (b) the number of total beats that belong to that cluster, and
        # (c) the number of segments in which that cluster appears.
        lst = []
        for i in range(0,ki):
            lst.append( {'label':i, 'beats':0, 'segs':0} )

        last_label = -1
        for l in labels:
            # a label change marks the start of a new segment for that cluster
            if l != last_label:
                lst[l]['segs'] += 1
                last_label = l
            lst[l]['beats'] += 1

        entry['cluster_map'] = lst

        # get the average number of segments to which a cluster belongs
        entry['seg_ratio'] = np.mean([l['segs'] for l in entry['cluster_map']])

        self._clusters_list.append(entry)

    # get the max cluster with the segments/cluster ratio nearest to 4. That
    # will produce the most musically pleasing effect
    max_seg_ratio = max( [cl['seg_ratio'] for cl in self._clusters_list] )
    max_seg_ratio = min( max_seg_ratio, 4 )
    final_cluster_size = max(cl['clusters'] for cl in self._clusters_list if cl['seg_ratio'] >= max_seg_ratio)

    # compute a very high fidelity set of clusters using our selected cluster size.
    X = evecs[:, :final_cluster_size] / Cnorm[:, final_cluster_size-1:final_cluster_size]
    labels = sklearn.cluster.KMeans(n_clusters=final_cluster_size, max_iter=1000,
                                    random_state=0, n_init=1000).fit_predict(X)

    # labels = next(c['labels'] for c in self._clusters_list if c['clusters'] == final_cluster_size)

    # return a tuple of (winning cluster size, [array of cluster labels for the beats])
    return (final_cluster_size, labels)
def __add_log(self, line):
    """Append one line of diagnostic text to the accumulated debug log."""
    self._extra_diag = self._extra_diag + line + "\n"
|
ircbot.py
|
'''Todo:
    * Add multiple-thread support for the async_process functions
    * Consider running each handler function in its own thread
'''
import sys
import socket
import re
import threading
import logging
import time
if sys.hexversion < 0x03000000:
#Python 2
import Queue as queue
BlockingIOError = socket.error
else:
import queue
from .ircclient import IRCClient
#Somewhat complex regex that accurately matches nick!username@host, with named groups for easy parsing and usage
# NOTE(review): the character class includes the range '<-\[' (every character
# from '<' through '['), which also admits '>', '?', '@' and uppercase letters.
# Presumably a literal '<' was intended -- confirm against the IRC nick grammar
# before tightening. Group order is (nick, user, host).
user_re = re.compile(r'(?P<nick>[\w\d<-\[\]\^\{\}\~\-|]+)!(?P<user>[\w\d<-\[\]\^\{\}\~\-|]+)@(?P<host>.+)')
class IRCBot(IRCClient):
    '''See `IRCClient` for basic client usage, here is usage for the bot system

    Handler notation:
        on_join(self, nick, host, channel)
        on_topic(self, nick, host, channel, topic)
        on_part(self, nick, host, channel, message)
        on_msg(self, nick, host, channel, message)
        on_privmsg(self, nick, host, message)
        on_chanmsg(self, nick, host, channel, message)
        on_notice(self, nick, host, channel, message)
        on_nick(self, nick, new_nick, host)
    '''
    # NOTE(review): this registry is a class-level mutable dict, so every
    # IRCBot instance in one process shares the same handlers. Fine for a
    # single bot; confirm before instantiating several bots in one process.
    _handlers = {
        'join': [],
        'part': [],
        'kick': [],
        'topic': [],
        'msg': [],
        'privmsg': [],
        'chanmsg': [],
        'notice': [],
        'nick': []
    }
    _process_thread = None

    def _async_process(self):
        """Dispatch loop, run in its own thread by start().

        Pops raw IRC lines (already split into tokens) off self._in_queue and
        fans them out to the registered handlers until self._stop_event is set.
        Raw lines have the form:  :nick!user@host COMMAND <ARGS>
        """
        while not self._stop_event.is_set():
            time.sleep(0.01)
            try:
                args = self._in_queue.get_nowait()
                logging.debug(args)
                userhost = user_re.search(args[0][1:])
                if userhost:
                    # BUGFIX: user_re's groups are (nick, user, host) in that
                    # order; the previous unpacking `nick, host, user = ...`
                    # silently handed every handler a swapped user/host pair.
                    nick, user, host = userhost.groups()
                    command = args[1]
                    if command == 'JOIN':
                        channel = args[2][1:] #JOIN channels are : prefixed
                        for handler in self._handlers['join']:
                            handler(self, nick, host, channel)
                    elif command == 'TOPIC':
                        channel = args[2]
                        topic = ' '.join(args[3:])
                        for handler in self._handlers['topic']:
                            handler(self, nick, host, channel, topic)
                    elif command == 'PART':
                        channel = args[2]
                        message = ' '.join(args[3:])
                        for handler in self._handlers['part']:
                            handler(self, nick, host, channel, message)
                    elif command == 'PRIVMSG':
                        channel = args[2]
                        message = ' '.join(args[3:])[1:]
                        # generic 'msg' handlers fire for every PRIVMSG
                        for handler in self._handlers['msg']:
                            handler(self, nick, host, channel, message)
                        if channel[0] == '#':
                            #this is a channel
                            for handler in self._handlers['chanmsg']:
                                handler(self, nick, host, channel, message)
                        else:
                            #private message
                            for handler in self._handlers['privmsg']:
                                handler(self, nick, host, message)
                    elif command == 'KICK':
                        channel = args[2]
                        kicked_nick = args[3]
                        reason = ' '.join(args[4:])[1:]
                        for handler in self._handlers['kick']:
                            handler(self, nick, host, channel, kicked_nick, reason)
                    elif command == 'NICK':
                        new_nick = args[2][1:]
                        for handler in self._handlers['nick']:
                            handler(self, nick, new_nick, host)
                    elif command == 'NOTICE':
                        #:nick!user@host NOTICE <userchan> :message
                        channel = args[2]
                        message = ' '.join(args[3:])
                        for handler in self._handlers['notice']:
                            handler(self, nick, host, channel, message)
                    else:
                        logging.warning("Unhandled command %s" % command)
                self._in_queue.task_done()
            except queue.Empty:
                pass
            except Exception as e:
                # any handler error tears the bot down rather than leaving a
                # half-dead dispatch thread running
                logging.debug(e.args)
                self.stop()
                sys.exit(1)

    def start(self):
        """Start the underlying IRC client, then spin up the dispatch thread."""
        IRCClient.start(self)
        self._process_thread = threading.Thread(target=self._async_process)
        self._process_thread.start()

    def on(self, type):
        '''Decorator factory: @bot.on('join') registers the decorated function
        as a handler for that event type and returns it unchanged.

        BUGFIX: the inner decorator was previously declared as
        `def decorator(self, func)`, so the decorated function got bound to
        the `self` parameter and the call raised TypeError; `self` is already
        available from the enclosing method's closure.
        '''
        def decorator(func):
            '''decorated functions should be written as class methods
            @on('join')
            def on_join(self, channel):
                print("Joined channel %s" % channel)
            '''
            self._handlers[type].append(func)
            return func
        return decorator

    # Plain (non-decorator-factory) registration helpers, one per event type.
    def on_join(self, func):
        self._handlers['join'].append(func)
        return func

    def on_part(self, func):
        self._handlers['part'].append(func)
        return func

    def on_kick(self, func):
        self._handlers['kick'].append(func)
        return func

    def on_msg(self, func):
        self._handlers['msg'].append(func)
        return func

    def on_privmsg(self, func):
        self._handlers['privmsg'].append(func)
        return func

    def on_chanmsg(self, func):
        self._handlers['chanmsg'].append(func)
        return func

    def on_notice(self, func):
        self._handlers['notice'].append(func)
        return func

    def on_nick(self, func):
        self._handlers['nick'].append(func)
        return func
__all__ = ['IRCBot']
|
Stats.py
|
import sys, csv, pysam, os
from File_Output import SiteToJSON
import json
import multiprocessing as mp
from Params import stats_params
class JSONSerializable(object):
    """Mixin whose repr() is the JSON encoding of the instance's attributes.

    Nested objects are serialized through their __dict__ as well.
    """
    def __repr__(self):
        return json.dumps(vars(self), default = lambda member: vars(member))
class Site(JSONSerializable):
    """
    One genomic site: its reference/alternative bases, its classification
    (SNP, homozygous, undefined, ...) and the per-sample data observed there.
    """
    def __init__(self, chrom, pos, ref, alts, kind, samples, sample_names):
        """
        Args:
            chrom (str): chromosome name
            pos (int): zero-based genomic position
            ref (str): reference base (A, C, G or T)
            alts (dict or None): alternative bases keyed 'A1'..'A3', ordered by
                read depth (A1 deepest)
            kind (str): site classification ('SNP', 'HOMO', 'UNDEF', ...)
            samples (dict): sample name -> Sample; an empty dict triggers
                creation of zeroed-out Sample objects
            sample_names (list): sample names in BAM-file order

        Attributes:
            true_pos (int): one-based genomic position
            bulk (dict): per-base read depth observed in the bulk sample
            snp_tuple_star (dict): tp* statistics
        """
        self.chrom = chrom
        self.pos = pos
        self.ref = ref
        self.alts = alts
        self.kind = kind
        self.samples = samples
        if not samples:
            self.init_samples(sample_names)
        self.true_pos = pos + 1
        self.bulk = {}
        self.snp_tuple_star = {}

    def init_samples(self, sample_names):
        """Populate self.samples with one zeroed-out Sample per name."""
        for name in sample_names:
            self.samples[name] = Sample(name, {'A' : 0, 'C' : 0, 'G' : 0, 'T' : 0}, {}, 'None')
class Sample(JSONSerializable):
    """
    Per-sample view of a genomic site: allelic depths, tuple counts and
    genotype information.
    """
    def __init__(self, name, AD, tuples, info):
        """
        Args:
            name (str): sample name
            AD (dict): allelic depth per base, e.g. {'A': 0, 'C': 0, 'G': 0, 'T': 0}
            tuples (dict): tuple counts, e.g. {'AA': 0, 'AR': 0, 'RA': 0, 'RR': 0}
            info (str): genotype info ('HOMO-C2', 'HET-C1', 'UNKNOWN', ...)

        Attributes:
            snp_reads (int): number of read pairs covering both the site and the SNV
            indels (int): number of indel reads observed
        """
        self.name = name
        self.AD = AD
        self.tuples = tuples
        self.info = info
        self.snp_reads = 0
        self.indels = 0

    #todo: move to File_Output.py
    def get_AD(self, site):
        """Render this sample's allelic depths at *site* as a display string,
        e.g. 'R:5/A1:3, O: 0' (R = reference depth, O = other bases)."""
        ref_depth = self.AD[site.ref]
        alt_keys = sorted(k for k, v in site.alts.items() if v != None and self.AD[v] > 0)
        alt_parts = [k + ":" + str(self.AD[site.alts[k]]) for k in alt_keys]
        if ref_depth != 0 and alt_parts:
            str_AD = 'R:{R}/{alts}'.format(R = ref_depth, alts = ",".join(alt_parts))
        elif ref_depth == 0 and alt_parts:
            str_AD = "./{alts}".format(alts = ",".join(alt_parts))
        elif ref_depth != 0 and not alt_parts:
            str_AD = 'R:{R}/.'.format(R = ref_depth)
        else:
            str_AD = "./."
        # everything that is neither the reference nor a reported alternative
        chosen = [v for k, v in site.alts.items() if v != None and self.AD[v] > 0]
        other = sum(d for b, d in self.AD.items() if b != site.ref and b not in chosen and d > 0)
        str_AD += ", O: {other}".format(other = other)
        return str_AD
class Read(object):
    """
    One aligned read: a thin wrapper around pysam.AlignedSegment data that
    keeps its mate, reference span, per-position bases/qualities, and whether
    the read (or its mate) covers the SNV of interest.
    """
    def __init__(self, id, mate, sequence, ind_pos, start, end, base_quality, mapping_quality, has_snp):
        """
        Args:
            id (str): query name of the read
            mate (Read or None): the paired mate, once matched
            sequence (str): read sequence including soft-clipped bases
            ind_pos (list of (query_index, ref_pos) tuples): aligned pairs;
                either element may be None for insertions/deletions
            start (int): zero-based leftmost reference coordinate
            end (int): last aligned reference position (reference_end - 1)
            base_quality (sequence of int): per-query-index base qualities
            mapping_quality (int): mapping quality of the read
            has_snp (bool): True once the read or its mate covers the SNV
        """
        self.id = id
        self.mate = mate
        self.start = start
        self.end = end
        self.bases = self.init_bases(ind_pos, sequence)
        self.base_quality = self.init_base_quality(ind_pos, base_quality)
        self.mapping_quality = mapping_quality
        self.has_snp = has_snp

    def __str__(self):
        return "ID: {ID}, MATE: {MATE}, SEQ: {SEQ}, START: {START}, END: {END}".format(ID = self.id, MATE = self.mate.id, SEQ = self.bases, START = self.start, END = self.end)

    def init_bases(self, ind_pos, sequence):
        """Map reference position -> read base; deletions are marked 'D',
        insertions (no reference position) are skipped."""
        bases = dict()
        for query_index, ref_pos in ind_pos:
            if ref_pos is None:
                continue
            bases[ref_pos] = sequence[query_index] if query_index is not None else 'D'
        return bases

    def init_base_quality(self, ind_pos, base_quality_list):
        """Map reference position -> base quality; deletions get quality 0."""
        qualities = dict()
        for query_index, ref_pos in ind_pos:
            if ref_pos is not None:
                qualities[ref_pos] = base_quality_list[query_index] if query_index is not None else 0
        return qualities
def get_reads(snp, bams):
    """
    Collect all paired, well-mapped reads within "fragment_length" of an SNV
    and pair each read with its mate.

    Args:
        snp (Site): the SNV site
        bams (list of (str, pysam.AlignmentFile)): name and reader per BAM file
    Returns:
        dict mapping sample name -> list of Read objects
    """
    sample_reads = dict()
    for bam_name, bam_file in bams:
        reads = list()
        mates = dict()    # query name -> first read seen with that name
        left = max(0, snp.pos - stats_params["fragment_length"])
        right = snp.pos + stats_params["fragment_length"]
        # TODO try except (get_ref..)
        for read in bam_file.fetch(snp.chrom, left, right):
            if not (read.mapping_quality >= stats_params["mapping_quality"] and read.is_paired and read.is_proper_pair):
                continue
            r = Read(read.query_name, None, read.query_sequence, read.get_aligned_pairs(),
                     read.reference_start, read.reference_end-1, read.query_qualities,
                     read.mapping_quality, False)
            if snp.pos in r.bases.keys():
                r.has_snp = True
            partner = mates.get(r.id)
            if partner is None:
                # first read of the pair: remember it and wait for the mate
                mates[r.id] = r
            else:
                if partner.mate is not None:
                    # a third alignment with the same name: ignore it
                    continue
                r.mate = partner
                partner.mate = r
                # SNV coverage propagates to both ends of the pair
                if r.has_snp or partner.has_snp:
                    snp.samples[bam_name].snp_reads += 1
                    r.has_snp = True
                    partner.has_snp = True
            reads.append(r)
        sample_reads[bam_name] = reads
    return sample_reads
def get_references(chrom, start, end, ref_file):
    """
    Fetch the reference bases for an inclusive position range.

    Args:
        chrom (str): chromosome name
        start (int): start position (inclusive)
        end (int): end position (inclusive)
        ref_file (pysam.FastaFile): reference file reader
    Returns:
        dict of position (int) -> base (str), or None for non-ACGT bases
    """
    try:
        sequence = ref_file.fetch(chrom, start, end+1)
    except KeyError:
        # some references prefix chromosome names with 'chr'
        sequence = ref_file.fetch("chr"+chrom, start, end+1)
    aligned_pairs = dict()
    span = end - start + 1
    for offset in range(span):
        base = sequence[offset]
        aligned_pairs[start + offset] = base if base in stats_params["acceptable_bases"] else None
    return aligned_pairs
def init_site(snp, sample_names, reference, pos):
    """Return the Site to use for *pos*: the SNP object itself at the SNV
    position, otherwise a fresh, unclassified Site seeded with the reference
    base at that position."""
    ref = reference[pos]
    if pos == snp.pos:
        return snp
    return Site(snp.chrom, pos, ref, None, '', dict(), sample_names)
def snp_limits(snp, reads):
    """
    Compute the genomic span covered by SNV-supporting reads.

    Args:
        snp (Site): the SNV site
        reads (dict): sample name -> list of Read objects
    Returns:
        (min start, max end) over all samples with enough SNV reads,
        or (None, None) when no sample passes the snp_read_limit
    """
    starts = list()
    ends = list()
    found_snp = False
    for sample_name, sample_reads in reads.items():
        if snp.samples[sample_name].snp_reads <= stats_params["snp_read_limit"]:
            continue
        found_snp = True
        snp_starts = [read.start for read in sample_reads if read.has_snp]
        snp_ends = [read.end for read in sample_reads if read.has_snp]
        starts.append(min(snp_starts))
        ends.append(max(snp_ends))
    if not found_snp:
        return None, None
    return min(starts), max(ends)
def allele_counter(reads, site):
    """
    Count the allelic distribution at *site*: add every high-quality base to
    the per-sample AD dict and tally indel ('D'/'I') observations.

    Args:
        reads (dict): sample name -> list of Read objects
        site (Site): the site being counted (mutated in place)
    """
    for sample_name, sample_reads in reads.items():
        sample = site.samples[sample_name]
        for read in sample_reads:
            if site.pos not in read.bases.keys():
                continue
            base = read.bases[site.pos].upper()
            if base in stats_params["acceptable_bases"] and read.bases[site.pos] != None and read.base_quality[site.pos] > stats_params["base_quality"]:
                sample.AD[base] += 1
            elif base in {'D', 'I'}:
                sample.indels += 1
def is_indel(site):
    """
    Decide whether *site* is an indel: the mean per-sample indel ratio
    (indels / total allelic depth) must exceed stats_params["indel_ratio"].

    Args:
        site (Site): Site object
    """
    total_ratio = 0.0
    for sample in site.samples.values():
        depth = sum(sample.AD.values())
        if depth > 0:
            total_ratio += float(sample.indels) / depth
    return (total_ratio / len(site.samples)) > stats_params["indel_ratio"]
def ratio(num1, num2):
    """Return min(num1, num2) / (num1 + num2), or 0.0 when the sum is not
    positive (e.g. both counts are zero)."""
    total = num1 + num2
    return min(num1, num2) / total if total > 0 else 0.0
def define_altenative(site):
    """
    define_alternative: assigns a Site as either alternative, homogenous or undefined
    this function depends on four parameters: alt_ratio_limit, dp_limit, vote_ratio_limit and sample_vote_limit

    (NOTE: the function name keeps the original 'altenative' spelling because
    callers elsewhere use it.)

    Each sample casts at most one vote for its strongest qualifying
    non-reference base; the winning bases become site.alts['A1'..'A3'].

    Args:
        site (Site object): Site object (mutated in place: alts and kind)
    """
    if site.alts is None:
        allele_vote = {'A' : 0, 'C' : 0, 'G' : 0, 'T' : 0}
        for sample in site.samples.values():
            lst = []
            for b in sample.AD.keys():
                if b != site.ref:
                    # a base qualifies by ratio-vs-reference at sufficient
                    # combined depth, or by its own depth alone
                    if ratio(sample.AD[b], sample.AD[site.ref]) >= stats_params["alt_ratio_limit"] and \
                        (sample.AD[site.ref]+sample.AD[b]) >= stats_params["dp_limit"]:
                        lst.append((b, sample.AD[b]))
                    elif ratio(sample.AD[b], sample.AD[site.ref]) <= stats_params["alt_ratio_limit"] and sample.AD[b] >= stats_params["dp_limit"]:
                        lst.append((b, sample.AD[b]))
            if lst != []:
                lst.sort(key = lambda t: t[1], reverse=True)
                #R:10/A1:5, A2:5 <-- will not be allowed to vote
                # only an outright strongest base earns this sample's vote
                if len(lst) == 1 or (lst[0][1] > lst[1][1]):
                    allele_vote[lst[0][0]] += 1
        # non-reference bases ranked by vote count, highest first
        max_vote_allele = sorted([b for b in allele_vote.items() if b[0] != site.ref], reverse = True, key = lambda t: t[1])
        sum_votes = sum([b[1] for b in max_vote_allele])
        vote_ratio = 0
        if sum_votes > 0:
            vote_ratio = max_vote_allele[0][1]/sum_votes
        site.alts = {'A1' : None, 'A2': None, 'A3' : None}
        if vote_ratio >= stats_params["vote_ratio_limit"] and sum_votes >= stats_params["sample_vote_limit"]:
            # fill A1..A3 with every base that received at least one vote
            i = 1
            for b in max_vote_allele:
                if b[1] != 0:
                    alt_str = "A" + str(i)
                    site.alts.update({alt_str:b[0]})
                    i += 1
        else:
            site.kind = 'UNDEF'
    # with no alternative bases at all the site is homozygous reference
    alts = [alt for alt in site.alts.values() if alt != None]
    if len(alts) == 0:
        site.kind = 'HOMO'
def count_tuple(site, snp, site_base, snp_base, sample_name):
    """
    Tally one (site base, SNV base) co-observation for *sample_name*.

    The two-letter keys read (site, snp): 'R' means the base matched the
    reference, 'A' the top alternative (A1). Pairs matching neither are
    ignored.

    Args:
        site (Site): the site being counted (mutated in place)
        snp (Site): the SNV site
        site_base (str): observed base at the site
        snp_base (str): observed base at the SNV
        sample_name (str): name of the sample
    """
    sample = site.samples[sample_name]
    counts = sample.tuples.setdefault(snp.pos, {'RR':0, 'RA':0, 'AR':0, 'AA':0})
    if site_base == site.ref and snp_base == snp.ref:
        counts['RR'] += 1
    elif site_base == site.ref and snp_base == snp.alts['A1']:
        counts['RA'] += 1
    elif site_base == site.alts['A1'] and snp_base == snp.ref:
        counts['AR'] += 1
    elif site_base == site.alts['A1'] and snp_base == snp.alts['A1']:
        counts['AA'] += 1
def tuple_counter(snp, sites, reads):
    """
    tuple_counter: count tuples for all samples (for a given region with an SNV)

    For each SNV-covering read pair, pairs the base seen at the SNV with the
    base seen at every other site the read covers.

    Args:
        snp (Site object): Site object for the SNV
        sites (dict): position -> Site object
        reads (dict): sample name -> list of Read objects
    """
    for sample_name, sample_reads in reads.items():
        for read in sample_reads:
            if not read.has_snp:
                continue
            # find which end of the pair actually covers the SNV position
            snp_read = read
            if snp.pos not in snp_read.bases.keys():
                snp_read = read.mate
            if snp_read is None or snp.pos not in snp_read.bases.keys():
                if snp_read is not None:
                    print(snp_read.id)
                print("Base not in snp mate pos", snp.pos)
                # BUGFIX: neither end of the pair covers the SNV; the old code
                # only printed a warning and then crashed below when indexing
                # snp_read.bases[snp.pos] (KeyError, or AttributeError when the
                # mate was missing).
                continue
            for pos in read.bases.keys():
                if pos in sites.keys() and sites[pos].kind != 'SNP' and read.base_quality[pos] >= stats_params["base_quality"]:
                    count_tuple(sites[pos], snp, read.bases[pos], snp_read.bases[snp.pos], sample_name)
def bulk_stats(site, bulk_bam):
    """
    bulk_stats: count the base distribution for the bulk sample at a site and
    flag the site as 'ERROR' when the bulk disagrees with the reference.

    Args:
        site (Site): Site object (mutated in place: bulk and possibly kind)
        bulk_bam (pysam.AlignmentFile): reader for the bulk BAM
    """
    bases = {"A" : 0, "C" : 0, "G" : 0, "T" : 0}
    for read in bulk_bam.fetch(site.chrom, site.pos, site.pos+1):
        if read.mapping_quality < stats_params["mapping_quality"]:
            continue
        r = Read(read.query_name, None, read.query_sequence, read.get_aligned_pairs(),
                 read.reference_start, read.reference_end-1, read.query_qualities,
                 read.mapping_quality, False)
        quality = r.base_quality.get(site.pos)
        if quality is not None and quality >= stats_params["base_quality"]:
            base = r.bases[site.pos]
            if base in bases.keys():
                bases[base] += 1
    total = sum(bases.values())
    if total > 0:
        site.bulk.update(bases)
        site.bulk['SUM'] = total
        # a non-SNP site whose bulk barely shows the reference base is suspect
        if site.kind != 'SNP' and float(site.bulk[site.ref])/total < stats_params["bulk_ref_limit"]:
            site.kind = 'ERROR'
def get_bams(bam_paths):
    """
    get_bams: returns pysam AlignmentFile objects for given bam paths.

    Args:
        bam_paths (str): path to a tab-separated file with NAME and PATH columns
    Returns:
        tuple of (list of (name, pysam.AlignmentFile) for every non-bulk row,
        pysam.AlignmentFile for the row named 'BULK', or None if absent)
    """
    bams = []
    bam_bulk = None
    # BUGFIX: mode 'rU' was removed in Python 3.11; newline='' is the
    # csv-module-recommended way to open input files. The with-block also
    # closes the handle, which previously leaked.
    with open(bam_paths, 'r', newline='') as handle:
        for row in csv.DictReader(handle, delimiter='\t'):
            if row['NAME'] == 'BULK':
                bam_bulk = pysam.AlignmentFile(row['PATH'], 'rb')
            else:
                bams.append((row['NAME'], pysam.AlignmentFile(row['PATH'], 'rb')))
    return bams, bam_bulk
def stats_to_json(i, snps_chunk_path, bams_path, sample_names, reference_path, output_name, queue):
    """
    stats_to_json: runs main stats for a given snp chunk and creates a json-file
    with the stats results

    Args:
        i (int): chunk ID
        snps_chunk_path (str): file name for chunk path
        bams_path (str): path to BAM files
        sample_names (list): list of strings for sample names
        reference_path (str): path to reference
        output_name (str): name of main json-output file
        queue (Queue object): queue needed for multithreading
    """
    bams, bam_bulk = get_bams(bams_path)
    reference_genome_file = pysam.Fastafile(reference_path)
    sites = dict()
    old_end = 0
    json_path = './.conbase/' + output_name + '_chunk_' + str(i) + '.json'
    json_file = SiteToJSON(json_path)
    # BUGFIX: mode 'rU' was removed in Python 3.11; open csv input with
    # newline='' instead. The with-block also guarantees the handle closes.
    with open(snps_chunk_path, 'r', newline='') as snps_handle:
        snps_reader = csv.DictReader(snps_handle, delimiter='\t')
        for row in snps_reader:
            # input rows are one-based; Site positions are zero-based
            snp = Site(row['CHROM'], int(row['POS']) - 1, (row['REF'].strip()), {"A1":row['ALT'].strip()}, 'SNP', dict(), sample_names)
            reads = get_reads(snp, bams)
            new_start, new_end = snp_limits(snp, reads)
            if (new_start == None and new_end == None):
                # no sample supports this SNV: flush pending sites and reset
                for s in sites.values():
                    if s.kind == '':
                        bulk_stats(s, bam_bulk)
                    json_file.write(s)
                sites = dict()
                old_end = 0
                continue
            if new_start - old_end > stats_params["fragment_length"]:
                # the new region cannot share reads with the previous one: flush
                for s in sites.values():
                    if s.kind == '':
                        bulk_stats(s, bam_bulk)
                    json_file.write(s)
                sites = dict()
            reference = get_references(snp.chrom, new_start, new_end, reference_genome_file)
            for pos in range(new_start, new_end+1):
                if pos not in sites.keys() and reference[pos] != None:
                    site = init_site(snp, sample_names, reference, pos)
                    allele_counter(reads, site)
                    if not is_indel(site):
                        sites[pos] = site
                        define_altenative(sites[pos])
            tuple_counter(snp, sites, reads)
            old_end = new_end
            queue.put(1)  # progress tick for the progress-bar process
    # flush whatever remains after the final SNV
    for s in sites.values():
        if s.kind == '':
            bulk_stats(s, bam_bulk)
        json_file.write(s)
    json_file.close()
def chunks(l, n):
    """Yield successive n-sized chunks from l; the last chunk may be shorter."""
    for start in range(0, len(l), n):
        yield l[start:start + n]
def blocks(files, size=65536):
    """Yield *size*-sized reads from an open file until it is exhausted."""
    chunk = files.read(size)
    while chunk:
        yield chunk
        chunk = files.read(size)
def new_chunk_file(chunk_number, output_name):
    """Open a fresh TSV chunk file under ./.conbase/, write its header row,
    and return (open file handle, path)."""
    path = './.conbase/' + output_name + '_snp_chunk_' + str(chunk_number) + '.tsv'
    handle = open(path, 'w')
    handle.write('CHROM\tPOS\tREF\tALT\n')
    return handle, path
def snps_to_chunks(snps_path, nodes, output_name):
    """
    snps_to_chunks: splits snps from a file into even-sized chunks.

    Chunk boundaries are only placed between SNVs that are far apart (or on
    different chromosomes), so no genomic region is split across workers.

    Args:
        snps_path (str): path to snp files
        nodes (int): number of nodes/chunks
        output_name (str): name of output file
    Returns:
        tuple of (list of chunk file paths, total SNV count)
    """
    print('Loading SNPS ...')
    # count data rows (subtract 1 for the header line)
    with open(snps_path, 'r') as f:
        snp_count = sum([bl.count("\n") for bl in blocks(f)]) - 1
    # BUGFIX: the caller unpacks (paths, count); the old nodes == 1 early
    # return handed back only [snps_path] and broke that unpacking.
    if nodes == 1:
        return [snps_path], snp_count
    chunk_size = int(snp_count/nodes)
    current_chunk_number = 0
    current_chunk_file, current_chunk_path = new_chunk_file(current_chunk_number, output_name)
    chunks_path = [current_chunk_path]
    i = 0
    prev_row_pos = None
    prev_row_chrom = None
    # BUGFIX: mode 'rU' was removed in Python 3.11; open csv input with
    # newline='' instead. The with-block also closes the previously-leaked handle.
    with open(snps_path, 'r', newline='') as snps_handle:
        snp_reader = csv.DictReader(snps_handle, delimiter='\t')
        for row in snp_reader:
            current_row_pos = int(row['POS'])
            # NOTE(review): int() will fail on non-numeric chromosome names
            # such as 'X' or 'MT' -- confirm the expected input format.
            current_row_chrom = int(row['CHROM'])
            if i > chunk_size:
                # only start a new chunk where neighbouring SNVs cannot share reads
                if (prev_row_pos != None and prev_row_chrom != None) and abs(current_row_pos - prev_row_pos) >= stats_params["fragment_length"]*2 or current_row_chrom != prev_row_chrom:
                    i = 0
                    current_chunk_file.close()
                    current_chunk_number += 1
                    current_chunk_file, current_chunk_path = new_chunk_file(current_chunk_number, output_name)
                    chunks_path.append(current_chunk_path)
            current_chunk_file.write(row['CHROM'] + '\t' + row['POS'] + '\t' + row['REF'] + '\t' + row['ALT'] + '\n')
            prev_row_pos = current_row_pos
            prev_row_chrom = current_row_chrom
            i += 1
    current_chunk_file.close()
    return chunks_path, snp_count
def get_sample_names(bam_paths):
    """Read the BAM description TSV and return the single-cell sample names.

    Args:
        bam_paths (str): path to a tab-separated file with a 'NAME' column.
    Returns:
        list of str: every NAME except the bulk sample ('BULK').
    """
    print('Loading BAMS ...')
    sample_names = []
    # 'rU' mode was removed in Python 3.11; use 'r' with newline='' (the mode
    # the csv module recommends) and close the file deterministically.
    with open(bam_paths, 'r', newline='') as bam_file:
        bam_reader = csv.DictReader(bam_file, delimiter='\t')
        for row in bam_reader:
            print(row['NAME'])
            if row['NAME'] != 'BULK':
                sample_names.append(row['NAME'])
    return sample_names
def progress_bar(nr_snps, queue, bar_width=100):
    """Render a text progress bar on stdout, driven by messages on ``queue``.

    Every item read from the queue counts as one processed SNP; the string
    'Done' terminates the bar.
    """
    sys.stdout.write("[{}]".format(" " * bar_width))
    sys.stdout.flush()
    sys.stdout.write("\b" * (bar_width + 1))
    processed = 0
    drawn = 0
    while True:
        if queue.get() == 'Done':
            sys.stdout.write("\n")
            break
        processed += 1
        filled = int(processed / nr_snps * bar_width)
        if filled != drawn:
            # Emit only the newly filled portion of the bar.
            sys.stdout.write("#" * (filled - drawn))
            sys.stdout.flush()
            drawn = filled
def stats(snps_path, bam_paths, reference_path, nodes, output_name):
    """
    stats: main function for Stats.py

    Orchestrates a full run: splits the SNP file into chunks, spawns one
    worker process per chunk (stats_to_json) plus a progress-bar process,
    then concatenates the per-chunk JSON outputs into one results file.

    Args:
        snps_path (str): path to snp files
        bam_paths (str): path to bam files
        reference_path (str): path to reference file
        nodes (int): number of nodes/chunks
        output_name (str): name of output file
    """
    # Working directory for intermediate chunk files and the results folder.
    if not os.path.exists("./.conbase"):
        os.makedirs("./.conbase")
    if not os.path.exists('../results/'):
        os.makedirs('../results/')
    # TODO: print that remove files
    # NOTE(review): os.system with 'rm'/'cat' is POSIX-only, and output_name
    # is interpolated unquoted — confirm it never contains spaces or shell
    # metacharacters.
    os.system("rm ./.conbase/" + output_name + "_chunk_*")
    os.system("rm ./.conbase/" + output_name + "_snp_chunk_*")
    sample_names = get_sample_names(bam_paths)
    snps_chunks_path, nr_snps = snps_to_chunks(snps_path, nodes, output_name)
    nr_chunks = len(snps_chunks_path)
    jobs = []
    # Workers report one message per processed SNP through this queue.
    queue = mp.Queue()
    for i, snps_chunk_path in enumerate(snps_chunks_path):
        p = mp.Process(target=stats_to_json, args=(i, snps_chunk_path, bam_paths, sample_names, reference_path, output_name, queue))
        jobs.append(p)
        p.start()
    # The progress bar runs as one extra process consuming the queue.
    p = mp.Process(target=progress_bar, args=(nr_snps, queue))
    jobs.append(p)
    p.start()
    for i, job in enumerate(jobs):
        # The last job is the progress bar: all workers have been joined by
        # the time we reach it, so send the 'Done' sentinel before joining.
        if i == len(jobs) - 1:
            queue.put('Done')
        job.join()
    print('All done')
    # Write the combined JSON header lines: sample names and run parameters.
    f = open( '../results/' + output_name + '.json', 'w')
    f.write('{' + '"samples":' + json.dumps(sample_names) + '}\n')
    f.write('{' + '"stats_params":' + json.dumps(stats_params) + '}\n')
    f.close()
    # Append each worker's chunk output in order, then clean up intermediates.
    for i in range(nr_chunks):
        f = './.conbase/' + output_name + '_chunk_' + str(i) + '.json'
        os.system('cat '+f+' >> ../results/' + output_name + '.json')
    os.system("rm ./.conbase/" + output_name + "_chunk_*")
    os.system("rm ./.conbase/" + output_name + "_snp_chunk_*")
|
pc_util.py
|
""" Utility functions for processing point clouds.
Heavily borrowed from pointnet2
Author: Charles R. Qi, Hao Su
Date: November 2016
"""
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
# Point cloud IO
import numpy as np
from plyfile import PlyData, PlyElement
import operator
import matplotlib.pyplot as pyplot
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.neighbors import KDTree
from multiprocessing import Process, Manager
def write_ply_color_multic(points, labels, out_filename):
    """Write (N,3) points as an ASCII PLY file, coloring each point by label.

    Args:
        points: (N,3) array of xyz coordinates.
        labels: length-N sequence mapped through the HSV colormap; values are
            presumably in [0, 1] — TODO confirm against callers.
        out_filename: destination path.
    """
    N = points.shape[0]
    # 'with' guarantees the handle is closed even if a write fails
    # (previously the file leaked on exception).
    with open(out_filename, 'w') as fout:
        ### Write header here
        fout.write("ply\n")
        fout.write("format ascii 1.0\n")
        fout.write("element vertex %d\n" % N)
        fout.write("property float x\n")
        fout.write("property float y\n")
        fout.write("property float z\n")
        fout.write("property uchar red\n")
        fout.write("property uchar green\n")
        fout.write("property uchar blue\n")
        fout.write("end_header\n")
        for i in range(N):
            c = pyplot.cm.hsv(labels[i])
            c = [int(x*255) for x in c]
            fout.write('%f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
def surface_variant_para(stored, pcndex, pc):
    """Compute a normalized surface-variant value per point of ``pc``.

    The result array is written to ``stored[pcndex]`` (a shared dict used by
    multiprocessing workers).  Values are rescaled into (0.1, 1.0]; a fully
    flat cloud maps to all ones.
    """
    num_neighbour = 10
    pca = PCA()
    tree = KDTree(pc, leaf_size=100, metric='euclidean')
    ### For each point we get the surface variant
    variants = np.zeros(pc.shape[0])
    neighbours = tree.query(pc, k=num_neighbour)[1]
    for point_idx in range(len(neighbours)):
        pca.fit(pc[neighbours[point_idx], :])
        lambdas = pca.singular_values_
        variants[point_idx] = lambdas[2] / float(sum(lambdas))
        if np.isnan(variants[point_idx]):
            variants[point_idx] = 0
    ### Normalize the surface variant here
    lo = np.min(variants)
    hi = np.max(variants)
    if float(hi - lo) == 0:
        stored[pcndex] = np.ones(variants.shape)
    else:
        stored[pcndex] = (variants - lo) / float(hi - lo) * 0.9 + 0.1
def sample_multi(pc):
    """Compute surface variants for a point cloud in parallel.

    Splits ``pc`` across 16 worker processes, each running
    surface_variant_para, and concatenates the per-chunk results in order.

    Returns:
        1-D array of surface-variant values, one per point of ``pc``.
    """
    ### Do multi-threading here to reduce time
    num_workers = 16
    result = []
    procs = []
    stored = Manager().dict()
    chunk = len(pc) // num_workers
    for i in range(num_workers):
        # The last worker takes the remainder so that the final
        # len(pc) % 16 points are not silently dropped (they previously
        # received no surface-variant value at all).
        end = len(pc) if i == num_workers - 1 else (i + 1) * chunk
        newbdata = pc[i * chunk:end, ...]
        p = Process(target=surface_variant_para, args=(stored, i, newbdata))
        p.start()
        procs.append(p)
    for p in procs:
        p.join()
    for ndex in sorted(stored.keys()):
        result.append(stored[ndex])
    return np.concatenate(result, 0)
### Multi-view to point cloud conversion
def DepthToPointCloud(image, label, pred, intrinsic):
    """Back-project a depth image into a point cloud.

    Pixels with depth 0 are treated as invalid and dropped.  intrinsic[0]
    serves as the focal length for both image axes.

    Returns:
        (points Nx3, labels N, preds N) for the valid pixels, in row-major
        pixel order.
    """
    height, width = image.shape[0], image.shape[1]
    rows, cols = np.meshgrid(range(height), range(width), indexing='ij')
    depths = image[rows, cols]
    pixel_labels = label[rows, cols]
    pixel_preds = pred[rows, cols]
    keep = (depths != 0)
    rows, cols = rows[keep], cols[keep]
    depths, pixel_labels, pixel_preds = depths[keep], pixel_labels[keep], pixel_preds[keep]
    points = np.zeros([len(rows), 3])
    focal = intrinsic[0]
    points[:, 0] = (cols - width / 2.0) / focal * depths
    points[:, 1] = (rows - height / 2.0) / focal * depths
    points[:, 2] = depths
    return points, pixel_labels, pixel_preds
def mv_to_pc(batch_image, batch_label, pred_label, batch_pose, intrinsic):
    """Fuse multi-view depth predictions into one decimated point cloud.

    Every view is mapped into the coordinate frame of the first pose; the
    fused cloud is decimated 100x and also dumped via pc2obj for inspection.
    """
    clouds, label_parts, pred_parts = [], [], []
    R_base = np.linalg.inv(batch_pose[0])
    for view in range(len(batch_image)):
        pc, label, pred = DepthToPointCloud(
            np.squeeze(batch_image[view][:, :, 0]),
            np.squeeze(batch_label[view]),
            np.squeeze(pred_label[view]),
            intrinsic)
        # Relative transform from this view into the first view's frame.
        R = np.matmul(R_base, batch_pose[view])
        pc = np.matmul(R[:3, :3], pc.T) + R[:3, 3:4]
        clouds.append(pc.copy())
        label_parts.append(label)
        pred_parts.append(pred)
    merged = (np.concatenate(clouds, 1)).T
    merged_labels = np.concatenate(label_parts, 0)
    merged_preds = np.concatenate(pred_parts, 0)
    pc2obj(merged[::100, :].T)
    return merged[::100, :], merged_labels[::100], merged_preds[::100]
def voting_pc(pc, pred):
    """Majority-vote per-point predictions onto a 100x-decimated cloud.

    Every 10th point casts a vote for its nearest neighbour among every
    100th point; each reduced point then takes the most frequent label it
    received.

    Args:
        pc: (N,3) point cloud.
        pred: length-N predicted labels.
    Returns:
        np.ndarray of labels, one per point of pc[::100].
    """
    pc_smaller = pc[::10, :]
    pred_smaller = pred[::10]
    pc_reduced = pc[::100, :]
    votes = {i: {} for i in range(pc_reduced.shape[0])}
    for i in range(len(pc_smaller)):
        if i % 1000 == 0:
            print("done with pc:", i)
        dist2 = np.sum((pc_reduced - pc_smaller[i, :]) ** 2, axis=1)
        idx = np.argmin(dist2)
        votes[idx][pred_smaller[i]] = votes[idx].get(pred_smaller[i], 0) + 1
    newpred = []
    for i in range(len(pc_reduced)):
        # dict.iteritems() was removed in Python 3; items() is the equivalent.
        # NOTE(review): a reduced point that received no votes still raises
        # ValueError here, as in the original — confirm every reduced point
        # is someone's nearest neighbour in practice.
        newpred.append(max(votes[i].items(), key=operator.itemgetter(1))[0])
    return np.array(newpred)
def mv_to_pc_voting(batch_image, batch_label, pred_label, batch_pose, intrinsic):
    """Like mv_to_pc, but smooths predictions via nearest-neighbour voting
    (voting_pc) before decimating the fused cloud.
    """
    clouds, label_parts, pred_parts = [], [], []
    R_base = np.linalg.inv(batch_pose[0])
    for view in range(len(batch_image)):
        pc, label, pred = DepthToPointCloud(
            np.squeeze(batch_image[view][:, :, 0]),
            np.squeeze(batch_label[view]),
            np.squeeze(pred_label[view]),
            intrinsic)
        # Relative transform from this view into the first view's frame.
        R = np.matmul(R_base, batch_pose[view])
        pc = np.matmul(R[:3, :3], pc.T) + R[:3, 3:4]
        clouds.append(pc.copy())
        label_parts.append(label)
        pred_parts.append(pred)
    merged = (np.concatenate(clouds, 1)).T
    merged_labels = np.concatenate(label_parts, 0)
    merged_preds = np.concatenate(pred_parts, 0)
    pred_reduced = voting_pc(merged, merged_preds)
    pc2obj(merged[::100, :].T)
    return merged[::100, :], merged_labels[::100], pred_reduced
def pc2obj(pc, filepath='test.obj'):
    """Dump a 3xN point cloud as OBJ vertex lines to ``filepath``."""
    nverts = pc.shape[1]
    with open(filepath, 'w') as out:
        out.write("# OBJ file\n")
        out.writelines("v %.4f %.4f %.4f\n" % (pc[0, v], pc[1, v], pc[2, v])
                       for v in range(nverts))
# ----------------------------------------
# Point Cloud/Volume Conversions
# ----------------------------------------
def point_cloud_label_to_surface_voxel_label(point_cloud, label, res=0.0484):
    """Aggregate per-point labels into per-voxel labels by majority vote.

    Args:
        point_cloud: (N,3) points.
        label: (N,) or (N,C) non-negative integer labels.
        res: voxel edge length.
    Returns:
        (uvidx, uvlabel, nvox): unique flattened voxel indices, the majority
        label per voxel (per column when label is 2-D), and the grid size.
    """
    coordmax = np.max(point_cloud, axis=0)
    coordmin = np.min(point_cloud, axis=0)
    nvox = np.ceil((coordmax - coordmin) / res)
    vidx = np.ceil((point_cloud - coordmin) / res)
    # Flatten the 3-D voxel coordinates into a single index.
    vidx = vidx[:, 0] + vidx[:, 1] * nvox[0] + vidx[:, 2] * nvox[0] * nvox[1]
    uvidx = np.unique(vidx)
    if label.ndim == 1:
        uvlabel = [np.argmax(np.bincount(label[vidx == uv].astype(np.uint32))) for uv in uvidx]
    else:
        assert (label.ndim == 2)
        # np.zeros takes the shape as one tuple; the original passed the
        # second dimension as the dtype argument, which raises TypeError.
        uvlabel = np.zeros((len(uvidx), label.shape[1]))
        for i in range(label.shape[1]):
            uvlabel[:, i] = np.array([np.argmax(np.bincount(label[vidx == uv, i].astype(np.uint32))) for uv in uvidx])
    return uvidx, uvlabel, nvox
def point_cloud_label_to_surface_voxel_label_fast(point_cloud, label, res=0.0484):
    """Fast voxel labeling: each voxel takes the label of the FIRST point
    that falls into it (no majority vote, unlike the non-fast variant).
    """
    lo = np.min(point_cloud, axis=0)
    hi = np.max(point_cloud, axis=0)
    nvox = np.ceil((hi - lo) / res)
    vidx = np.ceil((point_cloud - lo) / res)
    # Flatten 3-D voxel coordinates into a single index.
    vidx = vidx[:, 0] + vidx[:, 1] * nvox[0] + vidx[:, 2] * nvox[0] * nvox[1]
    uvidx, vpidx = np.unique(vidx, return_index=True)
    if label.ndim == 1:
        uvlabel = label[vpidx]
    else:
        assert (label.ndim == 2)
        uvlabel = label[vpidx, :]
    return uvidx, uvlabel, nvox
def point_cloud_to_volume_batch(point_clouds, vsize=12, radius=1.1, flatten=True):
    """ Input is BxNx3 batch of point cloud
        Output is Bx(vsize^3) when flatten, else Bxvsizexvsizexvsizex1
    """
    volumes = [point_cloud_to_volume(np.squeeze(point_clouds[b, :, :]), vsize, radius)
               for b in range(point_clouds.shape[0])]
    if flatten:
        return np.vstack([v.flatten() for v in volumes])
    return np.concatenate([np.expand_dims(np.expand_dims(v, -1), 0) for v in volumes], 0)
def point_cloud_label_to_volume_batch(point_clouds, labels, weights, vsize=12, radius=1.1, flatten=True):
    """ Input is BxNx3 batch of point cloud with per-point labels and weights
        Output is (Bxvsize^3x1 occupancy, label, weight) volume triplet
    """
    vol_list, label_list, weight_list = [], [], []
    for b in range(point_clouds.shape[0]):
        vol, label, weight = point_cloud_label_to_volume(
            np.squeeze(point_clouds[b, :, :]),
            np.squeeze(labels[b, :]),
            np.squeeze(weights[b, :]),
            vsize, radius)
        vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
        label_list.append(np.expand_dims(label, 0))
        weight_list.append(np.expand_dims(weight, 0))
    return np.concatenate(vol_list, 0), np.concatenate(label_list, 0), np.concatenate(weight_list, 0)
def point_cloud_label_to_volume_batch_exact(point_clouds, vsize=12, radius=1.1, flatten=True):
    """ Input is BxNx3 batch of point cloud
        Output is Bxvsizexvsizexvsizex1 occupancy volumes
        assumes points are in range [-radius, radius]
    """
    volumes = []
    voxel = 2 * radius / float(vsize)
    for b in range(point_clouds.shape[0]):
        occupancy = np.zeros((vsize, vsize, vsize))
        coords = ((np.squeeze(point_clouds[b, :, :]) + radius) / voxel).astype(int)
        occupancy[coords[:, 0], coords[:, 1], coords[:, 2]] = 1.0
        volumes.append(np.expand_dims(np.expand_dims(occupancy, -1), 0))
    return np.concatenate(volumes, 0)
def point_cloud_label_to_volume(points, label, weight, vsize, radius=1.1):
    """ input is Nx3 points with per-point label and weight.
        output is three vsize^3 volumes: occupancy, label, weight.
        assumes points are in range [-radius, radius]
    """
    voxel = 2 * radius / float(vsize)
    coords = ((points + radius) / voxel).astype(int)
    occupancy = np.zeros((vsize, vsize, vsize))
    label_vol = np.zeros((vsize, vsize, vsize))
    weight_vol = np.zeros((vsize, vsize, vsize))
    occupancy[coords[:, 0], coords[:, 1], coords[:, 2]] = 1.0
    label_vol[coords[:, 0], coords[:, 1], coords[:, 2]] = label
    weight_vol[coords[:, 0], coords[:, 1], coords[:, 2]] = weight
    return occupancy, label_vol, weight_vol
def point_cloud_to_volume(points, vsize, radius=1.0):
    """ input is Nx3 points.
        output is vsize*vsize*vsize occupancy grid.
        assumes points are in range [-radius, radius]
    """
    occupancy = np.zeros((vsize, vsize, vsize))
    voxel = 2 * radius / float(vsize)
    coords = ((points + radius) / voxel).astype(int)
    occupancy[coords[:, 0], coords[:, 1], coords[:, 2]] = 1.0
    return occupancy
def volume_topc_batch(pred_val, batch_label_vol, batch_smpw_vol):
    """Convert batched volumes back into per-sample point lists.

    A voxel with label > 0 contributes its prediction, label, weight and
    integer coordinates; a voxel with label == 0 but weight > 0 contributes
    only its prediction (collected in ``other_pc``).  Samples with no
    labeled voxels are skipped entirely.
    """
    bsize = pred_val.shape[0]
    vsize = pred_val.shape[1]
    pred_pc, label_pc, smpw_pc, other_pc, aug_data = [], [], [], [], []
    for i in range(bsize):
        preds, labels, weights, others, coords = [], [], [], [], []
        # np.ndindex walks voxels in the same row-major order as the
        # original triple loop.
        for a, b, c in np.ndindex(vsize, vsize, vsize):
            if batch_label_vol[i, a, b, c] > 0:
                preds.append(pred_val[i, a, b, c])
                labels.append(batch_label_vol[i, a, b, c])
                weights.append(batch_smpw_vol[i, a, b, c])
                coords.append(np.array([a, b, c]))
            elif batch_label_vol[i, a, b, c] == 0 and batch_smpw_vol[i, a, b, c] > 0:
                others.append(pred_val[i, a, b, c])
        if len(preds) == 0:
            continue
        pred_pc.append(np.array(preds))
        label_pc.append(np.array(labels))
        smpw_pc.append(np.array(weights))
        other_pc.append(np.array(others))
        aug_data.append(np.array(coords))
    return pred_pc, label_pc, smpw_pc, other_pc, aug_data
def volume_topc_batch_exact(pred_val, batch_data, radius=1.1, vsize=32):
    """Sample per-point predictions out of batched prediction volumes.

    Each point in batch_data is mapped to its voxel and takes that voxel's
    value from pred_val.
    """
    voxel = 2 * radius / float(vsize)
    sampled = []
    for i in range(pred_val.shape[0]):
        coords = ((np.squeeze(batch_data[i, :, :]) + radius) / voxel).astype(int)
        sampled.append(pred_val[i, :, :, :][coords[:, 0], coords[:, 1], coords[:, 2]])
    return np.array(sampled)
def volume_to_point_cloud(vol):
    """ vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
        return Nx3 numpy array of the coordinates of occupied voxels.
    """
    vsize = vol.shape[0]
    assert (vol.shape[1] == vsize and vol.shape[1] == vsize)
    occupied = [np.array([a, b, c])
                for a, b, c in np.ndindex(vsize, vsize, vsize)
                if vol[a, b, c] == 1]
    if len(occupied) == 0:
        return np.zeros((0, 3))
    return np.vstack(occupied)
def point_cloud_to_volume_v2_batch(point_clouds, vsize=12, radius=1.0, num_sample=128):
    """ Input is BxNx3 a batch of point cloud
        Output is BxVxVxVxnum_samplex3
        Added on Feb 19
    """
    per_batch = [np.expand_dims(point_cloud_to_volume_v2(point_clouds[b, :, :], vsize, radius, num_sample), 0)
                 for b in range(point_clouds.shape[0])]
    return np.concatenate(per_batch, 0)
def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
    """ input is Nx3 points
        output is vsize*vsize*vsize*num_sample*3
        assumes points are in range [-radius, radius]
        samples num_sample points in each voxel, if there are less than
        num_sample points, replicate the points
        Added on Feb 19
    """
    vol = np.zeros((vsize, vsize, vsize, num_sample, 3))
    voxel = 2 * radius / float(vsize)
    coords = ((points + radius) / voxel).astype(int)
    loc2pc = {}
    for n in range(points.shape[0]):
        loc2pc.setdefault(tuple(coords[n, :]), []).append(points[n, :])
    for i, j, k in np.ndindex(vsize, vsize, vsize):
        if (i, j, k) not in loc2pc:
            continue  # vol is already zero for empty voxels
        pc = np.vstack(loc2pc[(i, j, k)])  # kx3
        # Sample down / pad up to exactly num_sample points.
        if pc.shape[0] > num_sample:
            choices = np.random.choice(pc.shape[0], num_sample, replace=False)
            pc = pc[choices, :]
        elif pc.shape[0] < num_sample:
            pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]), (0, 0)), 'edge')
        # Express the points relative to the voxel center, in voxel units.
        pc_center = (np.array([i, j, k]) + 0.5) * voxel - radius
        vol[i, j, k, :, :] = (pc - pc_center) / voxel
    return vol
def point_cloud_to_image_batch(point_clouds, imgsize, radius=1.0, num_sample=128):
    """ Input is BxNx3 a batch of point cloud
        Output is BxIxIxnum_samplex3
        Added on Feb 19
    """
    per_batch = [np.expand_dims(point_cloud_to_image(point_clouds[b, :, :], imgsize, radius, num_sample), 0)
                 for b in range(point_clouds.shape[0])]
    return np.concatenate(per_batch, 0)
def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
    """ input is Nx3 points
        output is imgsize*imgsize*num_sample*3
        assumes points are in range [-radius, radius]
        samples num_sample points in each pixel, if there are less than
        num_sample points, replicate the points
        Added on Feb 19
    """
    img = np.zeros((imgsize, imgsize, num_sample, 3))
    pixel = 2 * radius / float(imgsize)
    coords = ((points[:, 0:2] + radius) / pixel).astype(int)  # Nx2 pixel indices
    loc2pc = {}
    for n in range(points.shape[0]):
        loc2pc.setdefault(tuple(coords[n, :]), []).append(points[n, :])
    for i in range(imgsize):
        for j in range(imgsize):
            if (i, j) not in loc2pc:
                continue  # img is already zero for empty pixels
            pc = np.vstack(loc2pc[(i, j)])
            # Sample down / pad up to exactly num_sample points.
            if pc.shape[0] > num_sample:
                choices = np.random.choice(pc.shape[0], num_sample, replace=False)
                pc = pc[choices, :]
            elif pc.shape[0] < num_sample:
                pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]), (0, 0)), 'edge')
            # x/y become offsets from the pixel center in pixel units; z is untouched.
            pc_center = (np.array([i, j]) + 0.5) * pixel - radius
            pc[:, 0:2] = (pc[:, 0:2] - pc_center) / pixel
            img[i, j, :, :] = pc
    return img
# ----------------------------------------
# Point cloud IO
# ----------------------------------------
def read_ply(filename):
    """ read XYZ point cloud from filename PLY file """
    vertex_data = PlyData.read(filename)['vertex'].data
    return np.array([[x, y, z] for x, y, z in vertex_data])
def write_ply(points, filename, text=True):
    """ input: Nx3, write points to filename as PLY format. """
    records = [(points[i, 0], points[i, 1], points[i, 2]) for i in range(points.shape[0])]
    vertex = np.array(records, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
    element = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
    PlyData([element], text=text).write(filename)
# ----------------------------------------
# Simple Point cloud and Volume Renderers
# ----------------------------------------
def draw_point_cloud(input_points, canvasSize=500, space=200, diameter=25,
                     xrot=0, yrot=0, zrot=0, switch_xyz=[0,1,2], normalize=True):
    """ Render point cloud to image with alpha channel.
        Input:
            points: Nx3 numpy array (+y is up direction)
        Output:
            gray image as numpy array of size canvasSizexcanvasSize

        Each point is splatted as a Gaussian disk of the given diameter,
        back-to-front (painter's algorithm), brighter for nearer points.
        NOTE(review): switch_xyz is a mutable default argument; it is only
        read here, but callers should not mutate it.
        NOTE(review): points projecting within diameter/2 of the canvas edge
        would index outside `image` — presumably space/normalize keep all
        points inside; confirm for extreme inputs.
    """
    image = np.zeros((canvasSize, canvasSize))
    if input_points is None or input_points.shape[0] == 0:
        return image
    # Reorder axes, then rotate by the requested Euler angles.
    points = input_points[:, switch_xyz]
    M = euler2mat(zrot, yrot, xrot)
    points = (np.dot(M, points.transpose())).transpose()
    # Normalize the point cloud
    # We normalize scale to fit points in a unit sphere
    if normalize:
        centroid = np.mean(points, axis=0)
        points -= centroid
        furthest_distance = np.max(np.sqrt(np.sum(abs(points)**2,axis=-1)))
        points /= furthest_distance
    # Pre-compute the Gaussian disk
    radius = (diameter-1)/2.0
    disk = np.zeros((diameter, diameter))
    for i in range(diameter):
        for j in range(diameter):
            # Only fill pixels inside the circle of the given radius.
            if (i - radius) * (i-radius) + (j-radius) * (j-radius) <= radius * radius:
                disk[i, j] = np.exp((-(i-radius)**2 - (j-radius)**2)/(radius**2))
    # Offsets and weights of the disk's non-zero pixels, reused per point.
    mask = np.argwhere(disk > 0)
    dx = mask[:, 0]
    dy = mask[:, 1]
    dv = disk[disk > 0]
    # Order points by z-buffer
    zorder = np.argsort(points[:, 2])
    points = points[zorder, :]
    # Rescale depth to [0, 1] so it can modulate brightness.
    points[:, 2] = (points[:, 2] - np.min(points[:, 2])) / (np.max(points[:, 2] - np.min(points[:, 2])))
    max_depth = np.max(points[:, 2])
    # Splat back-to-front: nearer points blended on top of farther ones.
    for i in range(points.shape[0]):
        j = points.shape[0] - i - 1
        x = points[j, 0]
        y = points[j, 1]
        xc = canvasSize/2 + (x*space)
        yc = canvasSize/2 + (y*space)
        xc = int(np.round(xc))
        yc = int(np.round(yc))
        px = dx + xc
        py = dy + yc
        # Blend: 70% existing canvas + 30% disk, weighted by nearness.
        image[px, py] = image[px, py] * 0.7 + dv * (max_depth - points[j, 2]) * 0.3
    # Normalize final intensities to [0, 1].
    image = image / np.max(image)
    return image
def point_cloud_three_views(points):
    """ input points Nx3 numpy array (+y is up direction).
        return an numpy array gray image of size 500x1500. """
    # +y is up direction; xrot is azimuth, yrot is in-plane, zrot is elevation.
    views = [
        draw_point_cloud(points, zrot=110/180.0*np.pi, xrot=45/180.0*np.pi, yrot=0/180.0*np.pi),
        draw_point_cloud(points, zrot=70/180.0*np.pi, xrot=135/180.0*np.pi, yrot=0/180.0*np.pi),
        draw_point_cloud(points, zrot=180.0/180.0*np.pi, xrot=90/180.0*np.pi, yrot=0/180.0*np.pi),
    ]
    return np.concatenate(views, 1)
def point_cloud_three_views_demo():
    """ Demo for draw_point_cloud function """
    from PIL import Image
    demo_points = read_ply('../third_party/mesh_sampling/piano.ply')
    rendered = point_cloud_three_views(demo_points)
    Image.fromarray(np.uint8(rendered * 255.0)).save('piano.jpg')
# Run the demo when this module is executed directly.  Note this guard sits
# mid-file: it executes before the definitions further down exist, but it
# only calls point_cloud_three_views_demo, which is defined above.
if __name__=="__main__":
    point_cloud_three_views_demo()
def pyplot_draw_point_cloud(points, output_filename):
    """ points is a Nx3 numpy array; shows a 3D scatter plot of it. """
    import matplotlib.pyplot as plt
    figure = plt.figure()
    axes = figure.add_subplot(111, projection='3d')
    axes.scatter(points[:, 0], points[:, 1], points[:, 2])
    axes.set_xlabel('x')
    axes.set_ylabel('y')
    axes.set_zlabel('z')
    #savefig(output_filename)
def pyplot_draw_volume(vol, output_filename):
    """ vol is of size vsize*vsize*vsize
        output an image to output_filename
    """
    pyplot_draw_point_cloud(volume_to_point_cloud(vol), output_filename)
def write_ply_color(points, labels, out_filename, num_classes=None):
    """ Color (N,3) points with labels (N) within range 0 ~ num_classes-1 as OBJ file

    Args:
        points: (N,3) xyz coordinates.
        labels: length-N integer class ids.
        out_filename: destination path ('v x y z r g b' lines).
        num_classes: number of classes; derived from labels when None.
    """
    import matplotlib.pyplot as pyplot
    labels = labels.astype(int)
    N = points.shape[0]
    if num_classes is None:
        num_classes = np.max(labels)+1
    else:
        assert(num_classes>np.max(labels))
    # Pre-compute one HSV color per class.
    colors = [pyplot.cm.hsv(i/float(num_classes)) for i in range(num_classes)]
    # 'with' guarantees the file is closed even if a write fails
    # (previously the handle leaked on exception).
    with open(out_filename, 'w') as fout:
        for i in range(N):
            c = colors[labels[i]]
            c = [int(x*255) for x in c]
            fout.write('v %f %f %f %d %d %d\n' % (points[i,0],points[i,1],points[i,2],c[0],c[1],c[2]))
|
LLRPConnector.py
|
#!/usr/bin/env python
import six
from six.moves.queue import Queue, Empty
import socket
import datetime
import threading
from .pyllrp import *
class LLRPConnector( object ):
	#--------------------------------------------------------------------------
	#
	# A simple LLRP reader connection manager.
	#
	# Supports connecting to the reader, transacting commands, message handlers,
	# and asynchronous/synchronous monitoring of the reader socket.
	#
	def __init__( self ):
		''' Create an unconnected manager with an empty handler table. '''
		self.TimeoutSecs = 6	# Time for the reader to respond.
		self._reset()
		self.handlers = {}
	def _reset( self ):
		''' Reset all internal fields. '''
		self.host = None
		self.port = None
		self.readerSocket = None
		self.thread = None
		self.shutdownQ = None	# Used to shutdown the thread.
		self.keepGoing = False
		self.timeCorrection = None	# datetime offset: computer clock minus reader clock.
	def connect( self, host, port = 5084 ):
		''' Connect to a reader.

			Opens a TCP socket, waits for the initial READER_EVENT_NOTIFICATION,
			validates the ConnectionAttemptEvent, and derives a clock correction
			from the reader's UTC timestamp.

			Raises EnvironmentError if the reader rejects the connection, and
			ValueError if the notification lacks a timestamp.
		'''
		self._reset()
		self.readerSocket = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
		# Set a timeout for the socket. This is also the maximum time it will take to shut down the listener.
		self.readerSocket.settimeout( self.TimeoutSecs )
		self.readerSocket.connect( (host, port) )
		self.host = host
		self.port = port
		# Expecting READER_EVENT_NOTIFICATION message.
		response = UnpackMessageFromSocket( self.readerSocket )
		tNow = datetime.datetime.now()	# Get the time here to minimize latency.
		# Check if the connection succeeded.
		connectionAttemptEvent = response.getFirstParameterByClass(ConnectionAttemptEvent_Parameter)
		if connectionAttemptEvent and connectionAttemptEvent.Status != ConnectionAttemptStatusType.Success:
			self.disconnect()
			raise EnvironmentError(
				connectionAttemptEvent.Status,
				ConnectionAttemptStatusType.getName(connectionAttemptEvent.Status).replace('_',' ')
			)
		self.keepGoing = True
		# Compute a correction between the reader's time and the computer's time.
		self.timeCorrection = None
		try:
			microseconds = response.getFirstParameterByClass(UTCTimestamp_Parameter).Microseconds
			readerTime = datetime.datetime.utcfromtimestamp( microseconds / 1000000.0 )
			self.timeCorrection = tNow - readerTime
		except Exception as e:
			self.disconnect()
			raise ValueError('Missing Timestamp: ' + response.__repr__())
		return response
	def tagTimeToComputerTime( self, tagTime ):
		''' Convert a reader tag timestamp to computer time using the
			correction computed in connect(). '''
		# Time is in microseconds from Jan 1, 1970.
		return datetime.datetime.utcfromtimestamp( tagTime / 1000000.0 ) + self.timeCorrection
	def disconnect( self ):
		''' Disconnect from a reader. Also stops the listener. '''
		self.timeCorrection = None
		if not self.readerSocket:
			return None
		if self.isListening():
			self.stopListener()
		# Send the reader a disconnect message.
		response = None
		try:
			response = self.transact( CLOSE_CONNECTION_Message() )
		except:
			# NOTE(review): bare except deliberately swallows any failure so
			# the socket still gets closed; consider narrowing the exception.
			pass
		self.readerSocket.close()
		self.readerSocket = None
		return response
	def addHandler( self, messageClass, handlerFunc ):
		''' Add a handler for a specific message type. '''
		''' Support multiple handlers for the same message type. '''
		self.handlers.setdefault( messageClass, [] ).append( handlerFunc )
	def removeHandler( self, messageClass, handlerFunc = None ):
		''' Remove a handler for a specific message type from the reader.
			If handlerFunc is None, all handlers for the given messageClass will be removed.
			If handlerFunc is not None, only the specific handler is removed.
		'''
		if handlerFunc is None:
			try:
				del self.handlers[messageClass]
			except KeyError:
				pass
		else:
			# Loop until remove() raises, so every duplicate registration of
			# this handler is removed.
			while 1:
				try:
					self.handlers[messageClass].remove( handlerFunc )
				except (KeyError, ValueError):
					break
	def removeAllHandlers( self ):
		''' Drop every registered handler for every message type. '''
		self.handlers = {}
	def transact( self, message ):
		''' Send a message to the reader and wait for the response. '''
		assert not self.isListening(), 'Cannot perform transact() while listen thread is active.  Stop it first with stopListener().'
		message.send( self.readerSocket )
		response = WaitForMessage( message.MessageID, self.readerSocket, self.callHandler )
		return response
	def send( self, message ):
		''' Send a message to the reader (fire-and-forget, no wait). '''
		message.send( self.readerSocket )
	def checkKeepGoing( self ):
		''' Check if we should continue the reader thread. '''
		if not self.keepGoing:
			return False
		try:
			# Check the shutdown queue for a message.  If there is one, shutdown.
			d = self.shutdownQ.get( False )
			self.shutdownQ.task_done()
			self.keepGoing = False
			return False
		except (AttributeError, Empty):
			# AttributeError: shutdownQ is None (no listener); Empty: no message.
			return True
	def callHandler( self, message ):
		''' Call all the handlers for this message. '''
		# Fall back to the 'default' handlers if none registered for this class.
		for cb in (self.handlers.get(message.__class__, None) or self.handlers.get('default', [])):
			cb( self, message )
	def listen( self ):
		''' Listen for messages from the reader. '''
		# Calling this by itself will block.
		# Recommended usage is to use startListener and stopListener.
		while self.checkKeepGoing():
			try:
				response = UnpackMessageFromSocket( self.readerSocket )
			except socket.timeout:
				# Timeout just means no message yet; re-check shutdown and retry.
				continue
			self.callHandler( response )
	def startListener( self ):
		''' Starts a thread to listen to the reader. '''
		assert self.readerSocket, 'Cannot start listener without a successful connection.'
		assert not self.thread, 'Listener is already running.  Stop it first with stopListener().'
		self.shutdownQ = Queue()
		self.keepGoing = True
		self.thread = threading.Thread( target = self.listen, name='LLRP Listener' )
		self.thread.daemon = True
		self.thread.start()
	def stopListener( self ):
		''' Stops the thread listening to the reader. '''
		self.shutdownQ.put( 'Shutdown' )
		self.thread.join()	# Wait for the thread to terminate.
		self.shutdownQ = None
		self.thread = None
	def isListening( self ):
		''' Return True when the listener thread exists and is running. '''
		return self.thread and self.thread.is_alive()
|
test_streaming_pull_manager.py
|
# Copyright 2018, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import threading
import time
import types as stdlib_types
import mock
import pytest
from google.api_core import bidi
from google.api_core import exceptions
from google.cloud.pubsub_v1 import types
from google.cloud.pubsub_v1.subscriber import client
from google.cloud.pubsub_v1.subscriber import message
from google.cloud.pubsub_v1.subscriber import scheduler
from google.cloud.pubsub_v1.subscriber._protocol import dispatcher
from google.cloud.pubsub_v1.subscriber._protocol import heartbeater
from google.cloud.pubsub_v1.subscriber._protocol import leaser
from google.cloud.pubsub_v1.subscriber._protocol import messages_on_hold
from google.cloud.pubsub_v1.subscriber._protocol import requests
from google.cloud.pubsub_v1.subscriber._protocol import streaming_pull_manager
from google.pubsub_v1 import types as gapic_types
import grpc
# Each (input, expected) pair below: _wrap_as_exception must coerce arbitrary
# error objects into an Exception instance of the expected class.
@pytest.mark.parametrize(
    "exception,expected_cls",
    [
        (ValueError("meep"), ValueError),
        (
            mock.create_autospec(grpc.RpcError, instance=True),
            exceptions.GoogleAPICallError,
        ),
        ({"error": "RPC terminated"}, Exception),
        ("something broke", Exception),
    ],
)
def test__wrap_as_exception(exception, expected_cls):
    """Real exceptions pass through; non-exception objects are wrapped."""
    assert isinstance(
        streaming_pull_manager._wrap_as_exception(exception), expected_cls
    )
def test__wrap_callback_errors_no_error():
    """A successful callback must not trigger nack or the error handler."""
    fake_msg = mock.create_autospec(message.Message, instance=True)
    user_callback = mock.Mock()
    error_handler = mock.Mock()
    streaming_pull_manager._wrap_callback_errors(user_callback, error_handler, fake_msg)
    user_callback.assert_called_once_with(fake_msg)
    fake_msg.nack.assert_not_called()
    error_handler.assert_not_called()
def test__wrap_callback_errors_error():
    """A raising callback nacks the message and reports the error."""
    boom = ValueError("meep")
    fake_msg = mock.create_autospec(message.Message, instance=True)
    failing_callback = mock.Mock(side_effect=boom)
    error_handler = mock.Mock()
    streaming_pull_manager._wrap_callback_errors(failing_callback, error_handler, fake_msg)
    fake_msg.nack.assert_called_once()
    error_handler.assert_called_once_with(boom)
def test_constructor_and_default_state():
    """A freshly constructed manager is inactive with default collaborators."""
    manager = streaming_pull_manager.StreamingPullManager(
        mock.sentinel.client, mock.sentinel.subscription
    )
    # Public state
    assert manager.is_active is False
    assert manager.flow_control == types.FlowControl()
    assert manager.dispatcher is None
    assert manager.leaser is None
    assert manager.ack_histogram is not None
    assert manager.ack_deadline == 10
    assert manager.load == 0
    # Private state
    assert manager._client == mock.sentinel.client
    assert manager._subscription == mock.sentinel.subscription
    assert manager._scheduler is not None
    assert manager._messages_on_hold is not None
    assert manager._client_id is not None
def test_constructor_with_options():
    """Explicitly passed flow control and scheduler are stored as-is."""
    manager = streaming_pull_manager.StreamingPullManager(
        mock.sentinel.client,
        mock.sentinel.subscription,
        flow_control=mock.sentinel.flow_control,
        scheduler=mock.sentinel.scheduler,
    )
    for actual, expected in (
        (manager.flow_control, mock.sentinel.flow_control),
        (manager._scheduler, mock.sentinel.scheduler),
    ):
        assert actual == expected
def make_manager(**kwargs):
    """Build a StreamingPullManager around autospec'd client and scheduler."""
    fake_client = mock.create_autospec(client.Client, instance=True)
    fake_scheduler = mock.create_autospec(scheduler.Scheduler, instance=True)
    return streaming_pull_manager.StreamingPullManager(
        fake_client, "subscription-name", scheduler=fake_scheduler, **kwargs
    )
def fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10):
    """Add a simplified fake add() method to a leaser instance.

    The fake add() increments the leaser's message count by one per item and
    its byte total by ``assumed_msg_size`` per item, regardless of the actual
    message sizes.
    """
    def fake_add(self, items):
        self.message_count += len(items)
        self.bytes += assumed_msg_size * len(items)

    leaser.message_count = init_msg_count
    leaser.bytes = assumed_msg_size * init_msg_count
    leaser.add = stdlib_types.MethodType(fake_add, leaser)
def test__obtain_ack_deadline_no_custom_flow_control_setting():
    """With no flow-control cap, the deadline tracks the ack histogram."""
    from google.cloud.pubsub_v1.subscriber._protocol import histogram
    manager = make_manager()
    # Make sure that max_duration_per_lease_extension is disabled.
    manager._flow_control = types.FlowControl(max_duration_per_lease_extension=0)
    # With an empty histogram the minimum deadline is used.
    deadline = manager._obtain_ack_deadline(maybe_update=True)
    assert deadline == histogram.MIN_ACK_DEADLINE
    # When we get some historical data, the deadline is adjusted.
    manager.ack_histogram.add(histogram.MIN_ACK_DEADLINE * 2)
    deadline = manager._obtain_ack_deadline(maybe_update=True)
    assert deadline == histogram.MIN_ACK_DEADLINE * 2
    # Adding just a single additional data point does not yet change the deadline.
    manager.ack_histogram.add(histogram.MIN_ACK_DEADLINE)
    deadline = manager._obtain_ack_deadline(maybe_update=True)
    assert deadline == histogram.MIN_ACK_DEADLINE * 2
def test__obtain_ack_deadline_with_max_duration_per_lease_extension():
    """A flow-control cap below the histogram's value takes precedence."""
    from google.cloud.pubsub_v1.subscriber._protocol import histogram
    manager = make_manager()
    manager._flow_control = types.FlowControl(
        max_duration_per_lease_extension=histogram.MIN_ACK_DEADLINE + 1
    )
    manager.ack_histogram.add(histogram.MIN_ACK_DEADLINE * 3)  # make p99 value large
    # The deadline configured in flow control should prevail.
    deadline = manager._obtain_ack_deadline(maybe_update=True)
    assert deadline == histogram.MIN_ACK_DEADLINE + 1
def test__obtain_ack_deadline_with_max_duration_per_lease_extension_too_low():
    """A max duration below MIN_ACK_DEADLINE is clamped up to MIN_ACK_DEADLINE."""
    from google.cloud.pubsub_v1.subscriber._protocol import histogram

    manager = make_manager()
    manager._flow_control = types.FlowControl(
        max_duration_per_lease_extension=histogram.MIN_ACK_DEADLINE - 1
    )
    manager.ack_histogram.add(histogram.MIN_ACK_DEADLINE * 3)  # make p99 value large

    # The deadline configured in flow control should be adjusted to the minimum allowed.
    deadline = manager._obtain_ack_deadline(maybe_update=True)
    assert deadline == histogram.MIN_ACK_DEADLINE
def test__obtain_ack_deadline_no_value_update():
    """With maybe_update=False the cached deadline is returned without recomputation."""
    manager = make_manager()
    # Make sure that max_duration_per_lease_extension is disabled.
    manager._flow_control = types.FlowControl(max_duration_per_lease_extension=0)

    manager.ack_histogram.add(21)
    deadline = manager._obtain_ack_deadline(maybe_update=True)
    assert deadline == 21

    for _ in range(5):
        manager.ack_histogram.add(35)  # Gather some new ACK data.

    deadline = manager._obtain_ack_deadline(maybe_update=False)
    assert deadline == 21  # still the same

    # Accessing the value through the ack_deadline property has no side effects either.
    assert manager.ack_deadline == 21

    # Updating the ack deadline is reflected on ack_deadline wrapper, too.
    deadline = manager._obtain_ack_deadline(maybe_update=True)
    assert manager.ack_deadline == deadline == 35
def test_client_id():
    """Each manager's initial request must carry a distinct, non-empty client id."""
    first_request = make_manager()._get_initial_request(stream_ack_deadline_seconds=10)
    second_request = make_manager()._get_initial_request(stream_ack_deadline_seconds=10)

    first_id = first_request.client_id
    second_id = second_request.client_id

    assert first_id
    assert second_id
    assert first_id != second_id
def test_streaming_flow_control():
    """The initial request must carry the configured streaming flow-control limits."""
    flow_settings = types.FlowControl(max_messages=10, max_bytes=1000)
    mgr = make_manager(flow_control=flow_settings)

    initial = mgr._get_initial_request(stream_ack_deadline_seconds=10)

    assert initial.max_outstanding_messages == 10
    assert initial.max_outstanding_bytes == 1000
def test_streaming_flow_control_use_legacy_flow_control():
    """With legacy flow control enabled, server-side limits must be zeroed out."""
    flow_settings = types.FlowControl(max_messages=10, max_bytes=1000)
    mgr = make_manager(
        flow_control=flow_settings,
        use_legacy_flow_control=True,
    )

    initial = mgr._get_initial_request(stream_ack_deadline_seconds=10)

    assert initial.max_outstanding_messages == 0
    assert initial.max_outstanding_bytes == 0
def test_maybe_pause_consumer_wo_consumer_set():
    """maybe_pause_consumer() must be a no-op (not raise) when no consumer exists."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager.maybe_pause_consumer()  # no raise

    # Ensure load > 1
    _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
    _leaser.message_count = 100
    _leaser.bytes = 10000
    manager.maybe_pause_consumer()  # no raise
def test_lease_load_and_pause():
    """load reflects the max of message/byte utilization; >= 1.0 pauses the consumer."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager._leaser = leaser.Leaser(manager)
    manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_paused = False

    # This should mean that our messages count is at 10%, and our bytes
    # are at 15%; load should return the higher (0.15), and shouldn't cause
    # the consumer to pause.
    manager.leaser.add(
        [requests.LeaseRequest(ack_id="one", byte_size=150, ordering_key="")]
    )
    assert manager.load == 0.15
    manager.maybe_pause_consumer()
    manager._consumer.pause.assert_not_called()

    # After this message is added, the messages should be higher at 20%
    # (versus 16% for bytes).
    manager.leaser.add(
        [requests.LeaseRequest(ack_id="two", byte_size=10, ordering_key="")]
    )
    assert manager.load == 0.2

    # Returning a number above 100% is fine, and it should cause this to pause.
    manager.leaser.add(
        [requests.LeaseRequest(ack_id="three", byte_size=1000, ordering_key="")]
    )
    assert manager.load == 1.16
    manager.maybe_pause_consumer()
    manager._consumer.pause.assert_called_once()
def test_drop_and_resume():
    """Dropping leased messages below the load threshold must resume the consumer."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager._leaser = leaser.Leaser(manager)
    manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_paused = True

    # Add several messages until we're over the load threshold.
    manager.leaser.add(
        [
            requests.LeaseRequest(ack_id="one", byte_size=750, ordering_key=""),
            requests.LeaseRequest(ack_id="two", byte_size=250, ordering_key=""),
        ]
    )
    assert manager.load == 1.0

    # Trying to resume now should have no effect as we're over the threshold.
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_not_called()

    # Drop the 250 byte message, which should put us under the resume
    # threshold.
    manager.leaser.remove(
        [requests.DropRequest(ack_id="two", byte_size=250, ordering_key="")]
    )
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_called_once()
def test_resume_not_paused():
    """maybe_resume_consumer() must not call resume() on an already-running consumer."""
    manager = make_manager()
    manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_paused = False

    # Resuming should have no effect if the consumer is not actually paused.
    manager.maybe_resume_consumer()
    manager._consumer.resume.assert_not_called()
def test_maybe_resume_consumer_wo_consumer_set():
    """maybe_resume_consumer() must be a no-op (not raise) when no consumer exists."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager.maybe_resume_consumer()  # no raise
def test__maybe_release_messages_on_overload():
    """At load >= 1.0 no held messages may be released or scheduled."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )

    msg = mock.create_autospec(message.Message, instance=True, ack_id="ack", size=11)
    manager._messages_on_hold.put(msg)
    manager._on_hold_bytes = msg.size

    # Ensure load is exactly 1.0 (to verify that >= condition is used)
    _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
    _leaser.message_count = 10
    _leaser.bytes = 1000 + msg.size

    manager._maybe_release_messages()

    assert manager._messages_on_hold.size == 1
    manager._leaser.add.assert_not_called()
    manager._scheduler.schedule.assert_not_called()
def test__maybe_release_messages_below_overload():
    """Below the load limit, only as many held messages as capacity allows are released."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager._callback = mock.sentinel.callback

    # Init leaser message count to 11, so that when subtracting the 3 messages
    # that are on hold, there is still room for another 2 messages before the
    # max load is hit.
    _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
    fake_leaser_add(_leaser, init_msg_count=11, assumed_msg_size=10)

    messages = [
        mock.create_autospec(message.Message, instance=True, ack_id="ack_foo", size=10),
        mock.create_autospec(message.Message, instance=True, ack_id="ack_bar", size=10),
        mock.create_autospec(message.Message, instance=True, ack_id="ack_baz", size=10),
    ]
    for msg in messages:
        manager._messages_on_hold.put(msg)
    manager._on_hold_bytes = 3 * 10

    # the actual call of MUT (method under test)
    manager._maybe_release_messages()

    # Only the first two messages fit; the third stays on hold.
    assert manager._messages_on_hold.size == 1
    msg = manager._messages_on_hold.get()
    assert msg.ack_id == "ack_baz"

    schedule_calls = manager._scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    for _, call_args, _ in schedule_calls:
        assert call_args[0] == mock.sentinel.callback
        assert isinstance(call_args[1], message.Message)
        assert call_args[1].ack_id in ("ack_foo", "ack_bar")
def test__maybe_release_messages_negative_on_hold_bytes_warning(caplog):
    """A negative on-hold byte count must be logged as a warning and reset to zero."""
    manager = make_manager(
        flow_control=types.FlowControl(max_messages=10, max_bytes=1000)
    )
    manager._callback = lambda msg: msg  # pragma: NO COVER

    msg = mock.create_autospec(message.Message, instance=True, ack_id="ack", size=17)
    manager._messages_on_hold.put(msg)
    manager._on_hold_bytes = 5  # too low for some reason

    _leaser = manager._leaser = mock.create_autospec(leaser.Leaser)
    _leaser.message_count = 3
    _leaser.bytes = 150

    with caplog.at_level(logging.WARNING):
        manager._maybe_release_messages()

    expected_warnings = [
        record.message.lower()
        for record in caplog.records
        if "unexpectedly negative" in record.message
    ]
    assert len(expected_warnings) == 1
    assert "on hold bytes" in expected_warnings[0]
    # 5 (initial) - 17 (released message size) == -12
    assert "-12" in expected_warnings[0]
    assert manager._on_hold_bytes == 0  # should be auto-corrected
def test_send_unary():
    """send() must fan out into unary acknowledge/modify_ack_deadline RPCs.

    Modacks are grouped per deadline value; call order is not significant.
    """
    manager = make_manager()
    manager.send(
        gapic_types.StreamingPullRequest(
            ack_ids=["ack_id1", "ack_id2"],
            modify_deadline_ack_ids=["ack_id3", "ack_id4", "ack_id5"],
            modify_deadline_seconds=[10, 20, 20],
        )
    )

    manager._client.acknowledge.assert_called_once_with(
        subscription=manager._subscription, ack_ids=["ack_id1", "ack_id2"]
    )

    manager._client.modify_ack_deadline.assert_has_calls(
        [
            mock.call(
                subscription=manager._subscription,
                ack_ids=["ack_id3"],
                ack_deadline_seconds=10,
            ),
            mock.call(
                subscription=manager._subscription,
                ack_ids=["ack_id4", "ack_id5"],
                ack_deadline_seconds=20,
            ),
        ],
        any_order=True,
    )
def test_send_unary_empty():
    """An empty request must trigger neither acknowledge nor modify_ack_deadline."""
    mgr = make_manager()

    mgr.send(gapic_types.StreamingPullRequest())

    mgr._client.acknowledge.assert_not_called()
    mgr._client.modify_ack_deadline.assert_not_called()
def test_send_unary_api_call_error(caplog):
    """A GoogleAPICallError from the unary RPC must be swallowed and logged."""
    caplog.set_level(logging.DEBUG)

    manager = make_manager()

    error = exceptions.GoogleAPICallError("The front fell off")
    manager._client.acknowledge.side_effect = error

    manager.send(gapic_types.StreamingPullRequest(ack_ids=["ack_id1", "ack_id2"]))

    assert "The front fell off" in caplog.text
def test_send_unary_retry_error(caplog):
    """A RetryError from the unary RPC must re-raise and signal manager shutdown."""
    caplog.set_level(logging.DEBUG)

    manager, _, _, _, _, _ = make_running_manager()

    error = exceptions.RetryError(
        "Too long a transient error", cause=Exception("Out of time!")
    )
    manager._client.acknowledge.side_effect = error

    with pytest.raises(exceptions.RetryError):
        manager.send(gapic_types.StreamingPullRequest(ack_ids=["ack_id1", "ack_id2"]))

    assert "RetryError while sending unary RPC" in caplog.text
    assert "signaled streaming pull manager shutdown" in caplog.text
def test_heartbeat():
    """heartbeat() must send an empty request and return truthy when the RPC is active."""
    manager = make_manager()
    manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
    manager._rpc.is_active = True

    result = manager.heartbeat()

    manager._rpc.send.assert_called_once_with(gapic_types.StreamingPullRequest())
    assert result
def test_heartbeat_inactive():
    """heartbeat() must not send anything and must return falsy when the RPC is inactive."""
    manager = make_manager()
    manager._rpc = mock.create_autospec(bidi.BidiRpc, instance=True)
    manager._rpc.is_active = False

    # Bug fix: capture heartbeat()'s return value. Previously the return of
    # assert_not_called() (always None) was assigned to ``result``, so the
    # final ``assert not result`` was vacuous and the test could never fail
    # on a wrong heartbeat() return value.
    result = manager.heartbeat()

    manager._rpc.send.assert_not_called()
    assert not result
@mock.patch("google.api_core.bidi.ResumableBidiRpc", autospec=True)
@mock.patch("google.api_core.bidi.BackgroundConsumer", autospec=True)
@mock.patch("google.cloud.pubsub_v1.subscriber._protocol.leaser.Leaser", autospec=True)
@mock.patch(
    "google.cloud.pubsub_v1.subscriber._protocol.dispatcher.Dispatcher", autospec=True
)
@mock.patch(
    "google.cloud.pubsub_v1.subscriber._protocol.heartbeater.Heartbeater", autospec=True
)
def test_open(heartbeater, dispatcher, leaser, background_consumer, resumable_bidi_rpc):
    """open() must construct and start the heartbeater, dispatcher, leaser,
    background consumer and resumable bidi RPC, wiring them to the manager.
    """
    manager = make_manager()

    with mock.patch.object(
        type(manager), "ack_deadline", new=mock.PropertyMock(return_value=18)
    ):
        manager.open(mock.sentinel.callback, mock.sentinel.on_callback_error)

    heartbeater.assert_called_once_with(manager)
    heartbeater.return_value.start.assert_called_once()
    assert manager._heartbeater == heartbeater.return_value

    dispatcher.assert_called_once_with(manager, manager._scheduler.queue)
    dispatcher.return_value.start.assert_called_once()
    assert manager._dispatcher == dispatcher.return_value

    leaser.assert_called_once_with(manager)
    leaser.return_value.start.assert_called_once()
    assert manager.leaser == leaser.return_value

    background_consumer.assert_called_once_with(manager._rpc, manager._on_response)
    background_consumer.return_value.start.assert_called_once()
    assert manager._consumer == background_consumer.return_value

    resumable_bidi_rpc.assert_called_once_with(
        start_rpc=manager._client.streaming_pull,
        initial_request=mock.ANY,
        should_recover=manager._should_recover,
        should_terminate=manager._should_terminate,
        throttle_reopen=True,
    )
    # The initial request is a partial of _get_initial_request bound to the
    # mocked ack_deadline property value (18).
    initial_request_arg = resumable_bidi_rpc.call_args.kwargs["initial_request"]
    assert initial_request_arg.func == manager._get_initial_request
    assert initial_request_arg.args[0] == 18
    assert not manager._client.get_subscription.called

    resumable_bidi_rpc.return_value.add_done_callback.assert_called_once_with(
        manager._on_rpc_done
    )
    assert manager._rpc == resumable_bidi_rpc.return_value

    manager._consumer.is_active = True
    assert manager.is_active is True
def test_open_already_active():
    """Opening a manager whose consumer is already active must raise ValueError."""
    mgr = make_manager()
    mgr._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    mgr._consumer.is_active = True

    with pytest.raises(ValueError, match="already open"):
        mgr.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
def test_open_has_been_closed():
    """Opening a manager that was already closed must raise ValueError."""
    mgr = make_manager()
    mgr._closed = True

    with pytest.raises(ValueError, match="closed"):
        mgr.open(mock.sentinel.callback, mock.sentinel.on_callback_error)
def make_running_manager(**kwargs):
    """Create a manager that looks like it is already running.

    All helper components (consumer, dispatcher, leaser, heartbeater) are
    replaced with autospec mocks. Returns the manager followed by those
    mocks plus the scheduler, for convenient unpacking in tests.
    """
    manager = make_manager(**kwargs)
    manager._consumer = mock.create_autospec(bidi.BackgroundConsumer, instance=True)
    manager._consumer.is_active = True
    manager._dispatcher = mock.create_autospec(dispatcher.Dispatcher, instance=True)
    manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
    manager._heartbeater = mock.create_autospec(heartbeater.Heartbeater, instance=True)
    return (
        manager,
        manager._consumer,
        manager._dispatcher,
        manager._leaser,
        manager._heartbeater,
        manager._scheduler,
    )
def await_manager_shutdown(manager, timeout=None):
    """Block until the manager's regular shutdown thread terminates.

    Must be invoked after ``manager.close()``, i.e. once the shutdown thread
    has been created and started. Fails the test if the thread is still
    alive after ``timeout`` seconds.
    """
    thread = manager._regular_shutdown_thread

    if thread is None:  # pragma: NO COVER
        raise Exception("Shutdown thread does not exist on the manager instance.")

    thread.join(timeout=timeout)

    if thread.is_alive():  # pragma: NO COVER
        pytest.fail("Shutdown not completed in time.")
def test_close():
    """close() must stop all components, shut down the scheduler, and deactivate."""
    (
        manager,
        consumer,
        dispatcher,
        leaser,
        heartbeater,
        scheduler,
    ) = make_running_manager()

    manager.close()
    await_manager_shutdown(manager, timeout=3)

    consumer.stop.assert_called_once()
    leaser.stop.assert_called_once()
    dispatcher.stop.assert_called_once()
    heartbeater.stop.assert_called_once()
    scheduler.shutdown.assert_called_once()

    assert manager.is_active is False
def test_close_inactive_consumer():
    """close() must skip stopping an inactive consumer but stop everything else."""
    (
        manager,
        consumer,
        dispatcher,
        leaser,
        heartbeater,
        scheduler,
    ) = make_running_manager()
    consumer.is_active = False

    manager.close()
    await_manager_shutdown(manager, timeout=3)

    consumer.stop.assert_not_called()
    leaser.stop.assert_called_once()
    dispatcher.stop.assert_called_once()
    heartbeater.stop.assert_called_once()
    scheduler.shutdown.assert_called_once()
def test_close_idempotent():
    """Calling close() twice must shut the scheduler down only once."""
    manager, _, _, _, _, scheduler = make_running_manager()

    manager.close()
    manager.close()
    await_manager_shutdown(manager, timeout=3)

    assert scheduler.shutdown.call_count == 1
class FakeDispatcher(object):
    """A background-thread dispatcher stand-in that hammers the manager's leaser.

    Used to prove that interacting with the leaser while the manager shuts
    down does not raise; any exception is forwarded to ``error_callback``.
    """

    def __init__(self, manager, error_callback):
        self._manager = manager
        self._error_callback = error_callback
        self._thread = None
        self._stop = False  # cooperative stop flag read by the worker loop

    def start(self):
        # Daemon thread so a hung worker cannot block interpreter exit.
        self._thread = threading.Thread(target=self._do_work)
        self._thread.daemon = True
        self._thread.start()

    def stop(self):
        self._stop = True
        self._thread.join()
        self._thread = None

    def _do_work(self):
        # Keep poking the leaser until told to stop; report any exception.
        while not self._stop:
            try:
                self._manager.leaser.add([mock.Mock()])
            except Exception as exc:  # pragma: NO COVER
                self._error_callback(exc)
            time.sleep(0.1)

        # also try to interact with the leaser after the stop flag has been set
        try:
            self._manager.leaser.remove([mock.Mock()])
        except Exception as exc:  # pragma: NO COVER
            self._error_callback(exc)
def test_close_no_dispatcher_error():
    """A dispatcher touching the leaser during close() must not surface errors."""
    manager, _, _, _, _, _ = make_running_manager()
    error_callback = mock.Mock(name="error_callback")
    dispatcher = FakeDispatcher(manager=manager, error_callback=error_callback)
    manager._dispatcher = dispatcher
    dispatcher.start()

    manager.close()
    await_manager_shutdown(manager, timeout=3)

    error_callback.assert_not_called()
def test_close_callbacks():
    """Registered close callbacks must fire with the manager and close reason."""
    manager, _, _, _, _, _ = make_running_manager()

    callback = mock.Mock()

    manager.add_close_callback(callback)
    manager.close(reason="meep")
    await_manager_shutdown(manager, timeout=3)

    callback.assert_called_once_with(manager, "meep")
def test_close_blocking_scheduler_shutdown():
    """With await_callbacks_on_shutdown=True, scheduler shutdown must block on callbacks."""
    manager, _, _, _, _, _ = make_running_manager(await_callbacks_on_shutdown=True)
    scheduler = manager._scheduler

    manager.close()
    await_manager_shutdown(manager, timeout=3)

    scheduler.shutdown.assert_called_once_with(await_msg_callbacks=True)
def test_close_nonblocking_scheduler_shutdown():
    """With await_callbacks_on_shutdown=False, scheduler shutdown must not block."""
    manager, _, _, _, _, _ = make_running_manager(await_callbacks_on_shutdown=False)
    scheduler = manager._scheduler

    manager.close()
    await_manager_shutdown(manager, timeout=3)

    scheduler.shutdown.assert_called_once_with(await_msg_callbacks=False)
def test_close_nacks_internally_queued_messages():
    """close() must nack messages dropped by the scheduler and those still on hold."""
    nacked_messages = []

    def fake_nack(self):
        nacked_messages.append(self.data)

    MockMsg = functools.partial(mock.create_autospec, message.Message, instance=True)
    messages = [MockMsg(data=b"msg1"), MockMsg(data=b"msg2"), MockMsg(data=b"msg3")]
    for msg in messages:
        # Bind fake_nack as a real method so self.data resolves per message.
        msg.nack = stdlib_types.MethodType(fake_nack, msg)

    manager, _, _, _, _, _ = make_running_manager()
    dropped_by_scheduler = messages[:2]
    manager._scheduler.shutdown.return_value = dropped_by_scheduler
    manager._messages_on_hold._messages_on_hold.append(messages[2])

    manager.close()
    await_manager_shutdown(manager, timeout=3)

    assert sorted(nacked_messages) == [b"msg1", b"msg2", b"msg3"]
def test__get_initial_request():
    """The initial request must re-lease the leaser's known ack IDs."""
    manager = make_manager()
    manager._leaser = mock.create_autospec(leaser.Leaser, instance=True)
    manager._leaser.ack_ids = ["1", "2"]

    initial_request = manager._get_initial_request(123)

    assert isinstance(initial_request, gapic_types.StreamingPullRequest)
    assert initial_request.subscription == "subscription-name"
    assert initial_request.stream_ack_deadline_seconds == 123
    assert initial_request.modify_deadline_ack_ids == ["1", "2"]
    assert initial_request.modify_deadline_seconds == [10, 10]
def test__get_initial_request_wo_leaser():
    """Without a leaser, the initial request must carry no deadline modifications."""
    manager = make_manager()
    manager._leaser = None

    initial_request = manager._get_initial_request(123)

    assert isinstance(initial_request, gapic_types.StreamingPullRequest)
    assert initial_request.subscription == "subscription-name"
    assert initial_request.stream_ack_deadline_seconds == 123
    assert initial_request.modify_deadline_ack_ids == []
    assert initial_request.modify_deadline_seconds == []
def test__on_response_delivery_attempt():
    """Scheduled messages must expose delivery_attempt (None when unset on the wire)."""
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="fack",
                message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="back",
                message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
                delivery_attempt=6,
            ),
        ]
    )

    # adjust message bookkeeping in leaser
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)

    manager._on_response(response)

    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    msg1 = schedule_calls[0][1][1]
    assert msg1.delivery_attempt is None
    msg2 = schedule_calls[1][1][1]
    assert msg2.delivery_attempt == 6
def test__on_response_modifies_ack_deadline():
    """Incoming messages must be MODACK-ed using the current ack_deadline value."""
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="ack_1",
                message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="ack_2",
                message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
            ),
        ]
    )

    # adjust message bookkeeping in leaser
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=80)

    # Actually run the method and check that correct MODACK value is used.
    with mock.patch.object(
        type(manager), "ack_deadline", new=mock.PropertyMock(return_value=18)
    ):
        manager._on_response(response)

    dispatcher.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest("ack_1", 18), requests.ModAckRequest("ack_2", 18)]
    )
def test__on_response_no_leaser_overload():
    """Below the lease load limit, all received messages must be scheduled directly."""
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="fack",
                message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="back",
                message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
            ),
        ]
    )

    # adjust message bookkeeping in leaser
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=42)

    # Actually run the method and prove that modack and schedule
    # are called in the expected way.
    manager._on_response(response)

    dispatcher.modify_ack_deadline.assert_called_once_with(
        [requests.ModAckRequest("fack", 10), requests.ModAckRequest("back", 10)]
    )

    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    for call in schedule_calls:
        assert call[1][0] == mock.sentinel.callback
        assert isinstance(call[1][1], message.Message)

    # the leaser load limit not hit, no messages had to be put on hold
    assert manager._messages_on_hold.size == 0
def test__on_response_with_leaser_overload():
    """Above the lease load limit, surplus messages are MODACK-ed but put on hold."""
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="fack",
                message=gapic_types.PubsubMessage(data=b"foo", message_id="1"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="back",
                message=gapic_types.PubsubMessage(data=b"bar", message_id="2"),
            ),
            gapic_types.ReceivedMessage(
                ack_id="zack",
                message=gapic_types.PubsubMessage(data=b"baz", message_id="3"),
            ),
        ]
    )

    # Adjust message bookkeeping in leaser. Pick 999 messages, which is just below
    # the default FlowControl.max_messages limit.
    fake_leaser_add(leaser, init_msg_count=999, assumed_msg_size=10)

    # Actually run the method and prove that modack and schedule
    # are called in the expected way.
    manager._on_response(response)

    # all messages should be added to the lease management and have their ACK
    # deadline extended, even those not dispatched to callbacks
    dispatcher.modify_ack_deadline.assert_called_once_with(
        [
            requests.ModAckRequest("fack", 10),
            requests.ModAckRequest("back", 10),
            requests.ModAckRequest("zack", 10),
        ]
    )

    # one message should be scheduled, the flow control limits allow for it
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 1
    call_args = schedule_calls[0][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "1"

    # the rest of the messages should have been put on hold
    assert manager._messages_on_hold.size == 2
    while True:
        msg = manager._messages_on_hold.get()
        if msg is None:
            break
        else:
            assert isinstance(msg, message.Message)
            assert msg.message_id in ("2", "3")
def test__on_response_none_data(caplog):
    """A None response must be logged and schedule nothing."""
    caplog.set_level(logging.DEBUG)

    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # adjust message bookkeeping in leaser
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)

    manager._on_response(response=None)

    scheduler.schedule.assert_not_called()
    assert "callback invoked with None" in caplog.text
def test__on_response_with_ordering_keys():
    """Messages sharing an ordering key are held until the key is reactivated."""
    manager, _, dispatcher, leaser, _, scheduler = make_running_manager()
    manager._callback = mock.sentinel.callback

    # Set up the messages.
    response = gapic_types.StreamingPullResponse(
        received_messages=[
            gapic_types.ReceivedMessage(
                ack_id="fack",
                message=gapic_types.PubsubMessage(
                    data=b"foo", message_id="1", ordering_key=""
                ),
            ),
            gapic_types.ReceivedMessage(
                ack_id="back",
                message=gapic_types.PubsubMessage(
                    data=b"bar", message_id="2", ordering_key="key1"
                ),
            ),
            gapic_types.ReceivedMessage(
                ack_id="zack",
                message=gapic_types.PubsubMessage(
                    data=b"baz", message_id="3", ordering_key="key1"
                ),
            ),
        ]
    )

    # Make leaser with zero initial messages, so we don't test lease management
    # behavior.
    fake_leaser_add(leaser, init_msg_count=0, assumed_msg_size=10)

    # Actually run the method and prove that modack and schedule are called in
    # the expected way.
    manager._on_response(response)

    # All messages should be added to the lease management and have their ACK
    # deadline extended, even those not dispatched to callbacks.
    dispatcher.modify_ack_deadline.assert_called_once_with(
        [
            requests.ModAckRequest("fack", 10),
            requests.ModAckRequest("back", 10),
            requests.ModAckRequest("zack", 10),
        ]
    )

    # The first two messages should be scheduled, The third should be put on
    # hold because it's blocked by the completion of the second, which has the
    # same ordering key.
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 2
    call_args = schedule_calls[0][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "1"

    call_args = schedule_calls[1][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "2"

    # Message 3 should have been put on hold.
    assert manager._messages_on_hold.size == 1
    # No messages available because message 2 (with "key1") has not completed yet.
    assert manager._messages_on_hold.get() is None

    # Complete message 2 (with "key1").
    manager.activate_ordering_keys(["key1"])

    # Completing message 2 should release message 3.
    schedule_calls = scheduler.schedule.mock_calls
    assert len(schedule_calls) == 3
    call_args = schedule_calls[2][1]
    assert call_args[0] == mock.sentinel.callback
    assert isinstance(call_args[1], message.Message)
    assert call_args[1].message_id == "3"

    # No messages available in the queue.
    assert manager._messages_on_hold.get() is None
def test__should_recover_true():
    """ServiceUnavailable errors are recoverable."""
    mgr = make_manager()

    error = exceptions.ServiceUnavailable("UNAVAILABLE. Service taking nap.")

    assert mgr._should_recover(error) is True
def test__should_recover_false():
    """Non-API errors (e.g. TypeError) are not recoverable."""
    mgr = make_manager()

    assert mgr._should_recover(TypeError("wahhhhhh")) is False
def test__should_terminate_true():
    """Cancelled errors must terminate the stream."""
    mgr = make_manager()

    error = exceptions.Cancelled("Cancelled. Go away, before I taunt you a second time.")

    assert mgr._should_terminate(error) is True
def test__should_terminate_false():
    """Non-API errors (e.g. TypeError) must not terminate the stream."""
    mgr = make_manager()

    assert mgr._should_terminate(TypeError("wahhhhhh")) is False
@mock.patch("threading.Thread", autospec=True)
def test__on_rpc_done(thread):
    """_on_rpc_done must spawn a shutdown thread whose reason wraps the RPC error."""
    manager = make_manager()

    manager._on_rpc_done(mock.sentinel.error)

    thread.assert_called_once_with(
        name=mock.ANY, target=manager._shutdown, kwargs={"reason": mock.ANY}
    )
    _, kwargs = thread.call_args
    reason = kwargs["kwargs"]["reason"]
    assert isinstance(reason, Exception)
    assert reason.args == (mock.sentinel.error,)  # Exception wraps the original error
def test_activate_ordering_keys():
    """activate_ordering_keys() must delegate to the messages-on-hold tracker."""
    manager = make_manager()
    manager._messages_on_hold = mock.create_autospec(
        messages_on_hold.MessagesOnHold, instance=True
    )

    manager.activate_ordering_keys(["key1", "key2"])

    manager._messages_on_hold.activate_ordering_keys.assert_called_once_with(
        ["key1", "key2"], mock.ANY
    )
def test_activate_ordering_keys_stopped_scheduler():
    """With no scheduler (already stopped), activating ordering keys is a no-op."""
    manager = make_manager()
    manager._messages_on_hold = mock.create_autospec(
        messages_on_hold.MessagesOnHold, instance=True
    )
    manager._scheduler = None

    manager.activate_ordering_keys(["key1", "key2"])

    manager._messages_on_hold.activate_ordering_keys.assert_not_called()
# ---------------------------------------------------------------------------
# best-channel.py
# ---------------------------------------------------------------------------
#!/usr/bin/env python
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up Scapy
from scapy.all import *
conf.verb = 0 # Scapy I thought I told you to shut up
import os
import sys
import time
from threading import Thread, Lock
from subprocess import Popen, PIPE
from signal import SIGINT, signal
import argparse
import socket
import struct
import fcntl
# Console colors (ANSI escape sequences)
W = '\033[0m'    # white (normal)
R = '\033[31m'   # red
G = '\033[32m'   # green
O = '\033[33m'   # orange
B = '\033[34m'   # blue
P = '\033[35m'   # purple
C = '\033[36m'   # cyan
GR = '\033[37m'  # gray
T = '\033[93m'   # tan

# Per-band AP signal samples keyed by representative channel:
# channels 1-3 bucket under 1, 4-8 under 6, 9-11 under 11 (see APs()).
# Value shape: {bssid: [rssi_sample, ...]}
channels = {1: {}, 6: {}, 11: {}}
def parse_args():
    """Build and parse the command-line arguments (-i/--interface)."""
    # Create the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--interface", help="Choose monitor mode interface. By default script \
will find the most powerful interface and starts monitor mode on it. Example: -i mon5")
    return parser.parse_args()
########################################
# Begin interface info and manipulation
########################################
def get_mon_iface(args):
    """Return the name of a monitor-mode interface to use.

    Preference order: the interface passed via -i, an interface already in
    monitor mode, otherwise the most powerful managed interface (monitor
    mode is started on it). Sets the global ``monitor_on`` flag in the first
    two cases.
    """
    global monitor_on
    monitors, interfaces = iwconfig()
    if args.interface:
        monitor_on = True
        return args.interface
    if len(monitors) > 0:
        monitor_on = True
        return monitors[0]
    else:
        # Start monitor mode on a wireless interface
        # NOTE: Python 2 print statement (this script targets Python 2).
        print '['+G+'*'+W+'] Finding the most powerful interface...'
        interface = get_iface(interfaces)
        monmode = start_mon_mode(interface)
        return monmode
def iwconfig():
    """Parse ``iwconfig`` output into wireless interface info.

    Returns:
        (monitors, interfaces): a list of interface names already in monitor
        mode, and a dict mapping wireless interface name -> 1 if associated
        (line contains an ESSID) else 0. Wired interfaces are skipped.
    """
    monitors = []
    interfaces = {}
    # NOTE(review): ``DN`` and ``re`` are not defined/imported in this chunk --
    # presumably DN is an open /dev/null handle defined elsewhere in the file
    # and re comes via scapy's star import; confirm.
    proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN)
    for line in proc.communicate()[0].split('\n'):
        if len(line) == 0: continue # Isn't an empty string
        if line[0] != ' ': # Doesn't start with space
            wired_search = re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line)
            if not wired_search: # Isn't wired
                iface = line[:line.find(' ')] # is the interface
                if 'Mode:Monitor' in line:
                    monitors.append(iface)
                elif 'IEEE 802.11' in line:
                    if "ESSID:\"" in line:
                        interfaces[iface] = 1
                    else:
                        interfaces[iface] = 0
    return monitors, interfaces
def get_iface(interfaces):
    """Pick the interface that sees the most APs in an ``iwlist`` scan.

    With a single interface it is returned immediately; on scan failure the
    last iterated interface is used as a fallback.
    """
    scanned_aps = []

    if len(interfaces) < 1:
        sys.exit('['+R+'-'+W+'] No wireless interfaces found, bring one up and try again')
    if len(interfaces) == 1:
        for interface in interfaces:
            return interface

    # Find most powerful interface
    for iface in interfaces:
        count = 0
        proc = Popen(['iwlist', iface, 'scan'], stdout=PIPE, stderr=DN)
        for line in proc.communicate()[0].split('\n'):
            if ' - Address:' in line: # first line in iwlist scan for a new AP
                count += 1
        scanned_aps.append((count, iface))
        # Python 2 print statement.
        print '['+G+'+'+W+'] Networks discovered by '+G+iface+W+': '+T+str(count)+W
    try:
        # max() picks the (count, iface) tuple with the highest AP count.
        interface = max(scanned_aps)[1]
        return interface
    except Exception as e:
        # Fallback: keep whatever interface the loop saw last.
        for iface in interfaces:
            interface = iface
        print '['+R+'-'+W+'] Minor error:',e
        print '    Starting monitor mode on '+G+interface+W
        return interface
def start_mon_mode(interface):
print '['+G+'+'+W+'] Starting monitor mode on '+G+interface+W
try:
os.system('/sbin/ifconfig %s down' % interface)
os.system('/sbin/iwconfig %s mode monitor' % interface)
os.system('/sbin/ifconfig %s up' % interface)
return interface
except Exception:
sys.exit('['+R+'-'+W+'] Could not start monitor mode')
raise ####################
def remove_mon_iface(mon_iface):
    """Take ``mon_iface`` out of monitor mode and return it to managed mode."""
    for command in (
        '/sbin/ifconfig %s down',
        '/sbin/iwconfig %s mode managed',
        '/sbin/ifconfig %s up',
    ):
        os.system(command % mon_iface)
def mon_mac(mon_iface):
    """Return the MAC address of ``mon_iface``, queried via ioctl.

    http://stackoverflow.com/questions/159137/getting-mac-address
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # 0x8927 = SIOCGIFHWADDR; the hardware address occupies bytes 18-24 of
    # the returned ifreq structure.
    info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
    # Python 2: iterating the packed string yields 1-char strings, hence ord().
    mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
    print '['+G+'*'+W+'] Monitor mode: '+G+mon_iface+W+' - '+O+mac+W
    return mac
########################################
# End of interface info and manipulation
########################################
def cb(pkt):
    """Sniffer callback: forward beacon/probe-response frames to APs().

    Only 802.11 frames that carry both addr1 and addr2 are considered.
    (The original docstring mentioned deauth targets -- stale copy-paste;
    this function only records AP sightings.)
    '''
    """
    # We're adding the AP and channel to the deauth list at time of creation rather
    # than updating on the fly in order to avoid costly for loops that require a lock
    if pkt.haslayer(Dot11):
        if pkt.addr1 and pkt.addr2:
            # Check if it's added to our AP list
            if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
                APs(pkt)
def APs(pkt):
    # Record this AP's signal strength into the global `channels` dict,
    # bucketed under the nearest non-overlapping 2.4GHz channel (1, 6 or 11).
    ssid = pkt[Dot11Elt].info  # NOTE(review): assigned but never used
    bssid = pkt[Dot11].addr3
    try:
        chans = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11']
        # http://stackoverflow.com/questions/10818661/scapy-retrieving-rssi-from-wifi-packets
        # RSSI is encoded in the undecoded radiotap tail as 256 - byte value
        sig_str = -(256-ord(pkt.notdecoded[-4:-3]))
        # airoscapy
        # the third Dot11Elt carries the DS parameter set (the channel number)
        ap_channel = str(ord(pkt[Dot11Elt:3].info))
        if ap_channel in chans:
            if ap_channel in ['1','2','3']:
                # Set to {MAC address:avg power signal}
                if bssid in channels[1]:
                    channels[1][bssid].append(sig_str)
                else:
                    channels[1][bssid] = [sig_str]
            elif ap_channel in ['4','5','6','7','8']:
                # Set to {MAC address:avg power signal}
                if bssid in channels[6]:
                    channels[6][bssid].append(sig_str)
                else:
                    channels[6][bssid] = [sig_str]
            elif ap_channel in ['9', '10', '11']:
                # Set to {MAC address:avg power signal}
                if bssid in channels[11]:
                    channels[11][bssid].append(sig_str)
                else:
                    channels[11][bssid] = [sig_str]
    except Exception:
        raise
def output(err, num_aps, chan_interference, best_channel):
    # Clear the terminal and print per-band AP counts, interference scores,
    # and the recommended (least interfered) channel; if a channel-hop error
    # occurred, show that instead.
    os.system('clear')
    if err:
        print err
    else:
        print '[Channels '+G+'1-3'+W+'] Number of APs: '+T+'%d' % num_aps[1], W+' | Interference level: '+R+'%d' % chan_interference[1], W
        print '[Channels '+G+'4-8'+W+'] Number of APs: '+T+'%d' % num_aps[6], W+' | Interference level: '+R+'%d' % chan_interference[6], W
        print '[Channels '+G+'9-11'+W+'] Number of APs: '+T+'%d' % num_aps[11], W+' | Interference level: '+R+'%d' % chan_interference[11], W
        print '['+G+'+'+W+'] Recommended channel: '+G+'%s' % best_channel, W
def channel_hop(mon_iface):
    '''
    First time it runs through the channels it stays on each channel for 5 seconds
    in order to populate the deauth list nicely. After that it goes as fast as it can
    '''
    # NOTE(review): the body actually sleeps 0.5s per hop unconditionally;
    # the 5-second claim above looks copied from elsewhere - confirm it.
    global monchannel
    channelNum = 0
    maxChan = 11
    err = None
    while 1:
        # cycle 1..11 forever
        channelNum +=1
        if channelNum > maxChan:
            channelNum = 1
        monchannel = str(channelNum)
        try:
            proc = Popen(['iw', 'dev', mon_iface, 'set', 'channel', monchannel], stdout=DN, stderr=PIPE)
        except OSError:
            print '['+R+'-'+W+'] Could not execute "iw"'
            os.kill(os.getpid(),SIGINT)
            sys.exit(1)
        for line in proc.communicate()[1].split('\n'):
            if len(line) > 2: # iw dev shouldnt display output unless there's an error
                err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W
        # avg_pwr_per_ap = {1:{bssid:ap_pwr}, 6:[...], 11:[...]}
        # Recompute the stats from the global `channels` accumulator and redraw
        avg_pwr_per_ap = get_ap_pwr()
        num_aps = get_num_aps(avg_pwr_per_ap)
        chan_interference = get_chan_interference(avg_pwr_per_ap)
        best_channel = get_best_channel(chan_interference)
        output(err, num_aps, chan_interference, best_channel)
        time.sleep(.5)
def get_best_channel(chan_interference):
    '''
    Return the non-overlapping channel ('1', '6' or '11') with the lowest
    interference score, or 'All the same' on a three-way tie.
    '''
    scores = {'1': chan_interference[1],
              '6': chan_interference[6],
              '11': chan_interference[11]}
    if scores['1'] == scores['6'] == scores['11']:
        return 'All the same'
    # dict preserves insertion order, so ties break toward the lower channel,
    # matching the original if/elif cascade
    return min(scores, key=scores.get)
def get_chan_interference(avg_pwr_per_ap):
    '''
    Sum each channel's per-BSSID average power levels and negate the total,
    yielding one interference score per non-overlapping channel.
    '''
    return {chan: -sum(avg_pwr_per_ap[chan].values()) for chan in (1, 6, 11)}
def get_ap_pwr(chan_data=None):
    '''
    Returns a dict of nonoverlapping channels which contains
    a dict of each BSSID's average power.

    chan_data: optional mapping {channel: {bssid: [pwr, pwr, ...]}}.
        Defaults to the module-level `channels` accumulator, so existing
        zero-argument callers are unaffected; passing data explicitly makes
        the function pure and testable.
    '''
    data = channels if chan_data is None else chan_data
    avg_pwr_per_ap = {1: {}, 6: {}, 11: {}}
    for chan in (1, 6, 11):
        # data[chan] = {'bssid': [pwr, pwr, ...]}
        for bssid, readings in data[chan].items():
            # float() keeps true division under Python 2 as well
            avg_pwr_per_ap[chan][bssid] = float(sum(readings)) / len(readings) if readings else 0
    return avg_pwr_per_ap
def get_num_aps(avg_pwr_per_ap):
    '''Count the distinct BSSIDs seen on each non-overlapping channel.'''
    return {chan: len(avg_pwr_per_ap[chan]) for chan in (1, 6, 11)}
def stop(signal, frame):
    # SIGINT handler: if the interface was already in monitor mode when the
    # script started (monitor_on is truthy), leave it as-is; otherwise restore
    # managed mode before exiting. Both paths exit with the same message.
    if monitor_on:
        sys.exit(
            '\n['+R+'!'+W+'] Closing... You will probably have to reconnect to your \
wireless network if you were on one prior to running this script')
    else:
        remove_mon_iface(mon_iface)
        sys.exit('\n['+R+'!'+W+'] Closing... You will probably have to reconnect to your \
wireless network if you were on one prior to running this script')
if __name__ == "__main__":
    # Root is required to reconfigure interfaces and sniff raw 802.11 frames
    if os.geteuid():
        sys.exit('['+R+'-'+W+'] Please run as root')
    args = parse_args()
    DN = open(os.devnull, 'w')  # sink for noisy subprocess output
    monitor_on = None  # presumably set by get_mon_iface - TODO confirm
    mon_iface = get_mon_iface(args)
    conf.iface = mon_iface  # scapy's default capture interface
    mon_MAC = mon_mac(mon_iface)
    # Hop channels on a background daemon thread while sniffing here
    hop = Thread(target=channel_hop, args=(mon_iface,))
    hop.daemon = True
    hop.start()
    signal(SIGINT, stop)
    try:
        sniff(iface=mon_iface, store=0, prn=cb)
    except Exception as msg:
        # restore managed mode before reporting the failure
        remove_mon_iface(mon_iface)
        print '\n['+R+'!'+W+'] Sniffing failed: %s' % str(msg)
        sys.exit(0)
# ---- server_simple.py ----
from flask import Flask, render_template, send_file, safe_join, request,jsonify
from threading import Thread
from flask_socketio import SocketIO
import settings

# Flask app configured from the local `settings` module; SocketIO wraps it
# for websocket support (started in launch()).
app = Flask(__name__)
app.config.from_object('settings')
socketio = SocketIO(app)
@app.route('/', methods=['GET', "POST"])
def index():
    # Serve the single-page UI
    return render_template('index.html')
@app.route('/input', methods=["POST"])
def input():
    # Handle a posted 'inputS' form value.
    # NOTE(review): this shadows the builtin input(); harmless as a route
    # handler, but rename if it is ever called directly.
    print "input..."
    #print request.json()
    values = request.values
    #print values
    inputS = None
    try:
        inputS = values['inputS']
    except TypeError:
        # NOTE(review): a missing form key normally raises a KeyError subclass,
        # not TypeError - confirm which failure this branch was meant to catch.
        return jsonify({'status': 'no-input', 'response': get_answer()})
    print inputS
    #text = request.json['inputS']
    return render_template('index.html')
def launch():
    # Start the HTTP server; the separate websocket server thread is
    # currently disabled (commented out below).
    def start_websocket_server():
        socketio.run(app, port=app.config['PORT_WEBSOCKET'])
    #Thread(target=start_websocket_server).start()
    app.run(debug=app.config['DEBUG'], host=app.config['HOST'], port=app.config['PORT'])
    #logger.debug("Application terminates")

# RUN APP
if __name__ == "__main__":
    launch()
# ---- datasets.py ----
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from utils.general import xyxy2xywh, xywh2xyxy
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']  # accepted image suffixes
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']  # accepted video suffixes

# Get orientation exif tag
# Resolve the numeric EXIF tag id for 'Orientation' once at import time;
# exif_size() uses the resulting `orientation` to fix rotated image sizes.
for orientation in ExifTags.TAGS.keys():
    if ExifTags.TAGS[orientation] == 'Orientation':
        break
def exif_size(img):
    """Return the EXIF-corrected (width, height) of a PIL image.

    Orientations 6 and 8 (270/90 degree rotations) swap width and height.
    Images without EXIF data (or with unreadable EXIF) fall back to the
    raw PIL size.
    """
    s = img.size  # (width, height)
    try:
        rotation = dict(img._getexif().items())[orientation]
        if rotation in (6, 8):  # 270 or 90 degree rotation swaps the axes
            s = (s[1], s[0])
    except Exception:
        # was a bare `except:` - narrowed so Ctrl-C/SystemExit still propagate;
        # missing/partial EXIF lands here and keeps the raw size
        pass
    return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False):
    """Build a LoadImagesAndLabels dataset and a matching torch DataLoader.

    Returns (dataloader, dataset). batch_size is capped at the dataset size,
    and the worker count at min(cpu_count, batch_size if >1 else 0, 8).
    """
    dataset = LoadImagesAndLabels(
        path, imgsz, batch_size,
        augment=augment,             # augment images
        hyp=hyp,                     # augmentation hyperparameters
        rect=rect,                   # rectangular training
        cache_images=cache,
        single_cls=opt.single_cls,
        stride=stride,
        pad=pad)

    effective_bs = min(batch_size, len(dataset))
    workers = min(os.cpu_count(), effective_bs if effective_bs > 1 else 0, 8)  # number of workers
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=effective_bs,
        num_workers=workers,
        pin_memory=True,
        collate_fn=LoadImagesAndLabels.collate_fn)
    return loader, dataset
class LoadImages:  # for inference
    """Iterate over a directory (or single file) of images and videos.

    Each iteration yields (path, letterboxed CHW RGB image, original BGR
    image, cv2 capture or None). Videos are decoded frame by frame.
    """
    def __init__(self, path, img_size=640):
        path = str(Path(path))  # os-agnostic
        files = []
        if os.path.isdir(path):
            files = sorted(glob.glob(os.path.join(path, '*.*')))
        elif os.path.isfile(path):
            files = [path]
        # split by extension using the module-level format whitelists
        images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
        videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
        nI, nV = len(images), len(videos)
        self.img_size = img_size
        self.files = images + videos
        self.nF = nI + nV  # number of files
        self.video_flag = [False] * nI + [True] * nV  # parallels self.files
        self.mode = 'images'
        if any(videos):
            self.new_video(videos[0])  # new video
        else:
            self.cap = None
        assert self.nF > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
                            (path, img_formats, vid_formats)

    def __iter__(self):
        self.count = 0
        return self

    def __next__(self):
        # One image, or one frame of the current video, per call.
        if self.count == self.nF:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # current video exhausted - advance to the next file
                self.count += 1
                self.cap.release()
                if self.count == self.nF:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            self.frame += 1
            print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nF, self.frame, self.nframes, path), end='')
        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            print('image %g/%g %s: ' % (self.count, self.nF, path), end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        # cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1])  # save letterbox image
        return path, img, img0, self.cap

    def new_video(self, path):
        # Open a capture for `path` and reset the frame counters.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nF  # number of files
class LoadWebcam:  # for inference
    """Iterate frames from a local webcam or a single IP-camera pipe.

    Yields ('webcam.jpg', letterboxed CHW RGB image, original BGR frame, None)
    until 'q' is pressed in the OpenCV window.
    """
    def __init__(self, pipe=0, img_size=640):
        self.img_size = img_size

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        if cv2.waitKey(1) == ord('q'):  # q to quit
            self.cap.release()
            cv2.destroyAllWindows()
            raise StopIteration

        # Read frame
        if self.pipe == 0:  # local camera
            ret_val, img0 = self.cap.read()
            img0 = cv2.flip(img0, 1)  # flip left-right
        else:  # IP camera
            n = 0
            while True:
                n += 1
                self.cap.grab()
                if n % 30 == 0:  # skip frames
                    ret_val, img0 = self.cap.retrieve()
                    if ret_val:
                        break

        # Print
        assert ret_val, 'Camera Error %s' % self.pipe
        img_path = 'webcam.jpg'
        print('webcam %g: ' % self.count, end='')

        # Padded resize
        img = letterbox(img0, new_shape=self.img_size)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return img_path, img, img0, None

    def __len__(self):
        return 0
class LoadStreams:  # multiple IP or RTSP cameras
    """Read several video streams concurrently, one daemon thread per stream.

    `sources` is either a text file with one stream URL per line or a single
    URL. Each iteration yields (sources, stacked CHW RGB batch, list of
    original BGR frames, None).
    """
    def __init__(self, sources='streams.txt', img_size=640):
        self.mode = 'images'
        self.img_size = img_size

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n  # latest frame per stream, updated by threads
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0)  # inference shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                _, self.imgs[index] = cap.retrieve()
                n = 0
            time.sleep(0.01)  # wait time

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        img0 = self.imgs.copy()  # snapshot so threads don't race mid-batch
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]

        # Stack
        img = np.stack(img, 0)

        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)

        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset):  # for training/testing
    """torch Dataset pairing images with YOLO-format label .txt files.

    Labels are (class, x_center, y_center, w, h), normalized 0-1 on disk;
    __getitem__ returns (CHW RGB tensor, labels tensor (n,6), path, shapes).
    """
    def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
                 cache_images=False, single_cls=False, stride=32, pad=0.0):
        try:
            path = str(Path(path))  # os-agnostic
            parent = str(Path(path).parent) + os.sep
            print('**', path,parent)
            if os.path.isfile(path):  # file
                with open(path, 'r') as f:
                    f = f.read().splitlines()
                    f = [x.replace('./', parent) if x.startswith('./') else x for x in f]  # local to global path
            elif os.path.isdir(path):  # folder
                f = glob.iglob(path + os.sep + '*.*')
            else:
                raise Exception('%s does not exist' % path)
            self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]
        except:
            # raise Exception('Error loading data from %s. See %s' % (path, help_url))
            # NOTE(review): fallback retries the same resolution with a
            # hard-coded '../data/' parent; bare except hides the real error.
            path = str(Path(path))  # os-agnostic
            # parent = str(Path(path).parent) + os.sep
            parent = '../data/'
            print('**', path,parent)
            if os.path.isfile(path):  # file
                with open(path, 'r') as f:
                    f = f.read().splitlines()
                    f = [x.replace('./', parent) if x.startswith('./') else x for x in f]  # local to global path
            elif os.path.isdir(path):  # folder
                f = glob.iglob(path + os.sep + '*.*')
            else:
                raise Exception('%s does not exist' % path)
            self.img_files = [x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats]

        n = len(self.img_files)
        assert n > 0, 'No images found in %s. See %s' % (path, help_url)
        bi = np.floor(np.arange(n) / batch_size).astype(np.int)  # batch index
        nb = bi[-1] + 1  # number of batches

        self.n = n  # number of images
        self.batch = bi  # batch index of image
        self.img_size = img_size
        self.augment = augment
        self.hyp = hyp
        self.image_weights = image_weights
        self.rect = False if image_weights else rect
        self.mosaic = self.augment and not self.rect  # load 4 images at a time into a mosaic (only during training)
        self.mosaic_border = [-img_size // 2, -img_size // 2]
        self.stride = stride

        # Define labels: images/foo.jpg -> labels/foo.txt
        self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt')
                            for x in self.img_files]

        # Read image shapes (wh), cached in a sidecar *.shapes file
        sp = path.replace('.txt', '') + '.shapes'  # shapefile path
        try:
            with open(sp, 'r') as f:  # read existing shapefile
                s = [x.split() for x in f.read().splitlines()]
                assert len(s) == n, 'Shapefile out of sync'
        except:
            s = [exif_size(Image.open(f)) for f in tqdm(self.img_files, desc='Reading image shapes')]
            np.savetxt(sp, s, fmt='%g')  # overwrites existing (if any)

        self.shapes = np.array(s, dtype=np.float64)

        # Rectangular Training  https://github.com/ultralytics/yolov3/issues/232
        if self.rect:
            # Sort by aspect ratio
            s = self.shapes  # wh
            ar = s[:, 1] / s[:, 0]  # aspect ratio
            irect = ar.argsort()
            self.img_files = [self.img_files[i] for i in irect]
            self.label_files = [self.label_files[i] for i in irect]
            self.shapes = s[irect]  # wh
            ar = ar[irect]

            # Set training image shapes: one shared shape per batch
            shapes = [[1, 1]] * nb
            for i in range(nb):
                ari = ar[bi == i]
                mini, maxi = ari.min(), ari.max()
                if maxi < 1:
                    shapes[i] = [maxi, 1]
                elif mini > 1:
                    shapes[i] = [1, 1 / mini]

            self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride

        # Cache labels
        self.imgs = [None] * n
        self.labels = [np.zeros((0, 5), dtype=np.float32)] * n
        create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
        nm, nf, ne, ns, nd = 0, 0, 0, 0, 0  # number missing, found, empty, datasubset, duplicate
        np_labels_path = str(Path(self.label_files[0]).parent) + '.npy'  # saved labels in *.npy file
        if os.path.isfile(np_labels_path):
            s = np_labels_path  # print string
            x = np.load(np_labels_path, allow_pickle=True)
            if len(x) == n:
                self.labels = x
                labels_loaded = True
        else:
            s = path.replace('images', 'labels')

        pbar = tqdm(self.label_files)
        for i, file in enumerate(pbar):
            if labels_loaded:
                l = self.labels[i]
                # np.savetxt(file, l, '%g')  # save *.txt from *.npy file
            else:
                try:
                    with open(file, 'r') as f:
                        l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
                except:
                    nm += 1  # print('missing labels for image %s' % self.img_files[i])  # file missing
                    continue

            if l.shape[0]:
                # validate (class, x, y, w, h) rows
                assert l.shape[1] == 5, '> 5 label columns: %s' % file
                assert (l >= 0).all(), 'negative labels: %s' % file
                assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
                if np.unique(l, axis=0).shape[0] < l.shape[0]:  # duplicate rows
                    nd += 1  # print('WARNING: duplicate rows in %s' % self.label_files[i])  # duplicate rows
                if single_cls:
                    l[:, 0] = 0  # force dataset into single-class mode
                self.labels[i] = l
                nf += 1  # file found

                # Create subdataset (a smaller dataset)
                if create_datasubset and ns < 1E4:
                    if ns == 0:
                        create_folder(path='./datasubset')
                        os.makedirs('./datasubset/images')
                    exclude_classes = 43
                    if exclude_classes not in l[:, 0]:
                        ns += 1
                        # shutil.copy(src=self.img_files[i], dst='./datasubset/images/')  # copy image
                        with open('./datasubset/images.txt', 'a') as f:
                            f.write(self.img_files[i] + '\n')

                # Extract object detection boxes for a second stage classifier
                if extract_bounding_boxes:
                    p = Path(self.img_files[i])
                    img = cv2.imread(str(p))
                    h, w = img.shape[:2]
                    for j, x in enumerate(l):
                        f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
                        if not os.path.exists(Path(f).parent):
                            os.makedirs(Path(f).parent)  # make new output folder

                        b = x[1:] * [w, h, w, h]  # box
                        b[2:] = b[2:].max()  # rectangle to square
                        b[2:] = b[2:] * 1.3 + 30  # pad
                        b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)

                        b[[0, 2]] = np.clip(b[[0, 2]], 0, w)  # clip boxes outside of image
                        b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
                        assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
            else:
                ne += 1  # print('empty labels for image %s' % self.img_files[i])  # file empty
                # os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i]))  # remove

            pbar.desc = 'Caching labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
                s, nf, nm, ne, nd, n)
        assert nf > 0 or n == 20288, 'No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
        if not labels_loaded and n > 1000:
            print('Saving labels to %s for faster future loading' % np_labels_path)
            np.save(np_labels_path, self.labels)  # save for next time

        # Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
        if cache_images:  # if training
            gb = 0  # Gigabytes of cached images
            pbar = tqdm(range(len(self.img_files)), desc='Caching images')
            self.img_hw0, self.img_hw = [None] * n, [None] * n
            for i in pbar:  # max 10k images
                self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i)  # img, hw_original, hw_resized
                gb += self.imgs[i].nbytes
                pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)

        # Detect corrupted images https://medium.com/joelthchao/programmatically-detect-corrupted-image-8c1b2006c3d3
        detect_corrupted_images = False
        if detect_corrupted_images:
            from skimage import io  # conda install -c conda-forge scikit-image
            for file in tqdm(self.img_files, desc='Detecting corrupted images'):
                try:
                    _ = io.imread(file)
                except:
                    print('Corrupted image detected: %s' % file)

    def __len__(self):
        return len(self.img_files)

    # def __iter__(self):
    #     self.count = -1
    #     print('ran dataset iter')
    #     #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
    #     return self

    def __getitem__(self, index):
        # Returns one (img tensor, labels (n,6), path, shapes) sample, with
        # mosaic/affine/HSV/flip augmentation when self.augment is set.
        if self.image_weights:
            index = self.indices[index]

        hyp = self.hyp
        if self.mosaic:
            # Load mosaic
            img, labels = load_mosaic(self, index)
            shapes = None
        else:
            # Load image
            img, (h0, w0), (h, w) = load_image(self, index)

            # Letterbox
            shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size  # final letterboxed shape
            img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
            shapes = (h0, w0), ((h / h0, w / w0), pad)  # for COCO mAP rescaling

            # Load labels: normalized xywh -> pixel xyxy in letterboxed coords
            labels = []
            x = self.labels[index]
            if x.size > 0:
                # Normalized xywh to pixel xyxy format
                labels = x.copy()
                labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0]  # pad width
                labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1]  # pad height
                labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
                labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]

        if self.augment:
            # Augment imagespace (mosaic already went through random_affine)
            if not self.mosaic:
                img, labels = random_affine(img, labels,
                                            degrees=hyp['degrees'],
                                            translate=hyp['translate'],
                                            scale=hyp['scale'],
                                            shear=hyp['shear'])

            # Augment colorspace
            augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])

            # Apply cutouts
            # if random.random() < 0.9:
            #     labels = cutout(img, labels)

        nL = len(labels)  # number of labels
        if nL:
            # convert xyxy to xywh
            labels[:, 1:5] = xyxy2xywh(labels[:, 1:5])

            # Normalize coordinates 0 - 1
            labels[:, [2, 4]] /= img.shape[0]  # height
            labels[:, [1, 3]] /= img.shape[1]  # width

        if self.augment:
            # random left-right flip
            lr_flip = True
            if lr_flip and random.random() < 0.5:
                img = np.fliplr(img)
                if nL:
                    labels[:, 1] = 1 - labels[:, 1]

            # random up-down flip
            ud_flip = False
            if ud_flip and random.random() < 0.5:
                img = np.flipud(img)
                if nL:
                    labels[:, 2] = 1 - labels[:, 2]

        # column 0 is reserved for the image index, filled in by collate_fn
        labels_out = torch.zeros((nL, 6))
        if nL:
            labels_out[:, 1:] = torch.from_numpy(labels)

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return torch.from_numpy(img), labels_out, self.img_files[index], shapes

    @staticmethod
    def collate_fn(batch):
        # Stack images; concatenate labels, tagging each row with its
        # image index so build_targets() can attribute boxes.
        img, label, path, shapes = zip(*batch)  # transposed
        for i, l in enumerate(label):
            l[:, 0] = i  # add target image index for build_targets()
        return torch.stack(img, 0), torch.cat(label, 0), path, shapes
def load_image(self, index):
    # loads 1 image from dataset, returns img, original hw, resized hw
    # `self` is a LoadImagesAndLabels instance; kept module-level so it is
    # shared by __getitem__, load_mosaic and the caching loop in __init__.
    img = self.imgs[index]
    if img is None:  # not cached
        path = self.img_files[index]
        img = cv2.imread(path)  # BGR
        assert img is not None, 'Image Not Found ' + path
        h0, w0 = img.shape[:2]  # orig hw
        r = self.img_size / max(h0, w0)  # resize image to img_size
        if r != 1:  # always resize down, only resize up if training with augmentation
            # INTER_AREA downsamples more cleanly; INTER_LINEAR otherwise
            interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
            img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
        return img, (h0, w0), img.shape[:2]  # img, hw_original, hw_resized
    else:
        return self.imgs[index], self.img_hw0[index], self.img_hw[index]  # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
    # Randomly jitter hue/saturation/value in place via per-channel LUTs.
    r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1  # random gains
    hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
    dtype = img.dtype  # uint8

    x = np.arange(0, 256, dtype=np.int16)
    lut_hue = ((x * r[0]) % 180).astype(dtype)  # OpenCV hue range is 0-179
    lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
    lut_val = np.clip(x * r[2], 0, 255).astype(dtype)

    img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
    cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img)  # no return needed

    # Histogram equalization
    # if random.random() < 0.2:
    #     for i in range(3):
    #         img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
    """Build a 2s x 2s mosaic from image `index` plus 3 random images,
    then crop/augment it back to s x s via random_affine.

    Returns (img4, labels4) with labels in pixel-xyxy mosaic coordinates.
    """
    labels4 = []
    s = self.img_size
    yc, xc = [int(random.uniform(-x, 2 * s + x)) for x in self.mosaic_border]  # mosaic center x, y
    indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4; (x1a..y2a) is the tile in the big canvas,
        # (x1b..y2b) is the matching crop of the source image
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            # BUGFIX: x2b was `max(xc, w)`, which exceeds the source image width
            # whenever xc > w (numpy slice clipping silently masked it).
            # Using `w` gives the same pixels and matches the other quadrants.
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, w, min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels: normalized xywh -> pixel xyxy in mosaic coordinates
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop
        np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:])  # use with random_affine

    # Augment
    # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)]  # center crop (WARNING, requires box pruning)
    img4, labels4 = random_affine(img4, labels4,
                                  degrees=self.hyp['degrees'],
                                  translate=self.hyp['translate'],
                                  scale=self.hyp['scale'],
                                  shear=self.hyp['shear'],
                                  border=self.mosaic_border)  # border to remove

    return img4, labels4
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    # Returns (img, (w_ratio, h_ratio), (dw, dh)) where dw/dh are per-side pads.
    # auto=True pads only to a 64-pixel multiple; scaleFill stretches instead.
    shape = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = new_shape
        ratio = new_shape[0] / shape[1], new_shape[1] / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    # the +-0.1 rounding splits odd padding 1px unevenly but deterministically
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
def random_affine(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, border=(0, 0)):
    # torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
    # https://medium.com/uruvideo/dataset-augmentation-with-random-homographies-a8f4b44830d4
    # targets = [cls, xyxy]
    # Applies a random rotation/scale/translation/shear to `img` and warps the
    # target boxes to match, dropping boxes that become degenerate.

    height = img.shape[0] + border[0] * 2  # shape(h,w,c)
    width = img.shape[1] + border[1] * 2

    # Rotation and Scale
    R = np.eye(3)
    a = random.uniform(-degrees, degrees)
    # a += random.choice([-180, -90, 0, 90])  # add 90deg rotations to small rotations
    s = random.uniform(1 - scale, 1 + scale)
    # s = 2 ** random.uniform(-scale, scale)
    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)

    # Translation
    T = np.eye(3)
    T[0, 2] = random.uniform(-translate, translate) * img.shape[1] + border[1]  # x translation (pixels)
    T[1, 2] = random.uniform(-translate, translate) * img.shape[0] + border[0]  # y translation (pixels)

    # Shear
    S = np.eye(3)
    S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # x shear (deg)
    S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)  # y shear (deg)

    # Combined rotation matrix
    M = S @ T @ R  # ORDER IS IMPORTANT HERE!!
    if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():  # image changed
        img = cv2.warpAffine(img, M[:2], dsize=(width, height), flags=cv2.INTER_LINEAR, borderValue=(114, 114, 114))

    # Transform label coordinates
    n = len(targets)
    if n:
        # warp points: all 4 corners of every box
        xy = np.ones((n * 4, 3))
        xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)  # x1y1, x2y2, x1y2, x2y1
        xy = (xy @ M.T)[:, :2].reshape(n, 8)

        # create new boxes: axis-aligned hull of the warped corners
        x = xy[:, [0, 2, 4, 6]]
        y = xy[:, [1, 3, 5, 7]]
        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T

        # # apply angle-based reduction of bounding boxes
        # radians = a * math.pi / 180
        # reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
        # x = (xy[:, 2] + xy[:, 0]) / 2
        # y = (xy[:, 3] + xy[:, 1]) / 2
        # w = (xy[:, 2] - xy[:, 0]) * reduction
        # h = (xy[:, 3] - xy[:, 1]) * reduction
        # xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T

        # reject warped points outside of image
        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
        w = xy[:, 2] - xy[:, 0]
        h = xy[:, 3] - xy[:, 1]
        area = w * h
        area0 = (targets[:, 3] - targets[:, 1]) * (targets[:, 4] - targets[:, 2])
        ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))  # aspect ratio
        # keep boxes that stay >2px, retain >=20% of scaled area, aspect < 20
        i = (w > 2) & (h > 2) & (area / (area0 * s + 1e-16) > 0.2) & (ar < 20)

        targets = targets[i]
        targets[:, 1:5] = xy[i]

    return img, targets
def cutout(image, labels):
# https://arxiv.org/abs/1708.04552
# https://github.com/hysts/pytorch_cutout/blob/master/dataloader.py
# https://towardsdatascience.com/when-conventional-wisdom-fails-revisiting-data-augmentation-for-self-driving-cars-4831998c5509
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def reduce_img_size(path='../data/sm4/images', img_size=1024):  # from utils.datasets import *; reduce_img_size()
    # creates a new ./images_reduced folder with reduced size images of maximum size img_size
    path_new = path + '_reduced'  # reduced images path
    create_folder(path_new)
    for f in tqdm(glob.glob('%s/*.*' % path)):
        try:
            img = cv2.imread(f)
            if img is None:
                # cv2.imread returns None (no exception) for unreadable files.
                raise ValueError('cv2.imread returned None')
            h, w = img.shape[:2]
            r = img_size / max(h, w)  # size ratio
            if r < 1.0:  # only shrink, never upscale
                img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA)  # _LINEAR fastest
            fnew = f.replace(path, path_new)  # .replace(Path(f).suffix, '.jpg')
            cv2.imwrite(fnew, img)
        except Exception:
            # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
            # still abort the batch instead of being swallowed per-image.
            print('WARNING: image failure %s' % f)
def convert_images2bmp():  # from utils.datasets import *; convert_images2bmp()
    """Convert dataset images to .bmp in sibling '<path>bmp' folders and
    rewrite the label-list text files to point at the new paths."""
    # Save images
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
    # for path in ['../coco/images/val2014', '../coco/images/train2014']:
    for path in ['../data/sm4/images', '../data/sm4/background']:
        create_folder(path + 'bmp')
        for ext in formats:  # ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.dng']
            for f in tqdm(glob.glob('%s/*%s' % (path, ext)), desc='Converting %s' % ext):
                # BUG FIX: replace the actual extension ('ext'), not 'ext.lower()'.
                # With the old code, upper-case files (e.g. '.JPG') kept their
                # extension, so cv2.imwrite re-encoded them as JPEG, not BMP.
                cv2.imwrite(f.replace(ext, '.bmp').replace(path, path + 'bmp'), cv2.imread(f))

    # Save labels
    # for path in ['../coco/trainvalno5k.txt', '../coco/5k.txt']:
    for file in ['../data/sm4/out_train.txt', '../data/sm4/out_test.txt']:
        with open(file, 'r') as f:
            lines = f.read()
            # lines = f.read().replace('2014/', '2014bmp/')  # coco
            lines = lines.replace('/images', '/imagesbmp')
            lines = lines.replace('/background', '/backgroundbmp')
        for ext in formats:
            lines = lines.replace(ext, '.bmp')
        with open(file.replace('.txt', 'bmp.txt'), 'w') as f:
            f.write(lines)
def recursive_dataset2bmp(dataset='../data/sm4_bmp'):  # from utils.datasets import *; recursive_dataset2bmp()
    # Converts dataset to bmp (for faster training)
    formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
    for a, b, files in os.walk(dataset):
        for file in tqdm(files, desc=a):
            p = a + '/' + file
            s = Path(file).suffix
            if s == '.txt':  # rewrite image extensions inside label/list files
                with open(p, 'r') as fh:
                    lines = fh.read()
                # Renamed loop variable: it previously shadowed the file handle 'f'.
                for ext in formats:
                    lines = lines.replace(ext, '.bmp')
                with open(p, 'w') as fh:
                    fh.write(lines)
            elif s in formats:  # replace image
                cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
                if s != '.bmp':
                    # os.remove is portable and safe for any filename,
                    # unlike shelling out to rm with a quoted path.
                    os.remove(p)
def imagelist2folder(path='data/coco_64img.txt'):  # from utils.datasets import *; imagelist2folder()
    # Copies all the images in a text file (list of images) into a folder
    create_folder(path[:-4])
    with open(path, 'r') as f:
        for line in f.read().splitlines():
            # shutil.copy is portable and immune to shell quoting issues,
            # unlike the previous os.system('cp "%s" %s' ...) call.
            shutil.copy(line, path[:-4])
            print(line)
def create_folder(path='./new_folder'):
    """Recreate *path* as an empty directory, deleting any existing tree first."""
    target = path
    if os.path.exists(target):
        # Always start from a clean output folder.
        shutil.rmtree(target)
    os.makedirs(target)
|
interpreterv2.py
|
# Trying server.start(custom_read_f-> tasks.append etc.)
import arachnoid as ara
import interpreter as inter

# Default endpoint; overridable from the command line with 'ip=<addr>'
# and 'port=<number>' arguments.
IP_ADDR = '192.168.1.94'
PORT_NUMBER = 1234
for arg in ara.sys.argv:
    if arg.startswith('ip='):
        IP_ADDR = arg[3:]
        print('ip: {}'.format(IP_ADDR))
    elif arg.startswith('port='):
        PORT_NUMBER = int(arg[5:])
        print('port: {}'.format(PORT_NUMBER))

# Record our PID so an external supervisor can signal/kill this process.
PID = ara.get_pid()
# 'with' ensures the handle is closed/flushed; the old one-liner leaked it.
with open(ara.os.path.join(ara.ROOT_DIR, 'pid.txt'), 'w') as pid_file:
    pid_file.write(str(PID))
print('PID: {}'.format(PID))
def main():
    """Start the web server in a background thread, then run the interpreter loop."""
    web = ara.Web(IP_ADDR, PORT_NUMBER, verbose=0)
    web.df_client_thread = web.client_thread2
    web.init()
    worker = ara.Thread(target=web.start)
    worker.start()
    inter.VERBOSE = web.verbose
    extra = {'inter': inter, 'arachnoid': web}
    inter.interpreter2(web, additional_data=extra)


if __name__ == '__main__':
    main()
|
inventory.py
|
from collections import Counter
from flask_login import current_user
from git import Repo
from io import BytesIO
from logging import info
from os import environ
from sqlalchemy import and_
from subprocess import Popen
from threading import Thread
from uuid import uuid4
from werkzeug.utils import secure_filename
from xlrd import open_workbook
from xlrd.biffh import XLRDError
from xlwt import Workbook
from eNMS.controller.base import BaseController
from eNMS.controller.ssh import SshConnection
from eNMS.database import db
from eNMS.models import models, model_properties, property_types
class InventoryController(BaseController):
    """Inventory endpoints: device terminal connections (web and desktop),
    git-backed configuration history, topology import/export, pool updates."""

    # Rolling counter; get_ssh_port() maps it into the configured port range.
    ssh_port = -1
    # data type -> display name of the network data tracked per device.
    configuration_properties = {"configuration": "Configuration"}

    def get_ssh_port(self):
        """Return the next port in [start_port, end_port), cycling round-robin."""
        self.ssh_port += 1
        start = self.settings["ssh"]["start_port"]
        end = self.settings["ssh"]["end_port"]
        # '%' binds tighter than '+': start + (counter % range) stays in range.
        return start + self.ssh_port % (end - start)

    def web_connection(self, device_id, **kwargs):
        """Spawn a gotty web terminal connected to the device via telnet or ssh."""
        device = db.fetch("device", id=device_id, rbac="connect")
        cmd = [str(self.path / "files" / "apps" / "gotty"), "-w"]
        port, protocol = self.get_ssh_port(), kwargs["protocol"]
        address = getattr(device, kwargs["address"])
        cmd.extend(["-p", str(port)])
        if "accept-once" in kwargs:
            cmd.append("--once")
        if "multiplexing" in kwargs:
            cmd.extend(f"tmux new -A -s gotty{port}".split())
        if self.settings["ssh"]["bypass_key_prompt"]:
            options = "-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
        else:
            options = ""
        if protocol == "telnet":
            nested_cmd = f"telnet {address}"
        elif "authentication" in kwargs:
            # Tuple assignment doubles as a side effect: the password lands in
            # the SSHPASS environment variable consumed by 'sshpass -e' below.
            login, environ["SSHPASS"] = (
                (device.username, self.get_password(device.password))
                if kwargs["credentials"] == "device"
                else (current_user.name, self.get_password(current_user.password))
                if kwargs["credentials"] == "user"
                else (kwargs["username"], kwargs["password"])
            )
            nested_cmd = f"sshpass -e ssh {options} {login}@{address} -p {device.port}"
        else:
            nested_cmd = f"ssh {options} {address} -p {device.port}"
        if "multiplexing" in kwargs:
            cmd.append(nested_cmd)
        else:
            cmd.extend(nested_cmd.split())
        Popen(cmd)
        return {
            "device": device.name,
            "port": port,
            "redirection": self.settings["ssh"]["port_redirection"],
            "server_addr": self.settings["app"]["address"],
        }

    def get_device_logs(self, device_id):
        """Return all log entries whose source is the device's IP address."""
        device_logs = [
            log.name
            for log in db.fetch_all("log")
            if log.source == db.fetch("device", id=device_id).ip_address
        ]
        return "\n".join(device_logs)

    def desktop_connection(self, id, **kwargs):
        """Start an SSH session for a desktop client; return connection details."""
        device = db.fetch("device", id=id, rbac="connect")
        credentials = (
            (device.username, self.get_password(device.password))
            if kwargs["credentials"] == "device"
            else (current_user.name, self.get_password(current_user.password))
            if kwargs["credentials"] == "user"
            else (kwargs["username"], kwargs["password"])
        )
        # The session UUID doubles as the one-time username for the connection.
        uuid, port = str(uuid4()), self.get_ssh_port()
        session = db.factory(
            "session",
            name=uuid,
            user=current_user.name,
            timestamp=self.get_time(),
            device=device.id,
        )
        db.session.commit()
        try:
            ssh_connection = SshConnection(
                device.ip_address, *credentials, session.id, uuid, port
            )
            # Run the session in a thread so this request returns immediately.
            Thread(
                target=ssh_connection.start_session, args=(session.id, uuid, port),
            ).start()
            return {
                "port": port,
                "username": uuid,
                "device_name": device.name,
                "device_ip": device.ip_address,
            }
        except Exception as exc:
            return {"error": exc.args}

    def get_git_history(self, device_id):
        """List commits touching each configuration data type of the device."""
        device = db.fetch("device", id=device_id)
        repo = Repo(self.path / "network_data")
        path = self.path / "network_data" / device.name
        return {
            data_type: [
                {"hash": str(commit), "date": commit.committed_datetime}
                for commit in list(repo.iter_commits(paths=path / data_type))
            ]
            for data_type in self.configuration_properties
        }

    def get_git_network_data(self, device_name, hash):
        """Return the device's configuration files as stored at commit *hash*."""
        tree, result = Repo(self.path / "network_data").commit(hash).tree, {}
        for property in self.configuration_properties:
            file = tree / device_name / property
            with BytesIO(file.data_stream.read()) as f:
                result[property] = f.read().decode("utf-8")
        return result

    def get_device_network_data(self, device_id):
        """Return the current (database) value of each configuration property."""
        device = db.fetch("device", id=device_id)
        return {p: getattr(device, p) for p in self.configuration_properties}

    def get_session_log(self, session_id):
        """Return the recorded content of an SSH session."""
        return db.fetch("session", id=session_id).content

    def counters(self, property, type):
        """Count instances of *type*, grouped by the value of *property*."""
        return Counter(
            str(getattr(instance, property)) for instance in db.fetch_all(type)
        )

    def export_topology(self, **kwargs):
        """Export all devices and links to an .xls spreadsheet (one sheet each)."""
        workbook = Workbook()
        filename = kwargs["export_filename"]
        if "." not in filename:
            filename += ".xls"
        for obj_type in ("device", "link"):
            sheet = workbook.add_sheet(obj_type)
            for index, property in enumerate(model_properties[obj_type]):
                if property in db.dont_migrate[obj_type]:
                    continue
                sheet.write(0, index, property)
                for obj_index, obj in enumerate(db.fetch_all(obj_type), 1):
                    value = getattr(obj, property)
                    if type(value) == bytes:
                        # Encrypted fields are stored as bytes; export plaintext.
                        value = str(self.decrypt(value), "utf-8")
                    sheet.write(obj_index, index, str(value))
        workbook.save(self.path / "files" / "spreadsheets" / filename)

    def topology_import(self, file):
        """Import devices/links from an Excel workbook; return a status string."""
        book = open_workbook(file_contents=file.read())
        status = "Topology successfully imported."
        for obj_type in ("device", "link"):
            try:
                sheet = book.sheet_by_name(obj_type)
            except XLRDError:
                # Sheet missing: nothing of this type to import.
                continue
            properties = sheet.row_values(0)
            for row_index in range(1, sheet.nrows):
                values = {"dont_update_pools": True}
                for index, property in enumerate(properties):
                    if not property:
                        continue
                    # Convert each cell using the declared type of the property.
                    func = db.field_conversion[property_types.get(property, "str")]
                    values[property] = func(sheet.row_values(row_index)[index])
                try:
                    db.factory(obj_type, **values).serialized
                except Exception as exc:
                    info(f"{str(values)} could not be imported ({str(exc)})")
                    status = "Partial import (see logs)."
            db.session.commit()
        for pool in db.fetch_all("pool"):
            pool.compute_pool()
        self.log("info", status)
        return status

    def import_topology(self, **kwargs):
        """Handle a topology spreadsheet upload, optionally replacing devices."""
        file = kwargs["file"]
        if kwargs["replace"]:
            db.delete_all("device")
        # NOTE(review): 'result' is unbound if the extension check fails —
        # callers appear to rely on valid uploads; confirm upstream validation.
        if self.allowed_file(secure_filename(file.filename), {"xls", "xlsx"}):
            result = self.topology_import(file)
        info("Inventory import: Done.")
        return result

    def save_pool_objects(self, pool_id, **kwargs):
        """Set a pool's devices and links from comma-separated names or id lists."""
        pool = db.fetch("pool", id=pool_id)
        for obj_type in ("device", "link"):
            string_objects = kwargs[f"string_{obj_type}s"]
            if string_objects:
                objects = []
                for name in [obj.strip() for obj in string_objects.split(",")]:
                    obj = db.fetch(obj_type, allow_none=True, name=name)
                    if not obj:
                        return {
                            "alert": f"{obj_type.capitalize()} '{name}' does not exist."
                        }
                    if obj not in objects:
                        objects.append(obj)
            else:
                objects = db.objectify(obj_type, kwargs[f"{obj_type}s"])
            setattr(pool, f"{obj_type}_number", len(objects))
            setattr(pool, f"{obj_type}s", objects)
        pool.last_modified = self.get_time()
        return pool.serialized

    def update_pool(self, pool_id):
        """Recompute the membership of a single pool."""
        db.fetch("pool", id=int(pool_id)).compute_pool()

    def update_all_pools(self):
        """Recompute the membership of every pool."""
        for pool in db.fetch_all("pool"):
            pool.compute_pool()

    def get_view_topology(self):
        """Return view properties of all devices and links for the map view."""
        return {
            "devices": [d.view_properties for d in db.fetch_all("device")],
            "links": [d.view_properties for d in db.fetch_all("link")],
        }

    def view_filtering(self, **kwargs):
        """Return view properties of objects matching per-type filtering forms."""
        return {
            obj_type: [
                d.view_properties
                for d in db.session.query(models[obj_type])
                .filter(and_(*self.build_filtering_constraints(obj_type, **form)))
                .all()
            ]
            for obj_type, form in kwargs.items()
        }
|
test_state.py
|
import glob
import logging
import os
import shutil
import threading
import time
import pytest
from saltfactories.utils.tempfiles import temp_file
from tests.support.case import SSHCase
from tests.support.runtests import RUNTIME_VARS
# Name of the .sls fixture applied throughout these tests.
SSH_SLS = "ssh_state_tests"
# File created by applying the fixture; cleaned up again in tearDown.
SSH_SLS_FILE = "/tmp/salt_test_file"

log = logging.getLogger(__name__)
class SSHStateTest(SSHCase):
    """
    testing the state system with salt-ssh
    """

    def _check_dict_ret(self, ret, val, exp_ret, equal=True):
        # Assert every per-minion entry of *ret* has (or, with equal=False,
        # does not have) the value *exp_ret* under key *val*.
        self.assertIsInstance(ret, dict)
        for key, value in ret.items():
            self.assertIsInstance(value, dict)
            if equal:
                self.assertEqual(value[val], exp_ret)
            else:
                self.assertNotEqual(value[val], exp_ret)

    def _check_request(self, empty=False):
        # Verify the pending state.request matches SSH_SLS (or is empty).
        check = self.run_function("state.check_request", wipe=False)
        if empty:
            self.assertFalse(bool(check), "bool({}) is not False".format(check))
        else:
            self._check_dict_ret(
                ret=check["default"]["test_run"]["local"]["return"],
                val="__sls__",
                exp_ret=SSH_SLS,
            )

    @pytest.mark.slow_test
    def test_state_apply(self):
        """
        test state.apply with salt-ssh
        """
        ret = self.run_function("state.apply", [SSH_SLS])
        self._check_dict_ret(ret=ret, val="__sls__", exp_ret=SSH_SLS)

        check_file = self.run_function("file.file_exists", [SSH_SLS_FILE])
        self.assertTrue(check_file)

    @pytest.mark.slow_test
    def test_state_sls_id(self):
        """
        test state.sls_id with salt-ssh
        """
        # check state.sls_id with test=True
        ret = self.run_function("state.sls_id", ["ssh-file-test", SSH_SLS, "test=True"])
        self._check_dict_ret(
            ret=ret,
            val="comment",
            exp_ret=(
                "The file {} is set to be changed\n"
                "Note: No changes made, actual changes may\n"
                "be different due to other states."
            ).format(SSH_SLS_FILE),
        )

        # check state.sls_id without test=True
        ret = self.run_function("state.sls_id", ["ssh-file-test", SSH_SLS])
        self._check_dict_ret(ret=ret, val="__sls__", exp_ret=SSH_SLS)

        # make sure the other id in the state was not run
        self._check_dict_ret(ret=ret, val="__id__", exp_ret="second_id", equal=False)

        check_file = self.run_function("file.file_exists", [SSH_SLS_FILE])
        self.assertTrue(check_file)

    @pytest.mark.slow_test
    def test_state_sls_wrong_id(self):
        """
        test state.sls_id when id does not exist
        """
        # check state.sls_id with test=True
        ret = self.run_function("state.sls_id", ["doesnotexist", SSH_SLS])
        assert "No matches for ID" in ret

    @pytest.mark.slow_test
    def test_state_sls_id_with_pillar(self):
        """
        test state.sls_id with pillar data
        """
        self.run_function(
            "state.sls_id",
            ["ssh-file-test", SSH_SLS, 'pillar=\'{"test_file_suffix": "_pillar"}\''],
        )
        check_file = self.run_function(
            "file.file_exists", ["/tmp/salt_test_file_pillar"]
        )
        self.assertTrue(check_file)

    @pytest.mark.slow_test
    def test_state_show_sls(self):
        """
        test state.show_sls with salt-ssh
        """
        ret = self.run_function("state.show_sls", [SSH_SLS])
        self._check_dict_ret(ret=ret, val="__sls__", exp_ret=SSH_SLS)

        # show_sls must not actually apply the state.
        check_file = self.run_function("file.file_exists", [SSH_SLS_FILE], wipe=False)
        self.assertFalse(check_file)

    @pytest.mark.slow_test
    def test_state_show_top(self):
        """
        test state.show_top with salt-ssh
        """
        top_sls = """
        base:
          '*':
            - core
        """

        core_state = """
        {}/testfile:
          file:
            - managed
            - source: salt://testfile
            - makedirs: true
        """.format(
            RUNTIME_VARS.TMP
        )

        with temp_file(
            "top.sls", top_sls, RUNTIME_VARS.TMP_BASEENV_STATE_TREE
        ), temp_file("core.sls", core_state, RUNTIME_VARS.TMP_BASEENV_STATE_TREE):
            ret = self.run_function("state.show_top")
            self.assertEqual(ret, {"base": ["core", "master_tops_test"]})

    @pytest.mark.slow_test
    def test_state_single(self):
        """
        state.single with salt-ssh
        """
        ret_out = {"name": "itworked", "result": True, "comment": "Success!"}

        single = self.run_function(
            "state.single", ["test.succeed_with_changes name=itworked"]
        )

        self.assertIsInstance(single, dict)
        for key, value in single.items():
            self.assertIsInstance(value, dict)
            self.assertEqual(value["name"], ret_out["name"])
            self.assertEqual(value["result"], ret_out["result"])
            self.assertEqual(value["comment"], ret_out["comment"])

    @pytest.mark.slow_test
    def test_show_highstate(self):
        """
        state.show_highstate with salt-ssh
        """
        top_sls = """
        base:
          '*':
            - core
        """

        core_state = """
        {}/testfile:
          file:
            - managed
            - source: salt://testfile
            - makedirs: true
        """.format(
            RUNTIME_VARS.TMP
        )

        with temp_file(
            "top.sls", top_sls, RUNTIME_VARS.TMP_BASEENV_STATE_TREE
        ), temp_file("core.sls", core_state, RUNTIME_VARS.TMP_BASEENV_STATE_TREE):
            high = self.run_function("state.show_highstate")
            destpath = os.path.join(RUNTIME_VARS.TMP, "testfile")
            self.assertIsInstance(high, dict)
            self.assertIn(destpath, high)
            self.assertEqual(high[destpath]["__env__"], "base")

    @pytest.mark.slow_test
    def test_state_high(self):
        """
        state.high with salt-ssh
        """
        ret_out = {"name": "itworked", "result": True, "comment": "Success!"}

        high = self.run_function(
            "state.high", ['"{"itworked": {"test": ["succeed_with_changes"]}}"']
        )

        self.assertIsInstance(high, dict)
        for key, value in high.items():
            self.assertIsInstance(value, dict)
            self.assertEqual(value["name"], ret_out["name"])
            self.assertEqual(value["result"], ret_out["result"])
            self.assertEqual(value["comment"], ret_out["comment"])

    @pytest.mark.slow_test
    def test_show_lowstate(self):
        """
        state.show_lowstate with salt-ssh
        """
        top_sls = """
        base:
          '*':
            - core
        """

        core_state = """
        {}/testfile:
          file:
            - managed
            - source: salt://testfile
            - makedirs: true
        """.format(
            RUNTIME_VARS.TMP
        )

        with temp_file(
            "top.sls", top_sls, RUNTIME_VARS.TMP_BASEENV_STATE_TREE
        ), temp_file("core.sls", core_state, RUNTIME_VARS.TMP_BASEENV_STATE_TREE):
            low = self.run_function("state.show_lowstate")
            self.assertIsInstance(low, list)
            self.assertIsInstance(low[0], dict)

    @pytest.mark.slow_test
    def test_state_low(self):
        """
        state.low with salt-ssh
        """
        ret_out = {"name": "itworked", "result": True, "comment": "Success!"}

        low = self.run_function(
            "state.low",
            ['"{"state": "test", "fun": "succeed_with_changes", "name": "itworked"}"'],
        )

        self.assertIsInstance(low, dict)
        for key, value in low.items():
            self.assertIsInstance(value, dict)
            self.assertEqual(value["name"], ret_out["name"])
            self.assertEqual(value["result"], ret_out["result"])
            self.assertEqual(value["comment"], ret_out["comment"])

    @pytest.mark.slow_test
    def test_state_request_check_clear(self):
        """
        test state.request system with salt-ssh
        while also checking and clearing request
        """
        request = self.run_function("state.request", [SSH_SLS], wipe=False)
        self._check_dict_ret(ret=request, val="__sls__", exp_ret=SSH_SLS)

        self._check_request()

        clear = self.run_function("state.clear_request", wipe=False)
        self._check_request(empty=True)

    @pytest.mark.slow_test
    def test_state_run_request(self):
        """
        test state.request system with salt-ssh
        while also running the request later
        """
        request = self.run_function("state.request", [SSH_SLS], wipe=False)
        self._check_dict_ret(ret=request, val="__sls__", exp_ret=SSH_SLS)

        run = self.run_function("state.run_request", wipe=False)

        check_file = self.run_function("file.file_exists", [SSH_SLS_FILE], wipe=False)
        self.assertTrue(check_file)

    @pytest.mark.flaky(max_runs=4)
    @pytest.mark.slow_test
    def test_state_running(self):
        """
        test state.running with salt-ssh
        """

        def _run_in_background():
            self.run_function("state.sls", ["running"], wipe=False)

        bg_thread = threading.Thread(target=_run_in_background)
        bg_thread.start()

        expected = 'The function "state.pkg" is running as'
        state_ret = []
        # Poll for up to ~150s until the background state shows up as running.
        for _ in range(30):
            time.sleep(5)
            get_sls = self.run_function("state.running", wipe=False)
            state_ret.append(get_sls)
            if expected in " ".join(get_sls):
                # We found the expected return
                break
        else:
            self.fail(
                "Did not find '{}' in state.running return: {}".format(
                    expected, state_ret
                )
            )

        # make sure we wait until the earlier state is complete
        future = time.time() + 120
        while True:
            if expected not in " ".join(self.run_function("state.running", wipe=False)):
                break
            if time.time() > future:
                self.fail(
                    "state.pkg is still running overtime. Test did not clean up"
                    " correctly."
                )

    def tearDown(self):
        """
        make sure to clean up any old ssh directories
        """
        salt_dir = self.run_function("config.get", ["thin_dir"], wipe=False)
        self.assertIsInstance(salt_dir, (str,))
        if os.path.exists(salt_dir):
            shutil.rmtree(salt_dir)

        # Remove the fixture's output files (including pillar-suffixed ones).
        for test_file_path in glob.glob(SSH_SLS_FILE + "*"):
            os.remove(test_file_path)
|
__init__.py
|
# -*- coding: utf-8 -*-
'''
Set up the Salt integration test suite
'''
# Import Python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import stat
import errno
import signal
import shutil
import pprint
import atexit
import socket
import logging
import tempfile
import threading
import subprocess
import multiprocessing
from datetime import datetime, timedelta
try:
import pwd
except ImportError:
pass
# Import salt tests support dirs
from tests.support.paths import * # pylint: disable=wildcard-import
from tests.support.processes import * # pylint: disable=wildcard-import
from tests.support.unit import TestCase
from tests.support.case import ShellTestCase
from tests.support.parser import PNUM, print_header, SaltTestcaseParser
from tests.support.helpers import requires_sshd_server, RedirectStdStreams
from tests.support.cli_scripts import ScriptPathMixin
from tests.support.mixins import CheckShellBinaryNameAndVersionMixin, ShellCaseCommonTestsMixin
from tests.support.mixins import AdaptedConfigurationTestCaseMixin, SaltClientTestCaseMixin
from tests.support.mixins import SaltMinionEventAssertsMixin, SaltReturnAssertsMixin
from tests.support.runtests import RUNTIME_VARS
# Import Salt libs
import salt
import salt.config
import salt.master
import salt.minion
import salt.runner
import salt.output
import salt.version
import salt.utils.color
import salt.utils.files
import salt.utils.path
import salt.utils.platform
import salt.utils.process
import salt.utils.stringutils
import salt.utils.yaml
import salt.log.setup as salt_log_setup
from salt.utils.verify import verify_env
from salt.utils.immutabletypes import freeze
from salt.exceptions import SaltClientError
# Import 3rd-party libs
import msgpack
from salt.ext import six
try:
import salt.ext.six.moves.socketserver as socketserver
except ImportError:
import socketserver
# Import salt tests support libs
from tests.support.processes import SaltMaster, SaltMinion, SaltSyndic
# Module-level logger for the test-suite bootstrap code.
log = logging.getLogger(__name__)

# Ports handed out by get_unused_localhost_port(), keyed by port number;
# sockets stay registered here so the same port is not handed out twice.
_RUNTESTS_PORTS = {}
def get_unused_localhost_port():
    '''
    Return a random unused port on localhost
    '''
    usock = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
    # SO_REUSEADDR so the daemon that will actually use the port can bind it.
    usock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    usock.bind(('127.0.0.1', 0))
    port = usock.getsockname()[1]
    if port in (54505, 54506, 64505, 64506, 64510, 64511, 64520, 64521):
        # These ports are hardcoded in the test configuration
        port = get_unused_localhost_port()  # recurse until we get another port
        usock.close()
        return port

    DARWIN = True if sys.platform.startswith('darwin') else False
    BSD = True if 'bsd' in sys.platform else False

    if DARWIN and port in _RUNTESTS_PORTS:
        # Port was already handed out during this run: pick a different one.
        port = get_unused_localhost_port()
        usock.close()
        return port

    # Register the socket so the same port is not handed out twice;
    # registered sockets are closed at exit (see the atexit hook below).
    _RUNTESTS_PORTS[port] = usock
    if DARWIN or BSD:
        # On Darwin/BSD, keeping the socket open would block the later bind.
        usock.close()
    return port
def close_open_sockets(sockets_dict):
    """Close and remove every socket held in *sockets_dict* (mutates the dict)."""
    while sockets_dict:
        _, sock = sockets_dict.popitem()
        sock.close()
# Release any sockets still held by the port registry when the run exits.
atexit.register(close_open_sockets, _RUNTESTS_PORTS)

# TCP port the threaded log-record server below listens on.
SALT_LOG_PORT = get_unused_localhost_port()
class ThreadingMixIn(socketserver.ThreadingMixIn):
    """ThreadingMixIn whose handler threads are daemons, so they cannot keep
    the test run alive at interpreter shutdown."""
    daemon_threads = True
class ThreadedSocketServer(ThreadingMixIn, socketserver.TCPServer):
    """Threaded TCP server that receives log records from the spawned daemons."""

    # Allow quick restarts on the same address/port.
    allow_reuse_address = True

    def server_activate(self):
        # Event checked by request handlers so they stop when we shut down.
        self.shutting_down = threading.Event()
        socketserver.TCPServer.server_activate(self)
        #super(ThreadedSocketServer, self).server_activate()

    def server_close(self):
        if hasattr(self, 'shutting_down'):
            self.shutting_down.set()
        socketserver.TCPServer.server_close(self)
        #super(ThreadedSocketServer, self).server_close()
class SocketServerRequestHandler(socketserver.StreamRequestHandler):
    """Feed msgpack-serialized log records from the wire into local logging."""

    def handle(self):
        # NOTE(review): 'encoding' is deprecated in newer msgpack releases
        # (raw=False is the replacement) -- confirm the pinned msgpack version.
        unpacker = msgpack.Unpacker(encoding='utf-8')
        while not self.server.shutting_down.is_set():
            try:
                wire_bytes = self.request.recv(1024)
                if not wire_bytes:
                    # Peer closed the connection.
                    break
                unpacker.feed(wire_bytes)
                for record_dict in unpacker:
                    # Re-emit the record through this process's logging tree.
                    record = logging.makeLogRecord(record_dict)
                    logger = logging.getLogger(record.name)
                    logger.handle(record)
                    del record_dict
            except (EOFError, KeyboardInterrupt, SystemExit):
                break
            except socket.error as exc:
                try:
                    if exc.errno == errno.WSAECONNRESET:
                        # Connection reset on windows
                        break
                except AttributeError:
                    # We're not on windows
                    pass
                log.exception(exc)
            except Exception as exc:
                log.exception(exc)
class TestDaemon(object):
'''
Set up the master and minion daemons, and run related cases
'''
MINIONS_CONNECT_TIMEOUT = MINIONS_SYNC_TIMEOUT = 300
def __init__(self, parser):
self.parser = parser
self.colors = salt.utils.color.get_colors(self.parser.options.no_colors is False)
if salt.utils.platform.is_windows():
# There's no shell color support on windows...
for key in self.colors:
self.colors[key] = ''
    def __enter__(self):
        '''
        Start a master and minion
        '''
        # Setup the multiprocessing logging queue listener
        salt_log_setup.setup_multiprocessing_logging_listener(
            self.master_opts
        )

        # Set up PATH to mockbin
        self._enter_mockbin()

        # Spin up the daemon set for the selected transport.
        if self.parser.options.transport == 'zeromq':
            self.start_zeromq_daemons()
        elif self.parser.options.transport == 'tcp':
            self.start_tcp_daemons()

        self.minion_targets = set(['minion', 'sub_minion'])
        self.pre_setup_minions()
        self.setup_minions()

        if getattr(self.parser.options, 'ssh', False):
            self.prep_ssh()

        # Block until all minions have connected (or the timeout elapses).
        self.wait_for_minions(time.time(), self.MINIONS_CONNECT_TIMEOUT)

        if self.parser.options.sysinfo:
            # TypeError fallback: older print_header without a 'width' kwarg.
            try:
                print_header(
                    '~~~~~~~ Versions Report ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Versions Report ', inline=True)
            print('\n'.join(salt.version.versions_report()))

            try:
                print_header(
                    '~~~~~~~ Minion Grains Information ', inline=True,
                    width=getattr(self.parser.options, 'output_columns', PNUM)
                )
            except TypeError:
                print_header('~~~~~~~ Minion Grains Information ', inline=True)

            grains = self.client.cmd('minion', 'grains.items')
            minion_opts = self.minion_opts.copy()
            minion_opts['color'] = self.parser.options.no_colors is False
            salt.output.display_output(grains, 'grains', minion_opts)

        try:
            print_header(
                '=', sep='=', inline=True,
                width=getattr(self.parser.options, 'output_columns', PNUM)
            )
        except TypeError:
            print_header('', sep='=', inline=True)

        # post_setup_minions() must run even though we return from the try.
        try:
            return self
        finally:
            self.post_setup_minions()
def start_zeromq_daemons(self):
'''
Fire up the daemons used for zeromq tests
'''
self.log_server = ThreadedSocketServer(('localhost', SALT_LOG_PORT), SocketServerRequestHandler)
self.log_server_process = threading.Thread(target=self.log_server.serve_forever)
self.log_server_process.daemon = True
self.log_server_process.start()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.master_process = start_daemon(
daemon_name='salt-master',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-master/{}'.format(self.master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.minion_process = start_daemon(
daemon_name='salt-minion',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-minion/{}'.format(self.minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting sub salt-minion ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.sub_minion_process = start_daemon(
daemon_name='sub salt-minion',
daemon_id=self.master_opts['id'],
daemon_log_prefix='sub-salt-minion/{}'.format(self.sub_minion_opts['id']),
daemon_cli_script_name='minion',
daemon_config=self.sub_minion_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR,
daemon_class=SaltMinion,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting sub salt-minion ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting sub salt-minion ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting syndic salt-master ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.smaster_process = start_daemon(
daemon_name='salt-smaster',
daemon_id=self.syndic_master_opts['id'],
daemon_log_prefix='salt-smaster/{}'.format(self.syndic_master_opts['id']),
daemon_cli_script_name='master',
daemon_config=self.syndic_master_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
daemon_class=SaltMaster,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting syndic salt-master ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting syndic salt-master ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-syndic ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.syndic_process = start_daemon(
daemon_name='salt-syndic',
daemon_id=self.syndic_opts['id'],
daemon_log_prefix='salt-syndic/{}'.format(self.syndic_opts['id']),
daemon_cli_script_name='syndic',
daemon_config=self.syndic_opts,
daemon_config_dir=RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR,
daemon_class=SaltSyndic,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
event_listener_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-syndic ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-syndic ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
if self.parser.options.proxy:
try:
sys.stdout.write(
' * {LIGHT_YELLOW}Starting salt-proxy ... {ENDC}'.format(**self.colors)
)
sys.stdout.flush()
self.proxy_process = start_daemon(
daemon_name='salt-proxy',
daemon_id=self.master_opts['id'],
daemon_log_prefix='salt-proxy/{}'.format(self.proxy_opts['id']),
daemon_cli_script_name='proxy',
daemon_config=self.proxy_opts,
daemon_config_dir=RUNTIME_VARS.TMP_CONF_DIR,
daemon_class=SaltProxy,
bin_dir_path=SCRIPT_DIR,
fail_hard=True,
start_timeout=60)
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_GREEN}Starting salt-proxy ... STARTED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
except (RuntimeWarning, RuntimeError):
sys.stdout.write(
'\r{0}\r'.format(
' ' * getattr(self.parser.options, 'output_columns', PNUM)
)
)
sys.stdout.write(
' * {LIGHT_RED}Starting salt-proxy ... FAILED!\n{ENDC}'.format(**self.colors)
)
sys.stdout.flush()
start_tcp_daemons = start_zeromq_daemons
def prep_ssh(self):
    '''
    Generate client and server SSH keys and start an sshd on an alternate
    port so salt-ssh integration tests can run against it.

    Side effects:
      * writes key files and an sshd_config under RUNTIME_VARS.TMP_CONF_DIR
      * starts an ``sshd`` (``self.sshd_process``) and records its pidfile
        in ``self.sshd_pidfile``
      * copies and extends the salt-ssh roster files
      * sets ``SSH_DAEMON_RUNNING=True`` in the environment on success
    '''
    sys.stdout.write(
        ' * {LIGHT_GREEN}Starting {0} ... {ENDC}'.format(
            'SSH server',
            **self.colors
        )
    )
    keygen = salt.utils.path.which('ssh-keygen')
    sshd = salt.utils.path.which('sshd')

    if not (keygen and sshd):
        print('WARNING: Could not initialize SSH subsystem. Tests for salt-ssh may break!')
        return
    if not os.path.exists(RUNTIME_VARS.TMP_CONF_DIR):
        os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)

    def run_keygen(key_type, bits, key_filename, cwd):
        # Run one ssh-keygen invocation and return its stderr output
        # (falsy when generation succeeded cleanly).
        keygen_process = subprocess.Popen(
            [keygen, '-t',
             key_type,
             '-b',
             bits,
             '-C',
             '"$(whoami)@$(hostname)-$(date -I)"',
             '-f',
             key_filename,
             '-P',
             ''],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True,
            cwd=cwd
        )
        _, keygen_err = keygen_process.communicate()
        if keygen_err:
            print('ssh-keygen had errors: {0}'.format(salt.utils.stringutils.to_str(keygen_err)))
        return keygen_err

    # Generate the client key, removing any stale copy first.
    pub_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')
    priv_key_test_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test')
    if os.path.exists(pub_key_test_file):
        os.remove(pub_key_test_file)
    if os.path.exists(priv_key_test_file):
        os.remove(priv_key_test_file)
    run_keygen('ecdsa', '521', 'key_test', RUNTIME_VARS.TMP_CONF_DIR)

    sshd_config_path = os.path.join(FILES, 'conf/_ssh/sshd_config')
    shutil.copy(sshd_config_path, RUNTIME_VARS.TMP_CONF_DIR)
    auth_key_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'key_test.pub')

    # Generate the server host keys (dsa, ecdsa, ed25519). A failure for
    # one key type only omits the matching HostKey entry further down.
    server_key_dir = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'server')
    if not os.path.exists(server_key_dir):
        os.makedirs(server_key_dir)
    server_dsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key')
    server_dsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_dsa_key.pub')
    server_ecdsa_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key')
    server_ecdsa_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ecdsa_key.pub')
    server_ed25519_priv_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key')
    # BUGFIX: this previously read 'ssh_host.ed25519_key.pub' (dot instead
    # of underscore), which never matched the file ssh-keygen writes, so a
    # stale ed25519 public key was never removed between runs.
    server_ed25519_pub_key_file = os.path.join(server_key_dir, 'ssh_host_ed25519_key.pub')

    for server_key_file in (server_dsa_priv_key_file,
                            server_dsa_pub_key_file,
                            server_ecdsa_priv_key_file,
                            server_ecdsa_pub_key_file,
                            server_ed25519_priv_key_file,
                            server_ed25519_pub_key_file):
        if os.path.exists(server_key_file):
            os.remove(server_key_file)

    keygen_dsa_err = run_keygen('dsa', '1024', 'ssh_host_dsa_key', server_key_dir)
    keygen_escda_err = run_keygen('ecdsa', '521', 'ssh_host_ecdsa_key', server_key_dir)
    # NOTE: ssh-keygen ignores '-b' for ed25519 keys; kept for parity with
    # the other invocations.
    keygen_ed25519_err = run_keygen('ed25519', '521', 'ssh_host_ed25519_key', server_key_dir)

    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd_config'), 'a') as ssh_config:
        ssh_config.write('AuthorizedKeysFile {0}\n'.format(auth_key_file))
        if not keygen_dsa_err:
            ssh_config.write('HostKey {0}\n'.format(server_dsa_priv_key_file))
        if not keygen_escda_err:
            ssh_config.write('HostKey {0}\n'.format(server_ecdsa_priv_key_file))
        if not keygen_ed25519_err:
            ssh_config.write('HostKey {0}\n'.format(server_ed25519_priv_key_file))

    self.sshd_pidfile = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'sshd.pid')
    self.sshd_process = subprocess.Popen(
        [sshd, '-f', 'sshd_config', '-oPidFile={0}'.format(self.sshd_pidfile)],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        close_fds=True,
        cwd=RUNTIME_VARS.TMP_CONF_DIR
    )
    _, sshd_err = self.sshd_process.communicate()
    if sshd_err:
        print('sshd had errors on startup: {0}'.format(salt.utils.stringutils.to_str(sshd_err)))
    else:
        os.environ['SSH_DAEMON_RUNNING'] = 'True'

    # Install the salt-ssh roster files and append the runtime user/key.
    roster_path = os.path.join(FILES, 'conf/_ssh/roster')
    syndic_roster_path = os.path.join(FILES, 'conf/_ssh/syndic_roster')
    shutil.copy(roster_path, RUNTIME_VARS.TMP_CONF_DIR)
    shutil.copy(syndic_roster_path, os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'roster'))
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'roster'), 'a') as roster:
        roster.write(' user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER))
        roster.write(' priv: {0}/{1}'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'))
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'roster'), 'a') as roster:
        roster.write(' user: {0}\n'.format(RUNTIME_VARS.RUNNING_TESTS_USER))
        roster.write(' priv: {0}/{1}'.format(RUNTIME_VARS.TMP_CONF_DIR, 'key_test'))
    sys.stdout.write(
        ' {LIGHT_GREEN}STARTED!\n{ENDC}'.format(
            **self.colors
        )
    )
@classmethod
def config(cls, role):
    '''
    Return the runtime configuration for the daemon *role*.

    Known roles: master, minion, syndic, syndic_master, sub_minion, proxy.
    '''
    configs = RUNTIME_VARS.RUNTIME_CONFIGS
    return configs[role]
@classmethod
def config_location(cls):
    '''Return the directory holding the transplanted configuration files.'''
    return RUNTIME_VARS.TMP_CONF_DIR
@property
def client(self):
    '''
    Lazily created local client used e.g. to ping and sync the test
    minions.

    Creation is deferred until first access (instead of ``__enter__``) so
    the client receives master events; the instance is cached in
    RUNTIME_VARS.RUNTIME_CONFIGS so a single client is shared.
    '''
    try:
        return RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client']
    except KeyError:
        local_client = salt.client.get_local_client(mopts=self.master_opts)
        RUNTIME_VARS.RUNTIME_CONFIGS['runtime_client'] = local_client
        return local_client
@classmethod
def transplant_configs(cls, transport='zeromq'):
    '''
    Rebuild the temporary runtime tree and write out computed
    master/minion/syndic/syndic_master/sub_minion/proxy configurations,
    then re-read them through salt.config and freeze them into
    RUNTIME_VARS.RUNTIME_CONFIGS.

    :param transport: 'zeromq' (default) or 'tcp'.
    '''
    # Start from a clean slate: wipe and recreate every temp directory.
    if os.path.isdir(RUNTIME_VARS.TMP):
        shutil.rmtree(RUNTIME_VARS.TMP)
    os.makedirs(RUNTIME_VARS.TMP)
    os.makedirs(RUNTIME_VARS.TMP_ROOT_DIR)
    os.makedirs(RUNTIME_VARS.TMP_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR)
    os.makedirs(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR)
    print(' * Transplanting configuration files to \'{0}\''.format(RUNTIME_VARS.TMP_CONF_DIR))
    tests_known_hosts_file = os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'salt_ssh_known_hosts')
    with salt.utils.files.fopen(tests_known_hosts_file, 'w') as known_hosts:
        known_hosts.write('')
    # This master connects to syndic_master via a syndic
    master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'master'))
    master_opts['known_hosts_file'] = tests_known_hosts_file
    master_opts['cachedir'] = 'cache'
    master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    master_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
    master_opts['pki_dir'] = 'pki'
    master_opts['syndic_master'] = 'localhost'
    pytest_stop_sending_events_file = os.path.join(TMP_ROOT_DIR, 'pytest_stop_sending_events_file_master')
    with salt.utils.files.fopen(pytest_stop_sending_events_file, 'w') as wfh:
        wfh.write('')
    master_opts['pytest_stop_sending_events_file'] = pytest_stop_sending_events_file
    # This minion connects to master
    minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'minion'))
    minion_opts['cachedir'] = 'cache'
    minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    minion_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
    minion_opts['pki_dir'] = 'pki'
    minion_opts['hosts.file'] = os.path.join(TMP_ROOT_DIR, 'hosts')
    minion_opts['aliases.file'] = os.path.join(TMP_ROOT_DIR, 'aliases')
    # This sub_minion also connects to master
    sub_minion_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'sub_minion'))
    sub_minion_opts['cachedir'] = 'cache'
    sub_minion_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    sub_minion_opts['root_dir'] = os.path.join(TMP, 'rootdir-sub-minion')
    sub_minion_opts['pki_dir'] = 'pki'
    sub_minion_opts['hosts.file'] = os.path.join(TMP_ROOT_DIR, 'hosts')
    sub_minion_opts['aliases.file'] = os.path.join(TMP_ROOT_DIR, 'aliases')
    # This is the master of masters
    syndic_master_opts = salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic_master'))
    syndic_master_opts['cachedir'] = 'cache'
    syndic_master_opts['user'] = RUNTIME_VARS.RUNNING_TESTS_USER
    syndic_master_opts['root_dir'] = os.path.join(TMP, 'rootdir-syndic-master')
    syndic_master_opts['pki_dir'] = 'pki'
    pytest_stop_sending_events_file = os.path.join(TMP_ROOT_DIR, 'pytest_stop_sending_events_file_syndic_master')
    with salt.utils.files.fopen(pytest_stop_sending_events_file, 'w') as wfh:
        wfh.write('')
    syndic_master_opts['pytest_stop_sending_events_file'] = pytest_stop_sending_events_file
    # This is the syndic for master
    # Let's start with a copy of the syndic master configuration
    syndic_opts = copy.deepcopy(syndic_master_opts)
    # Let's update with the syndic configuration
    syndic_opts.update(salt.config._read_conf_file(os.path.join(RUNTIME_VARS.CONF_DIR, 'syndic')))
    syndic_opts['cachedir'] = 'cache'
    syndic_opts['root_dir'] = os.path.join(TMP_ROOT_DIR)
    # This proxy connects to master
    # NOTE(review): uses bare CONF_DIR here while every sibling uses
    # RUNTIME_VARS.CONF_DIR -- presumably they point at the same directory;
    # confirm against the module-level constants.
    proxy_opts = salt.config._read_conf_file(os.path.join(CONF_DIR, 'proxy'))
    proxy_opts['cachedir'] = 'cache'
    # proxy_opts['user'] = running_tests_user
    proxy_opts['root_dir'] = os.path.join(TMP, 'rootdir-proxy')
    proxy_opts['pki_dir'] = 'pki'
    proxy_opts['hosts.file'] = os.path.join(TMP, 'rootdir-proxy', 'hosts')
    proxy_opts['aliases.file'] = os.path.join(TMP, 'rootdir-proxy', 'aliases')
    if transport == 'tcp':
        master_opts['transport'] = 'tcp'
        minion_opts['transport'] = 'tcp'
        sub_minion_opts['transport'] = 'tcp'
        syndic_master_opts['transport'] = 'tcp'
        proxy_opts['transport'] = 'tcp'
    # Set up config options that require internal data
    master_opts['pillar_roots'] = syndic_master_opts['pillar_roots'] = {
        'base': [
            RUNTIME_VARS.TMP_PILLAR_TREE,
            os.path.join(FILES, 'pillar', 'base'),
        ]
    }
    minion_opts['pillar_roots'] = {
        'base': [
            RUNTIME_VARS.TMP_PILLAR_TREE,
            os.path.join(FILES, 'pillar', 'base'),
        ]
    }
    master_opts['file_roots'] = syndic_master_opts['file_roots'] = {
        'base': [
            os.path.join(FILES, 'file', 'base'),
            # Let's support runtime created files that can be used like:
            #   salt://my-temp-file.txt
            RUNTIME_VARS.TMP_STATE_TREE
        ],
        # Alternate root to test __env__ choices
        'prod': [
            os.path.join(FILES, 'file', 'prod'),
            RUNTIME_VARS.TMP_PRODENV_STATE_TREE
        ]
    }
    minion_opts['file_roots'] = {
        'base': [
            os.path.join(FILES, 'file', 'base'),
            # Let's support runtime created files that can be used like:
            #   salt://my-temp-file.txt
            RUNTIME_VARS.TMP_STATE_TREE
        ],
        # Alternate root to test __env__ choices
        'prod': [
            os.path.join(FILES, 'file', 'prod'),
            RUNTIME_VARS.TMP_PRODENV_STATE_TREE
        ]
    }
    master_opts.setdefault('reactor', []).append(
        {
            'salt/minion/*/start': [
                os.path.join(FILES, 'reactor-sync-minion.sls')
            ],
        }
    )
    master_opts.setdefault('reactor', []).append(
        {
            'salt/test/reactor': [
                os.path.join(FILES, 'reactor-test.sls')
            ],
        }
    )
    for opts_dict in (master_opts, syndic_master_opts):
        if 'ext_pillar' not in opts_dict:
            opts_dict['ext_pillar'] = []
        if salt.utils.platform.is_windows():
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'type {0}'.format(os.path.join(FILES, 'ext.yaml'))})
        else:
            opts_dict['ext_pillar'].append(
                {'cmd_yaml': 'cat {0}'.format(os.path.join(FILES, 'ext.yaml'))})
    # all read, only owner write
    autosign_file_permissions = stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH | stat.S_IWUSR
    for opts_dict in (master_opts, syndic_master_opts):
        # We need to copy the extension modules into the new master root_dir or
        # it will be prefixed by it
        new_extension_modules_path = os.path.join(opts_dict['root_dir'], 'extension_modules')
        if not os.path.exists(new_extension_modules_path):
            shutil.copytree(
                os.path.join(
                    INTEGRATION_TEST_DIR, 'files', 'extension_modules'
                ),
                new_extension_modules_path
            )
        opts_dict['extension_modules'] = os.path.join(opts_dict['root_dir'], 'extension_modules')
        # Copy the autosign_file to the new master root_dir
        new_autosign_file_path = os.path.join(opts_dict['root_dir'], 'autosign_file')
        shutil.copyfile(
            os.path.join(INTEGRATION_TEST_DIR, 'files', 'autosign_file'),
            new_autosign_file_path
        )
        os.chmod(new_autosign_file_path, autosign_file_permissions)
    # Point the config values to the correct temporary paths
    for name in ('hosts', 'aliases'):
        optname = '{0}.file'.format(name)
        optname_path = os.path.join(TMP, name)
        master_opts[optname] = optname_path
        minion_opts[optname] = optname_path
        sub_minion_opts[optname] = optname_path
        syndic_opts[optname] = optname_path
        syndic_master_opts[optname] = optname_path
        proxy_opts[optname] = optname_path
    master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    sub_minion_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    syndic_master_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    proxy_opts['runtests_conn_check_port'] = get_unused_localhost_port()
    # Wire the runtests engine and log handlers into every daemon config.
    for conf in (master_opts, minion_opts, sub_minion_opts, syndic_opts, syndic_master_opts, proxy_opts):
        if 'engines' not in conf:
            conf['engines'] = []
        conf['engines'].append({'salt_runtests': {}})
        if 'engines_dirs' not in conf:
            conf['engines_dirs'] = []
        conf['engines_dirs'].insert(0, ENGINES_DIR)
        if 'log_handlers_dirs' not in conf:
            conf['log_handlers_dirs'] = []
        conf['log_handlers_dirs'].insert(0, LOG_HANDLERS_DIR)
        conf['runtests_log_port'] = SALT_LOG_PORT
    # ----- Transcribe Configuration ---------------------------------------------------------------------------->
    for entry in os.listdir(RUNTIME_VARS.CONF_DIR):
        if entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
            # These have runtime computed values and will be handled
            # differently
            continue
        entry_path = os.path.join(RUNTIME_VARS.CONF_DIR, entry)
        if os.path.isfile(entry_path):
            shutil.copy(
                entry_path,
                os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
            )
        elif os.path.isdir(entry_path):
            shutil.copytree(
                entry_path,
                os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry)
            )
    # Dump each computed config as YAML (picked out of locals() by name).
    for entry in ('master', 'minion', 'sub_minion', 'syndic', 'syndic_master', 'proxy'):
        computed_config = copy.deepcopy(locals()['{0}_opts'.format(entry)])
        with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, entry), 'w') as fp_:
            salt.utils.yaml.safe_dump(computed_config, fp_, default_flow_style=False)
    sub_minion_computed_config = copy.deepcopy(sub_minion_opts)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'), 'w') as wfh:
        salt.utils.yaml.safe_dump(sub_minion_computed_config, wfh, default_flow_style=False)
    shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'master'))
    syndic_master_computed_config = copy.deepcopy(syndic_master_opts)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'), 'w') as wfh:
        salt.utils.yaml.safe_dump(syndic_master_computed_config, wfh, default_flow_style=False)
    syndic_computed_config = copy.deepcopy(syndic_opts)
    with salt.utils.files.fopen(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'), 'w') as wfh:
        salt.utils.yaml.safe_dump(syndic_computed_config, wfh, default_flow_style=False)
    shutil.copyfile(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'), os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'))
    # <---- Transcribe Configuration -----------------------------------------------------------------------------
    # ----- Verify Environment ---------------------------------------------------------------------------------->
    # Re-read everything through the official config loaders so defaults
    # and path expansion are applied exactly as the daemons will see them.
    master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'master'))
    minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'minion'))
    syndic_opts = salt.config.syndic_config(
        os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'master'),
        os.path.join(RUNTIME_VARS.TMP_SYNDIC_MINION_CONF_DIR, 'minion'),
    )
    sub_minion_opts = salt.config.minion_config(os.path.join(RUNTIME_VARS.TMP_SUB_MINION_CONF_DIR, 'minion'))
    syndic_master_opts = salt.config.master_config(os.path.join(RUNTIME_VARS.TMP_SYNDIC_MASTER_CONF_DIR, 'master'))
    proxy_opts = salt.config.proxy_config(os.path.join(RUNTIME_VARS.TMP_CONF_DIR, 'proxy'))
    RUNTIME_VARS.RUNTIME_CONFIGS['master'] = freeze(master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['minion'] = freeze(minion_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['syndic'] = freeze(syndic_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['sub_minion'] = freeze(sub_minion_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['syndic_master'] = freeze(syndic_master_opts)
    RUNTIME_VARS.RUNTIME_CONFIGS['proxy'] = freeze(proxy_opts)
    verify_env([os.path.join(master_opts['pki_dir'], 'minions'),
                os.path.join(master_opts['pki_dir'], 'minions_pre'),
                os.path.join(master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(master_opts['pki_dir'], 'minions_denied'),
                os.path.join(master_opts['cachedir'], 'jobs'),
                os.path.join(master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_pre'),
                os.path.join(syndic_master_opts['pki_dir'], 'minions_rejected'),
                os.path.join(syndic_master_opts['cachedir'], 'jobs'),
                os.path.join(syndic_master_opts['root_dir'], 'cache', 'tokens'),
                os.path.join(master_opts['pki_dir'], 'accepted'),
                os.path.join(master_opts['pki_dir'], 'rejected'),
                os.path.join(master_opts['pki_dir'], 'pending'),
                os.path.join(syndic_master_opts['pki_dir'], 'accepted'),
                os.path.join(syndic_master_opts['pki_dir'], 'rejected'),
                os.path.join(syndic_master_opts['pki_dir'], 'pending'),
                os.path.join(minion_opts['pki_dir'], 'accepted'),
                os.path.join(minion_opts['pki_dir'], 'rejected'),
                os.path.join(minion_opts['pki_dir'], 'pending'),
                os.path.join(sub_minion_opts['pki_dir'], 'accepted'),
                os.path.join(sub_minion_opts['pki_dir'], 'rejected'),
                os.path.join(sub_minion_opts['pki_dir'], 'pending'),
                os.path.dirname(master_opts['log_file']),
                minion_opts['extension_modules'],
                sub_minion_opts['extension_modules'],
                sub_minion_opts['pki_dir'],
                master_opts['sock_dir'],
                syndic_master_opts['sock_dir'],
                sub_minion_opts['sock_dir'],
                minion_opts['sock_dir'],
                RUNTIME_VARS.TMP_STATE_TREE,
                RUNTIME_VARS.TMP_PILLAR_TREE,
                RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
                TMP,
                ],
               RUNTIME_VARS.RUNNING_TESTS_USER,
               root_dir=master_opts['root_dir'],
               )
    cls.master_opts = master_opts
    cls.minion_opts = minion_opts
    # cls.proxy_opts = proxy_opts
    cls.sub_minion_opts = sub_minion_opts
    cls.syndic_opts = syndic_opts
    cls.syndic_master_opts = syndic_master_opts
    cls.proxy_opts = proxy_opts
    # <---- Verify Environment -----------------------------------------------------------------------------------
def __exit__(self, type, value, traceback):
    '''
    Tear down every daemon started by this context manager, then shut the
    log server and the multiprocessing logging machinery down.
    '''
    # Daemons that always exist ...
    self.sub_minion_process.terminate()
    self.minion_process.terminate()
    if hasattr(self, 'proxy_process'):
        self.proxy_process.terminate()
    self.master_process.terminate()
    # ... and the optional ones, present only in some test configurations.
    for optional_proc in ('syndic_process', 'smaster_process'):
        try:
            getattr(self, optional_proc).terminate()
        except AttributeError:
            pass
    self.log_server.server_close()
    self.log_server.shutdown()
    self._exit_mockbin()
    self._exit_ssh()
    self.log_server_process.join()
    # Shutdown the multiprocessing logging queue listener
    salt_log_setup.shutdown_multiprocessing_logging()
    salt_log_setup.shutdown_multiprocessing_logging_listener(daemonizing=True)
def pre_setup_minions(self):
    '''
    Subclass this method for additional minion setups.
    Hook called before the minions are set up; the default does nothing.
    '''
def setup_minions(self):
    '''
    Minions setup routines
    Hook performing the actual minion setup; the default does nothing.
    '''
def post_setup_minions(self):
    '''
    Subclass this method to execute code after the minions have been setup
    Hook called after minion setup completes; the default does nothing.
    '''
def _enter_mockbin(self):
    '''Prepend the mock binaries directory to $PATH (idempotent).'''
    current_path = os.environ.get('PATH', '')
    entries = current_path.split(os.pathsep)
    if MOCKBIN in entries:
        return
    os.environ['PATH'] = os.pathsep.join([MOCKBIN] + entries)
def _exit_ssh(self):
    '''Stop the sshd started by prep_ssh, if one is running.'''
    if not hasattr(self, 'sshd_process'):
        return
    # errno 3 == ESRCH: the process is already gone, which is fine here.
    try:
        self.sshd_process.kill()
    except OSError as exc:
        if exc.errno != 3:
            raise
    with salt.utils.files.fopen(self.sshd_pidfile) as fhr:
        try:
            os.kill(int(fhr.read()), signal.SIGKILL)
        except OSError as exc:
            if exc.errno != 3:
                raise
def _exit_mockbin(self):
    '''Drop the mock binaries directory from $PATH again.'''
    entries = os.environ.get('PATH', '').split(os.pathsep)
    if MOCKBIN in entries:
        entries.remove(MOCKBIN)
    os.environ['PATH'] = os.pathsep.join(entries)
@classmethod
def clean(cls):
    '''
    Clean out the tmp files
    '''
    def remove_readonly(func, path, excinfo):
        # Give full permissions to owner
        os.chmod(path, stat.S_IRWXU)
        func(path)

    candidates = (
        TMP,
        RUNTIME_VARS.TMP_STATE_TREE,
        RUNTIME_VARS.TMP_PILLAR_TREE,
        RUNTIME_VARS.TMP_PRODENV_STATE_TREE,
    )
    for dirname in candidates:
        if not os.path.isdir(dirname):
            continue
        try:
            shutil.rmtree(six.text_type(dirname), onerror=remove_readonly)
        except Exception:
            log.exception('Failed to remove directory: %s', dirname)
def wait_for_jid(self, targets, jid, timeout=120):
    '''
    Poll the master until job ``jid`` is no longer running on any of
    ``targets``.

    A job only counts as done after two consecutive polls report it idle
    (debounce against false positives). Returns True on completion, False
    when ``timeout`` seconds elapse first.
    '''
    time.sleep(1)  # Allow some time for minions to accept jobs
    now = datetime.now()
    expire = now + timedelta(seconds=timeout)
    job_finished = False
    while now <= expire:
        running = self.__client_job_running(targets, jid)
        # Blank out the current status line before printing a fresh one.
        sys.stdout.write(
            '\r{0}\r'.format(
                ' ' * getattr(self.parser.options, 'output_columns', PNUM)
            )
        )
        if not running and job_finished is False:
            # Let's not have false positives and wait one more second
            job_finished = True
        elif not running and job_finished is True:
            # Second consecutive idle poll: the job has really finished.
            return True
        elif running and job_finished is True:
            # Job resurfaced; reset the debounce.
            job_finished = False

        if job_finished is False:
            sys.stdout.write(
                ' * {LIGHT_YELLOW}[Quit in {0}]{ENDC} Waiting for {1}'.format(
                    '{0}'.format(expire - now).rsplit('.', 1)[0],
                    ', '.join(running),
                    **self.colors
                )
            )
            sys.stdout.flush()
        time.sleep(1)
        now = datetime.now()
    else:  # pylint: disable=W0120
        # Loop ran to exhaustion: the job never settled before the timeout.
        sys.stdout.write(
            '\n {LIGHT_RED}*{ENDC} ERROR: Failed to get information '
            'back\n'.format(**self.colors)
        )
        sys.stdout.flush()
    return False
def __client_job_running(self, targets, jid):
    '''Return the minions among *targets* still running job *jid*.'''
    running = self.client.cmd(
        list(targets), 'saltutil.running', tgt_type='list'
    )
    still_running = []
    for minion, jobs in six.iteritems(running):
        if jobs and jobs[0]['jid'] == jid:
            still_running.append(minion)
    return still_running
def sync_minion_modules_(self, modules_kind, targets, timeout=None):
    '''
    Sync salt ``modules_kind`` (e.g. 'modules', 'states', 'grains') to the
    given target minions via ``saltutil.sync_<modules_kind>``.

    Returns:
        True when every target synced (or was already in sync), False when
        a minion reported a sync error.

    Raises:
        SystemExit: when the sync job does not finish within ``timeout``
            (default 120 seconds).
    '''
    if not timeout:
        timeout = 120
    # Let's sync all connected minions
    # BUGFIX: the joined target list was passed as positional argument 0
    # but the format string never referenced {0}, so the minions being
    # synced were silently dropped from the message.
    print(
        ' {LIGHT_BLUE}*{ENDC} Syncing {0}\'s {1} '
        '(saltutil.sync_{1})'.format(
            ', '.join(targets),
            modules_kind,
            **self.colors
        )
    )
    syncing = set(targets)
    jid_info = self.client.run_job(
        list(targets), 'saltutil.sync_{0}'.format(modules_kind),
        tgt_type='list',
        timeout=999999999999999,
    )
    if self.wait_for_jid(targets, jid_info['jid'], timeout) is False:
        print(
            ' {LIGHT_RED}*{ENDC} WARNING: Minions failed to sync {0}. '
            'Tests requiring these {0} WILL fail'.format(
                modules_kind, **self.colors)
        )
        raise SystemExit()
    # Collect returns until every target has reported back.
    while syncing:
        rdata = self.client.get_full_returns(jid_info['jid'], syncing, 1)
        if rdata:
            for name, output in six.iteritems(rdata):
                if not output['ret']:
                    # Already synced!?
                    syncing.remove(name)
                    continue
                if isinstance(output['ret'], six.string_types):
                    # An error has occurred
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} Failed to sync {2}: '
                        '{1}'.format(
                            name, output['ret'],
                            modules_kind,
                            **self.colors)
                    )
                    return False
                print(
                    ' {LIGHT_GREEN}*{ENDC} Synced {0} {2}: '
                    '{1}'.format(
                        name,
                        ', '.join(output['ret']),
                        modules_kind, **self.colors
                    )
                )
                # Synced!
                try:
                    syncing.remove(name)
                except KeyError:
                    print(
                        ' {LIGHT_RED}*{ENDC} {0} already synced??? '
                        '{1}'.format(name, output, **self.colors)
                    )
    return True
def sync_minion_states(self, targets, timeout=None):
    '''Sync state modules to the given minions.'''
    salt.utils.process.appendproctitle('SyncMinionStates')
    self.sync_minion_modules_('states', targets, timeout)
def sync_minion_modules(self, targets, timeout=None):
    '''Sync execution modules to the given minions.'''
    salt.utils.process.appendproctitle('SyncMinionModules')
    self.sync_minion_modules_('modules', targets, timeout)
def sync_minion_grains(self, targets, timeout=None):
    '''Sync grains modules to the given minions.'''
    salt.utils.process.appendproctitle('SyncMinionGrains')
    self.sync_minion_modules_('grains', targets, timeout)
def wait_for_minions(self, start, timeout, sleep=5):
    '''
    Ensure all minions and masters (including sub-masters) are connected.

    Polls ``test.ping`` against '*' until both 'minion' and 'sub_minion'
    appear in the returned minion list.

    Args:
        start: timestamp (``time.time()``) when the wait began.
        timeout: seconds after ``start`` at which to give up.
        sleep: seconds to pause between polls.

    Raises:
        RuntimeError: when the minions are not all up within ``timeout``.
    '''
    while True:
        try:
            ret = self.client.run_job('*', 'test.ping')
        except salt.exceptions.SaltClientError:
            ret = None
        if ret and sorted(ret.get('minions', [])) == ['minion', 'sub_minion']:
            break
        # BUGFIX: a reply without a 'minions' key used to hit a bare
        # ``continue`` that skipped both the timeout check and the sleep,
        # busy-looping forever. Every unsuccessful poll now honors the
        # deadline and backs off.
        if time.time() - start >= timeout:
            raise RuntimeError("Ping Minions Failed")
        time.sleep(sleep)
|
online_drain.py
|
#!/usr/bin/python
"""
(C) Copyright 2020-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
"""
import time
import random
import threading
from write_host_file import write_host_file
from osa_utils import OSAUtils
from daos_utils import DaosCommand
from apricot import skipForTicket
class OSAOnlineDrain(OSAUtils):
    # pylint: disable=too-many-ancestors
    """
    Test Class Description: This test runs
    daos_server Online Drain test cases.
    :avocado: recursive
    """
    def setUp(self):
        """Set up for test case."""
        super().setUp()
        self.dmg_command = self.get_dmg_command()
        self.daos_command = DaosCommand(self.bin)
        # IOR parameters pulled from the avocado test yaml.
        self.ior_test_sequence = self.params.get(
            "ior_test_sequence", '/run/ior/iorflags/*')
        self.test_oclass = self.params.get("oclass", '/run/test_obj_class/*')
        # Recreate the client hostfile without slots defined
        self.hostfile_clients = write_host_file(
            self.hostlist_clients, self.workdir, None)
        self.pool = None
        self.dmg_command.exit_status_exception = True

    def run_online_drain_test(self, num_pool, oclass=None, app_name="ior"):
        """Run the Online drain without data.
        Args:
            num_pool(int) : total pools to create for testing purposes.
            oclass(str) : Object class type (RP_2G1, etc)
            app_name(str) : application to run on parallel (ior or mdtest)
                            Defaults to ior.
        """
        # Create a pool
        self.pool = []
        target_list = []
        if oclass is None:
            oclass = self.ior_cmd.dfs_oclass.value
        test_seq = self.ior_test_sequence[0]
        # NOTE(review): assumes two ranks per server host -- confirm
        # against the server yaml used by this test.
        drain_servers = (len(self.hostlist_servers) * 2) - 1
        # Exclude target : random two targets (target idx : 0-7)
        n = random.randint(0, 6)  #nosec
        target_list.append(n)
        target_list.append(n+1)
        t_string = "{},{}".format(target_list[0], target_list[1])
        # Drain one of the ranks (or server)
        rank = random.randint(1, drain_servers)  #nosec
        for val in range(0, num_pool):
            self.pool.append(self.get_pool())
            # Disable aggregation so space accounting stays predictable.
            self.pool[-1].set_property("reclaim", "disabled")
        # Drain the rank and targets
        for val in range(0, num_pool):
            threads = []
            # Instantiate aggregation
            if self.test_during_aggregation is True:
                for _ in range(0, 2):
                    self.run_ior_thread("Write", oclass, test_seq)
                self.delete_extra_container(self.pool[val])
            # The following thread runs while performing osa operations.
            if app_name == "ior":
                threads.append(threading.Thread(target=self.run_ior_thread,
                                                kwargs={"action": "Write",
                                                        "oclass": oclass,
                                                        "test": test_seq}))
            else:
                threads.append(threading.Thread(target=self.run_mdtest_thread))
            # Launch the IOR threads
            for thrd in threads:
                self.log.info("Thread : %s", thrd)
                thrd.start()
                time.sleep(1)
            # Wait the threads to write some data before drain.
            time.sleep(5)
            self.pool[val].display_pool_daos_space("Pool space: Beginning")
            pver_begin = self.get_pool_version()
            self.log.info("Pool Version at the beginning %s", pver_begin)
            output = self.dmg_command.pool_drain(
                self.pool[val].uuid, rank, t_string)
            self.print_and_assert_on_rebuild_failure(output)
            pver_drain = self.get_pool_version()
            self.log.info("Pool Version after drain %s", pver_drain)
            # Check pool version incremented after pool exclude
            self.assertTrue(pver_drain > pver_begin,
                            "Pool Version Error: After drain")
            # Wait to finish the threads
            for thrd in threads:
                thrd.join()
                if not self.out_queue.empty():
                    self.assert_on_exception()
        # Verify the data by re-reading it and checking the container.
        for val in range(0, num_pool):
            display_string = "Pool{} space at the End".format(val)
            self.pool[val].display_pool_daos_space(display_string)
            self.run_ior_thread("Read", oclass, test_seq)
            self.container = self.pool_cont_dict[self.pool[val]][0]
            kwargs = {"pool": self.pool[val].uuid,
                      "cont": self.container.uuid}
            output = self.daos_command.container_check(**kwargs)
            self.log.info(output)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain(self):
        """Test ID: DAOS-4750
        Test Description: Validate Online drain with checksum
        enabled.
        :avocado: tags=all,pr,daily_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa,checksum
        :avocado: tags=osa_drain,online_drain,online_drain_with_csum
        """
        self.log.info("Online Drain : With Checksum")
        self.run_online_drain_test(1)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain_no_csum(self):
        """Test ID: DAOS-6909
        Test Description: Validate Online drain without enabling
        checksum.
        :avocado: tags=all,pr,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa
        :avocado: tags=osa_drain,online_drain,online_drain_without_csum
        """
        self.log.info("Online Drain : No Checksum")
        self.test_with_checksum = self.params.get("test_with_checksum",
                                                  '/run/checksum/*')
        self.run_online_drain_test(1)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain_oclass(self):
        """Test ID: DAOS-6909
        Test Description: Validate Online drain with different
        object class.
        :avocado: tags=all,pr,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa,checksum
        :avocado: tags=osa_drain,online_drain,online_drain_oclass
        """
        self.log.info("Online Drain : Oclass")
        for oclass in self.test_oclass:
            self.run_online_drain_test(1, oclass=oclass)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain_with_aggregation(self):
        """Test ID: DAOS-6909
        Test Description: Validate Online drain with aggregation
        enabled during the run.
        :avocado: tags=all,pr,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa,checksum
        :avocado: tags=osa_drain,online_drain,online_drain_with_aggregation
        """
        self.log.info("Online Drain : Aggregation")
        self.test_during_aggregation = self.params.get("test_with_aggregation",
                                                       '/run/aggregation/*')
        self.run_online_drain_test(1)

    @skipForTicket("DAOS-7289")
    def test_osa_online_drain_mdtest(self):
        """Test ID: DAOS-4750
        Test Description: Validate Online drain with mdtest
        running during the testing.
        :avocado: tags=all,pr,daily_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=osa,checksum
        :avocado: tags=osa_drain,online_drain,online_drain_mdtest
        """
        self.log.info("Online Drain : With Mdtest")
        self.run_online_drain_test(1, app_name="mdtest")
|
worker.py
|
from contextlib import contextmanager
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Callable, Dict, Iterator, List, Optional, Tuple, Union
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.gcs_utils as gcs_utils
import ray._private.services as services
from ray._private.runtime_env import working_dir as working_dir_pkg
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
from ray.util.annotations import PublicAPI, DeveloperAPI, Deprecated
from ray.util.debug import log_once
import ray
import colorama
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
import ray._private.profiling as profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import check_oversized_function
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import _internal_kv_get, \
_internal_kv_initialized
from ray._private.client_mode_hook import client_mode_hook
# Worker mode constants. `Worker.mode` is set to one of these (see
# `Worker.set_mode` for the SCRIPT/WORKER/LOCAL semantics).
SCRIPT_MODE = 0      # driver run as a script or interactive shell
WORKER_MODE = 1      # regular (non-driver) worker process
LOCAL_MODE = 2       # driver executing tasks serially for debugging
SPILL_WORKER_MODE = 3    # presumably a worker dedicated to object spilling
RESTORE_WORKER_MODE = 4  # presumably a worker restoring spilled objects

# Prefix used for error keys -- NOTE(review): usage not visible in this
# chunk; confirm against the rest of the module.
ERROR_KEY_PREFIX = b"Error:"

# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
    """Log an unhandled error surfaced by the worker.

    Args:
        e: The exception (or error object) that was not handled by user code.
    """
    # Use lazy %-style arguments so the message is only rendered when the
    # record is actually emitted (instead of eagerly via str.format).
    logger.error(
        "Unhandled error (suppress with "
        "RAY_IGNORE_UNHANDLED_ERRORS=1): %s", e)
class Worker:
    """A class used to define the control flow of a worker process.

    Note:
        The methods in this class are considered unexposed to the user. The
        functions outside of this class are considered exposed.

    Attributes:
        node (ray.node.Node): The node this worker is attached to.
        mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
            WORKER_MODE.
        cached_functions_to_run (List): A list of functions to run on all of
            the workers that should be exported as soon as connect is called.
    """

    def __init__(self):
        """Initialize a Worker object."""
        self.node = None
        self.mode = None
        self.cached_functions_to_run = []
        # Maps actor handles for this worker; populated elsewhere.
        self.actors = {}
        # When the worker is constructed. Record the original value of the
        # CUDA_VISIBLE_DEVICES environment variable.
        self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
        self.memory_monitor = memory_monitor.MemoryMonitor()
        # A dictionary that maps from driver id to SerializationContext
        # TODO: clean up the SerializationContext once the job finished.
        self.serialization_context_map = {}
        self.function_actor_manager = FunctionActorManager(self)
        # This event is checked regularly by all of the threads so that they
        # know when to exit.
        self.threads_stopped = threading.Event()
        # Index of the current session. This number will
        # increment every time when `ray.shutdown` is called.
        self._session_index = 0
        # If this is set, the next .remote call should drop into the
        # debugger, at the specified breakpoint ID.
        self.debugger_breakpoint = b""
        # If this is set, ray.get calls invoked on the object ID returned
        # by the worker should drop into the debugger at the specified
        # breakpoint ID.
        self.debugger_get_breakpoint = b""
        # If True, make the debugger external to the node this worker is
        # running on.
        self.ray_debugger_external = False
        self._load_code_from_local = False
        # Used to toggle whether or not logs should be filtered to only those
        # produced in the same job.
        self.filter_logs_by_job = True
        # NOTE(review): `self.lock`, `self.redis_client` and
        # `self.core_worker` are read by methods below but are not
        # initialized here; presumably they are set during connect --
        # confirm against the rest of this module.

    @property
    def connected(self):
        """bool: True if Ray has been started and False otherwise."""
        return self.node is not None

    @property
    def node_ip_address(self):
        """IP address of the attached node. Requires a connected worker."""
        self.check_connected()
        return self.node.node_ip_address

    @property
    def load_code_from_local(self):
        """Whether code is loaded from local files. Requires connection."""
        self.check_connected()
        return self._load_code_from_local

    @property
    def current_job_id(self):
        """The current job ID, or JobID.nil() before the core worker exists."""
        if hasattr(self, "core_worker"):
            return self.core_worker.get_current_job_id()
        return JobID.nil()

    @property
    def actor_id(self):
        """The current actor ID, or ActorID.nil() before the core worker exists."""
        if hasattr(self, "core_worker"):
            return self.core_worker.get_actor_id()
        return ActorID.nil()

    @property
    def current_task_id(self):
        """The task ID currently being executed, from the core worker."""
        return self.core_worker.get_current_task_id()

    @property
    def current_node_id(self):
        """The ID of the node this worker is running on, from the core worker."""
        return self.core_worker.get_current_node_id()

    @property
    def namespace(self):
        """The ray namespace from the current job config."""
        return self.core_worker.get_job_config().ray_namespace

    @property
    def placement_group_id(self):
        """The placement group ID, from the core worker."""
        return self.core_worker.get_placement_group_id()

    @property
    def worker_id(self):
        """The binary representation of this worker's ID."""
        return self.core_worker.get_worker_id().binary()

    @property
    def should_capture_child_tasks_in_placement_group(self):
        """Whether child tasks are captured in the current placement group."""
        return self.core_worker.should_capture_child_tasks_in_placement_group()

    @property
    def current_session_and_job(self):
        """Get the current session index and job id as pair."""
        assert isinstance(self._session_index, int)
        assert isinstance(self.current_job_id, ray.JobID)
        return self._session_index, self.current_job_id

    @property
    def runtime_env(self):
        """Get the runtime env in json format"""
        return json.loads(self.core_worker.get_job_config()
                          .runtime_env.serialized_runtime_env)

    def get_serialization_context(self, job_id=None):
        """Get the SerializationContext of the job that this worker is processing.

        Args:
            job_id: The ID of the job that indicates which job to get
                the serialization context for. Defaults to the current job.

        Returns:
            The serialization context of the given job.
        """
        # This function needs to be protected by a lock, because it will be
        # called by`register_class_for_serialization`, as well as the import
        # thread, from different threads. Also, this function will recursively
        # call itself, so we use RLock here.
        if job_id is None:
            job_id = self.current_job_id
        with self.lock:
            # Create the context lazily on first use for a given job.
            if job_id not in self.serialization_context_map:
                self.serialization_context_map[
                    job_id] = serialization.SerializationContext(self)
            return self.serialization_context_map[job_id]

    def check_connected(self):
        """Check if the worker is connected.

        Raises:
            Exception: An exception is raised if the worker is not connected.
        """
        if not self.connected:
            raise RaySystemError("Ray has not been started yet. You can "
                                 "start Ray with 'ray.init()'.")

    def set_mode(self, mode):
        """Set the mode of the worker.

        The mode SCRIPT_MODE should be used if this Worker is a driver that is
        being run as a Python script or interactively in a shell. It will print
        information about task failures.

        The mode WORKER_MODE should be used if this Worker is not a driver. It
        will not print information about tasks.

        The mode LOCAL_MODE should be used if this Worker is a driver and if
        you want to run the driver in a manner equivalent to serial Python for
        debugging purposes. It will not send remote function calls to the
        scheduler and will instead execute them in a blocking fashion.

        Args:
            mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
        """
        self.mode = mode

    def set_load_code_from_local(self, load_code_from_local):
        """Set whether code should be loaded from local files."""
        self._load_code_from_local = load_code_from_local

    def put_object(self, value, object_ref=None, owner_address=None):
        """Put value in the local object store with object reference `object_ref`.

        This assumes that the value for `object_ref` has not yet been placed in
        the local object store. If the plasma store is full, the worker will
        automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
        retry will delay for an exponentially doubling amount of time,
        starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
        will be raised.

        Args:
            value: The value to put in the object store.
            object_ref (ObjectRef): The object ref of the value to be
                put. If None, one will be generated.
            owner_address: The serialized address of object's owner.

        Returns:
            ObjectRef: The object ref the object was put under.

        Raises:
            ray.exceptions.ObjectStoreFullError: This is raised if the attempt
                to store the object fails because the object store is full even
                after multiple retries.
        """
        # Make sure that the value is not an object ref.
        if isinstance(value, ObjectRef):
            raise TypeError(
                "Calling 'put' on an ray.ObjectRef is not allowed "
                "(similarly, returning an ray.ObjectRef from a remote "
                "function is not allowed). If you really want to "
                "do this, you can wrap the ray.ObjectRef in a list and "
                "call 'put' on it (or return it).")

        if self.mode == LOCAL_MODE:
            assert object_ref is None, ("Local Mode does not support "
                                        "inserting with an ObjectRef")

        serialized_value = self.get_serialization_context().serialize(value)
        # This *must* be the first place that we construct this python
        # ObjectRef because an entry with 0 local references is created when
        # the object is Put() in the core worker, expecting that this python
        # reference will be created. If another reference is created and
        # removed before this one, it will corrupt the state in the
        # reference counter.
        return ray.ObjectRef(
            self.core_worker.put_serialized_object(
                serialized_value,
                object_ref=object_ref,
                owner_address=owner_address))

    def raise_errors(self, data_metadata_pairs, object_refs):
        """Deserialize the given objects and route each one to the
        unhandled-error handler (unless suppressed via the environment)."""
        out = self.deserialize_objects(data_metadata_pairs, object_refs)
        if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
            return
        for e in out:
            _unhandled_error_handler(e)

    def deserialize_objects(self, data_metadata_pairs, object_refs):
        """Deserialize raw (data, metadata) pairs into Python objects."""
        # Function actor manager or the import thread may call pickle.loads
        # at the same time which can lead to failed imports
        # TODO: We may be better off locking on all imports or injecting a lock
        # into pickle.loads (https://github.com/ray-project/ray/issues/16304)
        with self.function_actor_manager.lock:
            context = self.get_serialization_context()
            return context.deserialize_objects(data_metadata_pairs,
                                               object_refs)

    def get_objects(self, object_refs, timeout=None):
        """Get the values in the object store associated with the IDs.

        Return the values from the local object store for object_refs. This
        will block until all the values for object_refs have been written to
        the local object store.

        Args:
            object_refs (List[object_ref.ObjectRef]): A list of the object refs
                whose values should be retrieved.
            timeout (float): The maximum amount of time in
                seconds to wait before returning.

        Returns:
            list: List of deserialized objects
            bytes: UUID of the debugger breakpoint we should drop
                into or b"" if there is no breakpoint.
        """
        # Make sure that the values are object refs.
        for object_ref in object_refs:
            if not isinstance(object_ref, ObjectRef):
                raise TypeError(
                    f"Attempting to call `get` on the value {object_ref}, "
                    "which is not an ray.ObjectRef.")

        # The core worker expects milliseconds; -1 means wait forever.
        timeout_ms = int(timeout * 1000) if timeout else -1
        data_metadata_pairs = self.core_worker.get_objects(
            object_refs, self.current_task_id, timeout_ms)
        debugger_breakpoint = b""
        for (data, metadata) in data_metadata_pairs:
            if metadata:
                metadata_fields = metadata.split(b",")
                # A debugger breakpoint ID may be piggybacked in the second
                # metadata field.
                if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
                        ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
                    debugger_breakpoint = metadata_fields[1][len(
                        ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
        return self.deserialize_objects(data_metadata_pairs,
                                        object_refs), debugger_breakpoint

    def run_function_on_all_workers(self, function,
                                    run_on_other_drivers=False):
        """Run arbitrary code on all of the workers.

        This function will first be run on the driver, and then it will be
        exported to all of the workers to be run. It will also be run on any
        new workers that register later. If ray.init has not been called yet,
        then cache the function and export it later.

        Args:
            function (Callable): The function to run on all of the workers. It
                takes only one argument, a worker info dict. If it returns
                anything, its return values will not be used.
            run_on_other_drivers: The boolean that indicates whether we want to
                run this function on other drivers. One case is we may need to
                share objects across drivers.
        """
        # If ray.init has not been called yet, then cache the function and
        # export it when connect is called. Otherwise, run the function on all
        # workers.
        if self.mode is None:
            self.cached_functions_to_run.append(function)
        else:
            # Attempt to pickle the function before we need it. This could
            # fail, and it is more convenient if the failure happens before we
            # actually run the function locally.
            pickled_function = pickle.dumps(function)

            # Content-addressed key so the same function is exported once.
            function_to_run_id = hashlib.shake_128(pickled_function).digest(
                ray_constants.ID_SIZE)
            key = b"FunctionsToRun:" + function_to_run_id
            # First run the function on the driver.
            # We always run the task locally.
            function({"worker": self})
            # Check if the function has already been put into redis.
            function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
            if not function_exported:
                # In this case, the function has already been exported, so
                # we don't need to export it again.
                return

            check_oversized_function(pickled_function, function.__name__,
                                     "function", self)

            # Run the function on all workers.
            self.redis_client.hset(
                key,
                mapping={
                    "job_id": self.current_job_id.binary(),
                    "function_id": function_to_run_id,
                    "function": pickled_function,
                    "run_on_other_drivers": str(run_on_other_drivers),
                })
            self.redis_client.rpush("Exports", key)
            # TODO(rkn): If the worker fails after it calls setnx and before it
            # successfully completes the hset and rpush, then the program will
            # most likely hang. This could be fixed by making these three
            # operations into a transaction (or by implementing a custom
            # command that does all three things).

    def main_loop(self):
        """The main loop a worker runs to receive and execute tasks."""

        def sigterm_handler(signum, frame):
            # Perform a clean shutdown before exiting on SIGTERM.
            shutdown(True)
            sys.exit(1)

        ray._private.utils.set_sigterm_handler(sigterm_handler)
        self.core_worker.run_task_loop()
        sys.exit(0)

    def print_logs(self):
        """Prints log messages from workers on all nodes in the same job.
        """
        pubsub_client = self.redis_client.pubsub(
            ignore_subscribe_messages=True)
        pubsub_client.subscribe(gcs_utils.LOG_FILE_CHANNEL)
        localhost = services.get_node_ip_address()
        try:
            # Keep track of the number of consecutive log messages that have
            # been received with no break in between. If this number grows
            # continually, then the worker is probably not able to process the
            # log messages as rapidly as they are coming in.
            num_consecutive_messages_received = 0
            job_id_binary = ray._private.utils.binary_to_hex(
                self.current_job_id.binary())
            while True:
                # Exit if we received a signal that we should stop.
                if self.threads_stopped.is_set():
                    return

                msg = pubsub_client.get_message()
                if msg is None:
                    # No message pending: reset the backlog counter and poll
                    # again shortly (also wakes early if threads are stopped).
                    num_consecutive_messages_received = 0
                    self.threads_stopped.wait(timeout=0.01)
                    continue
                num_consecutive_messages_received += 1
                if (num_consecutive_messages_received % 100 == 0
                        and num_consecutive_messages_received > 0):
                    logger.warning(
                        "The driver may not be able to keep up with the "
                        "stdout/stderr of the workers. To avoid forwarding "
                        "logs to the driver, use "
                        "'ray.init(log_to_driver=False)'.")

                data = json.loads(ray._private.utils.decode(msg["data"]))

                # Don't show logs from other drivers.
                if (self.filter_logs_by_job and data["job"]
                        and job_id_binary != data["job"]):
                    continue
                data["localhost"] = localhost
                global_worker_stdstream_dispatcher.emit(data)

        except (OSError, redis.exceptions.ConnectionError) as e:
            logger.error(f"print_logs: {e}")
        finally:
            # Close the pubsub client to avoid leaking file descriptors.
            pubsub_client.close()
@PublicAPI
@client_mode_hook(auto_init=True)
def get_gpu_ids():
    """Get the IDs of the GPUs that are available to the worker.

    If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
    started up, then the IDs returned by this method will be a subset of the
    IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
    [0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.

    Returns:
        A list of GPU IDs.
    """
    worker = global_worker
    worker.check_connected()

    if worker.mode != WORKER_MODE:
        if log_once("worker_get_gpu_ids_empty_from_driver"):
            logger.warning(
                "`ray.get_gpu_ids()` will always return the empty list when "
                "called from the driver. This is because Ray does not manage "
                "GPU allocations to the driver process.")

    # TODO(ilr) Handle inserting resources in local mode
    all_resource_ids = global_worker.core_worker.resource_ids()
    assigned_ids = set()
    # Hoisted out of the loop: the pattern is loop-invariant, so import and
    # compile it once instead of on every iteration.
    import re
    gpu_group_pattern = re.compile(r"^GPU_group_[0-9A-Za-z]+$")
    for resource, assignment in all_resource_ids.items():
        # Handle both normal and placement group GPU resources.
        # Note: We should only get the GPU ids from the placement
        # group resource that does not contain the bundle index!
        if resource == "GPU" or gpu_group_pattern.match(resource):
            for resource_id, _ in assignment:
                assigned_ids.add(resource_id)
    assigned_ids = list(assigned_ids)
    # If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
    # the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should be
    # returned).
    if global_worker.original_gpu_ids is not None:
        assigned_ids = [
            global_worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
        ]
        # Give all GPUs in local_mode.
        if global_worker.mode == LOCAL_MODE:
            max_gpus = global_worker.node.get_resource_spec().num_gpus
            assigned_ids = global_worker.original_gpu_ids[:max_gpus]

    return assigned_ids
@Deprecated
def get_resource_ids():
    """Get the IDs of the resources that are available to the worker.

    Returns:
        A dictionary mapping the name of a resource to a list of pairs, where
        each pair consists of the ID of a resource and the fraction of that
        resource reserved for this worker.
    """
    worker = global_worker
    worker.check_connected()

    # Per-resource ID bookkeeping is not available when running serially.
    if _mode() == LOCAL_MODE:
        raise RuntimeError(
            "ray.worker.get_resource_ids() currently does not work in "
            "local_mode.")

    return worker.core_worker.resource_ids()
@Deprecated
def get_dashboard_url():
    """Get the URL to access the Ray dashboard.

    Note that the URL does not specify which node the dashboard is on.

    Returns:
        The URL of the dashboard as a string.
    """
    # Requires an initialized Ray session, like the other module helpers.
    global_worker.check_connected()
    return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@PublicAPI
@client_mode_hook(auto_init=False)
def init(
        address: Optional[str] = None,
        *,
        num_cpus: Optional[int] = None,
        num_gpus: Optional[int] = None,
        resources: Optional[Dict[str, float]] = None,
        object_store_memory: Optional[int] = None,
        local_mode: bool = False,
        ignore_reinit_error: bool = False,
        include_dashboard: Optional[bool] = None,
        dashboard_host: str = ray_constants.DEFAULT_DASHBOARD_IP,
        dashboard_port: Optional[int] = None,
        job_config: "ray.job_config.JobConfig" = None,
        configure_logging: bool = True,
        logging_level: int = logging.INFO,
        logging_format: str = ray_constants.LOGGER_FORMAT,
        log_to_driver: bool = True,
        namespace: Optional[str] = None,
        runtime_env: Dict[str, Any] = None,
        # The following are unstable parameters and their use is discouraged.
        _enable_object_reconstruction: bool = False,
        _redis_max_memory: Optional[int] = None,
        _plasma_directory: Optional[str] = None,
        _node_ip_address: str = ray_constants.NODE_DEFAULT_IP,
        _driver_object_store_memory: Optional[int] = None,
        _memory: Optional[int] = None,
        _redis_password: str = ray_constants.REDIS_DEFAULT_PASSWORD,
        _temp_dir: Optional[str] = None,
        _metrics_export_port: Optional[int] = None,
        _system_config: Optional[Dict[str, str]] = None,
        _tracing_startup_hook: Optional[Callable] = None,
        **kwargs):
    """
    Connect to an existing Ray cluster or start one and connect to it.

    This method handles two cases; either a Ray cluster already exists and we
    just attach this driver to it or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    To start Ray locally and all of the relevant processes, use this as
    follows:

    .. code-block:: python

        ray.init()

    To connect to an existing local cluster, use this as follows (substituting
    in the appropriate port if needed).

    .. code-block:: python

        ray.init(address="localhost:6379")

    To connect to an existing remote cluster, use this as follows (substituting
    in the appropriate address). Note the addition of "ray://" at the beginning
    of the address.

    .. code-block:: python

        ray.init(address="ray://123.45.67.89:10001")

    More details for starting and connecting to a remote cluster can be found
    here: https://docs.ray.io/en/master/cluster/ray-client.html

    You can also define an environment variable called `RAY_ADDRESS` in
    the same format as the `address` parameter to connect to an existing
    cluster with ray.init() or ray.init(address="auto").

    Args:
        address (str): The address of the Ray cluster to connect to. If
            this address is not provided, then this command will start Redis,
            a raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits. If the driver
            is running on a node in a Ray cluster, using `auto` as the value
            tells the driver to detect the cluster, removing the need to
            specify a specific node address. If the environment variable
            `RAY_ADDRESS` is defined and the address is None or "auto", Ray
            will set `address` to `RAY_ADDRESS`.
            Addresses can be prefixed with a "ray://" to connect to a remote
            cluster. For example, passing in the address
            "ray://123.45.67.89:50005" will connect to the cluster at the
            given address.
        num_cpus (int): Number of CPUs the user wishes to assign to each
            raylet. By default, this is set based on virtual cores.
        num_gpus (int): Number of GPUs the user wishes to assign to each
            raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities for them available.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with. By default, this is automatically set based on
            available system memory.
        local_mode (bool): If true, the code will be executed serially. This
            is useful for debugging.
        ignore_reinit_error: If true, Ray suppresses errors from calling
            ray.init() a second time. Ray won't be restarted.
        include_dashboard: Boolean flag indicating whether or not to start the
            Ray dashboard, which displays the status of the Ray
            cluster. If this argument is None, then the UI will be started if
            the relevant dependencies are present.
        dashboard_host: The host to bind the dashboard server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port(int, None): The port to bind the dashboard server to.
            Defaults to 8265 and Ray will automatically find a free port if
            8265 is not available.
        job_config (ray.job_config.JobConfig): The job configuration.
        configure_logging: True (default) if configuration of logging is
            allowed here. Otherwise, the user may want to configure it
            separately.
        logging_level: Logging level, defaults to logging.INFO. Ignored unless
            "configure_logging" is true.
        logging_format: Logging format, defaults to string containing a
            timestamp, filename, line number, and message. See the source file
            ray_constants.py for details. Ignored unless "configure_logging"
            is true.
        log_to_driver (bool): If true, the output from all of the worker
            processes on all nodes will be directed to the driver.
        namespace (str): Namespace to use
        runtime_env (dict): The runtime environment to use for this job (see
            :ref:`runtime-environments` for details). This API is in beta
            and may change before becoming stable.
        _enable_object_reconstruction (bool): If True, when an object stored in
            the distributed plasma store is lost due to node failure, Ray will
            attempt to reconstruct the object by re-executing the task that
            created the object. Arguments to the task will be recursively
            reconstructed. If False, then ray.ObjectLostError will be
            thrown.
        _redis_max_memory: Redis max memory.
        _plasma_directory: Override the plasma mmap file directory.
        _node_ip_address (str): The IP address of the node that we are on.
        _driver_object_store_memory (int): Deprecated.
        _memory: Amount of reservable memory resource to create.
        _redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        _temp_dir (str): If provided, specifies the root temporary
            directory for the Ray process. Defaults to an OS-specific
            conventional location, e.g., "/tmp/ray".
        _metrics_export_port(int): Port number Ray exposes system metrics
            through a Prometheus endpoint. It is currently under active
            development, and the API is subject to change.
        _system_config (dict): Configuration for overriding
            RayConfig defaults. For testing purposes ONLY.
        _tracing_startup_hook (str): If provided, turns on and sets up tracing
            for Ray. Must be the name of a function that takes no arguments and
            sets up a Tracer Provider, Remote Span Processors, and
            (optional) additional instruments. See more at
            docs.ray.io/tracing.html. It is currently under active development,
            and the API is subject to change.

    Returns:
        If the provided address includes a protocol, for example by prepending
        "ray://" to the address to get "ray://1.2.3.4:10001", then a
        ClientContext is returned with information such as settings, server
        versions for ray and python, and the dashboard_url. Otherwise,
        returns address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    # If available, use RAY_ADDRESS to override if the address was left
    # unspecified, or set to "auto" in the call to init
    address_env_var = os.environ.get(
        ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
    if address_env_var:
        if address is None or address == "auto":
            address = address_env_var
            logger.info(
                f"Using address {address_env_var} set in the environment "
                f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}")

    if address is not None and "://" in address:
        # Address specified a protocol, use ray client
        builder = ray.client(address)

        # Forward any keyword arguments that were changed from their default
        # values to the builder
        init_sig = inspect.signature(init)
        passed_kwargs = {}
        for argument_name, param_obj in init_sig.parameters.items():
            if argument_name in {"kwargs", "address"}:
                # kwargs and address are handled separately
                continue
            default_value = param_obj.default
            # Read the caller-supplied value of each parameter via locals().
            passed_value = locals()[argument_name]
            if passed_value != default_value:
                # passed value is different than default, pass to the client
                # builder
                passed_kwargs[argument_name] = passed_value
        passed_kwargs.update(kwargs)
        builder._init_args(**passed_kwargs)
        return builder.connect()

    if kwargs:
        # User passed in extra keyword arguments but isn't connecting through
        # ray client. Raise an error, since most likely a typo in keyword
        unknown = ", ".join(kwargs)
        raise RuntimeError(f"Unknown keyword argument(s): {unknown}")

    # Try to increase the file descriptor limit, which is too low by
    # default for Ray: https://github.com/ray-project/ray/issues/11239
    try:
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < hard:
            # https://github.com/ray-project/ray/issues/12059
            soft = max(soft, min(hard, 65536))
            logger.debug("Automatically increasing RLIMIT_NOFILE to max "
                         "value of {}".format(hard))
            try:
                resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
            except ValueError:
                logger.debug("Failed to raise limit.")
        soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 4096:
            logger.warning(
                "File descriptor limit {} is too low for production "
                "servers and may result in connection errors. "
                "At least 8192 is recommended. --- "
                "Fix with 'ulimit -n 8192'".format(soft))
    except ImportError:
        # The `resource` module is POSIX-only.
        logger.debug("Could not import resource module (on Windows)")
        pass

    if runtime_env:
        # Set runtime_env in job_config if passed as a separate arg to ray.init
        if job_config is None:
            job_config = ray.job_config.JobConfig()
        job_config.set_runtime_env(runtime_env)

    # Convert hostnames to numerical IP address.
    # NOTE(review): if `_node_ip_address` is None, `node_ip_address` is never
    # bound and the uses below would raise NameError; the default is
    # NODE_DEFAULT_IP so this is only reachable by passing None explicitly --
    # confirm intent.
    if _node_ip_address is not None:
        node_ip_address = services.address_to_ip(_node_ip_address)
    raylet_ip_address = node_ip_address

    if address:
        redis_address, _, _ = services.validate_redis_address(address)
    else:
        redis_address = None

    if configure_logging:
        setup_logger(logging_level, logging_format)

    if redis_address is not None:
        logger.info(
            f"Connecting to existing Ray cluster at address: {redis_address}")

    if local_mode:
        driver_mode = LOCAL_MODE
    else:
        driver_mode = SCRIPT_MODE

    if global_worker.connected:
        if ignore_reinit_error:
            logger.info(
                "Calling ray.init() again after it has already been called.")
            return
        else:
            raise RuntimeError("Maybe you called ray.init twice by accident? "
                               "This error can be suppressed by passing in "
                               "'ignore_reinit_error=True' or by calling "
                               "'ray.shutdown()' prior to 'ray.init()'.")

    _system_config = _system_config or {}
    if not isinstance(_system_config, dict):
        raise TypeError("The _system_config must be a dict.")

    global _global_node
    if redis_address is None:
        # In this case, we need to start a new cluster.
        ray_params = ray._private.parameter.RayParams(
            redis_address=redis_address,
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            object_ref_seed=None,
            driver_mode=driver_mode,
            redirect_worker_output=None,
            redirect_output=None,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            num_redis_shards=None,
            redis_max_clients=None,
            redis_password=_redis_password,
            plasma_directory=_plasma_directory,
            huge_pages=None,
            include_dashboard=include_dashboard,
            dashboard_host=dashboard_host,
            dashboard_port=dashboard_port,
            memory=_memory,
            object_store_memory=object_store_memory,
            redis_max_memory=_redis_max_memory,
            plasma_store_socket_name=None,
            temp_dir=_temp_dir,
            # We need to disable it if runtime env is not set.
            # Uploading happens after core worker is created. And we should
            # prevent default worker being created before uploading.
            # TODO (yic): Have a separate connection to gcs client when
            # removal redis is done. The uploading should happen before this
            # one.
            start_initial_python_workers_for_first_job=(
                job_config is None or job_config.runtime_env is None),
            _system_config=_system_config,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port,
            tracing_startup_hook=_tracing_startup_hook)
        # Start the Ray processes. We set shutdown_at_exit=False because we
        # shutdown the node in the ray.shutdown call that happens in the atexit
        # handler. We still spawn a reaper process in case the atexit handler
        # isn't called.
        _global_node = ray.node.Node(
            head=True,
            shutdown_at_exit=False,
            spawn_reaper=True,
            ray_params=ray_params)
    else:
        # In this case, we are connecting to an existing cluster.
        if num_cpus is not None or num_gpus is not None:
            raise ValueError(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided.")
        if resources is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "resources must not be provided.")
        if object_store_memory is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "object_store_memory must not be provided.")
        if _system_config is not None and len(_system_config) != 0:
            raise ValueError("When connecting to an existing cluster, "
                             "_system_config must not be provided.")
        if _enable_object_reconstruction:
            raise ValueError(
                "When connecting to an existing cluster, "
                "_enable_object_reconstruction must not be provided.")

        # In this case, we only need to connect the node.
        ray_params = ray._private.parameter.RayParams(
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            redis_address=redis_address,
            redis_password=_redis_password,
            object_ref_seed=None,
            temp_dir=_temp_dir,
            _system_config=_system_config,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port)
        _global_node = ray.node.Node(
            ray_params,
            head=False,
            shutdown_at_exit=False,
            spawn_reaper=False,
            connect_only=True)

    if driver_mode == SCRIPT_MODE and job_config:
        # Rewrite the URI. Note the package isn't uploaded to the URI until
        # later in the connect.
        working_dir_pkg.rewrite_runtime_env_uris(job_config)

    # `connect` is defined elsewhere in this module; it attaches the global
    # worker to the node created/located above.
    connect(
        _global_node,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_object_store_memory=_driver_object_store_memory,
        job_id=None,
        namespace=namespace,
        job_config=job_config)
    if job_config and job_config.code_search_path:
        global_worker.set_load_code_from_local(True)
    else:
        # Because `ray.shutdown()` doesn't reset this flag, for multiple
        # sessions in one process, the 2nd `ray.init()` will reuse the
        # flag of last session. For example:
        #     ray.init(load_code_from_local=True)
        #     ray.shutdown()
        #     ray.init()
        #     # Here the flag `load_code_from_local` is still True if we
        #     # doesn't have this `else` branch.
        #     ray.shutdown()
        global_worker.set_load_code_from_local(False)

    for hook in _post_init_hooks:
        hook()

    node_id = global_worker.core_worker.get_current_node_id()
    return dict(_global_node.address_info, node_id=node_id.hex())
# Functions to run as callback after a successful ray init.
# Each hook is called with no arguments at the end of `init()`.
_post_init_hooks = []
@PublicAPI
@client_mode_hook(auto_init=False)
def shutdown(_exiting_interpreter: bool = False):
    """Disconnect the worker, and terminate processes started by ray.init().
    This will automatically run at the end when a Python process that uses Ray
    exits. It is ok to run this twice in a row. The primary use case for this
    function is to cleanup state between tests.
    Note that this will clear any remote function definitions, actor
    definitions, and existing actors, so if you wish to use any previously
    defined remote functions or actors after calling ray.shutdown(), then you
    need to redefine them. If they were defined in an imported module, then you
    will need to reload the module.
    Args:
        _exiting_interpreter (bool): True if this is called by the atexit hook
            and false otherwise. If we are exiting the interpreter, we will
            wait a little while to print any extra error messages.
    """
    if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
        # This is a duration to sleep before shutting down everything in order
        # to make sure that log messages finish printing.
        time.sleep(0.5)
    disconnect(_exiting_interpreter)
    # We need to destruct the core worker here because after this function,
    # we will tear down any processes spawned by ray.init() and the background
    # IO thread in the core worker doesn't currently handle that gracefully.
    if hasattr(global_worker, "gcs_client"):
        del global_worker.gcs_client
    if hasattr(global_worker, "core_worker"):
        global_worker.core_worker.shutdown()
        del global_worker.core_worker
    # Disconnect global state from GCS.
    ray.state.state.disconnect()
    # Shut down the Ray processes.
    global _global_node
    if _global_node is not None:
        if _global_node.is_head():
            # Only the head node cleans up external (spill) storage.
            _global_node.destroy_external_storage()
        _global_node.kill_all_processes(check_alive=False, allow_graceful=True)
        _global_node = None
    # TODO(rkn): Instead of manually resetting some of the worker fields, we
    # should simply set "global_worker" to equal "None" or something like that.
    global_worker.set_mode(None)
atexit.register(shutdown, True)
# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
    """Exit the process on SIGTERM, so atexit handlers (e.g. shutdown) run.

    Exits with the received signal number as the status code.
    """
    sys.exit(signum)


try:
    ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
    # Installing a signal handler can fail (presumably when not running on
    # the main thread -- TODO confirm); warn instead of crashing.
    # Fix: the two adjacent string literals previously concatenated to
    # "mightnot" because of a missing trailing space.
    logger.warning("Failed to set SIGTERM handler, processes might "
                   "not be cleaned up properly on exit.")
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to Redis.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
    """Record driver exceptions in the GCS worker table, then chain to the
    previously-installed excepthook.

    NOTE: the first parameter shadows the builtin ``type``; it follows the
    standard ``sys.excepthook(type, value, traceback)`` signature.
    """
    # If this is a driver, push the exception to GCS worker table.
    if global_worker.mode == SCRIPT_MODE and hasattr(global_worker,
                                                     "worker_id"):
        error_message = "".join(traceback.format_tb(tb))
        worker_id = global_worker.worker_id
        worker_type = gcs_utils.DRIVER
        worker_info = {"exception": error_message}
        ray.state.state._check_connected()
        ray.state.state.add_worker(worker_id, worker_type, worker_info)
    # Call the normal excepthook.
    normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
def print_to_stdstream(data):
    """Forward a batch of worker log lines to stdout or stderr.

    Error-stream batches (``data["is_err"]``) go to stderr, everything
    else to stdout.
    """
    if data["is_err"]:
        target = sys.stderr
    else:
        target = sys.stdout
    print_worker_logs(data, target)
# Start time of this process, used for relative time logs.
t0 = time.time()
# One-shot flag: set after the "Tip: use `ray status` ..." hint has been
# printed once, so it is not repeated for every autoscaler event.
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
    """Yield only the autoscaler event summaries from raw monitor log lines.

    Event lines are recognized by the ":event_summary:" magic token; all
    other lines are dropped. A one-time usage tip is emitted before the
    first event. Yields nothing when autoscaler events are disabled.
    """
    global autoscaler_log_fyi_printed
    if not AUTOSCALER_EVENTS:
        return
    token = ray_constants.LOG_PREFIX_EVENT_SUMMARY
    for raw_line in lines:
        # Skip everything that is not an event summary.
        if token not in raw_line:
            continue
        if not autoscaler_log_fyi_printed:
            yield ("Tip: use `ray status` to view detailed "
                   "cluster status. To disable these "
                   "messages, set RAY_SCHEDULER_EVENTS=0.")
            autoscaler_log_fyi_printed = True
        # The event text immediately follows the magic token.
        yield raw_line.split(token)[1]
def time_string() -> str:
    """Return the relative time from the start of this job.

    For example, 15m30s.
    """
    delta = time.time() - t0
    # Use divmod instead of the previous repeated-subtraction loops. Those
    # loops used a strict `>` comparison, so exact multiples rendered
    # incorrectly: a delta of exactly 7200s printed "1h59m60s" and 3660s
    # printed "1h60s". divmod yields "2h0s" and "1h1m0s" respectively.
    hours, remainder = divmod(delta, 3600)
    minutes, seconds = divmod(remainder, 60)
    output = ""
    if hours:
        output += "{}h".format(int(hours))
    if minutes:
        output += "{}m".format(int(minutes))
    # Seconds are always shown, even when zero.
    output += "{}s".format(int(seconds))
    return output
# When we enter a breakpoint, worker logs are automatically disabled via this.
# print_worker_logs() is a no-op while this is False.
_worker_logs_enabled = True
def print_worker_logs(data: Dict[str, str], print_file: Any):
    """Pretty-print a batch of worker log lines to the given stream.

    Every line is wrapped in a dim, colorized "(pid..., ip=...)" tag that
    identifies its producer. The ip is only shown for lines coming from a
    remote node; autoscaler batches are filtered down to event summaries.
    No-op while worker logs are disabled (see ``_worker_logs_enabled``).
    """
    if not _worker_logs_enabled:
        return

    def message_prefix(record: Dict[str, str]) -> str:
        """The PID prefix for this log line."""
        if record.get("pid") in ["autoscaler", "raylet"]:
            return ""
        # Actor name wins over task name when both are present.
        name = record.get("actor_name") or record.get("task_name")
        if name:
            return name + " pid="
        return "pid="

    def line_color(record: Dict[str, str], line: str) -> str:
        """The ANSI color for this log line."""
        source = record.get("pid")
        if source == "raylet":
            return colorama.Fore.YELLOW
        if source == "autoscaler":
            # Highlight autoscaler errors/warnings in yellow.
            if "Error:" in line or "Warning:" in line:
                return colorama.Style.BRIGHT + colorama.Fore.YELLOW
            return colorama.Style.BRIGHT + colorama.Fore.CYAN
        return colorama.Fore.CYAN

    if data.get("pid") == "autoscaler":
        pid = "scheduler +{}".format(time_string())
        batch = filter_autoscaler_events(data.get("lines", []))
    else:
        pid = data.get("pid")
        batch = data.get("lines", [])

    # Only include the ip for lines that did not originate locally.
    if data.get("ip") == data.get("localhost"):
        ip_suffix = ""
    else:
        ip_suffix = ", ip={}".format(data.get("ip"))

    for line in batch:
        print(
            "{}{}({}{}{}){} {}".format(colorama.Style.DIM,
                                       line_color(data, line),
                                       message_prefix(data), pid, ip_suffix,
                                       colorama.Style.RESET_ALL, line),
            file=print_file)
def listen_error_messages_raylet(worker, threads_stopped):
    """Listen to error messages in the background on the driver.
    This runs in a separate thread on the driver and pushes (error, time)
    tuples to the output queue.
    Args:
        worker: The worker class that this thread belongs to.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    worker.error_message_pubsub_client = worker.redis_client.pubsub(
        ignore_subscribe_messages=True)
    # Exports that are published after the call to
    # error_message_pubsub_client.subscribe and before the call to
    # error_message_pubsub_client.listen will still be processed in the loop.
    # Really we should just subscribe to the errors for this specific job.
    # However, currently all errors seem to be published on the same channel.
    error_pubsub_channel = gcs_utils.RAY_ERROR_PUBSUB_PATTERN
    worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
    try:
        if _internal_kv_initialized():
            # Get any autoscaler errors that occurred before the call to
            # subscribe.
            error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
            if error_message is not None:
                logger.warning(error_message.decode())
        while True:
            # Exit if we received a signal that we should stop.
            if threads_stopped.is_set():
                return
            msg = worker.error_message_pubsub_client.get_message()
            if msg is None:
                # Nothing published yet; the wait doubles as a short poll
                # interval and returns early if the stop event is set.
                threads_stopped.wait(timeout=0.01)
                continue
            pubsub_msg = gcs_utils.PubSubMessage.FromString(msg["data"])
            error_data = gcs_utils.ErrorTableData.FromString(pubsub_msg.data)
            job_id = error_data.job_id
            # Only surface errors for this driver's job (or job-agnostic
            # errors published under the nil job id).
            if job_id not in [
                    worker.current_job_id.binary(),
                    JobID.nil().binary(),
            ]:
                continue
            error_message = error_data.error_message
            if (error_data.type == ray_constants.TASK_PUSH_ERROR):
                # TODO(ekl) remove task push errors entirely now that we have
                # the separate unhandled exception handler.
                pass
            else:
                logger.warning(error_message)
    except (OSError, redis.exceptions.ConnectionError) as e:
        logger.error(f"listen_error_messages_raylet: {e}")
    finally:
        # Close the pubsub client to avoid leaking file descriptors.
        worker.error_message_pubsub_client.close()
@PublicAPI
@client_mode_hook(auto_init=False)
def is_initialized() -> bool:
    """Return whether ray.init has already been called.

    Returns:
        True when a worker is connected (ray.init was called), else False.
    """
    worker = ray.worker.global_worker
    return worker.connected
def connect(node,
            mode=WORKER_MODE,
            log_to_driver=False,
            worker=global_worker,
            driver_object_store_memory=None,
            job_id=None,
            namespace=None,
            job_config=None,
            runtime_env_hash=0,
            worker_shim_pid=0,
            startup_token=0,
            ray_debugger_external=False):
    """Connect this worker to the raylet, to Plasma, and to Redis.
    Args:
        node (ray.node.Node): The node to connect.
        mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
            LOCAL_MODE.
        log_to_driver (bool): If true, then output from all of the worker
            processes on all nodes will be directed to the driver.
        worker: The ray.Worker instance.
        driver_object_store_memory: Deprecated.
        job_id: The ID of job. If it's None, then we will generate one.
        namespace (str): If not None, it is validated and written into
            ``job_config`` as the Ray namespace.
        job_config (ray.job_config.JobConfig): The job configuration.
        runtime_env_hash (int): The hash of the runtime env for this worker.
        worker_shim_pid (int): The PID of the process for setup worker
            runtime env.
        startup_token (int): The startup token of the process assigned to
            it during startup as a command line argument.
        ray_debugger_external (bool): Stored on
            ``worker.ray_debugger_external``; presumably True when the Ray
            debugger should be reachable from outside this node.
            NOTE(review): the previous docstring documented a stale
            ``ray_debugger_host`` parameter that does not exist.
    """
    # Do some basic checking to make sure we didn't call ray.init twice.
    error_message = "Perhaps you called ray.init twice by accident?"
    assert not worker.connected, error_message
    assert worker.cached_functions_to_run is not None, error_message
    # Enable nice stack traces on SIGSEGV etc.
    try:
        if not faulthandler.is_enabled():
            faulthandler.enable(all_threads=False)
    except io.UnsupportedOperation:
        pass  # ignore
    # Create a Redis client to primary.
    # The Redis client can safely be shared between threads. However,
    # that is not true of Redis pubsub clients. See the documentation at
    # https://github.com/andymccurdy/redis-py#thread-safety.
    worker.redis_client = node.create_redis_client()
    ray.state.state._initialize_global_state(
        node.redis_address, redis_password=node.redis_password)
    # Initialize some fields.
    if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
        # We should not specify the job_id if it's `WORKER_MODE`.
        assert job_id is None
        job_id = JobID.nil()
    else:
        # This is the code path of driver mode.
        if job_id is None:
            job_id = ray.state.next_job_id()
    if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
        # Give idle workers a recognizable process title until they pick up
        # a task (spill/restore workers get their own titles).
        process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
        if mode is SPILL_WORKER_MODE:
            process_name = (
                ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
        elif mode is RESTORE_WORKER_MODE:
            process_name = (
                ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
        setproctitle.setproctitle(process_name)
    if not isinstance(job_id, JobID):
        raise TypeError("The type of given job id must be JobID.")
    # All workers start out as non-actors. A worker can be turned into an actor
    # after it is created.
    worker.node = node
    worker.set_mode(mode)
    # For driver's check that the version information matches the version
    # information that the Ray cluster was started with.
    try:
        ray._private.services.check_version_info(worker.redis_client)
    except Exception as e:
        if mode == SCRIPT_MODE:
            # Drivers fail fast on a version mismatch.
            raise e
        elif mode == WORKER_MODE:
            # Workers report the mismatch to the driver instead of dying here.
            traceback_str = traceback.format_exc()
            ray._private.utils.push_error_to_driver_through_redis(
                worker.redis_client,
                ray_constants.VERSION_MISMATCH_PUSH_ERROR,
                traceback_str,
                job_id=None)
    worker.lock = threading.RLock()
    driver_name = ""
    log_stdout_file_path = ""
    log_stderr_file_path = ""
    interactive_mode = False
    if mode == SCRIPT_MODE:
        import __main__ as main
        if hasattr(main, "__file__"):
            driver_name = main.__file__
        else:
            interactive_mode = True
            driver_name = "INTERACTIVE MODE"
    elif not LOCAL_MODE:
        raise ValueError(
            "Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
    redis_address, redis_port = node.redis_address.split(":")
    gcs_options = ray._raylet.GcsClientOptions(
        redis_address,
        int(redis_port),
        node.redis_password,
    )
    if job_config is None:
        job_config = ray.job_config.JobConfig()
    if namespace is not None:
        ray._private.utils.validate_namespace(namespace)
        # The namespace field of job config may have already been set in code
        # paths such as the client.
        job_config.set_ray_namespace(namespace)
    # Make sure breakpoint() in the user's code will
    # invoke the Ray debugger if we are in a worker or actor process
    # (but not on the driver).
    if mode == WORKER_MODE:
        os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
    else:
        # Add hook to suppress worker logs during breakpoint.
        os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb._driver_set_trace"
    worker.ray_debugger_external = ray_debugger_external
    serialized_job_config = job_config.serialize()
    worker.core_worker = ray._raylet.CoreWorker(
        mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
        gcs_options, node.get_logs_dir_path(), node.node_ip_address,
        node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
        driver_name, log_stdout_file_path, log_stderr_file_path,
        serialized_job_config, node.metrics_agent_port, runtime_env_hash,
        worker_shim_pid, startup_token)
    worker.gcs_client = worker.core_worker.get_gcs_client()
    # If it's a driver and it's not coming from ray client, we'll prepare the
    # environment here. If it's ray client, the environment will be prepared
    # at the server side.
    if mode == SCRIPT_MODE and not job_config.client_job:
        manager = working_dir_pkg.WorkingDirManager(
            worker.node.get_runtime_env_dir_path())
        manager.upload_runtime_env_package_if_needed(job_config)
    # Notify raylet that the core worker is ready.
    worker.core_worker.notify_raylet()
    if driver_object_store_memory is not None:
        logger.warning("`driver_object_store_memory` is deprecated"
                       " and will be removed in the future.")
    # Start the import thread
    if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
        worker.import_thread = import_thread.ImportThread(
            worker, mode, worker.threads_stopped)
        worker.import_thread.start()
    # If this is a driver running in SCRIPT_MODE, start a thread to print error
    # messages asynchronously in the background. Ideally the scheduler would
    # push messages to the driver's worker service, but we ran into bugs when
    # trying to properly shutdown the driver's worker service, so we are
    # temporarily using this implementation which constantly queries the
    # scheduler for new error messages.
    if mode == SCRIPT_MODE:
        worker.listener_thread = threading.Thread(
            target=listen_error_messages_raylet,
            name="ray_listen_error_messages",
            args=(worker, worker.threads_stopped))
        worker.listener_thread.daemon = True
        worker.listener_thread.start()
        if log_to_driver:
            global_worker_stdstream_dispatcher.add_handler(
                "ray_print_logs", print_to_stdstream)
            worker.logger_thread = threading.Thread(
                target=worker.print_logs, name="ray_print_logs")
            worker.logger_thread.daemon = True
            worker.logger_thread.start()
    if mode == SCRIPT_MODE:
        # Add the directory containing the script that is running to the Python
        # paths of the workers. Also add the current directory. Note that this
        # assumes that the directory structures on the machines in the clusters
        # are the same.
        # When using an interactive shell, there is no script directory.
        if not interactive_mode:
            script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
            worker.run_function_on_all_workers(
                lambda worker_info: sys.path.insert(1, script_directory))
        # In client mode, if we use runtime envs with "working_dir", then
        # it'll be handled automatically. Otherwise, add the current dir.
        if not job_config.client_job and len(
                job_config.get_runtime_env_uris()) == 0:
            current_directory = os.path.abspath(os.path.curdir)
            worker.run_function_on_all_workers(
                lambda worker_info: sys.path.insert(1, current_directory))
        # TODO(rkn): Here we first export functions to run, then remote
        # functions. The order matters. For example, one of the functions to
        # run may set the Python path, which is needed to import a module used
        # to define a remote function. We may want to change the order to
        # simply be the order in which the exports were defined on the driver.
        # In addition, we will need to retain the ability to decide what the
        # first few exports are (mostly to set the Python path). Additionally,
        # note that the first exports to be defined on the driver will be the
        # ones defined in separate modules that are imported by the driver.
        # Export cached functions_to_run.
        for function in worker.cached_functions_to_run:
            worker.run_function_on_all_workers(function)
    worker.cached_functions_to_run = None
    # Setup tracing here
    if _internal_kv_get("tracing_startup_hook"):
        ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
        if not getattr(ray, "__traced__", False):
            _setup_tracing = import_from_string(
                _internal_kv_get("tracing_startup_hook").decode("utf-8"))
            _setup_tracing()
            ray.__traced__ = True
def disconnect(exiting_interpreter=False):
    """Disconnect this worker from the raylet and object store.

    Args:
        exiting_interpreter: Accepted for symmetry with shutdown(); it is
            currently unused inside this function.
    """
    # Reset the list of cached remote functions and actors so that if more
    # remote functions or actors are defined and then connect is called again,
    # the remote functions will be exported. This is mostly relevant for the
    # tests.
    worker = global_worker
    if worker.connected:
        # Shutdown all of the threads that we've started. TODO(rkn): This
        # should be handled cleanly in the worker object's destructor and not
        # in this disconnect method.
        worker.threads_stopped.set()
        if hasattr(worker, "import_thread"):
            worker.import_thread.join_import_thread()
        if hasattr(worker, "listener_thread"):
            worker.listener_thread.join()
        if hasattr(worker, "logger_thread"):
            worker.logger_thread.join()
        # Clear the flag so a subsequent connect() can start fresh threads.
        worker.threads_stopped.clear()
        worker._session_index += 1
        global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")
    worker.node = None  # Disconnect the worker from the node.
    worker.cached_functions_to_run = []
    worker.serialization_context_map.clear()
    try:
        ray_actor = ray.actor
    except AttributeError:
        ray_actor = None  # This can occur during program termination
    if ray_actor is not None:
        ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
    """Set the process title to ``title`` for the managed block.

    On exit (even on error) the title is switched to ``next_title``.
    Does nothing in LOCAL_MODE.
    """

    def _apply(new_title):
        # Local mode runs everything in the driver process, so the title
        # is left untouched.
        if _mode() is not LOCAL_MODE:
            setproctitle.setproctitle(new_title)

    _apply(title)
    try:
        yield
    finally:
        _apply(next_title)
@DeveloperAPI
def show_in_dashboard(message: str, key: str = "", dtype: str = "text"):
    """Display message in dashboard.
    Display message for the current task or actor in the dashboard.
    For example, this can be used to display the status of a long-running
    computation.
    Args:
        message (str): Message to be displayed.
        key (str): The key name for the message. Multiple message under
            different keys will be displayed at the same time. Messages
            under the same key will be overridden.
        dtype (str): The type of message for rendering. One of the
            following: text, html.
    """
    worker = global_worker
    worker.check_connected()
    # Fail loudly on unknown rendering types before sending to the dashboard.
    acceptable_dtypes = {"text", "html"}
    assert dtype in acceptable_dtypes, (
        f"dtype accepts only: {acceptable_dtypes}")
    message_wrapped = {"message": message, "dtype": dtype}
    message_encoded = json.dumps(message_wrapped).encode()
    worker.core_worker.set_webui_display(key.encode(), message_encoded)
# Global variable to make sure we only send out the warning once.
# Set by get() the first time a blocking ray.get is used inside an async actor.
blocking_get_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def get(object_refs: Union[ray.ObjectRef, List[ray.ObjectRef]],
        *,
        timeout: Optional[float] = None) -> Union[Any, List[Any]]:
    """Get a remote object or a list of remote objects from the object store.
    This method blocks until the object corresponding to the object ref is
    available in the local object store. If this object is not in the local
    object store, it will be shipped from an object store that has it (once the
    object has been created). If object_refs is a list, then the objects
    corresponding to each object in the list will be returned.
    Ordering for an input list of object refs is preserved for each object
    returned. That is, if an object ref to A precedes an object ref to B in the
    input list, then A will precede B in the returned list.
    This method will issue a warning if it's running inside async context,
    you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
    a list of object refs, you can use ``await asyncio.gather(*object_refs)``.
    Args:
        object_refs: Object ref of the object to get or a list of object refs
            to get.
        timeout (Optional[float]): The maximum amount of time in seconds to
            wait before returning.
    Returns:
        A Python object or a list of Python objects.
    Raises:
        GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
            the get takes longer than timeout to return.
        Exception: An exception is raised if the task that created the object
            or that created one of the objects raised an exception.
    """
    worker = global_worker
    worker.check_connected()
    if hasattr(
            worker,
            "core_worker") and worker.core_worker.current_actor_is_asyncio():
        global blocking_get_inside_async_warned
        if not blocking_get_inside_async_warned:
            logger.warning("Using blocking ray.get inside async actor. "
                           "This blocks the event loop. Please use `await` "
                           "on object ref with asyncio.gather if you want to "
                           "yield execution to the event loop instead.")
            blocking_get_inside_async_warned = True
    with profiling.profile("ray.get"):
        # A scalar ref is wrapped in a list here and unwrapped before return.
        is_individual_id = isinstance(object_refs, ray.ObjectRef)
        if is_individual_id:
            object_refs = [object_refs]
        if not isinstance(object_refs, list):
            raise ValueError("'object_refs' must either be an object ref "
                             "or a list of object refs.")
        # TODO(ujvl): Consider how to allow user to retrieve the ready objects.
        values, debugger_breakpoint = worker.get_objects(
            object_refs, timeout=timeout)
        for i, value in enumerate(values):
            # Error objects stored in place of results are re-raised here.
            if isinstance(value, RayError):
                if isinstance(value, ray.exceptions.ObjectLostError):
                    worker.core_worker.dump_object_store_memory_usage()
                if isinstance(value, RayTaskError):
                    raise value.as_instanceof_cause()
                else:
                    raise value
        if is_individual_id:
            values = values[0]
        if debugger_breakpoint != b"":
            # A task requested a breakpoint; attach the Ray debugger in the
            # caller's frame before returning.
            frame = sys._getframe().f_back
            rdb = ray.util.pdb.connect_ray_pdb(
                host=None,
                port=None,
                patch_stdstreams=False,
                quiet=None,
                breakpoint_uuid=debugger_breakpoint.decode()
                if debugger_breakpoint else None,
                debugger_external=worker.ray_debugger_external)
            rdb.set_trace(frame=frame)
    return values
@PublicAPI
@client_mode_hook(auto_init=True)
def put(value: Any, *,
        _owner: Optional["ray.actor.ActorHandle"] = None) -> ray.ObjectRef:
    """Store an object in the object store.
    The object may not be evicted while a reference to the returned ID exists.
    Args:
        value: The Python object to be stored.
        _owner: The actor that should own this object. This allows creating
            objects with lifetimes decoupled from that of the creating process.
            Note that the owner actor must be passed a reference to the object
            prior to the object creator exiting, otherwise the reference will
            still be lost.
    Returns:
        The object ref assigned to this value.
    Raises:
        RuntimeError: If ``_owner`` refers to a dead actor.
        TypeError: If ``_owner`` is neither None nor an ActorHandle.
        ObjectStoreFullError: Re-raised (after logging) if the value could
            not be stored.
    """
    worker = global_worker
    worker.check_connected()
    if _owner is None:
        serialized_owner_address = None
    elif isinstance(_owner, ray.actor.ActorHandle):
        # Ensure `ray.state.state.global_state_accessor` is not None
        ray.state.state._check_connected()
        owner_address = gcs_utils.ActorTableData.FromString(
            ray.state.state.global_state_accessor.get_actor_info(
                _owner._actor_id)).address
        if len(owner_address.worker_id) == 0:
            # Fix: error message previously read "it's worker_id".
            raise RuntimeError(
                f"{_owner} is not alive, its worker_id is empty!")
        serialized_owner_address = owner_address.SerializeToString()
    else:
        raise TypeError(
            f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")
    with profiling.profile("ray.put"):
        try:
            object_ref = worker.put_object(
                value, owner_address=serialized_owner_address)
        except ObjectStoreFullError:
            logger.info(
                "Put failed since the value was either too large or the "
                "store was full of pinned objects.")
            raise
    return object_ref
# Global variable to make sure we only send out the warning once.
# Set by wait() the first time a blocking ray.wait is used in an async actor.
blocking_wait_inside_async_warned = False
@PublicAPI
@client_mode_hook(auto_init=True)
def wait(object_refs: List[ray.ObjectRef],
         *,
         num_returns: int = 1,
         timeout: Optional[float] = None,
         fetch_local: bool = True
         ) -> Tuple[List[ray.ObjectRef], List[ray.ObjectRef]]:
    """Return a list of IDs that are ready and a list of IDs that are not.
    If timeout is set, the function returns either when the requested number of
    IDs are ready or when the timeout is reached, whichever occurs first. If it
    is not set, the function simply waits until that number of objects is ready
    and returns that exact number of object refs.
    This method returns two lists. The first list consists of object refs that
    correspond to objects that are available in the object store. The second
    list corresponds to the rest of the object refs (which may or may not be
    ready).
    Ordering of the input list of object refs is preserved. That is, if A
    precedes B in the input list, and both are in the ready list, then A will
    precede B in the ready list. This also holds true if A and B are both in
    the remaining list.
    This method will issue a warning if it's running inside an async context.
    Instead of ``ray.wait(object_refs)``, you can use
    ``await asyncio.wait(object_refs)``.
    Args:
        object_refs (List[ObjectRef]): List of object refs for objects that may
            or may not be ready. Note that these IDs must be unique.
        num_returns (int): The number of object refs that should be returned.
        timeout (float): The maximum amount of time in seconds to wait before
            returning.
        fetch_local (bool): If True, wait for the object to be downloaded onto
            the local node before returning it as ready. If False, ray.wait()
            will not trigger fetching of objects to the local node and will
            return immediately once the object is available anywhere in the
            cluster.
    Returns:
        A list of object refs that are ready and a list of the remaining object
        IDs.
    Raises:
        TypeError: If ``object_refs`` is not a list of ObjectRefs.
        ValueError: If the timeout is negative, the refs are not unique, or
            ``num_returns`` is out of range.
    """
    worker = global_worker
    worker.check_connected()
    if hasattr(worker,
               "core_worker") and worker.core_worker.current_actor_is_asyncio(
               ) and timeout != 0:
        global blocking_wait_inside_async_warned
        if not blocking_wait_inside_async_warned:
            logger.debug("Using blocking ray.wait inside async method. "
                         "This blocks the event loop. Please use `await` "
                         "on object ref with asyncio.wait. ")
            blocking_wait_inside_async_warned = True
    if isinstance(object_refs, ObjectRef):
        raise TypeError(
            "wait() expected a list of ray.ObjectRef, got a single "
            "ray.ObjectRef")
    if not isinstance(object_refs, list):
        raise TypeError("wait() expected a list of ray.ObjectRef, "
                        f"got {type(object_refs)}")
    if timeout is not None and timeout < 0:
        raise ValueError("The 'timeout' argument must be nonnegative. "
                         f"Received {timeout}")
    for object_ref in object_refs:
        if not isinstance(object_ref, ObjectRef):
            raise TypeError("wait() expected a list of ray.ObjectRef, "
                            f"got list containing {type(object_ref)}")
    # Fix: a second, redundant worker.check_connected() call here was
    # removed -- connectivity is already verified at the top of wait().
    # TODO(swang): Check main thread.
    with profiling.profile("ray.wait"):
        # TODO(rkn): This is a temporary workaround for
        # https://github.com/ray-project/ray/issues/997. However, it should be
        # fixed in Arrow instead of here.
        if len(object_refs) == 0:
            return [], []
        if len(object_refs) != len(set(object_refs)):
            raise ValueError("Wait requires a list of unique object refs.")
        if num_returns <= 0:
            raise ValueError(
                "Invalid number of objects to return %d." % num_returns)
        if num_returns > len(object_refs):
            raise ValueError("num_returns cannot be greater than the number "
                             "of objects provided to ray.wait.")
        # No timeout means effectively waiting forever (10**6 seconds).
        timeout = timeout if timeout is not None else 10**6
        timeout_milliseconds = int(timeout * 1000)
        ready_ids, remaining_ids = worker.core_worker.wait(
            object_refs,
            num_returns,
            timeout_milliseconds,
            worker.current_task_id,
            fetch_local,
        )
        return ready_ids, remaining_ids
@PublicAPI
@client_mode_hook(auto_init=True)
def get_actor(name: str,
              namespace: Optional[str] = None) -> "ray.actor.ActorHandle":
    """Get a handle to a named actor.

    Looks up an actor created with ``Actor.options(name="name").remote()``;
    works for detached and non-detached actors alike.

    Args:
        name: The name of the actor.
        namespace: The namespace of the actor, or None to use the current
            namespace.

    Returns:
        ActorHandle to the actor.

    Raises:
        ValueError: If the name is empty or no actor with it exists.
    """
    if not name:
        raise ValueError("Please supply a non-empty value to get_actor")
    resolved_namespace = ""
    if namespace is not None:
        ray._private.utils.validate_namespace(namespace)
        resolved_namespace = namespace
    worker = global_worker
    worker.check_connected()
    return worker.core_worker.get_named_actor_handle(name, resolved_namespace)
@PublicAPI
@client_mode_hook(auto_init=True)
def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True):
    """Kill an actor forcefully.

    Running tasks on the actor are interrupted and fail immediately, and
    ``atexit`` handlers inside the actor do not run. To let pending tasks
    finish instead, queue a termination task via
    ``actor.__ray_terminate__.remote()``, which does run ``atexit``
    handlers. A killed detached actor can no longer be looked up with
    ray.get_actor.

    Args:
        actor (ActorHandle): Handle to the actor to kill.
        no_restart (bool): Whether or not this actor should be restarted if
            it's a restartable actor.
    """
    worker = global_worker
    worker.check_connected()
    if not isinstance(actor, ray.actor.ActorHandle):
        raise ValueError(
            f"ray.kill() only supported for actors. Got: {type(actor)}.")
    worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
@PublicAPI
@client_mode_hook(auto_init=True)
def cancel(object_ref: ray.ObjectRef,
           *,
           force: bool = False,
           recursive: bool = True):
    """Cancel the task that produced ``object_ref``.

    A pending task is simply never executed. For a currently-executing task
    the behavior depends on ``force``: with ``force=False`` a
    KeyboardInterrupt is raised inside the task, with ``force=True`` the
    executing worker exits immediately. Finished tasks are unaffected.
    Only non-actor tasks can be canceled, and canceled tasks are not retried
    (max_retries is ignored). ray.get on a canceled task raises
    TaskCancelledError, or WorkerCrashedError when ``force=True``.

    Args:
        object_ref (ObjectRef): ObjectRef returned by the task
            that should be canceled.
        force (boolean): Whether to force-kill a running task by killing
            the worker that is running the task.
        recursive (boolean): Whether to try to cancel tasks submitted by the
            task specified.

    Raises:
        TypeError: If ``object_ref`` is not an ObjectRef (e.g. an actor
            task).
    """
    worker = ray.worker.global_worker
    worker.check_connected()
    if not isinstance(object_ref, ray.ObjectRef):
        raise TypeError(
            "ray.cancel() only supported for non-actor object refs. "
            "Got: {}.".format(type(object_ref)))
    return worker.core_worker.cancel_task(object_ref, force, recursive)
def _mode(worker=global_worker):
    """Return the mode of ``worker``.

    Thin accessor used by the remote decorator so it can call ``_mode()``
    rather than touching ``worker.mode`` directly -- serializing a remote
    function must not attempt to serialize the (unserializable) worker
    object itself.
    """
    current_mode = worker.mode
    return current_mode
def make_decorator(num_returns=None,
                   num_cpus=None,
                   num_gpus=None,
                   memory=None,
                   object_store_memory=None,
                   resources=None,
                   accelerator_type=None,
                   max_calls=None,
                   max_retries=None,
                   max_restarts=None,
                   max_task_retries=None,
                   runtime_env=None,
                   placement_group="default",
                   worker=None,
                   retry_exceptions=None):
    """Build the decorator applied by ``@ray.remote``.

    The returned decorator validates the given options against the kind of
    target it is applied to and wraps a plain (or Cython) function as a
    RemoteFunction, or a class as an actor class. Options that only apply
    to one kind of target are rejected for the other kind.
    """
    def decorator(function_or_class):
        # Functions (including Cython functions) become remote functions.
        if (inspect.isfunction(function_or_class)
                or is_cython(function_or_class)):
            # Set the remote function default resources.
            # Actor-only options are rejected for remote functions.
            if max_restarts is not None:
                raise ValueError("The keyword 'max_restarts' is not "
                                 "allowed for remote functions.")
            if max_task_retries is not None:
                raise ValueError("The keyword 'max_task_retries' is not "
                                 "allowed for remote functions.")
            if num_returns is not None and (not isinstance(num_returns, int)
                                            or num_returns < 0):
                raise ValueError(
                    "The keyword 'num_returns' only accepts 0 or a"
                    " positive integer")
            if max_retries is not None and (not isinstance(max_retries, int)
                                            or max_retries < -1):
                raise ValueError(
                    "The keyword 'max_retries' only accepts 0, -1 or a"
                    " positive integer")
            if max_calls is not None and (not isinstance(max_calls, int)
                                          or max_calls < 0):
                raise ValueError(
                    "The keyword 'max_calls' only accepts 0 or a positive"
                    " integer")
            # NOTE: these are positional arguments; the order must match
            # RemoteFunction's constructor exactly.
            return ray.remote_function.RemoteFunction(
                Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
                memory, object_store_memory, resources, accelerator_type,
                num_returns, max_calls, max_retries, retry_exceptions,
                runtime_env, placement_group)
        # Classes become actor classes.
        if inspect.isclass(function_or_class):
            # Function-only options are rejected for actors.
            if num_returns is not None:
                raise TypeError("The keyword 'num_returns' is not "
                                "allowed for actors.")
            if max_retries is not None:
                raise TypeError("The keyword 'max_retries' is not "
                                "allowed for actors.")
            if retry_exceptions is not None:
                raise TypeError("The keyword 'retry_exceptions' is not "
                                "allowed for actors.")
            if max_calls is not None:
                raise TypeError("The keyword 'max_calls' is not "
                                "allowed for actors.")
            if max_restarts is not None and (not isinstance(max_restarts, int)
                                             or max_restarts < -1):
                raise ValueError(
                    "The keyword 'max_restarts' only accepts -1, 0 or a"
                    " positive integer")
            if max_task_retries is not None and (not isinstance(
                    max_task_retries, int) or max_task_retries < -1):
                raise ValueError(
                    "The keyword 'max_task_retries' only accepts -1, 0 or a"
                    " positive integer")
            # NOTE: positional arguments; order must match make_actor.
            return ray.actor.make_actor(function_or_class, num_cpus, num_gpus,
                                        memory, object_store_memory, resources,
                                        accelerator_type, max_restarts,
                                        max_task_retries, runtime_env)
        raise TypeError("The @ray.remote decorator must be applied to "
                        "either a function or to a class.")
    return decorator
@PublicAPI
def remote(*args, **kwargs):
    """Defines a remote function or an actor class.

    This can be used with no arguments to define a remote function or actor
    as follows:

    .. code-block:: python

        @ray.remote
        def f():
            return 1

        @ray.remote
        class Foo:
            def method(self):
                return 1

    It can also be used with specific keyword arguments as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_returns=2)
        def f():
            return 1, 2

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1

    Remote task and actor objects returned by @ray.remote can also be
    dynamically modified with the same arguments as above using
    ``.options()`` as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_returns=2)
        def f():
            return 1, 2
        g = f.options(num_gpus=2, max_calls=None)

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1
        Bar = Foo.options(num_cpus=1, resources=None)

    Running remote actors will be terminated when the actor handle to them
    in Python is deleted, which will cause them to complete any outstanding
    work and then shut down. If you want to kill them immediately, you can
    also call ``ray.kill(actor)``.

    Args:
        num_returns (int): This is only for *remote functions*. It specifies
            the number of object refs returned by
            the remote function invocation.
        num_cpus (float): The quantity of CPU cores to reserve
            for this task or for the lifetime of the actor.
        num_gpus (int): The quantity of GPUs to reserve
            for this task or for the lifetime of the actor.
        resources (Dict[str, float]): The quantity of various custom resources
            to reserve for this task or for the lifetime of the actor.
            This is a dictionary mapping strings (resource names) to floats.
        accelerator_type: If specified, requires that the task or actor run
            on a node with the specified type of accelerator.
            See `ray.accelerators` for accelerator types.
        max_calls (int): Only for *remote functions*. This specifies the
            maximum number of times that a given worker can execute
            the given remote function before it must exit
            (this can be used to address memory leaks in third-party
            libraries or to reclaim resources that cannot easily be
            released, e.g., GPU memory that was acquired by TensorFlow).
            By default this is infinite.
        max_restarts (int): Only for *actors*. This specifies the maximum
            number of times that the actor should be restarted when it dies
            unexpectedly. The minimum valid value is 0 (default),
            which indicates that the actor doesn't need to be restarted.
            A value of -1 indicates that an actor should be restarted
            indefinitely.
        max_task_retries (int): Only for *actors*. How many times to
            retry an actor task if the task fails due to a system error,
            e.g., the actor has died. If set to -1, the system will
            retry the failed task until the task succeeds, or the actor
            has reached its max_restarts limit. If set to `n > 0`, the
            system will retry the failed task up to n times, after which the
            task will throw a `RayActorError` exception upon :obj:`ray.get`.
            Note that Python exceptions are not considered system errors
            and will not trigger retries.
        max_retries (int): Only for *remote functions*. This specifies
            the maximum number of times that the remote function
            should be rerun when the worker process executing it
            crashes unexpectedly. The minimum valid value is 0,
            the default is 4 (default), and a value of -1 indicates
            infinite retries.
        runtime_env (Dict[str, Any]): Specifies the runtime environment for
            this actor or task and its children. See
            :ref:`runtime-environments` for detailed documentation. This API
            is in beta and may change before becoming stable.
        retry_exceptions (bool): Only for *remote functions*. This specifies
            whether application-level errors should be retried
            up to max_retries times.
    """
    worker = global_worker
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_decorator(worker=worker)(args[0])
    # Parse the keyword arguments from the decorator.
    valid_kwargs = [
        "num_returns", "num_cpus", "num_gpus", "memory", "object_store_memory",
        "resources", "accelerator_type", "max_calls", "max_restarts",
        "max_task_retries", "max_retries", "runtime_env", "retry_exceptions",
        "placement_group"
    ]
    error_string = ("The @ray.remote decorator must be applied either "
                    "with no arguments and no parentheses, for example "
                    "'@ray.remote', or it must be applied using some of "
                    f"the arguments in the list {valid_kwargs}, for example "
                    "'@ray.remote(num_returns=2, "
                    "resources={\"CustomResource\": 1})'.")
    # NOTE: `assert` is skipped under `python -O`; these guard developer
    # misuse of the decorator, not untrusted input.
    assert len(args) == 0 and len(kwargs) > 0, error_string
    for key in kwargs:
        assert key in valid_kwargs, error_string
    # Use kwargs.get() uniformly for all optional options (num_cpus and
    # num_gpus previously used an equivalent but more verbose
    # `kwargs[k] if k in kwargs else None` lookup).
    num_cpus = kwargs.get("num_cpus")
    num_gpus = kwargs.get("num_gpus")
    resources = kwargs.get("resources")
    if not isinstance(resources, dict) and resources is not None:
        raise TypeError("The 'resources' keyword argument must be a "
                        f"dictionary, but received type {type(resources)}.")
    if resources is not None:
        # CPU and GPU quantities have dedicated arguments.
        assert "CPU" not in resources, "Use the 'num_cpus' argument."
        assert "GPU" not in resources, "Use the 'num_gpus' argument."
    accelerator_type = kwargs.get("accelerator_type")
    # Handle other arguments.
    num_returns = kwargs.get("num_returns")
    max_calls = kwargs.get("max_calls")
    max_restarts = kwargs.get("max_restarts")
    max_task_retries = kwargs.get("max_task_retries")
    memory = kwargs.get("memory")
    object_store_memory = kwargs.get("object_store_memory")
    max_retries = kwargs.get("max_retries")
    runtime_env = kwargs.get("runtime_env")
    placement_group = kwargs.get("placement_group", "default")
    retry_exceptions = kwargs.get("retry_exceptions")
    return make_decorator(
        num_returns=num_returns,
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        memory=memory,
        object_store_memory=object_store_memory,
        resources=resources,
        accelerator_type=accelerator_type,
        max_calls=max_calls,
        max_restarts=max_restarts,
        max_task_retries=max_task_retries,
        max_retries=max_retries,
        runtime_env=runtime_env,
        placement_group=placement_group,
        worker=worker,
        retry_exceptions=retry_exceptions)
|
latex.py
|
"""
Compiling tex files and processing bibliography (bib) files.
"""
import subprocess
import sys
import threading
import time
import latex_suite.util
class Outcome:
    """
    The outcome of compiling or processing latex and related files.
    """
    UNKNOWN = -1  # Outcome has not (yet) been determined.
    SUCCESS = 0  # The run finished without a detected problem.
    FAILURE = 1  # The run finished but reported an error.
    ABORTED = 2  # The run was terminated (e.g. too many error progressions).
    FILE_NOT_FOUND = 3  # The input file could not be opened.
class ProcessRunnerWithOutput:
    """
    Class to run a cmd line program on a positional parameter (e.g. a file) and collect the output.

    The process is started immediately on construction. Its stdout can then
    be consumed one character at a time by iterating over the instance; every
    consumed character is also accumulated in :attr:`output`.
    """
    def __init__(self, command, parameter, verbose=False):
        """
        Start the external process.

        :param command: str
            The command/executable to run.
        :param parameter: 
            The single positional parameter appended to the command
            (converted to str).
        :param verbose: bool
            If True, echo every consumed stdout character to this process's
            own stdout.
        """
        self._command = command
        self._parameter = str(parameter)
        self._verbose = verbose
        # NOTE(review): shell=True with string concatenation is vulnerable to
        # shell injection if `command`/`parameter` ever come from untrusted
        # input; current callers pass engine names and file names.
        self._process = subprocess.Popen(self._command + " " + self._parameter, shell=True,
                                         stdout=subprocess.PIPE, stdin=subprocess.PIPE)
        self._total_output = ""
    @property
    def output(self):
        """
        :return:
            All stdout consumed from the process so far.
        """
        return self._total_output
    @property
    def command(self):
        """
        The command/process.

        :return: str
            The command/process.
        """
        return self._command
    @property
    def parameter(self):
        """
        The parameter of the command/process.

        :return:
            The parameter of the command/process.
        """
        return self._parameter
    def write_to_stdin(self, input_bytes):
        """
        Write bytes to stdin (flushed immediately).

        :param input_bytes: bytes
            The input to write.
        """
        self._process.stdin.write(input_bytes)
        self._process.stdin.flush()
    def write_newline(self):
        """
        Write newline to stdin.
        """
        self.write_to_stdin(b'\n')
    def __iter__(self):
        """
        :return:
            An iterator over the chars of the stdout from the process.
        """
        # Read one byte at a time until EOF (b'' sentinel).
        self._stdout_iterator = iter(lambda: self._process.stdout.read(1), b'')
        return self
    def __next__(self):
        """
        Get the next char from stdout until the process has finished or
        stdin gets closed.

        The char is also added to the entire output (see
        :func:`~ProcessRunnerWithOutput.output`).

        :return: str
            The next char.
        """
        # A closed stdin signals that iteration was aborted deliberately.
        if self._process.stdin.closed:
            raise StopIteration
        byte_char = next(self._stdout_iterator)
        char = byte_char.decode("utf-8")
        self._total_output += char
        if self._verbose:
            sys.stdout.write(char)
        return char
class BibBashProcessing(ProcessRunnerWithOutput):
    """
    Process citations of a latex document with a bibliography engine.
    """
    def __init__(self, engine, tex_file, verbose=False):
        """
        Start the bibliography engine for the given tex file.

        :param engine: str
            The bibliography engine ("biber" or "bibtex").
        :param tex_file: str
            The name of the tex file.
        :param verbose: bool
            If True, echo the engine's output while it is consumed.
        """
        engine_parameter = BibBashProcessing.get_bib_parameter(engine, tex_file)
        super().__init__(engine, engine_parameter, verbose)
        self._verbose = verbose
    @staticmethod
    def get_bib_parameter(engine, tex_file):
        """
        Return the parameter of the bib engine to process citations.

        :param engine: str
            The engine.
        :param tex_file: str
            The name of the tex file.
        :return: str
            The parameter for the bib engine (None for unknown engines).
        """
        if engine == "biber":
            # biber takes the file name without extension.
            return latex_suite.util.filename_stem(tex_file)
        if engine == "bibtex":
            # bibtex takes the generated .aux file.
            return latex_suite.util.filename_stem(tex_file) + ".aux"
        return None
    def run_processing(self):
        """
        Process the citations by consuming the engine's entire output.

        :return: int
            The outcome indicator (see :class:`Outcome`).
        """
        outcome = Outcome.SUCCESS
        # Drain stdout completely; the interesting information is in the
        # accumulated output text.
        for _ in self:
            pass
        if self._verbose:
            sys.stdout.write("\n")
            sys.stdout.flush()
        if "I couldn't open file name" in self._total_output:
            outcome = Outcome.FILE_NOT_FOUND
        if "I found no \\bibstyle command" in self._total_output:
            outcome = Outcome.FAILURE
        return outcome
class LatexBashCompile(ProcessRunnerWithOutput):
    """
    Class to use bash tex compile tools to translate pdfs.

    The engine's stdout is consumed character by character; interactive
    prompts ("?" warnings, "*" error prompts) are answered automatically so
    the compilation can progress without user input.
    """
    def __init__(self, engine, file, max_end_attempts, verbose=False):
        """
        Constructor

        :param engine:
            The pdf compiler engine.
        :param file:
            The tex file to translate.
        :param max_end_attempts:
            The maximal number of attempts to progress past a compile error.
        :param verbose: bool
            If True, echo the engine's output while it is consumed.
        """
        super().__init__(engine, file, verbose)
        self._max_end_attempts = max_end_attempts
        self._verbose = verbose
        # Number of "?" prompts answered with a newline.
        self._warning_progressions = 0
        # Number of "*" prompts confirmed as errors (see error_checking).
        self._error_progressions = 0
        self.compile_outcome = None
        # Number of times "\end" has been written to progress past an error.
        self._end_attempts = 0
        # Total stdout chars consumed so far; used to detect a stalled engine.
        self._processed_chars = 0
    @property
    def num_warnings(self):
        """
        :return: int
            The number of question mark warnings during the pdf compile process.
        """
        return self._warning_progressions
    @property
    def num_errors(self):
        """
        :return: int
            The number of asterisk errors during the pdf compile process.
        """
        return self._error_progressions
    def write_end_enter(self):
        """
        Passes \\end and newline to stdin for passing past a latex error that requires entering an
        \\end command.
        """
        self.write_to_stdin(b'\\end\n')
    def run_compile(self):
        """
        Compile the tex file with the specified engine.

        :return: int
            The outcome indicator (see :class:`Outcome`). Also stored in
            :attr:`compile_outcome`.
        """
        start_of_new_line = True
        # Index into the "file not found" message being matched; 0 = inactive.
        potential_file_not_found_idx = 0
        for char in iter(self):
            self._processed_chars += 1
            if start_of_new_line:
                if char == "?":
                    # Answer a "?" warning prompt with <enter> to continue.
                    self.write_newline()
                    self._warning_progressions += 1
                elif char == "!":
                    # Possible start of the "! I can't find file" message.
                    potential_file_not_found_idx = 1
                elif char == "*":
                    # Possible error prompt. Verified asynchronously, because
                    # a "*" at line start can also occur in normal output.
                    error_timeout_thread = threading.Thread(target=self.error_checking, args=(self._processed_chars,))
                    error_timeout_thread.start()
            elif potential_file_not_found_idx > 0:
                # Continue matching the "file not found" message char by char.
                fnf_message = "! I can't find file"
                if char == fnf_message[potential_file_not_found_idx]:
                    potential_file_not_found_idx += 1
                else:
                    potential_file_not_found_idx = 0
                if potential_file_not_found_idx == len(fnf_message):
                    # The engine is waiting for a file name; abort instead.
                    self._process.stdin.close()
                    self.compile_outcome = Outcome.FILE_NOT_FOUND
            start_of_new_line = False
            if char == "\n":
                start_of_new_line = True
        if self._verbose:
            sys.stdout.write("\n")
            sys.stdout.flush()
        # An aborted run keeps Outcome.ABORTED even if the engine also
        # reported a fatal error.
        if (self.compile_outcome != Outcome.ABORTED
                and "Fatal error occurred, no output PDF file produced!" in self._total_output):
            self.compile_outcome = Outcome.FAILURE
        if self.compile_outcome is None:
            self.compile_outcome = Outcome.SUCCESS
        return self.compile_outcome
    def error_checking(self, processed_chars):
        """
        Processes a potential error in the tex processing.

        Waits for half a second to see if the error is not an error and if
        the compiling is still not processing assumes that an error has
        happened and either tries to process passed the error or, if the max
        number of errors has happened, terminates the compilation. The number
        of processed chars is used as a measure if the compilation is still
        running.

        Runs on a separate thread (see run_compile).

        :param processed_chars:
            The number of processed chars at the time of the potential error.
        """
        time.sleep(0.5)
        # If no further output arrived, the engine is stalled at a prompt.
        if self._processed_chars == processed_chars:
            if self._end_attempts >= self._max_end_attempts:
                self._process.stdin.close()
                self.compile_outcome = Outcome.ABORTED
            else:
                self.write_end_enter()
                self._end_attempts += 1
                self._error_progressions += 1
        else:
            pass  # Do nothing since the processing has continued.
class TypesettingResult:
    """
    The result of a complete typesetting process: one or several tex
    compilations plus the bib processing result if one was performed.

    Iterating the instance yields the compilation results in reverse order
    (most recent first).
    """
    def __init__(self):
        # Tex compile results in the order they were produced.
        self._compile_runs = []
        # Cursor for reverse iteration; -1 means exhausted (or empty).
        self._cursor = -1
        self._bib_result = None
    @property
    def bib_processing_result(self):
        """
        :return: BibBashProcessingResult
            The result of a bib processing (None if none was performed).
        """
        return self._bib_result
    @bib_processing_result.setter
    def bib_processing_result(self, result):
        """
        Set the result of a bib processing.

        :param result: BibBashProcessingResult
            The result.
        """
        self._bib_result = result
    def add_compilation_result(self, compilation_result):
        """
        Add the result of one tex compilation.

        :param compilation_result: LatexBashCompileResult
            The result information object.
        """
        self._compile_runs.append(compilation_result)
        self._cursor = len(self._compile_runs) - 1
    def __len__(self):
        return len(self._compile_runs)
    def __iter__(self):
        # Reset the cursor so iteration starts from the latest result.
        self._cursor = len(self._compile_runs) - 1
        return self
    def __next__(self):
        if self._cursor < 0:
            raise StopIteration()
        current = self._compile_runs[self._cursor]
        self._cursor -= 1
        return current
class LatexBashCompileResult:
    """
    Plain record of one tex compilation run.
    """
    def __init__(self, filename, outcome, output, num_warnings, num_errors):
        """
        :param filename: The tex file that was compiled.
        :param outcome: The outcome indicator (see :class:`Outcome`).
        :param output: The full stdout captured from the engine.
        :param num_warnings: The number of warning progressions.
        :param num_errors: The number of error progressions.
        """
        self.num_errors = num_errors
        self.num_warnings = num_warnings
        self.output = output
        self.outcome = outcome
        self.filename = filename
class BibBashProcessingResult:
    """
    Plain record of one bib processing run.
    """
    def __init__(self, filename, outcome, output):
        """
        :param filename: The parameter the bib engine was run on.
        :param outcome: The outcome indicator (see :class:`Outcome`).
        :param output: The full stdout captured from the bib engine.
        """
        self.output = output
        self.outcome = outcome
        self.filename = filename
def make_pdf(file_to_translate, engine, bib_engine, max_end_attempts=1,
             verbose=False, do_bib=False, do_single_run=False):
    """Compile a tex file to a pdf.

    Performs one or several steps of the translation of a tex file to a pdf
    file, possibly including citation processing. The minimal run is one
    translation of the tex file (do_bib=False, do_single_run=True); the
    maximal run is tex translation, bib processing, tex translation, tex
    translation (do_bib=True, do_single_run=False).

    :param file_to_translate:
        The path of the tex file.
    :param engine: str
        The engine to use to translate the tex file.
    :param bib_engine: str
        The engine to use to process the citations.
    :param max_end_attempts: int
        The maximal number of attempts to try to pass past a latex
        translation error.
    :param verbose: bool
        If True print processing.
    :param do_bib: bool
        If True processes the citations with the citation engine (followed
        by another tex translation).
    :param do_single_run:
        If False an additional tex translation is appended at the end.
    :return: TypesettingResult
        The results of all compiles (outcome, output, number of errors and
        warnings) and, if the bibliography was processed, the bib processing
        information.
    """
    typesetting_result = TypesettingResult()
    first_run = compile_file(
        engine, file_to_translate, max_end_attempts=max_end_attempts,
        verbose=verbose)
    typesetting_result.add_compilation_result(first_run)
    if do_bib:
        # Process citations, then recompile so they are picked up.
        typesetting_result.bib_processing_result = compile_bib(
            bib_engine, file_to_translate, verbose=verbose)
        after_bib_run = compile_file(
            engine, file_to_translate, max_end_attempts=max_end_attempts,
            verbose=verbose)
        typesetting_result.add_compilation_result(after_bib_run)
    if not do_single_run:
        extra_run = compile_file(
            engine, file_to_translate, max_end_attempts=max_end_attempts,
            verbose=verbose)
        typesetting_result.add_compilation_result(extra_run)
    return typesetting_result
def compile_bib(engine, bib_parameter, verbose=False):
    """
    Process citations.

    :param engine: str
        The engine to use to process the citations.
    :param bib_parameter: str
        The parameter for the bib engine to process the citations. This
        could be a file name or a file name without the file extension.
    :param verbose: bool
        If True print processing.
    :return: BibBashProcessingResult
        The result of the citation processing.
    """
    bib_run = BibBashProcessing(engine, bib_parameter, verbose)
    run_outcome = bib_run.run_processing()
    # Derive the reported file name from the parameter the engine was
    # actually run on.
    processed_name = BibBashProcessing.get_bib_parameter(
        bib_run.command, bib_run.parameter)
    return BibBashProcessingResult(processed_name, run_outcome,
                                   bib_run.output)
def compile_file(engine, file, max_end_attempts=1, verbose=False):
    """
    Compiles a tex file with the specified engine.

    :param engine:
        The pdf compiler engine.
    :param file:
        The tex file to translate.
    :param max_end_attempts:
        The maximal number of attempts to progress past a compile error.
    :param verbose: bool
        If true prints engines output.
    :return: LatexBashCompileResult
        The compile result with the outcome, output and number of errors and
        warnings.
    """
    bash_compile = LatexBashCompile(engine, file, max_end_attempts, verbose)
    run_outcome = bash_compile.run_compile()
    return LatexBashCompileResult(
        file, run_outcome, bash_compile.output,
        bash_compile.num_warnings, bash_compile.num_errors)
def get_warnings(engine, file):
    """
    Get all warnings of a tex translation.

    :param engine:
        The pdf compiler engine.
    :param file:
        The tex file to translate.
    :return: list
        All warning lines that occurred during the second compilation.
    """
    # The first compilation's result is deliberately discarded; warnings are
    # collected from the output of the second run.
    compile_file(engine, file)
    second_pass = compile_file(engine, file)
    warning_indicators = ["multiply defined", "overfull", "undefined", "Warning: Reference", "Warning:"]
    return [
        output_line for output_line in second_pass.output.split("\n")
        if any(marker in output_line for marker in warning_indicators)
    ]
|
common.py
|
# Copyright (c) 2011 - 2017, Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""``common.py``
`Unittests common functionality`
"""
import threading
from xmlrpc.server import SimpleXMLRPCServer
from testlib.custom_exceptions import SwitchException
# Default TCP port the fake XML-RPC server listens on.
TCP_PORT = 9999


class FakeXMLRPCServer(object):
    """In-process fake of a switch's northbound XML-RPC server.

    Serves canned application/platform/port tables over
    ``SimpleXMLRPCServer`` so tests can exercise XML-RPC clients without
    real hardware. Use ``start()``/``stop()`` to control the serving thread.
    """

    def __init__(self, port=TCP_PORT):
        self.server = SimpleXMLRPCServer(("localhost", port))
        # Map the XML-RPC method names used by clients onto local handlers.
        self.server.register_function(self.applications_gettable,
                                      'nb.Applications.getTable')
        self.server.register_function(self.applications_set_loglevel,
                                      'nb.Applications.set.logLevel')
        self.server.register_function(self.applications_get_size, 'nb.Applications.size')
        self.server.register_function(self.applications_find, 'nb.Applications.find')
        self.server.register_function(self.applications_exists, 'nb.Applications.exists')
        self.server.register_function(self.system_tables_ready, 'system.tablesReady')
        self.server.register_function(self.platform_get_row, 'nb.Platform.getRow')
        # self.server.register_function(self.platform_get_table, 'nb.Platform.getTable')
        self.server.register_function(self.platform_get_size, 'nb.Platform.size')
        self.server.register_function(self.ports_get_name, 'nb.Ports.get.name')
        self.server.register_function(self.ports_get_size, 'nb.Ports.size')
        self.server.register_function(self.ports_get_info, 'nb.Ports.getInfo')
        self.server.register_function(self.ports_get_info_name, 'nb.Ports.getInfo.name')
        self.server.register_function(self.method_help, 'system.methodHelp')
        self.server.register_function(self.ports_add_row, 'nb.Ports.addRow')
        self.server.register_function(self.ports_del_row, 'nb.Ports.delRow')
        self.server.register_function(self.system_multicall, 'system.multicall')
        self.server.register_function(self.ports_lags_get_table,
                                      'nb.Ports2LagAdmin.getTable')
        self.server.register_function(self.ports_lags_get_size, 'nb.Ports2LagAdmin.size')
        self.server.register_function(self.lags_get_table, 'nb.LagsAdmin.getTable')
        self.server.register_function(self.lags_get_size, 'nb.LagsAdmin.size')
        self.server.register_function(self.lags_add_row, 'nb.LagsAdmin.addRow')
        self.server.register_function(self.ports_lag_add_row, 'nb.Ports2LagAdmin.addRow')
        # Canned rows for the nb.Applications table.
        self.applications = [
            {'name': 'ONSApplicationServer', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'SimSwitchApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'ONSCoreServer', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'ONSNorthboundServer', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L3DhcpRelayControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2MirrorControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2QosControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2StormControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2StatsControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'ONSOpenVSwitchApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L1SfpControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2VlanControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L1PortControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2QinqControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2FdbControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2AclControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L1SwitchControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2MulticastControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2LagControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L3ControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2LldpControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
            {'name': 'L2StpControlApp', 'logLevel': 'test level',
             'adminState': 'Run', 'appId': 1, 'operationalState': 'Run'},
        ]
        # Canned single-row nb.Platform table.
        self.platform = [{'ethernetSwitchType': 'SimSwitch Switch',
                          'name': 'ONS CoreSwitch',
                          'cpuArchitecture': 'x86_64',
                          'chipVersion': '2.0',
                          'chipSubType': 'simswitch',
                          'apiVersion': 'SimSwitch 2.0.0',
                          'switchppVersion': '1.2.0.1405-1',
                          'chipName': 'SimSwitch', 'osType':
                          'Linux', 'model': 'ONS', 'osVersion':
                          '3.2.0-61-generic',
                          'cpu': 'x86_64',
                          'serialNumber': ''}]
        # Canned rows for the nb.Ports table (mutated by add/del row calls).
        self.ports = [
            {'portId': 1, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Up', 'speed': 10000, 'name': 'xe1'},
            {'portId': 2, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Up', 'speed': 10000, 'name': 'xe2'},
            {'portId': 3, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe3'},
            {'portId': 4, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe4'},
            {'portId': 5, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe5'},
            {'portId': 6, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe6'},
            {'portId': 7, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe7'},
            {'portId': 8, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe8'},
            {'portId': 9, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe9'},
            {'portId': 10, 'adminMode': 'Up', 'pvid': 1, 'type': 'Physical',
             'operationalStatus': 'Down', 'speed': 10000, 'name': 'xe10'}]
        # Canned table metadata returned by nb.Ports.getInfo.
        self.ports_info = {'primary_key': ['portId'],
                           'persistent': 'True',
                           'description':
                               'Ports table includes all type of ports in a single table.',
                           'columns': ['portId', 'adminMode', 'name',
                                       'pvid', 'speed', 'operationalStatus', 'type'],
                           'mode': 'rw'}
        # Canned column metadata returned by nb.Ports.getInfo.name.
        self.ports_name_info = {'restrictions': {'size': '32'}, 'type': 'string',
                                'description': 'This ports name (a 32-byte string).',
                                'mode': 'ro'}
        self.ports_get_row_help = 'Method for getting variable from table Ports'
        # When True, system.multicall returns one result too few (used to
        # simulate a broken multicall response).
        self.error_multicall = False
        self.lags = []
        self.ports_to_lags = []
        # Serving thread; None until start() is called.
        self.th = None

    def start(self):
        """Start serving XML-RPC requests on a background thread."""
        self.th = threading.Thread(target=self.server.serve_forever)
        self.th.start()

    def stop(self):
        """Shut the server down and join its thread.

        A no-op if the server was never started or the thread already died
        (previously this raised AttributeError when called before start()).
        """
        if self.th is not None and self.th.is_alive():
            self.server.shutdown()
            self.server.server_close()
            self.th.join()

    def applications_gettable(self):
        """Return the full applications table."""
        return self.applications

    def applications_set_loglevel(self, app_id, loglevel):
        """Set logLevel on every application row matching app_id.

        Raises SwitchException when loglevel == 'error' (simulated failure).
        """
        if loglevel == 'error':
            raise SwitchException("Error loglevel")
        for row in self.applications:
            if row['appId'] == app_id:
                row['logLevel'] = loglevel
        return 0

    def applications_find(self, app_id, pid_id, app_name):
        """Return the 1-based row index matching app_id and app_name, or -1.

        NOTE(review): pid_id is accepted but not used for matching.
        """
        index = 0
        for row in self.applications:
            index += 1
            if row['appId'] == app_id and row['name'] == app_name:
                return index
        return -1

    def applications_get_size(self):
        """Return the number of application rows."""
        return len(self.applications)

    def applications_exists(self, app_id, pid_id, app_name):
        """Alias of applications_find.

        NOTE(review): returns the 1-based index or -1, not a boolean;
        kept as-is to mirror the faked API.
        """
        return self.applications_find(app_id, pid_id, app_name)

    def system_tables_ready(self):
        """Report that all tables are ready (always 0)."""
        return 0

    def platform_get_row(self, row):
        """Return the platform row for a 1-based row index."""
        row = row - 1
        return self.platform[row]

    def platform_get_table(self):
        """Return the full platform table (not registered by default)."""
        return self.platform

    def platform_get_size(self):
        """Return the number of platform rows."""
        return len(self.platform)

    def ports_gettable(self):
        """Return the full ports table (not registered by default)."""
        return self.ports

    def ports_get_name(self, row_id):
        """Return the port name for a 1-based row index."""
        row_id = row_id - 1
        return self.ports[row_id]['name']

    def ports_get_size(self):
        """Return the number of port rows."""
        return len(self.ports)

    def ports_get_info(self):
        """Return the ports table metadata."""
        return self.ports_info

    def ports_get_info_name(self):
        """Return the metadata of the ports 'name' column."""
        return self.ports_name_info

    def ports_add_row(self, *row):
        """Append a port row given positionally ordered column values."""
        port = {
            'portId': row[0],
            'adminMode': row[1],
            'pvid': row[2],
            'type': row[3],
            'operationalStatus': row[4],
            'speed': row[5],
            'name': row[6],
        }
        self.ports.append(port)
        return 0

    def ports_del_row(self, row_id):
        """Delete the port row at a 1-based row index."""
        self.ports.remove(self.ports[row_id - 1])
        return 0

    def clear_config(self):
        """Pretend to clear configuration (always 0)."""
        return 0

    def method_help(self, method):
        """Return help text for a known method or raise SwitchException."""
        if method == 'nb.Ports.getRow':
            return self.ports_get_row_help
        raise SwitchException('Method %s does not exist' % (method, ))

    def system_multicall(self, *calls):
        """Return one 0 per requested call.

        When error_multicall is True the last result is dropped to simulate
        a malformed multicall response.
        """
        res = []
        for _ in calls[0]:
            res.append(0)
        if self.error_multicall:
            return res[: -1]
        return res

    def ports_lags_get_table(self):
        """Return the ports-to-LAGs mapping table."""
        return self.ports_to_lags

    def ports_lags_get_size(self):
        """Return the number of ports-to-LAGs rows."""
        return len(self.ports_to_lags)

    def lags_get_table(self):
        """Return the LAGs table."""
        return self.lags

    def lags_get_size(self):
        """Return the number of LAG rows."""
        return len(self.lags)

    def lags_add_row(self, *row):
        """Create a LAG row and a matching LAG port row."""
        lag = {
            'lagId': row[0],
            'name': row[1],
            'lagControlType': row[3],
            'actorAdminLagKey': row[2],
            'hashMode': row[4],
        }
        # The LAG also shows up as a port of type 'LAG' with the same id.
        port = {
            'portId': row[0],
            'adminMode': 'Up',
            'pvid': 1,
            'type': 'LAG',
            'operationalStatus': 'Down',
            'speed': 10000,
            'name': row[1],
        }
        self.lags.append(lag)
        self.ports.append(port)
        return 0

    def ports_lag_add_row(self, *row):
        """Attach a port to a LAG; the port's type becomes 'LagMember'."""
        port_lag = {
            'lagId': row[1],
            'portId': row[0],
            'actorPortPriority': row[2],
            'actorAdminPortKey': row[3],
            'adminAggregation': row[4],
            'adminActive': row[5],
            'adminTimeout': row[6],
            'adminSynchronization': row[7],
            'adminCollecting': row[8],
            'adminDistributing': row[9],
            'adminDefaulted': row[10],
            'adminExpired': row[11],
        }
        port = [x for x in self.ports if x['portId'] == row[0]][0]
        port['type'] = 'LagMember'
        self.ports_to_lags.append(port_lag)
        return 0
|
pre_commit_linter.py
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pre-commit script for Oppia.
This script lints Python and JavaScript code, and prints a
list of lint errors to the terminal. If the directory path is passed,
it will lint all Python and JavaScript files in that directory; otherwise,
it will only lint files that have been touched in this commit.
This script ignores all filepaths contained within .eslintignore.
=====================
CUSTOMIZATION OPTIONS
=====================
1. To lint only files that have been touched in this commit
python -m scripts.pre_commit_linter
2. To lint all files in the folder or to lint just a specific file
python -m scripts.pre_commit_linter --path filepath
3. To lint a specific list of files (*.js/*.py only). Separate files by spaces
python -m scripts.pre_commit_linter --files file_1 file_2 ... file_n
4. To lint files in verbose mode
python -m scripts.pre_commit_linter --verbose
Note that the root folder MUST be named 'oppia'.
"""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
# Pylint has issues with the import order of argparse.
# pylint: disable=wrong-import-order
import abc
import argparse
import ast
import collections
import contextlib
import fnmatch
import glob
import multiprocessing
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import time
# Install third party dependencies before proceeding.
from . import install_third_party_libs
install_third_party_libs.main(args=[])
# pylint: disable=wrong-import-position
import python_utils # isort:skip
# Command-line interface. --path and --files are mutually exclusive ways of
# selecting what to lint; when neither is given, only files touched in this
# commit are linted (see _get_all_filepaths).
_PARSER = argparse.ArgumentParser()
_EXCLUSIVE_GROUP = _PARSER.add_mutually_exclusive_group()
_EXCLUSIVE_GROUP.add_argument(
    '--path',
    help='path to the directory with files to be linted',
    action='store')
_EXCLUSIVE_GROUP.add_argument(
    '--files',
    nargs='+',
    help='specific files to be linted. Space separated list',
    action='store')
_PARSER.add_argument(
    '--verbose',
    help='verbose mode. All details will be printed.',
    action='store_true')
# Substrings whose presence on a line exempts it from certain line checks.
EXCLUDED_PHRASES = [
    'utf', 'pylint:', 'http://', 'https://', 'scripts/', 'extract_node']
# Glob patterns of paths that are never linted.
EXCLUDED_PATHS = (
    'third_party/*', 'build/*', '.git/*', '*.pyc', 'CHANGELOG',
    'integrations/*', 'integrations_dev/*', '*.svg', '*.gif',
    '*.png', '*.zip', '*.ico', '*.jpg', '*.min.js', 'backend_prod_files/*',
    'assets/scripts/*', 'core/tests/data/*', 'core/tests/build_sources/*',
    '*.mp3', '*.mp4', 'node_modules/*', 'typings/*', 'local_compiled_js/*',
    'webpack_bundles/*', 'core/tests/services_sources/*')
# Machine-generated files: exempt from the mandatory-pattern checks below.
GENERATED_FILE_PATHS = (
    'extensions/interactions/LogicProof/static/js/generatedDefaultData.ts',
    'extensions/interactions/LogicProof/static/js/generatedParser.ts',
    'core/templates/dev/head/expressions/expression-parser.service.js')
# Configuration files: also exempt from the mandatory-pattern checks.
CONFIG_FILE_PATHS = (
    'core/tests/.browserstack.env.example',
    'core/tests/protractor.conf.js',
    'core/tests/karma.conf.ts',
    'core/templates/dev/head/mathjaxConfig.ts',
    'assets/constants.ts',
    'assets/rich_text_components_definitions.ts',
    'webpack.config.ts',
    'webpack.dev.config.ts',
    'webpack.prod.config.ts')
# Verbatim substrings that must not appear anywhere in the codebase, mapped
# to the message shown on a violation plus per-pattern exclusions.
BAD_PATTERNS = {
    '__author__': {
        'message': 'Please remove author tags from this file.',
        'excluded_files': (),
        'excluded_dirs': ()},
    'datetime.datetime.now()': {
        # Bug fix: the two concatenated literals previously ran together
        # ('...instead ofdatetime.datetime.now().'); a separating space
        # was added to the first literal.
        'message': 'Please use datetime.datetime.utcnow() instead of '
                   'datetime.datetime.now().',
        'excluded_files': (),
        'excluded_dirs': ()},
    '\t': {
        'message': 'Please use spaces instead of tabs.',
        'excluded_files': (),
        'excluded_dirs': (
            'assets/i18n/', 'core/tests/build_sources/assets/')},
    '\r': {
        'message': 'Please make sure all files only have LF endings (no CRLF).',
        'excluded_files': (),
        'excluded_dirs': ()},
    '<<<<<<<': {
        'message': 'Please fully resolve existing merge conflicts.',
        'excluded_files': (),
        'excluded_dirs': ()},
    '>>>>>>>': {
        'message': 'Please fully resolve existing merge conflicts.',
        'excluded_files': (),
        'excluded_dirs': ()},
    'glyphicon': {
        'message': 'Please use equivalent material-icons '
                   'instead of glyphicons.',
        'excluded_files': (),
        'excluded_dirs': ()}
}
# Regexes checked against every file (subject to the per-pattern
# exclusions), regardless of file type.
BAD_PATTERNS_REGEXP = [
    {
        'regexp': re.compile(r'TODO[^\(]*[^\)][^:]*[^\w]*$'),
        'message': 'Please assign TODO comments to a user '
                   'in the format TODO(username): XXX. ',
        'excluded_files': (),
        'excluded_dirs': ()
    }
]
# Regex checks applied to .js and .ts files.
# NOTE: several exclusion entries were previously parenthesized single
# strings (e.g. ('core/tests/')), which Python treats as plain strings:
# iterating 'excluded_dirs' then yields single characters and
# startswith(char) excluded nearly every path. Trailing commas make them
# proper one-element tuples.
BAD_PATTERNS_JS_AND_TS_REGEXP = [
    {
        'regexp': re.compile(r'\b(browser.explore)\('),
        'message': 'In tests, please do not use browser.explore().',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(browser.pause)\('),
        'message': 'In tests, please do not use browser.pause().',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(browser.sleep)\('),
        'message': 'In tests, please do not use browser.sleep().',
        'excluded_files': (
            # TODO(#7622): Remove the file from the excluded list. Remove the
            # TODO in core/tests/protractor_desktop/embedding.js pointing to the
            # same issue. The following was placed due to a necessary sleep as
            # a temporary measure to keep the embedding tests from failing.
            'core/tests/protractor_desktop/embedding.js',
        ),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(browser.waitForAngular)\('),
        'message': 'In tests, please do not use browser.waitForAngular().',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(ddescribe|fdescribe)\('),
        # Bug fix: added the missing space between the concatenated
        # literals ("'ddescribe'or 'fdescribe'").
        'message': 'In tests, please use \'describe\' instead of '
                   '\'ddescribe\' or \'fdescribe\'',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(iit|fit)\('),
        'message': 'In tests, please use \'it\' instead of \'iit\' or \'fit\'',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\b(beforeEach\(inject\(function)\('),
        'message': 'In tests, please use \'angular.mock.inject\' instead of '
                   '\'inject\'',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'templateUrl: \''),
        'message': 'The directives must be directly referenced.',
        'excluded_files': (
            'core/templates/dev/head/pages/exploration-player-page/'
            'FeedbackPopupDirective.js',
        ),
        'excluded_dirs': (
            'extensions/answer_summarizers/',
            'extensions/classifiers/',
            'extensions/dependencies/',
            'extensions/value_generators/',
            'extensions/visualizations/')
    },
    {
        'regexp': re.compile(r'\$parent'),
        # Bug fix: added the missing space between the concatenated
        # literals ("scope objectfor this purpose.").
        'message': 'Please do not access parent properties ' +
                   'using $parent. Use the scope object ' +
                   'for this purpose.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'require\(.*\.\..*\);'),
        'message': 'Please, don\'t use relative imports in require().',
        'excluded_files': (),
        'excluded_dirs': ('core/tests/',)
    }
]
# Patterns that MUST appear in every file of the listed types (subject to
# the listed exclusions).
MANDATORY_PATTERNS_REGEXP = [
    {
        'regexp': re.compile(
            r'Copyright \d{4} The Oppia Authors\. All Rights Reserved\.'),
        'message': 'Please ensure this file should contain a proper '
                   'copyright notice.',
        'included_types': ('.py', '.js', '.sh', '.ts'),
        'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
            '__init__.py', ),
        'excluded_dirs': EXCLUDED_PATHS
    },
    {
        'regexp': re.compile('from __future__ import unicode_literals'),
        'message': 'Please ensure this file should contain unicode_literals '
                   'future import.',
        # Bug fix: ('.py') is a plain string, not a one-element tuple, so
        # consumers that iterate included_types would see individual
        # characters ('.', 'p', 'y'). The trailing comma makes it a tuple,
        # matching the first entry above.
        'included_types': ('.py',),
        'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS + (
            '__init__.py',),
        'excluded_dirs': EXCLUDED_PATHS
    }
]
# JS/TS files must start with an @fileoverview description.
MANDATORY_PATTERNS_JS_REGEXP = [
    {
        'regexp': re.compile(r'^\s\*\s@fileoverview\s[a-zA-Z0-9_]+'),
        'message': 'Please ensure this file should contain a file '
                   'overview i.e. a short description of the file.',
        'included_types': ('.js', '.ts'),
        'excluded_files': GENERATED_FILE_PATHS + CONFIG_FILE_PATHS,
        'excluded_dirs': EXCLUDED_PATHS
    }
]
# Regex checks applied line-by-line to .html files.
BAD_LINE_PATTERNS_HTML_REGEXP = [
    {
        'regexp': re.compile(r'text\/ng-template'),
        'message': 'The directives must be directly referenced.',
        'excluded_files': (),
        'excluded_dirs': (
            'extensions/answer_summarizers/',
            'extensions/classifiers/',
            'extensions/objects/',
            'extensions/value_generators/')
    },
    {
        'regexp': re.compile(r'[ \t]+$'),
        'message': 'There should not be any trailing whitespaces.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\$parent'),
        # Bug fix: added the missing space between the concatenated
        # literals ("scope objectfor this purpose.").
        'message': 'Please do not access parent properties ' +
                   'using $parent. Use the scope object ' +
                   'for this purpose.',
        'excluded_files': (),
        'excluded_dirs': ()
    }
]
# Regex checks applied to .py files. Most entries steer contributors
# towards the python_utils compatibility layer (Python 2/3 portability).
# NOTE: many 'excluded_files' entries were previously parenthesized single
# strings like ('python_utils.py'), which Python treats as plain strings —
# the membership test `filepath in excluded_files` then did SUBSTRING
# matching instead of exact path matching. Trailing commas make them
# proper one-element tuples.
BAD_PATTERNS_PYTHON_REGEXP = [
    {
        'regexp': re.compile(r'\Wprint\('),
        'message': 'Please do not use print statement.',
        'excluded_files': (
            'core/tests/test_utils.py',
            'core/tests/performance_framework/perf_domain.py'),
        'excluded_dirs': ('scripts/',)
    },
    {
        'regexp': re.compile(r'\sprint\('),
        'message': 'Please use python_utils.PRINT().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'# pylint:\s*disable=[A-Z][0-9]{4}'),
        'message': 'Please remove pylint exclusion if it is unnecessary, or '
                   'make it human readable with a sentence instead of an id. '
                   'The id-to-message list can be seen '
                   'here->http://pylint-messages.wikidot.com/all-codes',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'self.assertEquals\('),
        'message': 'Please do not use self.assertEquals method. ' +
                   'This method has been deprecated. Instead use ' +
                   'self.assertEqual method.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'with open\(|= open\('),
        'message': 'Please use python_utils.open_file() instead of open().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'StringIO'),
        'message': 'Please use python_utils.string_io() instead of ' +
                   'import StringIO.',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib\..*quote\('),
        'message': 'Please use python_utils.url_quote().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib\..*unquote_plus\('),
        'message': 'Please use python_utils.url_unquote_plus().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib\..*urlencode\('),
        'message': 'Please use python_utils.url_encode().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib\..*urlretrieve\('),
        'message': 'Please use python_utils.url_retrieve().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib(2)?\..*urlopen\('),
        'message': 'Please use python_utils.url_open().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urlsplit'),
        'message': 'Please use python_utils.url_split().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urlparse'),
        'message': 'Please use python_utils.url_parse().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urlunsplit'),
        'message': 'Please use python_utils.url_unsplit().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'parse_qs'),
        'message': 'Please use python_utils.parse_query_string().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wunquote\('),
        'message': 'Please use python_utils.urllib_unquote().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urljoin'),
        'message': 'Please use python_utils.url_join().',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'urllib(2)?\..*Request\('),
        'message': 'Please use python_utils.url_request().',
        'excluded_files': ('python_utils.py', 'python_utils_test.py'),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'[^.|\w]input\('),
        'message': 'Please use python_utils.INPUT.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'[^.|\w|\s]map\('),
        'message': 'Please use python_utils.MAP.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wnext\('),
        'message': 'Please use python_utils.NEXT.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'object\):'),
        'message': 'Please use python_utils.OBJECT.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wrange\('),
        'message': 'Please use python_utils.RANGE.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wround\('),
        'message': 'Please use python_utils.ROUND.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wstr\('),
        'message': (
            'Please try to use python_utils.convert_to_bytes() for the strings '
            'used in webapp2\'s built-in methods or for strings used directly '
            'in NDB datastore models. If you need to cast ints/floats to '
            'strings, please use python_utils.UNICODE() instead.'),
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'\Wzip\('),
        'message': 'Please use python_utils.ZIP.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'basestring'),
        'message': 'Please use python_utils.BASESTRING.',
        'excluded_files': ('python_utils.py',),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'__metaclass__'),
        'message': 'Please use python_utils.with_metaclass().',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'iteritems'),
        'message': 'Please use items() instead.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'itervalues'),
        'message': 'Please use values() instead.',
        'excluded_files': (),
        'excluded_dirs': ()
    },
    {
        'regexp': re.compile(r'iterkeys'),
        'message': 'Please use keys() instead.',
        'excluded_files': (),
        'excluded_dirs': ()
    }
]
# Maps file extensions to their type-specific bad-pattern tables.
BAD_PATTERNS_MAP = {
    '.js': BAD_PATTERNS_JS_AND_TS_REGEXP,
    '.ts': BAD_PATTERNS_JS_AND_TS_REGEXP,
    '.html': BAD_LINE_PATTERNS_HTML_REGEXP,
    '.py': BAD_PATTERNS_PYTHON_REGEXP
}
# Strings that MUST be present in specific files before committing.
REQUIRED_STRINGS_CONSTANTS = {
    'DEV_MODE: true': {
        # Bug fix: the two concatenated literals previously ran together
        # ('...constants.tsto true...'); a separating space was added.
        'message': 'Please set the DEV_MODE variable in constants.ts '
                   'to true before committing.',
        'excluded_files': ()
    }
}
ALLOWED_TERMINATING_PUNCTUATIONS = ['.', '?', '}', ']', ')']
CODEOWNER_FILEPATH = '.github/CODEOWNERS'
# This list needs to be in sync with the important patterns in the CODEOWNERS
# file.
CODEOWNER_IMPORTANT_PATHS = [
    '/core/controllers/acl_decorators*.py',
    '/core/controllers/base*.py',
    '/core/domain/html*.py',
    '/core/domain/rights_manager*.py',
    '/core/domain/role_services*.py',
    '/core/domain/user*.py',
    '/core/storage/',
    '/export/',
    '/manifest.json',
    '/package.json',
    '/yarn.lock',
    '/scripts/install_third_party_libs.py',
    '/.github/']
# Environment sanity checks: the linter relies on repo-relative paths, so it
# must run from the repository root (the directory named 'oppia'), and
# pylint must already be installed in the sibling oppia_tools directory.
if not os.getcwd().endswith('oppia'):
    python_utils.PRINT('')
    python_utils.PRINT(
        'ERROR Please run this script from the oppia root directory.')
    # Bug fix: previously execution continued after printing this error;
    # abort immediately, matching the pylint check below.
    sys.exit(1)
_PARENT_DIR = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
_PYLINT_PATH = os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-1.9.4')
if not os.path.exists(_PYLINT_PATH):
    python_utils.PRINT('')
    python_utils.PRINT(
        'ERROR Please run install_third_party_libs.py first to install pylint')
    python_utils.PRINT(' and its dependencies.')
    sys.exit(1)
_PATHS_TO_INSERT = [
_PYLINT_PATH,
os.getcwd(),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine', 'lib', 'webapp2-2.3'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine', 'lib', 'yaml-3.10'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine', 'lib', 'jinja2-2.6'),
os.path.join(
_PARENT_DIR, 'oppia_tools', 'google_appengine_1.9.67',
'google_appengine'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'webtest-2.0.33'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'browsermob-proxy-0.8.0'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'esprima-4.0.1'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'pycodestyle-2.5.0'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'pylint-quotes-0.1.8'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'selenium-3.13.0'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'PyGithub-1.43.7'),
os.path.join(_PARENT_DIR, 'oppia_tools', 'Pillow-6.0.0'),
os.path.join('third_party', 'backports.functools_lru_cache-1.5'),
os.path.join('third_party', 'beautifulsoup4-4.7.1'),
os.path.join('third_party', 'bleach-3.1.0'),
os.path.join('third_party', 'callbacks-0.3.0'),
os.path.join('third_party', 'gae-cloud-storage-1.9.22.1'),
os.path.join('third_party', 'gae-mapreduce-1.9.22.0'),
os.path.join('third_party', 'gae-pipeline-1.9.22.1'),
os.path.join('third_party', 'mutagen-1.42.0'),
os.path.join('third_party', 'soupsieve-1.9.1'),
os.path.join('third_party', 'six-1.12.0'),
os.path.join('third_party', 'webencodings-0.5.1'),
]
for path in _PATHS_TO_INSERT:
sys.path.insert(0, path)
# pylint: disable=wrong-import-order
# pylint: disable=wrong-import-position
import isort # isort:skip
import pycodestyle # isort:skip
import esprima # isort:skip
from pylint import lint # isort:skip
from . import build # isort:skip
from . import docstrings_checker # isort:skip
import html.parser # isort:skip
# pylint: enable=wrong-import-order
# pylint: enable=wrong-import-position
# Status labels used in the linter's summary output.
_MESSAGE_TYPE_SUCCESS = 'SUCCESS'
_MESSAGE_TYPE_FAILED = 'FAILED'
# In-memory buffer that captures lint output redirected from stdout.
_TARGET_STDOUT = python_utils.string_io()
# Cross-process containers (see the multiprocessing linter processes) for
# collected stdout and per-file results.
_STDOUT_LIST = multiprocessing.Manager().list()
_FILES = multiprocessing.Manager().dict()
class FileCache(python_utils.OBJECT):
    """Provides thread-safe access to cached file content."""

    def __init__(self):
        # Maps (filepath, mode) -> (full text, tuple of lines).
        self._cache = {}

    def read(self, filepath, mode='r'):
        """Returns the data read from the file in unicode form.

        Args:
            filepath: str. The file path from which data is to be read.
            mode: str. The mode in which the file is to be opened.

        Returns:
            str. The data read from the file.
        """
        return self._get_data(filepath, mode)[0]

    def readlines(self, filepath, mode='r'):
        """Returns the tuple containing data line by line as read from the
        file in unicode form.

        Args:
            filepath: str. The file path from which data is to be read.
            mode: str. The mode in which the file is to be opened.

        Returns:
            tuple(str). The tuple containing data line by line as read from
            the file.
        """
        return self._get_data(filepath, mode)[1]

    def _get_data(self, filepath, mode):
        """Returns the cached (text, lines) pair for the given file,
        reading and caching it on first access.

        Args:
            filepath: str. The file path from which data is to be read.
            mode: str. The mode in which the file is to be opened.

        Returns:
            tuple(str, tuple(str)). The full file content as the first
            element, and the same content split line by line as the second.
        """
        key = (filepath, mode)
        try:
            return self._cache[key]
        except KeyError:
            with python_utils.open_file(filepath, mode) as f:
                lines = f.readlines()
            entry = (''.join(lines), tuple(lines))
            self._cache[key] = entry
            return entry
def _lint_all_files(
        js_filepaths, ts_filepaths, py_filepaths, html_filepaths,
        css_filepaths, verbose_mode_enabled):
    """This function is used to check if node-eslint dependencies are
    installed and pass ESLint binary path and lint all the files(JS, Python,
    HTML, CSS) with their respective third party linters.

    Args:
        js_filepaths: list(str). JS files to lint.
        ts_filepaths: list(str). TS files to lint.
        py_filepaths: list(str). Python files to lint.
        html_filepaths: list(str). HTML files to lint.
        css_filepaths: list(str). CSS files to lint.
        verbose_mode_enabled: bool. Whether verbose output is enabled.

    Returns:
        tuple(list, list, list). The started linter processes, the queues
        that will receive each linter's result, and the queues that will
        receive the captured stdout of the two CSS linters.
    """
    python_utils.PRINT('Starting Js, Ts, Python, HTML, and CSS linter...')
    # Pylint/pycodestyle read their configuration from the repo root.
    pylintrc_path = os.path.join(os.getcwd(), '.pylintrc')
    config_pylint = '--rcfile=%s' % pylintrc_path
    config_pycodestyle = os.path.join(os.getcwd(), 'tox.ini')
    parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
    # Node-based linters (ESLint, stylelint) are run through the bundled
    # node binary in the sibling oppia_tools directory.
    node_path = os.path.join(
        parent_dir, 'oppia_tools', 'node-10.15.3', 'bin', 'node')
    eslint_path = os.path.join(
        'node_modules', 'eslint', 'bin', 'eslint.js')
    stylelint_path = os.path.join(
        'node_modules', 'stylelint', 'bin', 'stylelint.js')
    # Separate stylelint configs: one for CSS embedded in HTML, one for the
    # standalone oppia.css tree.
    config_path_for_css_in_html = os.path.join(
        parent_dir, 'oppia', '.stylelintrc')
    config_path_for_oppia_css = os.path.join(
        parent_dir, 'oppia', 'core', 'templates', 'dev', 'head',
        'css', '.stylelintrc')
    if not (os.path.exists(eslint_path) and os.path.exists(stylelint_path)):
        python_utils.PRINT('')
        python_utils.PRINT(
            'ERROR Please run start.sh first to install node-eslint ')
        python_utils.PRINT(
            ' or node-stylelint and its dependencies.')
        sys.exit(1)
    # Each linter runs in its own process so the checks happen in parallel;
    # results come back through the multiprocessing queues created below.
    js_and_ts_files_to_lint = js_filepaths + ts_filepaths
    linting_processes = []
    js_and_ts_result = multiprocessing.Queue()
    linting_processes.append(multiprocessing.Process(
        target=_lint_js_and_ts_files, args=(
            node_path, eslint_path, js_and_ts_files_to_lint,
            js_and_ts_result, verbose_mode_enabled)))
    css_in_html_result = multiprocessing.Queue()
    css_in_html_stdout = multiprocessing.Queue()
    linting_processes.append(multiprocessing.Process(
        target=_lint_css_files, args=(
            node_path,
            stylelint_path,
            config_path_for_css_in_html,
            html_filepaths, css_in_html_stdout,
            css_in_html_result, verbose_mode_enabled)))
    css_result = multiprocessing.Queue()
    css_stdout = multiprocessing.Queue()
    linting_processes.append(multiprocessing.Process(
        target=_lint_css_files, args=(
            node_path,
            stylelint_path,
            config_path_for_oppia_css,
            css_filepaths, css_stdout,
            css_result, verbose_mode_enabled)))
    py_result = multiprocessing.Queue()
    linting_processes.append(multiprocessing.Process(
        target=_lint_py_files,
        args=(
            config_pylint, config_pycodestyle, py_filepaths,
            py_result, verbose_mode_enabled)))
    py_result_for_python3_compatibility = multiprocessing.Queue()
    linting_processes.append(multiprocessing.Process(
        target=_lint_py_files_for_python3_compatibility,
        args=(
            py_filepaths, py_result_for_python3_compatibility,
            verbose_mode_enabled)))
    # Non-daemonic so that linter processes may spawn subprocesses of their
    # own (e.g. node, pylint).
    for process in linting_processes:
        process.daemon = False
        process.start()
    result_queues = [
        js_and_ts_result, css_in_html_result, css_result, py_result,
        py_result_for_python3_compatibility
    ]
    stdout_queus = [
        css_in_html_stdout, css_stdout
    ]
    return linting_processes, result_queues, stdout_queus
def _is_filepath_excluded_for_bad_patterns_check(pattern, filepath):
    """Checks if file is excluded from the bad patterns check.

    Args:
        pattern: str. The pattern to be checked against.
        filepath: str. Path of the file.

    Returns:
        bool. Whether to exclude the given file from this particular
        pattern check.
    """
    config = BAD_PATTERNS[pattern]
    if filepath in config['excluded_files']:
        return True
    return any(
        filepath.startswith(excluded_dir)
        for excluded_dir in config['excluded_dirs'])
def _get_expression_from_node_if_one_exists(
parsed_node, components_to_check):
"""This function first checks whether the parsed node represents
the required angular component that needs to be derived by checking if
its in the 'components_to_check' list. If yes, then it will return the
expression part of the node from which the component can be derived.
If no, it will return None. It is done by filtering out
'AssignmentExpression' (as it represents an assignment) and 'Identifier'
(as it represents a static expression).
Args:
parsed_node: dict. Parsed node of the body of a JS file.
components_to_check: list(str). List of angular components to check
in a JS file. These include directives, factories, controllers,
etc.
Returns:
expression: dict or None. Expression part of the node if the node
represents a component else None.
"""
if parsed_node.type != 'ExpressionStatement':
return
# Separate the expression part of the node which is the actual
# content of the node.
expression = parsed_node.expression
# Check whether the expression belongs to a
# 'CallExpression' which always contains a call
# and not an 'AssignmentExpression'.
# For example, func() is a CallExpression.
if expression.type != 'CallExpression':
return
# Check whether the expression belongs to a 'MemberExpression' which
# represents a computed expression or an Identifier which represents
# a static expression.
# For example, 'thing.func' is a MemberExpression where
# 'thing' is the object of the MemberExpression and
# 'func' is the property of the MemberExpression.
# Another example of a MemberExpression within a CallExpression is
# 'thing.func()' where 'thing.func' is the callee of the CallExpression.
if expression.callee.type != 'MemberExpression':
return
# Get the component in the JS file.
component = expression.callee.property.name
if component not in components_to_check:
return
return expression
def _walk_with_gitignore(root, exclude_dirs):
    """A walk function similar to os.walk, but skipping files and
    directories that git ignores, as well as the directories listed in
    exclude_dirs.

    Args:
        root: str. The path from where the function should start walking.
        exclude_dirs: list(str). A list of dir path which should be ignored.

    Yields:
        list(str). A list of unignored files, one batch per directory.
    """
    entries = [os.path.join(root, name) for name in os.listdir(root)]
    subdirs = [path for path in entries if os.path.isdir(path)]
    regular_files = [path for path in entries if not os.path.isdir(path)]
    # First yield this directory's unignored files, then recurse.
    yield [path for path in regular_files if not _is_path_ignored(path)]
    for subdir in subdirs:
        # Git matches directories with a trailing slash, hence the '/'.
        if subdir in exclude_dirs or _is_path_ignored(subdir + '/'):
            continue
        for file_batch in _walk_with_gitignore(subdir, exclude_dirs):
            yield file_batch
def _is_path_ignored(path_to_check):
    """Checks whether the given path is ignored by git.

    Args:
        path_to_check: str. A path to a file or a dir.

    Returns:
        bool. Whether the given path is ignored by git.
    """
    # "git check-ignore -q <path>" exits with status 0 when the path is
    # ignored and non-zero otherwise; subprocess.call returns that status.
    return subprocess.call(
        ['git', 'check-ignore', '-q', path_to_check]) == 0
def _get_changed_filepaths():
    """Returns a list of modified files (both staged and unstaged).

    Returns:
        list(str). Filepaths of files added, copied or modified
        (--diff-filter=ACM) in the working tree or the index.
    """
    unstaged_files = subprocess.check_output([
        'git', 'diff', '--name-only',
        '--diff-filter=ACM']).splitlines()
    staged_files = subprocess.check_output([
        'git', 'diff', '--cached', '--name-only',
        '--diff-filter=ACM']).splitlines()
    # NOTE(review): under Python 3, check_output returns bytes, so these
    # entries are byte strings there — presumably callers cope with that;
    # verify if this script is run under Python 3.
    # The previous version copied the concatenated list element-by-element
    # with an identity comprehension; returning the concatenation directly
    # is equivalent.
    return unstaged_files + staged_files
def _get_all_files_in_directory(dir_path, excluded_glob_patterns):
"""Recursively collects all files in directory and
subdirectories of specified path.
Args:
dir_path: str. Path to the folder to be linted.
excluded_glob_patterns: set(str). Set of all glob patterns
to be excluded.
Returns:
a list of files in directory and subdirectories without excluded files.
"""
files_in_directory = []
for _dir, _, files in os.walk(dir_path):
for file_name in files:
filepath = os.path.relpath(
os.path.join(_dir, file_name), os.getcwd())
if not any([fnmatch.fnmatch(filepath, gp) for gp in
excluded_glob_patterns]):
files_in_directory.append(filepath)
return files_in_directory
@contextlib.contextmanager
def _redirect_stdout(new_target):
"""Redirect stdout to the new target.
Args:
new_target: TextIOWrapper. The new target to which stdout is redirected.
Yields:
TextIOWrapper. The new target.
"""
old_target = sys.stdout
sys.stdout = new_target
try:
yield new_target
finally:
sys.stdout = old_target
def _get_all_filepaths(input_path, input_filenames):
    """Returns the filepaths which need to be linted and checked.

    Args:
        input_path: str. The path of the directory to be linted and checked.
        input_filenames: list(str). The list of filenames to be linted and
            checked, ignored if input_path is specified.

    Returns:
        list(str). The list of filepaths to be linted and checked, with
        every path matching EXCLUDED_PATHS filtered out.
    """
    eslintignore_path = os.path.join(os.getcwd(), '.eslintignore')
    if input_path:
        input_path = os.path.join(os.getcwd(), input_path)
        if not os.path.exists(input_path):
            python_utils.PRINT(
                'Could not locate file or directory %s. Exiting.' % input_path)
            python_utils.PRINT('----------------------------------------')
            sys.exit(1)
        if os.path.isfile(input_path):
            all_filepaths = [input_path]
        else:
            # Directories honour the project's .eslintignore patterns.
            excluded_glob_patterns = FILE_CACHE.readlines(eslintignore_path)
            all_filepaths = _get_all_files_in_directory(
                input_path, excluded_glob_patterns)
    elif input_filenames:
        existing_filepaths = [
            filename for filename in input_filenames
            if os.path.isfile(filename)]
        missing_filepaths = [
            filename for filename in input_filenames
            if not os.path.isfile(filename)]
        if missing_filepaths:
            python_utils.PRINT(
                'The following file(s) do not exist: %s\n'
                'Exiting.' % missing_filepaths)
            sys.exit(1)
        all_filepaths = existing_filepaths
    else:
        # Default: lint only the files touched in this commit.
        all_filepaths = _get_changed_filepaths()
    return [
        filename for filename in all_filepaths
        if not any(
            fnmatch.fnmatch(filename, pattern)
            for pattern in EXCLUDED_PATHS)]
def _check_bad_pattern_in_file(filepath, file_content, pattern):
    """Detects whether the given pattern is present in the file.

    Args:
        filepath: str. Path of the file.
        file_content: str. Contents of the file.
        pattern: dict. (regexp(regex pattern) : pattern to match,
            message(str) : message to show if pattern matches,
            excluded_files(tuple(str)) : files to be excluded from matching,
            excluded_dirs(tuple(str)) : directories to be excluded from
            matching).
            Object containing details for the pattern to be checked.

    Returns:
        bool. True if there is bad pattern else false.
    """
    is_excluded = (
        filepath in pattern['excluded_files']
        or any(
            filepath.startswith(excluded_dir)
            for excluded_dir in pattern['excluded_dirs']))
    if is_excluded:
        return False
    regexp = pattern['regexp']
    bad_pattern_count = 0
    for line_num, line in enumerate(file_content.split('\n'), 1):
        # Lines carrying this marker opt out of the check.
        if line.endswith('disable-bad-pattern-check'):
            continue
        if not regexp.search(line):
            continue
        python_utils.PRINT('%s --> Line %s: %s' % (
            filepath, line_num, pattern['message']))
        python_utils.PRINT('')
        bad_pattern_count += 1
    return bad_pattern_count > 0
def _check_file_type_specific_bad_pattern(filepath, content):
    """Check the file content based on the file's extension.

    Args:
        filepath: str. Path of the file.
        content: str. Contents of the file.

    Returns:
        failed: bool. True if there is bad pattern else false.
        total_error_count: int. The number of errors.
    """
    _, extension = os.path.splitext(filepath)
    # Extensions with no registered pattern table are skipped entirely.
    patterns_to_check = BAD_PATTERNS_MAP.get(extension) or []
    error_count = 0
    for bad_pattern in patterns_to_check:
        if _check_bad_pattern_in_file(filepath, content, bad_pattern):
            error_count += 1
    return error_count > 0, error_count
class TagMismatchException(Exception):
    """Raised when an HTML end tag does not match, or has no, start tag."""

    pass
class CustomHTMLParser(html.parser.HTMLParser):
"""Custom HTML parser to check indentation."""
def __init__(self, filepath, file_lines, debug, failed=False):
"""Define various variables to parse HTML."""
html.parser.HTMLParser.__init__(self)
self.tag_stack = []
self.debug = debug
self.failed = failed
self.filepath = filepath
self.file_lines = file_lines
self.indentation_level = 0
self.indentation_width = 2
self.void_elements = [
'area', 'base', 'br', 'col', 'embed',
'hr', 'img', 'input', 'link', 'meta',
'param', 'source', 'track', 'wbr']
def handle_starttag(self, tag, attrs):
"""Handle start tag of a HTML line."""
line_number, column_number = self.getpos()
# Check the indentation of the tag.
expected_indentation = self.indentation_level * self.indentation_width
tag_line = self.file_lines[line_number - 1].lstrip()
opening_tag = '<' + tag
# Check the indentation for content of style tag.
if tag_line.startswith(opening_tag) and tag == 'style':
# Getting next line after style tag.
next_line = self.file_lines[line_number]
next_line_expected_indentation = (
self.indentation_level + 1) * self.indentation_width
next_line_column_number = len(next_line) - len(next_line.lstrip())
if next_line_column_number != next_line_expected_indentation:
python_utils.PRINT(
'%s --> Expected indentation '
'of %s, found indentation of %s '
'for content of %s tag on line %s ' % (
self.filepath, next_line_expected_indentation,
next_line_column_number, tag, line_number + 1))
python_utils.PRINT('')
self.failed = True
if tag_line.startswith(opening_tag) and (
column_number != expected_indentation):
python_utils.PRINT(
'%s --> Expected indentation '
'of %s, found indentation of %s '
'for %s tag on line %s ' % (
self.filepath, expected_indentation,
column_number, tag, line_number))
python_utils.PRINT('')
self.failed = True
if tag not in self.void_elements:
self.tag_stack.append((tag, line_number, column_number))
self.indentation_level += 1
if self.debug:
python_utils.PRINT('DEBUG MODE: Start tag_stack')
python_utils.PRINT(self.tag_stack)
# Check the indentation of the attributes of the tag.
indentation_of_first_attribute = (
column_number + len(tag) + 2)
starttag_text = self.get_starttag_text()
# Check whether the values of all attributes are placed
# in double quotes.
for attr, value in attrs:
# Not all attributes will have a value.
# Therefore the check should run only for those
# attributes which have a value.
if value:
expected_value = '"' + value + '"'
# " is rendered as a double quote by the parser.
if '"' in starttag_text:
rendered_text = starttag_text.replace('"', '"')
else:
rendered_text = starttag_text
if not expected_value in rendered_text:
self.failed = True
python_utils.PRINT(
'%s --> The value %s of attribute '
'%s for the tag %s on line %s should '
'be enclosed within double quotes.' % (
self.filepath, value, attr,
tag, line_number))
python_utils.PRINT('')
for line_num, line in enumerate(starttag_text.splitlines()):
if line_num == 0:
continue
leading_spaces_count = len(line) - len(line.lstrip())
list_of_attrs = []
for attr, _ in attrs:
list_of_attrs.append(attr)
if not line.lstrip().startswith(tuple(list_of_attrs)):
continue
if indentation_of_first_attribute != leading_spaces_count:
line_num_of_error = line_number + line_num
python_utils.PRINT(
'%s --> Attribute for tag %s on line '
'%s should align with the leftmost '
'attribute on line %s ' % (
self.filepath, tag,
line_num_of_error, line_number))
python_utils.PRINT('')
self.failed = True
    def handle_endtag(self, tag):
        """Handle end tag of a HTML line.

        Pops the matching start tag off the tag stack, verifies the tag
        names correspond, and checks that an end tag placed on its own
        line is indented to the same column as its start tag.

        Args:
            tag: str. Name of the end tag.

        Raises:
            TagMismatchException. The tag stack is empty (stray end tag),
                or the popped start tag does not match this end tag.
        """
        line_number, _ = self.getpos()
        tag_line = self.file_lines[line_number - 1]
        # Column of this end tag, measured as leading whitespace width.
        leading_spaces_count = len(tag_line) - len(tag_line.lstrip())
        try:
            last_starttag, last_starttag_line_num, last_starttag_col_num = (
                self.tag_stack.pop())
        except IndexError:
            raise TagMismatchException('Error in line %s of file %s\n' % (
                line_number, self.filepath))
        if last_starttag != tag:
            raise TagMismatchException('Error in line %s of file %s\n' % (
                line_number, self.filepath))
        # Alignment is only enforced when the end tag is on a different
        # line from its start tag; same-line pairs need no alignment.
        if leading_spaces_count != last_starttag_col_num and (
                last_starttag_line_num != line_number):
            python_utils.PRINT(
                '%s --> Indentation for end tag %s on line '
                '%s does not match the indentation of the '
                'start tag %s on line %s ' % (
                    self.filepath, tag, line_number,
                    last_starttag, last_starttag_line_num))
            python_utils.PRINT('')
            self.failed = True
        self.indentation_level -= 1
        if self.debug:
            python_utils.PRINT('DEBUG MODE: End tag_stack')
            python_utils.PRINT(self.tag_stack)
def handle_data(self, data):
"""Handle indentation level."""
data_lines = data.split('\n')
opening_block = tuple(
['{% block', '{% macro', '{% if', '% for', '% if'])
ending_block = tuple(['{% end', '{%- end', '% } %>'])
for data_line in data_lines:
data_line = data_line.lstrip()
if data_line.startswith(opening_block):
self.indentation_level += 1
elif data_line.startswith(ending_block):
self.indentation_level -= 1
def check_for_important_patterns_at_bottom_of_codeowners(important_patterns):
    """Checks that the most important patterns are at the bottom
    of the CODEOWNERS file.

    Arguments:
        important_patterns: list(str). List of the important
            patterns for CODEOWNERS file.

    Returns:
        bool. Whether the CODEOWNERS "important pattern" check fails.
    """
    failed = False
    # A set collapses repeated entries, so a size mismatch against the
    # original list means at least one duplicate exists.
    important_patterns_set = set(important_patterns)
    codeowner_important_paths_set = set(CODEOWNER_IMPORTANT_PATHS)
    if len(important_patterns) != len(important_patterns_set):
        python_utils.PRINT(
            '%s --> Duplicate pattern(s) found in critical rules'
            ' section.' % CODEOWNER_FILEPATH)
        failed = True
    if len(CODEOWNER_IMPORTANT_PATHS) != len(codeowner_important_paths_set):
        python_utils.PRINT(
            'scripts/pre_commit_linter.py --> Duplicate pattern(s) found '
            'in CODEOWNER_IMPORTANT_PATHS list.')
        failed = True
    # Report the two directions of the symmetric difference separately so
    # each missing rule gets a message tailored to where it is missing.
    for missing_rule in important_patterns_set - codeowner_important_paths_set:
        python_utils.PRINT(
            '%s --> Rule %s is not present in the '
            'CODEOWNER_IMPORTANT_PATHS list in '
            'scripts/pre_commit_linter.py. Please add this rule in the '
            'mentioned list or remove this rule from the \'Critical files'
            '\' section.' % (CODEOWNER_FILEPATH, missing_rule))
        failed = True
    for missing_rule in codeowner_important_paths_set - important_patterns_set:
        python_utils.PRINT(
            '%s --> Rule \'%s\' is not present in the \'Critical files\' '
            'section. Please place it under the \'Critical files\' '
            'section since it is an important rule. Alternatively please '
            'remove it from the \'CODEOWNER_IMPORTANT_PATHS\' list in '
            'scripts/pre_commit_linter.py if it is no longer an '
            'important rule.' % (CODEOWNER_FILEPATH, missing_rule))
        failed = True
    return failed
def _check_codeowner_file(verbose_mode_enabled):
    """Checks the CODEOWNERS file for any uncovered dirs/files and also
    checks that every pattern in the CODEOWNERS file matches at least one
    file/dir. Note that this checks the CODEOWNERS file according to the
    glob patterns supported by Python2.7 environment. For more information
    please refer https://docs.python.org/2/library/glob.html.
    This function also ensures that the most important rules are at the
    bottom of the CODEOWNERS file.

    Args:
        verbose_mode_enabled: bool. True if verbose mode is enabled.

    Returns:
        list(str). Summary messages indicating whether the check passed.

    NOTE(review): this function is defined twice in this module with an
    identical body; Python silently lets the later definition shadow this
    one, so one of the two copies should be deleted.
    """
    if verbose_mode_enabled:
        python_utils.PRINT('Starting CODEOWNERS file check')
        python_utils.PRINT('----------------------------------------')
    with _redirect_stdout(_TARGET_STDOUT):
        failed = False
        summary_messages = []
        # Checks whether every pattern in the CODEOWNERS file matches at
        # least one dir/file.
        critical_file_section_found = False
        important_rules_in_critical_section = []
        file_patterns = []
        dir_patterns = []
        for line_num, line in enumerate(FILE_CACHE.readlines(
                CODEOWNER_FILEPATH)):
            stripped_line = line.strip()
            if '# Critical files' in line:
                critical_file_section_found = True
            if stripped_line and stripped_line[0] != '#':
                # Every non-comment rule line must name at least one
                # owner (owners are written as '@username').
                if '@' not in line:
                    python_utils.PRINT(
                        '%s --> Pattern on line %s doesn\'t have '
                        'codeowner' % (CODEOWNER_FILEPATH, line_num + 1))
                    failed = True
                else:
                    # Extract the file pattern from the line.
                    line_in_concern = line.split('@')[0].strip()
                    # This is being populated for the important rules
                    # check.
                    if critical_file_section_found:
                        important_rules_in_critical_section.append(
                            line_in_concern)
                    # Checks if the path is the full path relative to the
                    # root oppia directory.
                    if not line_in_concern.startswith('/'):
                        python_utils.PRINT(
                            '%s --> Pattern on line %s is invalid. Use '
                            'full path relative to the root directory'
                            % (CODEOWNER_FILEPATH, line_num + 1))
                        failed = True
                    # The double asterisks pattern is supported by the
                    # CODEOWNERS syntax but not the glob in Python 2.
                    # The following condition checks this.
                    if '**' in line_in_concern:
                        python_utils.PRINT(
                            '%s --> Pattern on line %s is invalid. '
                            '\'**\' wildcard not allowed' % (
                                CODEOWNER_FILEPATH, line_num + 1))
                        failed = True
                    # Adjustments to the dir paths in CODEOWNERS syntax
                    # for glob-style patterns to match correctly.
                    if line_in_concern.endswith('/'):
                        line_in_concern = line_in_concern[:-1]
                    # The following condition checks whether the specified
                    # path exists in the codebase or not. The CODEOWNERS
                    # syntax has paths starting with '/' which refers to
                    # full path relative to root, but python glob module
                    # does not conform to this logic and literally matches
                    # the '/' character. Therefore the leading '/' has to
                    # be changed to './' for glob patterns to match
                    # correctly.
                    line_in_concern = line_in_concern.replace('/', './', 1)
                    if not glob.glob(line_in_concern):
                        python_utils.PRINT(
                            '%s --> Pattern on line %s doesn\'t match '
                            'any file or directory' % (
                                CODEOWNER_FILEPATH, line_num + 1))
                        failed = True
                    # The following list is being populated with the
                    # paths in the CODEOWNERS file with the removal of the
                    # leading '/' to aid in the glob pattern matching in
                    # the next part of the check wherein the valid patterns
                    # are used to check if they cover the entire codebase.
                    if os.path.isdir(line_in_concern):
                        dir_patterns.append(line_in_concern)
                    else:
                        file_patterns.append(line_in_concern)
        # Checks that every file (except those under the dir represented by
        # the dir_patterns) is covered under CODEOWNERS.
        for file_paths in _walk_with_gitignore('.', dir_patterns):
            for file_path in file_paths:
                match = False
                for file_pattern in file_patterns:
                    if file_path in glob.glob(file_pattern):
                        match = True
                        break
                if not match:
                    python_utils.PRINT(
                        '%s is not covered under CODEOWNERS' % file_path)
                    failed = True
        failed = failed or (
            check_for_important_patterns_at_bottom_of_codeowners(
                important_rules_in_critical_section))
        if failed:
            summary_message = '%s CODEOWNERS file check failed' % (
                _MESSAGE_TYPE_FAILED)
        else:
            summary_message = '%s CODEOWNERS file check passed' % (
                _MESSAGE_TYPE_SUCCESS)
        summary_messages.append(summary_message)
        python_utils.PRINT(summary_message)
        python_utils.PRINT('')
    return summary_messages
def _lint_css_files(
        node_path, stylelint_path, config_path, files_to_lint, stdout, result,
        verbose_mode_enabled):
    """Prints a list of lint errors in the given list of CSS files.

    Args:
        node_path: str. Path to the node binary.
        stylelint_path: str. Path to the Stylelint binary.
        config_path: str. Path to the configuration file.
        files_to_lint: list(str). A list of filepaths to lint.
        stdout: multiprocessing.Queue. A queue to store Stylelint outputs.
        result: multiprocessing.Queue. A queue to put results of test.
        verbose_mode_enabled: bool. True if verbose mode is enabled.
    """
    start_time = time.time()
    num_files_with_errors = 0
    num_css_files = len(files_to_lint)
    # Nothing to lint: still put an empty marker on the result queue so
    # that a consumer waiting on it is not blocked.
    if not files_to_lint:
        result.put('')
        python_utils.PRINT('There are no CSS files to lint.')
        return
    python_utils.PRINT('Total css files: ', num_css_files)
    stylelint_cmd_args = [
        node_path, stylelint_path, '--config=' + config_path]
    result_list = []
    if not verbose_mode_enabled:
        python_utils.PRINT('Linting CSS files.')
    for _, filepath in enumerate(files_to_lint):
        if verbose_mode_enabled:
            python_utils.PRINT('Linting: ', filepath)
        proc_args = stylelint_cmd_args + [filepath]
        # One Stylelint subprocess per file. Its stdout carries lint
        # findings; output on stderr means the linter itself failed.
        proc = subprocess.Popen(
            proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        encoded_linter_stdout, encoded_linter_stderr = proc.communicate()
        linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
        linter_stderr = encoded_linter_stderr.decode(encoding='utf-8')
        if linter_stderr:
            # A crashed linter aborts the whole lint run immediately.
            python_utils.PRINT('LINTER FAILED')
            python_utils.PRINT(linter_stderr)
            sys.exit(1)
        if linter_stdout:
            num_files_with_errors += 1
            result_list.append(linter_stdout)
            python_utils.PRINT(linter_stdout)
            stdout.put(linter_stdout)
    if num_files_with_errors:
        for error in result_list:
            result.put(error)
        result.put('%s %s CSS file' % (
            _MESSAGE_TYPE_FAILED, num_files_with_errors))
    else:
        result.put('%s %s CSS file linted (%.1f secs)' % (
            _MESSAGE_TYPE_SUCCESS, num_css_files, time.time() - start_time))
    python_utils.PRINT('CSS linting finished.')
def _lint_js_and_ts_files(
        node_path, eslint_path, files_to_lint, result, verbose_mode_enabled):
    """Prints a list of lint errors in the given list of JavaScript files.

    Args:
        node_path: str. Path to the node binary.
        eslint_path: str. Path to the ESLint binary.
        files_to_lint: list(str). A list of filepaths to lint.
        result: multiprocessing.Queue. A queue to put results of test.
        verbose_mode_enabled: bool. True if verbose mode is enabled.
    """
    start_time = time.time()
    num_files_with_errors = 0
    num_js_and_ts_files = len(files_to_lint)
    # Nothing to lint: still put an empty marker on the result queue so
    # that a consumer waiting on it is not blocked.
    if not files_to_lint:
        result.put('')
        python_utils.PRINT(
            'There are no JavaScript or Typescript files to lint.')
        return
    python_utils.PRINT('Total js and ts files: ', num_js_and_ts_files)
    eslint_cmd_args = [node_path, eslint_path, '--quiet']
    result_list = []
    # Consistency fix: like _lint_css_files, only show the generic
    # progress banner when per-file progress is not already printed.
    if not verbose_mode_enabled:
        python_utils.PRINT('Linting JS and TS files.')
    for filepath in files_to_lint:
        if verbose_mode_enabled:
            python_utils.PRINT('Linting: ', filepath)
        proc_args = eslint_cmd_args + [filepath]
        # One ESLint subprocess per file. Its stdout carries lint
        # findings; output on stderr means the linter itself failed.
        proc = subprocess.Popen(
            proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        encoded_linter_stdout, encoded_linter_stderr = proc.communicate()
        linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
        linter_stderr = encoded_linter_stderr.decode(encoding='utf-8')
        if linter_stderr:
            # A crashed linter aborts the whole lint run immediately.
            python_utils.PRINT('LINTER FAILED')
            python_utils.PRINT(linter_stderr)
            sys.exit(1)
        if linter_stdout:
            num_files_with_errors += 1
            result_list.append(linter_stdout)
    if num_files_with_errors:
        for error in result_list:
            result.put(error)
        result.put('%s %s JavaScript and Typescript files' % (
            _MESSAGE_TYPE_FAILED, num_files_with_errors))
    else:
        result.put(
            '%s %s JavaScript and Typescript files linted (%.1f secs)' % (
                _MESSAGE_TYPE_SUCCESS, num_js_and_ts_files,
                time.time() - start_time))
    python_utils.PRINT('Js and Ts linting finished.')
def _lint_py_files(
        config_pylint, config_pycodestyle, files_to_lint, result,
        verbose_mode_enabled):
    """Prints a list of lint errors in the given list of Python files.

    Args:
        config_pylint: str. Path to the .pylintrc file.
        config_pycodestyle: str. Path to the tox.ini file.
        files_to_lint: list(str). A list of filepaths to lint.
        result: multiprocessing.Queue. A queue to put results of test.
        verbose_mode_enabled: bool. True if verbose mode is enabled.
    """
    start_time = time.time()
    are_there_errors = False
    num_py_files = len(files_to_lint)
    if not files_to_lint:
        result.put('')
        python_utils.PRINT('There are no Python files to lint.')
        return
    python_utils.PRINT('Linting %s Python files' % num_py_files)
    # Files are linted in fixed-size batches so that a single Pylint
    # invocation does not grow unbounded with the file list.
    _batch_size = 50
    current_batch_start_index = 0
    while current_batch_start_index < len(files_to_lint):
        # Note that this index is an exclusive upper bound -- i.e., the current
        # batch of files ranges from 'start_index' to 'end_index - 1'.
        current_batch_end_index = min(
            current_batch_start_index + _batch_size, len(files_to_lint))
        current_files_to_lint = files_to_lint[
            current_batch_start_index: current_batch_end_index]
        if verbose_mode_enabled:
            python_utils.PRINT('Linting Python files %s to %s...' % (
                current_batch_start_index + 1, current_batch_end_index))
        with _redirect_stdout(_TARGET_STDOUT):
            # This line invokes Pylint and prints its output
            # to the target stdout.
            # NOTE(review): config_pylint is appended as an extra CLI
            # argument to Pylint; presumably it is an option string such
            # as '--rcfile=<path>' -- confirm at the call site.
            pylinter = lint.Run(
                current_files_to_lint + [config_pylint],
                exit=False).linter
            # These lines invoke Pycodestyle and print its output
            # to the target stdout.
            style_guide = pycodestyle.StyleGuide(config_file=config_pycodestyle)
            pycodestyle_report = style_guide.check_files(
                paths=current_files_to_lint)
        if pylinter.msg_status != 0 or pycodestyle_report.get_count() != 0:
            # Any message from either tool marks the whole run as failed;
            # the captured output is forwarded through the result queue.
            result.put(_TARGET_STDOUT.getvalue())
            are_there_errors = True
        current_batch_start_index = current_batch_end_index
    if are_there_errors:
        result.put('%s Python linting failed' % _MESSAGE_TYPE_FAILED)
    else:
        result.put('%s %s Python files linted (%.1f secs)' % (
            _MESSAGE_TYPE_SUCCESS, num_py_files, time.time() - start_time))
    python_utils.PRINT('Python linting finished.')
def _lint_py_files_for_python3_compatibility(
        files_to_lint, result, verbose_mode_enabled):
    """Prints a list of Python 3 compatibility errors in the given list of
    Python files.

    Args:
        files_to_lint: list(str). A list of filepaths to lint.
        result: multiprocessing.Queue. A queue to put results of test.
        verbose_mode_enabled: bool. True if verbose mode is enabled.
    """
    start_time = time.time()
    any_errors = False
    # Files whose names contain 'python_utils' are skipped -- presumably
    # because they implement the 2/3 compatibility layer itself; confirm.
    files_to_lint_for_python3_compatibility = [
        file_name for file_name in files_to_lint if not re.match(
            r'^.*python_utils.*\.py$', file_name)]
    num_py_files = len(files_to_lint_for_python3_compatibility)
    if not files_to_lint_for_python3_compatibility:
        result.put('')
        python_utils.PRINT(
            'There are no Python files to lint for Python 3 compatibility.')
        return
    python_utils.PRINT(
        'Linting %s Python files for Python 3 compatibility.' % num_py_files)
    # Same batching scheme as _lint_py_files: bound the size of each
    # Pylint invocation.
    _batch_size = 50
    current_batch_start_index = 0
    while current_batch_start_index < len(
            files_to_lint_for_python3_compatibility):
        # Note that this index is an exclusive upper bound -- i.e., the current
        # batch of files ranges from 'start_index' to 'end_index - 1'.
        current_batch_end_index = min(
            current_batch_start_index + _batch_size, len(
                files_to_lint_for_python3_compatibility))
        current_files_to_lint = files_to_lint_for_python3_compatibility[
            current_batch_start_index: current_batch_end_index]
        if verbose_mode_enabled:
            python_utils.PRINT(
                'Linting Python files for Python 3 compatibility %s to %s...'
                % (current_batch_start_index + 1, current_batch_end_index))
        with _redirect_stdout(_TARGET_STDOUT):
            # This line invokes Pylint and prints its output
            # to the target stdout.
            python_utils.PRINT('Messages for Python 3 support:')
            # '--py3k' restricts Pylint to its Python 3 porting checks.
            pylinter_for_python3 = lint.Run(
                current_files_to_lint + ['--py3k'], exit=False).linter
        if pylinter_for_python3.msg_status != 0:
            result.put(_TARGET_STDOUT.getvalue())
            any_errors = True
        current_batch_start_index = current_batch_end_index
    if any_errors:
        result.put(
            '%s Python linting for Python 3 compatibility failed'
            % _MESSAGE_TYPE_FAILED)
    else:
        result.put(
            '%s %s Python files linted for Python 3 compatibility (%.1f secs)'
            % (_MESSAGE_TYPE_SUCCESS, num_py_files, time.time() - start_time))
    python_utils.PRINT('Python linting for Python 3 compatibility finished.')
def _check_codeowner_file(verbose_mode_enabled):
    """Checks the CODEOWNERS file for any uncovered dirs/files and also
    checks that every pattern in the CODEOWNERS file matches at least one
    file/dir. Note that this checks the CODEOWNERS file according to the
    glob patterns supported by Python2.7 environment. For more information
    please refer https://docs.python.org/2/library/glob.html.
    This function also ensures that the most important rules are at the
    bottom of the CODEOWNERS file.

    Args:
        verbose_mode_enabled: bool. True if verbose mode is enabled.

    Returns:
        list(str). Summary messages indicating whether the check passed.

    NOTE(review): this is the second of two identical definitions of this
    function in this module; this copy silently shadows the earlier one.
    One of the two copies should be deleted.
    """
    if verbose_mode_enabled:
        python_utils.PRINT('Starting CODEOWNERS file check')
        python_utils.PRINT('----------------------------------------')
    with _redirect_stdout(_TARGET_STDOUT):
        failed = False
        summary_messages = []
        # Checks whether every pattern in the CODEOWNERS file matches at
        # least one dir/file.
        critical_file_section_found = False
        important_rules_in_critical_section = []
        file_patterns = []
        dir_patterns = []
        for line_num, line in enumerate(FILE_CACHE.readlines(
                CODEOWNER_FILEPATH)):
            stripped_line = line.strip()
            if '# Critical files' in line:
                critical_file_section_found = True
            if stripped_line and stripped_line[0] != '#':
                # Every non-comment rule line must name at least one
                # owner (owners are written as '@username').
                if '@' not in line:
                    python_utils.PRINT(
                        '%s --> Pattern on line %s doesn\'t have '
                        'codeowner' % (CODEOWNER_FILEPATH, line_num + 1))
                    failed = True
                else:
                    # Extract the file pattern from the line.
                    line_in_concern = line.split('@')[0].strip()
                    # This is being populated for the important rules
                    # check.
                    if critical_file_section_found:
                        important_rules_in_critical_section.append(
                            line_in_concern)
                    # Checks if the path is the full path relative to the
                    # root oppia directory.
                    if not line_in_concern.startswith('/'):
                        python_utils.PRINT(
                            '%s --> Pattern on line %s is invalid. Use '
                            'full path relative to the root directory'
                            % (CODEOWNER_FILEPATH, line_num + 1))
                        failed = True
                    # The double asterisks pattern is supported by the
                    # CODEOWNERS syntax but not the glob in Python 2.
                    # The following condition checks this.
                    if '**' in line_in_concern:
                        python_utils.PRINT(
                            '%s --> Pattern on line %s is invalid. '
                            '\'**\' wildcard not allowed' % (
                                CODEOWNER_FILEPATH, line_num + 1))
                        failed = True
                    # Adjustments to the dir paths in CODEOWNERS syntax
                    # for glob-style patterns to match correctly.
                    if line_in_concern.endswith('/'):
                        line_in_concern = line_in_concern[:-1]
                    # The following condition checks whether the specified
                    # path exists in the codebase or not. The CODEOWNERS
                    # syntax has paths starting with '/' which refers to
                    # full path relative to root, but python glob module
                    # does not conform to this logic and literally matches
                    # the '/' character. Therefore the leading '/' has to
                    # be changed to './' for glob patterns to match
                    # correctly.
                    line_in_concern = line_in_concern.replace('/', './', 1)
                    if not glob.glob(line_in_concern):
                        python_utils.PRINT(
                            '%s --> Pattern on line %s doesn\'t match '
                            'any file or directory' % (
                                CODEOWNER_FILEPATH, line_num + 1))
                        failed = True
                    # The following list is being populated with the
                    # paths in the CODEOWNERS file with the removal of the
                    # leading '/' to aid in the glob pattern matching in
                    # the next part of the check wherein the valid patterns
                    # are used to check if they cover the entire codebase.
                    if os.path.isdir(line_in_concern):
                        dir_patterns.append(line_in_concern)
                    else:
                        file_patterns.append(line_in_concern)
        # Checks that every file (except those under the dir represented by
        # the dir_patterns) is covered under CODEOWNERS.
        for file_paths in _walk_with_gitignore('.', dir_patterns):
            for file_path in file_paths:
                match = False
                for file_pattern in file_patterns:
                    if file_path in glob.glob(file_pattern):
                        match = True
                        break
                if not match:
                    python_utils.PRINT(
                        '%s is not covered under CODEOWNERS' % file_path)
                    failed = True
        failed = failed or (
            check_for_important_patterns_at_bottom_of_codeowners(
                important_rules_in_critical_section))
        if failed:
            summary_message = '%s CODEOWNERS file check failed' % (
                _MESSAGE_TYPE_FAILED)
        else:
            summary_message = '%s CODEOWNERS file check passed' % (
                _MESSAGE_TYPE_SUCCESS)
        summary_messages.append(summary_message)
        python_utils.PRINT(summary_message)
        python_utils.PRINT('')
    return summary_messages
class LintChecksManager( # pylint: disable=inherit-non-class
        python_utils.with_metaclass(abc.ABCMeta, python_utils.OBJECT)):
    """Manages all the common linting functions. As an abstract base class, this
    is not intended to be used directly.

    Attributes:
        all_filepaths: list(str). The list of filepaths to be linted.
        parsed_js_files: dict. Contains the content of JS files, after
            validating and parsing the files.
        verbose_mode_enabled: bool. True if verbose mode is enabled.
    """
    def __init__(self, verbose_mode_enabled=False): # pylint: disable=super-init-not-called
        """Constructs a LintChecksManager object.

        Args:
            verbose_mode_enabled: bool. True if verbose mode is enabled.
        """
        # Set path for node.
        # The path for node is set explicitly, since otherwise the lint
        # tests fail on CircleCI due to the TypeScript files not being
        # compilable.
        node_path = os.path.join(os.pardir, 'oppia_tools/node-10.15.3')
        os.environ['PATH'] = '%s/bin:' % node_path + os.environ['PATH']
        self.verbose_mode_enabled = verbose_mode_enabled
        # Manager-backed dict shared between processes: each check stores
        # its summary messages here under a fixed key.
        self.process_manager = multiprocessing.Manager().dict()
    @abc.abstractproperty
    def all_filepaths(self):
        """Returns all file paths."""
        pass
    def _run_multiple_checks(self, *checks):
        """Run multiple checks in parallel.

        Args:
            *checks: callables. Zero-argument check functions, each of
                which is run in its own process.
        """
        processes = []
        for check in checks:
            p = multiprocessing.Process(target=check)
            processes.append(p)
            p.start()
        for p in processes:
            p.join()
    def _check_for_mandatory_pattern_in_file(
            self, pattern_list, filepath, failed):
        """Checks for a given mandatory pattern in a file.

        Args:
            pattern_list: list(dict). The list of the mandatory patterns list to
                be checked for in the file.
            filepath: str. The path to the file to be linted.
            failed: bool. Status of failure of the check.

        Returns:
            bool. The failure status of the check.
        """
        # This boolean list keeps track of the regex matches
        # found in the file.
        pattern_found_list = []
        file_content = FILE_CACHE.readlines(filepath)
        for index, regexp_to_check in enumerate(
                pattern_list):
            # A pattern applies when the filepath ends with one of the
            # included types and is not in the excluded files/dirs lists.
            if (any([filepath.endswith(
                    allowed_type) for allowed_type in (
                        regexp_to_check['included_types'])]) and (
                            not any([
                                filepath.endswith(
                                    pattern) for pattern in (
                                        regexp_to_check[
                                            'excluded_files'] +
                                        regexp_to_check[
                                            'excluded_dirs'])]))):
                # Assume the pattern is missing until a matching line is
                # found, in which case the index is popped right back off.
                pattern_found_list.append(index)
                for line in file_content:
                    if regexp_to_check['regexp'].search(line):
                        pattern_found_list.pop()
                        break
        if pattern_found_list:
            failed = True
            for pattern_found in pattern_found_list:
                python_utils.PRINT('%s --> %s' % (
                    filepath,
                    pattern_list[pattern_found]['message']))
        return failed
    def _check_mandatory_patterns(self):
        """This function checks that all files contain the mandatory
        patterns.
        """
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting mandatory patterns check')
            python_utils.PRINT('----------------------------------------')
        summary_messages = []
        failed = False
        stdout = python_utils.string_io()
        # Output is captured into an in-memory buffer and appended to
        # _STDOUT_LIST so the caller can re-emit it later.
        with _redirect_stdout(stdout):
            sets_of_patterns_to_match = [
                MANDATORY_PATTERNS_REGEXP, MANDATORY_PATTERNS_JS_REGEXP]
            for filepath in self.all_filepaths:
                for pattern_list in sets_of_patterns_to_match:
                    failed = self._check_for_mandatory_pattern_in_file(
                        pattern_list, filepath, failed)
            if failed:
                summary_message = (
                    '%s Mandatory pattern check failed' % (
                        _MESSAGE_TYPE_FAILED))
            else:
                summary_message = (
                    '%s Mandatory pattern check passed' % (
                        _MESSAGE_TYPE_SUCCESS))
            python_utils.PRINT(summary_message)
        python_utils.PRINT('')
        summary_messages.append(summary_message)
        self.process_manager['mandatory'] = summary_messages
        _STDOUT_LIST.append(stdout)
    def _check_bad_patterns(self):
        """This function is used for detecting bad patterns."""
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting Pattern Checks')
            python_utils.PRINT('----------------------------------------')
        total_files_checked = 0
        total_error_count = 0
        summary_messages = []
        # The linter script itself and any explicitly excluded paths are
        # exempt from the bad-pattern checks.
        all_filepaths = [
            filepath for filepath in self.all_filepaths if not (
                filepath.endswith('pre_commit_linter.py') or
                any(
                    fnmatch.fnmatch(filepath, pattern)
                    for pattern in EXCLUDED_PATHS)
                )]
        failed = False
        stdout = python_utils.string_io()
        with _redirect_stdout(stdout):
            for filepath in all_filepaths:
                file_content = FILE_CACHE.read(filepath)
                total_files_checked += 1
                # Plain substring checks.
                for pattern in BAD_PATTERNS:
                    if (pattern in file_content and
                            not _is_filepath_excluded_for_bad_patterns_check(
                                pattern, filepath)):
                        failed = True
                        python_utils.PRINT('%s --> %s' % (
                            filepath, BAD_PATTERNS[pattern]['message']))
                        python_utils.PRINT('')
                        total_error_count += 1
                # Regex-based checks.
                for regexp in BAD_PATTERNS_REGEXP:
                    if _check_bad_pattern_in_file(
                            filepath, file_content, regexp):
                        failed = True
                        total_error_count += 1
                # Checks that depend on the file's type/extension.
                temp_failed, temp_count = _check_file_type_specific_bad_pattern(
                    filepath, file_content)
                failed = failed or temp_failed
                total_error_count += temp_count
                # constants.ts must additionally contain certain strings.
                if filepath == 'constants.ts':
                    for pattern in REQUIRED_STRINGS_CONSTANTS:
                        if pattern not in file_content:
                            failed = True
                            python_utils.PRINT('%s --> %s' % (
                                filepath,
                                REQUIRED_STRINGS_CONSTANTS[pattern]['message']))
                            python_utils.PRINT('')
                            total_error_count += 1
            if failed:
                summary_message = '%s Pattern checks failed' % (
                    _MESSAGE_TYPE_FAILED)
                summary_messages.append(summary_message)
            else:
                summary_message = '%s Pattern checks passed' % (
                    _MESSAGE_TYPE_SUCCESS)
                summary_messages.append(summary_message)
            python_utils.PRINT('')
            if total_files_checked == 0:
                python_utils.PRINT('There are no files to be checked.')
            else:
                python_utils.PRINT('(%s files checked, %s errors found)' % (
                    total_files_checked, total_error_count))
                python_utils.PRINT(summary_message)
        self.process_manager['bad_pattern'] = summary_messages
        _STDOUT_LIST.append(stdout)
    def _check_patterns(self):
        """Run checks relate to bad patterns."""
        methods = [self._check_bad_patterns, self._check_mandatory_patterns]
        self._run_multiple_checks(*methods)
    def perform_all_lint_checks(self):
        """Perform all the lint checks and returns the messages returned by all
        the checks.

        Returns:
            all_messages: str. All the messages returned by the lint checks.
        """
        self._check_patterns()
        # Results arrive via the shared process_manager dict, filled in by
        # the worker processes under the 'mandatory'/'bad_pattern' keys.
        mandatory_patterns_messages = self.process_manager['mandatory']
        pattern_messages = self.process_manager['bad_pattern']
        return (
            mandatory_patterns_messages + pattern_messages)
class JsTsLintChecksManager(LintChecksManager):
"""Manages all the Js and Ts linting functions.
Attributes:
all_filepaths: list(str). The list of filepaths to be linted.
js_filepaths: list(str): The list of js filepaths to be linted.
ts_filepaths: list(str): The list of ts filepaths to be linted.
parsed_js_and_ts_files: dict. Contains the content of JS files, after
validating and parsing the files.
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
def __init__(self, verbose_mode_enabled=False):
"""Constructs a JsTsLintChecksManager object.
Args:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
node_path = os.path.join(os.pardir, 'oppia_tools/node-10.15.3')
os.environ['PATH'] = '%s/bin:' % node_path + os.environ['PATH']
super(JsTsLintChecksManager, self).__init__(
verbose_mode_enabled=verbose_mode_enabled)
self.parsed_js_and_ts_files = []
self.parsed_expressions_in_files = []
    @property
    def js_filepaths(self):
        """Return all js filepaths."""
        # _FILES is built elsewhere in this module; assumed to map file
        # extension to the list of matching filepaths -- TODO confirm.
        return _FILES['.js']
    @property
    def ts_filepaths(self):
        """Return all ts filepaths."""
        # _FILES is built elsewhere in this module; assumed to map file
        # extension to the list of matching filepaths -- TODO confirm.
        return _FILES['.ts']
    @property
    def all_filepaths(self):
        """Return all filepaths."""
        # JS filepaths first, then TS; list concatenation preserves the
        # order within each underlying list.
        return self.js_filepaths + self.ts_filepaths
    def _validate_and_parse_js_and_ts_files(self):
        """This function validates JavaScript and Typescript files and
        returns the parsed contents as a Python dictionary.

        Returns:
            dict. Maps each checked filepath to its esprima parse result.
        """
        # Select JS files which need to be checked.
        files_to_check = [
            filepath for filepath in self.all_filepaths if
            not any(fnmatch.fnmatch(filepath, pattern) for pattern in
                    EXCLUDED_PATHS)]
        parsed_js_and_ts_files = dict()
        if not files_to_check:
            return parsed_js_and_ts_files
        # Temporary directory holding the JS emitted by tsc for TS files
        # that esprima cannot parse directly. It is removed on every exit
        # path (success and both error branches below).
        compiled_js_dir = tempfile.mkdtemp(
            dir=os.getcwd(), prefix='tmpcompiledjs')
        if not self.verbose_mode_enabled:
            python_utils.PRINT('Validating and parsing JS and TS files ...')
        for filepath in files_to_check:
            if self.verbose_mode_enabled:
                python_utils.PRINT(
                    'Validating and parsing %s file ...' % filepath)
            file_content = FILE_CACHE.read(filepath)
            try:
                # Use esprima to parse a JS or TS file.
                parsed_js_and_ts_files[filepath] = esprima.parseScript(
                    file_content, comment=True)
            except Exception as e:
                # Compile typescript file which has syntax not valid for JS
                # file.
                if filepath.endswith('.js'):
                    # A .js file that esprima rejects is a genuine syntax
                    # error; clean up the temp dir before propagating.
                    shutil.rmtree(compiled_js_dir)
                    raise Exception(e)
                try:
                    compiled_js_filepath = self._compile_ts_file(
                        filepath, compiled_js_dir)
                    file_content = FILE_CACHE.read(compiled_js_filepath)
                    parsed_js_and_ts_files[filepath] = esprima.parseScript(
                        file_content)
                except Exception as e:
                    shutil.rmtree(compiled_js_dir)
                    raise Exception(e)
        shutil.rmtree(compiled_js_dir)
        return parsed_js_and_ts_files
def _get_expressions_from_parsed_script(self):
"""This function returns the expressions in the script parsed using
js and ts files.
"""
parsed_expressions_in_files = collections.defaultdict(dict)
components_to_check = ['controller', 'directive', 'factory', 'filter']
for filepath, parsed_script in self.parsed_js_and_ts_files.items():
parsed_expressions_in_files[filepath] = collections.defaultdict(
list)
parsed_nodes = parsed_script.body
for parsed_node in parsed_nodes:
for component in components_to_check:
expression = _get_expression_from_node_if_one_exists(
parsed_node, [component])
parsed_expressions_in_files[filepath][component].append(
expression)
return parsed_expressions_in_files
    def _compile_ts_file(self, filepath, dir_path):
        """Compiles a typescript file and returns the path for compiled
        js file.

        Args:
            filepath: str. Path of the TS file to compile.
            dir_path: str. Output directory for the compiled JS.

        Returns:
            str. Expected path of the compiled .js file inside dir_path.
        """
        allow_js = 'true'
        lib = 'es2017,dom'
        no_implicit_use_strict = 'true'
        skip_lib_check = 'true'
        target = 'es5'
        type_roots = './node_modules/@types'
        cmd = (
            './node_modules/typescript/bin/tsc -outDir %s -allowJS %s '
            '-lib %s -noImplicitUseStrict %s -skipLibCheck '
            '%s -target %s -typeRoots %s %s typings/*') % (
                dir_path, allow_js, lib, no_implicit_use_strict,
                skip_lib_check, target, type_roots, filepath)
        # NOTE(review): shell=True with an interpolated filepath would be
        # risky on untrusted input; here the paths come from the local
        # repository, but a list-form argv with shell=False is safer.
        subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
        # tsc mirrors the input basename with a .js extension.
        compiled_js_filepath = os.path.join(
            dir_path, os.path.basename(filepath).replace('.ts', '.js'))
        return compiled_js_filepath
    def _check_extra_js_files(self):
        """Checks if the changes made include extra js files in core
        or extensions folder which are not specified in
        build.JS_FILEPATHS_NOT_TO_BUILD.
        """
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting extra js files check')
            python_utils.PRINT('----------------------------------------')
        summary_messages = []
        failed = False
        stdout = python_utils.string_io()
        # Output is captured into a buffer and appended to _STDOUT_LIST,
        # presumably so the caller can re-emit it later -- confirm.
        with _redirect_stdout(stdout):
            js_files_to_check = self.js_filepaths
            for filepath in js_files_to_check:
                # Besides the explicit allowlist, protractor e2e helper
                # files are the only .js files permitted in these dirs.
                if filepath.startswith(('core/templates', 'extensions')) and (
                        filepath not in build.JS_FILEPATHS_NOT_TO_BUILD) and (
                            not filepath.endswith('protractor.js')):
                    python_utils.PRINT(
                        '%s --> Found extra .js file\n' % filepath)
                    failed = True
            if failed:
                err_msg = (
                    'If you want the above files to be present as js files, '
                    'add them to the list JS_FILEPATHS_NOT_TO_BUILD in '
                    'build.py. Otherwise, rename them to .ts\n')
                python_utils.PRINT(err_msg)
            if failed:
                summary_message = '%s Extra JS files check failed' % (
                    _MESSAGE_TYPE_FAILED)
            else:
                summary_message = '%s Extra JS files check passed' % (
                    _MESSAGE_TYPE_SUCCESS)
            summary_messages.append(summary_message)
            python_utils.PRINT(summary_message)
            python_utils.PRINT('')
        self.process_manager['extra'] = summary_messages
        _STDOUT_LIST.append(stdout)
    def _check_js_and_ts_component_name_and_count(self):
        """Ensures that each JS/TS file declares at most one AngularJS
        component (controller/directive/factory/filter).

        NOTE(review): despite the original docstring, only the component
        *count* is verified here; no comparison of the component name
        against the filename is visible in this method.
        """
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting js component name and count check')
            python_utils.PRINT('----------------------------------------')
        # Select JS files which need to be checked.
        files_to_check = [
            filepath for filepath in self.all_filepaths if not
            any(fnmatch.fnmatch(filepath, pattern) for pattern in
                EXCLUDED_PATHS)
            and (not filepath.endswith('App.ts'))]
        failed = False
        summary_messages = []
        components_to_check = ['controller', 'directive', 'factory', 'filter']
        stdout = python_utils.string_io()
        for filepath in files_to_check:
            # Counts components of all kinds found in this file so far.
            component_num = 0
            parsed_expressions = self.parsed_expressions_in_files[filepath]
            with _redirect_stdout(stdout):
                for component in components_to_check:
                    # A second component was already found and reported;
                    # skip the remaining component kinds for this file.
                    if component_num > 1:
                        break
                    for expression in parsed_expressions[component]:
                        if not expression:
                            continue
                        component_num += 1
                        # Check if the number of components in each file exceeds
                        # one.
                        if component_num > 1:
                            python_utils.PRINT(
                                '%s -> Please ensure that there is exactly one '
                                'component in the file.' % (filepath))
                            failed = True
                            break
        with _redirect_stdout(stdout):
            if failed:
                summary_message = (
                    '%s JS and TS Component name and count check failed' %
                    (_MESSAGE_TYPE_FAILED))
                python_utils.PRINT(summary_message)
                summary_messages.append(summary_message)
            else:
                summary_message = (
                    '%s JS and TS Component name and count check passed' %
                    (_MESSAGE_TYPE_SUCCESS))
                python_utils.PRINT(summary_message)
                summary_messages.append(summary_message)
            python_utils.PRINT('')
        self.process_manager['component'] = summary_messages
        _STDOUT_LIST.append(stdout)
    def _check_directive_scope(self):
        """Checks that every AngularJS directive declares an explicit
        scope object (scope: {}) and, in particular, that no directive
        uses scope: true.

        Walks the esprima parse trees stored in
        self.parsed_expressions_in_files, looking inside each directive's
        definition function for the returned object literal and its
        'scope' property. The summary is stored in
        self.process_manager['directive'] and the captured output buffer
        is appended to _STDOUT_LIST.
        """
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting directive scope check')
            python_utils.PRINT('----------------------------------------')
        # Select JS and TS files which need to be checked.
        files_to_check = [
            filepath for filepath in self.all_filepaths if
            not any(fnmatch.fnmatch(filepath, pattern) for pattern in
                    EXCLUDED_PATHS)]
        failed = False
        summary_messages = []
        components_to_check = ['directive']
        stdout = python_utils.string_io()
        for filepath in files_to_check:
            parsed_expressions = self.parsed_expressions_in_files[filepath]
            with _redirect_stdout(stdout):
                # Parse the body of the content as nodes.
                for component in components_to_check:
                    for expression in parsed_expressions[component]:
                        if not expression:
                            continue
                        # Separate the arguments of the expression.
                        arguments = expression.arguments
                        # The first argument of the expression is the
                        # name of the directive.
                        # NOTE(review): if the first argument is not a
                        # Literal, directive_name keeps the value from a
                        # previous iteration (or is undefined on the very
                        # first one) and the error messages below may
                        # report a stale name — confirm that directive
                        # declarations always use a string-literal name.
                        if arguments[0].type == 'Literal':
                            directive_name = str(arguments[0].value)
                        arguments = arguments[1:]
                        for argument in arguments:
                            # Check the type of an argument.
                            if argument.type != 'ArrayExpression':
                                continue
                            # Separate out the elements for the argument.
                            elements = argument.elements
                            for element in elements:
                                # Check the type of an element.
                                if element.type != 'FunctionExpression':
                                    continue
                                # Separate out the body of the element.
                                body = element.body
                                if body.type != 'BlockStatement':
                                    continue
                                # Further separate the body elements from the
                                # body.
                                body_elements = body.body
                                for body_element in body_elements:
                                    # Check if the body element is a return
                                    # statement.
                                    body_element_type_is_not_return = (
                                        body_element.type != 'ReturnStatement')
                                    # NOTE(review): .argument is read before
                                    # the node is confirmed to be a
                                    # ReturnStatement; this assumes every
                                    # statement node exposes an 'argument'
                                    # attribute — verify against the esprima
                                    # node API.
                                    body_element_arg_type_is_not_object = (
                                        body_element.argument.type != (
                                            'ObjectExpression'))
                                    if (
                                            body_element_arg_type_is_not_object
                                            or (
                                                body_element_type_is_not_return
                                            )):
                                        continue
                                    # Separate the properties of the return
                                    # node.
                                    return_node_properties = (
                                        body_element.argument.properties)
                                    # Loop over all the properties of the return
                                    # node to find out the scope key.
                                    for return_node_property in (
                                            return_node_properties):
                                        # Check whether the property is scope.
                                        property_key_is_an_identifier = (
                                            return_node_property.key.type == (
                                                'Identifier'))
                                        property_key_name_is_scope = (
                                            return_node_property.key.name == (
                                                'scope'))
                                        if (
                                                property_key_is_an_identifier
                                                and (
                                                    property_key_name_is_scope
                                                )):
                                            # Separate the scope value and
                                            # check if it is an Object
                                            # Expression. If it is not, then
                                            # check for scope: true and report
                                            # the error message.
                                            scope_value = (
                                                return_node_property.value)
                                            if (
                                                    scope_value.type == (
                                                        'Literal')
                                                    and (
                                                        scope_value.value)):
                                                failed = True
                                                python_utils.PRINT(
                                                    'Please ensure that %s '
                                                    'directive in %s file '
                                                    'does not have scope set '
                                                    'to true.' %
                                                    (directive_name, filepath))
                                                python_utils.PRINT('')
                                            elif scope_value.type != (
                                                    'ObjectExpression'):
                                                # Check whether the directive
                                                # has scope: {} else report
                                                # the error message.
                                                failed = True
                                                python_utils.PRINT(
                                                    'Please ensure that %s '
                                                    'directive in %s file has '
                                                    'a scope: {}.' % (
                                                        directive_name, filepath
                                                    ))
                                                python_utils.PRINT('')
        with _redirect_stdout(stdout):
            if failed:
                summary_message = '%s Directive scope check failed' % (
                    _MESSAGE_TYPE_FAILED)
                python_utils.PRINT(summary_message)
                summary_messages.append(summary_message)
            else:
                summary_message = '%s Directive scope check passed' % (
                    _MESSAGE_TYPE_SUCCESS)
                python_utils.PRINT(summary_message)
                summary_messages.append(summary_message)
            python_utils.PRINT('')
        self.process_manager['directive'] = summary_messages
        _STDOUT_LIST.append(stdout)
    def _check_sorted_dependencies(self):
        """Checks that the dependencies injected into each
        controller/directive/factory in the JS/TS files follow the
        pattern: dollar imports first, then regular imports, then
        constant imports, each group sorted alphabetically.

        Both the stringified dependency names (the string literals in the
        injection array) and the parameters of the injected function are
        checked against the expected ordering. The summary is stored in
        self.process_manager['sorted'] and the captured output buffer is
        appended to _STDOUT_LIST.
        """
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting sorted dependencies check')
            python_utils.PRINT('----------------------------------------')
        files_to_check = [
            filepath for filepath in self.all_filepaths if
            not any(fnmatch.fnmatch(filepath, pattern) for pattern in
                    EXCLUDED_PATHS)]
        components_to_check = ['controller', 'directive', 'factory']
        failed = False
        summary_messages = []
        stdout = python_utils.string_io()
        for filepath in files_to_check:
            parsed_expressions = self.parsed_expressions_in_files[filepath]
            with _redirect_stdout(stdout):
                for component in components_to_check:
                    for expression in parsed_expressions[component]:
                        if not expression:
                            continue
                        # Separate the arguments of the expression.
                        arguments = expression.arguments
                        # NOTE(review): property_value is only assigned when
                        # the first argument is a string literal; otherwise
                        # the messages below reuse a stale value — confirm
                        # that component declarations always start with a
                        # literal name.
                        if arguments[0].type == 'Literal':
                            property_value = str(arguments[0].value)
                        arguments = arguments[1:]
                        for argument in arguments:
                            if argument.type != 'ArrayExpression':
                                continue
                            # literal_args collects the stringified
                            # dependency names; function_args collects the
                            # parameter names of the injected function.
                            literal_args = []
                            function_args = []
                            dollar_imports = []
                            regular_imports = []
                            constant_imports = []
                            elements = argument.elements
                            for element in elements:
                                if element.type == 'Literal':
                                    literal_args.append(str(element.value))
                                elif element.type == 'FunctionExpression':
                                    func_args = element.params
                                    for func_arg in func_args:
                                        function_args.append(str(func_arg.name))
                            # Bucket each parameter: names starting with '$'
                            # are dollar imports, names containing any
                            # lowercase letter are regular imports, and the
                            # rest (e.g. ALL_CAPS names) are constants.
                            for arg in function_args:
                                if arg.startswith('$'):
                                    dollar_imports.append(arg)
                                elif re.search('[a-z]', arg):
                                    regular_imports.append(arg)
                                else:
                                    constant_imports.append(arg)
                            dollar_imports.sort()
                            regular_imports.sort()
                            constant_imports.sort()
                            sorted_imports = (
                                dollar_imports + regular_imports + (
                                    constant_imports))
                            if sorted_imports != function_args:
                                failed = True
                                python_utils.PRINT(
                                    'Please ensure that in %s in file %s, the '
                                    'injected dependencies should be in the '
                                    'following manner: dollar imports, regular '
                                    'imports and constant imports, all in '
                                    'sorted order.'
                                    % (property_value, filepath))
                            if sorted_imports != literal_args:
                                failed = True
                                python_utils.PRINT(
                                    'Please ensure that in %s in file %s, the '
                                    'stringfied dependencies should be in the '
                                    'following manner: dollar imports, regular '
                                    'imports and constant imports, all in '
                                    'sorted order.'
                                    % (property_value, filepath))
        with _redirect_stdout(stdout):
            if failed:
                summary_message = (
                    '%s Sorted dependencies check failed' % (
                        _MESSAGE_TYPE_FAILED))
            else:
                summary_message = (
                    '%s Sorted dependencies check passed' % (
                        _MESSAGE_TYPE_SUCCESS))
            summary_messages.append(summary_message)
            python_utils.PRINT('')
            python_utils.PRINT(summary_message)
            if self.verbose_mode_enabled:
                python_utils.PRINT('----------------------------------------')
        self.process_manager['sorted'] = summary_messages
        _STDOUT_LIST.append(stdout)
def _match_line_breaks_in_controller_dependencies(self):
"""This function checks whether the line breaks between the dependencies
listed in the controller of a directive or service exactly match those
between the arguments of the controller function.
"""
if self.verbose_mode_enabled:
python_utils.PRINT(
'Starting controller dependency line break check')
python_utils.PRINT('----------------------------------------')
files_to_check = [
filepath for filepath in self.all_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
summary_messages = []
# For RegExp explanation, please see https://regex101.com/r/T85GWZ/2/.
pattern_to_match = (
r'controller.* \[(?P<stringfied_dependencies>[\S\s]*?)' +
r'function\((?P<function_parameters>[\S\s]*?)\)')
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
file_content = FILE_CACHE.read(filepath)
matched_patterns = re.findall(pattern_to_match, file_content)
for matched_pattern in matched_patterns:
stringfied_dependencies, function_parameters = (
matched_pattern)
stringfied_dependencies = (
stringfied_dependencies.strip().replace(
'\'', '').replace(' ', ''))[:-1]
function_parameters = (
function_parameters.strip().replace(' ', ''))
if stringfied_dependencies != function_parameters:
failed = True
python_utils.PRINT(
'Please ensure that in file %s the line breaks '
'pattern between the dependencies mentioned as '
'strings:\n[%s]\nand the dependencies mentioned '
'as function parameters: \n(%s)\nfor the '
'corresponding controller should '
'exactly match.' % (
filepath, stringfied_dependencies,
function_parameters))
python_utils.PRINT('')
if failed:
summary_message = (
'%s Controller dependency line break check failed' % (
_MESSAGE_TYPE_FAILED))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Controller dependency line break check passed' % (
_MESSAGE_TYPE_SUCCESS))
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
self.process_manager['line_breaks'] = summary_messages
_STDOUT_LIST.append(stdout)
    def _check_constants_declaration(self):
        """Checks the declaration of constants in the TS files to ensure that
        the constants are not declared in files other than *.constants.ajs.ts
        and that the constants are declared only a single time. This also
        checks that the constants are declared in both *.constants.ajs.ts
        (for AngularJS) and in *.constants.ts (for Angular 8).

        Returns:
            list(str). A single summary message describing whether the
            check passed or failed.
        """
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting constants declaration check')
            python_utils.PRINT('----------------------------------------')
        summary_messages = []
        failed = False
        with _redirect_stdout(_TARGET_STDOUT):
            ts_files_to_check = self.ts_filepaths
            # Maps each declared constant name to the file that declares it,
            # to detect duplicate declarations across files.
            constants_to_source_filepaths_dict = {}
            # Maps each *.constants.ajs.ts filepath to the set of constant
            # names it declares, for the Angular/AngularJS sync check below.
            angularjs_source_filepaths_to_constants_dict = {}
            for filepath in ts_files_to_check:
                # The following block extracts the corresponding Angularjs
                # constants file for the Angular constants file. This is
                # required since the check cannot proceed if the AngularJS
                # constants file is not provided before the Angular constants
                # file.
                if filepath.endswith('.constants.ts'):
                    filename_without_extension = filepath[:-3]
                    corresponding_angularjs_filepath = (
                        filename_without_extension + '.ajs.ts')
                    compiled_js_dir = tempfile.mkdtemp(dir=os.getcwd())
                    try:
                        if os.path.isfile(corresponding_angularjs_filepath):
                            compiled_js_filepath = self._compile_ts_file(
                                corresponding_angularjs_filepath,
                                compiled_js_dir)
                            # NOTE(review): the .decode('utf-8') assumes
                            # FILE_CACHE.read returns bytes here — confirm,
                            # since other call sites in this file use the
                            # returned value directly as text.
                            file_content = FILE_CACHE.read(
                                compiled_js_filepath).decode('utf-8')
                            parsed_script = esprima.parseScript(file_content)
                            parsed_nodes = parsed_script.body
                            angularjs_constants_list = []
                            components_to_check = ['constant']
                            for parsed_node in parsed_nodes:
                                expression = (
                                    _get_expression_from_node_if_one_exists(
                                        parsed_node, components_to_check))
                                if not expression:
                                    continue
                                else:
                                    # The following block populates a set to
                                    # store constants for the Angular-AngularJS
                                    # constants file consistency check.
                                    angularjs_constants_name = (
                                        expression.arguments[0].value)
                                    angularjs_constants_value = (
                                        expression.arguments[1].property.name)
                                    if angularjs_constants_value != (
                                            angularjs_constants_name):
                                        failed = True
                                        python_utils.PRINT(
                                            '%s --> Please ensure that the '
                                            'constant %s is initialized '
                                            'from the value from the '
                                            'corresponding Angular constants'
                                            ' file (the *.constants.ts '
                                            'file). Please create one in the'
                                            ' Angular constants file if it '
                                            'does not exist there.' % (
                                                filepath,
                                                angularjs_constants_name))
                                    angularjs_constants_list.append(
                                        angularjs_constants_name)
                            angularjs_constants_set = set(
                                angularjs_constants_list)
                            if len(angularjs_constants_set) != len(
                                    angularjs_constants_list):
                                failed = True
                                python_utils.PRINT(
                                    '%s --> Duplicate constant declaration '
                                    'found.' % (
                                        corresponding_angularjs_filepath))
                            angularjs_source_filepaths_to_constants_dict[
                                corresponding_angularjs_filepath] = (
                                    angularjs_constants_set)
                        else:
                            failed = True
                            python_utils.PRINT(
                                '%s --> Corresponding AngularJS constants '
                                'file not found.' % filepath)
                    finally:
                        # Always remove the temporary compilation directory,
                        # even if parsing/compilation raised above.
                        shutil.rmtree(compiled_js_dir)
                # Check that the constants are declared only in a
                # *.constants.ajs.ts file.
                if not filepath.endswith('.constants.ajs.ts'):
                    for line_num, line in enumerate(FILE_CACHE.readlines(
                            filepath)):
                        if 'oppia.constant(' in line:
                            failed = True
                            python_utils.PRINT(
                                '%s --> Constant declaration found at line '
                                '%s. Please declare the constants in a '
                                'separate constants file.' % (
                                    filepath, line_num))
                # Check if the constant has multiple declarations which is
                # prohibited.
                parsed_script = self.parsed_js_and_ts_files[filepath]
                parsed_nodes = parsed_script.body
                components_to_check = ['constant']
                angular_constants_list = []
                for parsed_node in parsed_nodes:
                    expression = _get_expression_from_node_if_one_exists(
                        parsed_node, components_to_check)
                    if not expression:
                        continue
                    else:
                        constant_name = expression.arguments[0].raw
                        if constant_name in constants_to_source_filepaths_dict:
                            failed = True
                            python_utils.PRINT(
                                '%s --> The constant %s is already declared '
                                'in %s. Please import the file where the '
                                'constant is declared or rename the constant'
                                '.' % (
                                    filepath, constant_name,
                                    constants_to_source_filepaths_dict[
                                        constant_name]))
                        else:
                            constants_to_source_filepaths_dict[
                                constant_name] = filepath
                # Checks that the *.constants.ts and the corresponding
                # *.constants.ajs.ts file are in sync.
                if filepath.endswith('.constants.ts'):
                    # NOTE(review): the node path below assumes a fixed
                    # structure for Angular constants files (second top-level
                    # statement is a variable declaration whose initializer
                    # is a call expression) — verify against the constants
                    # file template before relying on this.
                    angular_constants_nodes = (
                        parsed_nodes[1].declarations[0].init.callee.body.body)
                    for angular_constant_node in angular_constants_nodes:
                        if not angular_constant_node.expression:
                            continue
                        angular_constant_name = (
                            angular_constant_node.expression.left.property.name)
                        angular_constants_list.append(angular_constant_name)
                    angular_constants_set = set(angular_constants_list)
                    if len(angular_constants_set) != len(
                            angular_constants_list):
                        failed = True
                        python_utils.PRINT(
                            '%s --> Duplicate constant declaration found.'
                            % filepath)
                    if corresponding_angularjs_filepath in (
                            angularjs_source_filepaths_to_constants_dict):
                        angular_minus_angularjs_constants = (
                            angular_constants_set.difference(
                                angularjs_source_filepaths_to_constants_dict[
                                    corresponding_angularjs_filepath]))
                        for constant in angular_minus_angularjs_constants:
                            failed = True
                            python_utils.PRINT(
                                '%s --> The constant %s is not declared '
                                'in the corresponding angularjs '
                                'constants file.' % (filepath, constant))
            if failed:
                summary_message = '%s Constants declaration check failed' % (
                    _MESSAGE_TYPE_FAILED)
            else:
                summary_message = '%s Constants declaration check passed' % (
                    _MESSAGE_TYPE_SUCCESS)
            summary_messages.append(summary_message)
            python_utils.PRINT(summary_message)

        return summary_messages
def _check_dependencies(self):
"""Check the dependencies related issues. This runs
_check_sorted_dependencies and
_match_line_breaks_in_controller_dependencies
in parallel.
"""
methods = [
self._check_sorted_dependencies,
self._match_line_breaks_in_controller_dependencies
]
super(JsTsLintChecksManager, self)._run_multiple_checks(*methods)
def perform_all_lint_checks(self):
"""Perform all the lint checks and returns the messages returned by all
the checks.
Returns:
all_messages: str. All the messages returned by the lint checks.
"""
self.parsed_js_and_ts_files = self._validate_and_parse_js_and_ts_files()
self.parsed_expressions_in_files = (
self._get_expressions_from_parsed_script())
common_messages = super(
JsTsLintChecksManager, self).perform_all_lint_checks()
super(JsTsLintChecksManager, self)._run_multiple_checks(
self._check_extra_js_files,
self._check_js_and_ts_component_name_and_count,
self._check_directive_scope
)
self._check_dependencies()
extra_js_files_messages = self.process_manager['extra']
js_and_ts_component_messages = self.process_manager['component']
directive_scope_messages = self.process_manager['directive']
sorted_dependencies_messages = self.process_manager['sorted']
controller_dependency_messages = self.process_manager['line_breaks']
all_messages = (
common_messages + extra_js_files_messages +
js_and_ts_component_messages + directive_scope_messages +
sorted_dependencies_messages + controller_dependency_messages)
return all_messages
def _check_html_directive_name(self):
"""This function checks that all HTML directives end
with _directive.html.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting HTML directive name check')
python_utils.PRINT('----------------------------------------')
total_files_checked = 0
total_error_count = 0
files_to_check = [
filepath for filepath in self.all_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
summary_messages = []
# For RegExp explanation, please see https://regex101.com/r/gU7oT6/37.
pattern_to_match = (
r'templateUrl: UrlInterpolationService\.[A-z\(]+' +
r'(?P<directive_name>[^\)]+)')
with _redirect_stdout(_TARGET_STDOUT):
for filepath in files_to_check:
file_content = FILE_CACHE.read(filepath)
total_files_checked += 1
matched_patterns = re.findall(pattern_to_match, file_content)
for matched_pattern in matched_patterns:
matched_pattern = matched_pattern.split()
directive_filepath = ''.join(matched_pattern).replace(
'\'', '').replace('+', '')
if not directive_filepath.endswith('_directive.html'):
failed = True
total_error_count += 1
python_utils.PRINT(
'%s --> Please ensure that this file ends'
'with _directive.html.' % directive_filepath)
python_utils.PRINT('')
if failed:
summary_message = '%s HTML directive name check failed' % (
_MESSAGE_TYPE_FAILED)
summary_messages.append(summary_message)
else:
summary_message = '%s HTML directive name check passed' % (
_MESSAGE_TYPE_SUCCESS)
summary_messages.append(summary_message)
python_utils.PRINT('')
if total_files_checked == 0:
if self.verbose_mode_enabled:
python_utils.PRINT('There are no files to be checked.')
else:
python_utils.PRINT('(%s files checked, %s errors found)' % (
total_files_checked, total_error_count))
python_utils.PRINT(summary_message)
return summary_messages
class OtherLintChecksManager(LintChecksManager):
"""Manages all the linting functions except the ones against Js and Ts. It
checks Python, CSS, and HTML files.
Attributes:
verbose_mode_enabled: bool. True if verbose mode is enabled.
"""
    def __init__(
            self, verbose_mode_enabled=False):
        """Constructs a OtherLintChecksManager object.

        All setup is delegated to the LintChecksManager base class; this
        subclass only adds the Python/CSS/HTML-specific checks.

        Args:
            verbose_mode_enabled: bool. True if verbose mode is enabled.
        """
        super(OtherLintChecksManager, self).__init__(
            verbose_mode_enabled=verbose_mode_enabled)
    @property
    def py_filepaths(self):
        """Return all python filepaths.

        Returns:
            list(str). Filepaths of the tracked .py files.
        """
        return _FILES['.py']
    @property
    def html_filepaths(self):
        """Return all html filepaths.

        Returns:
            list(str). Filepaths of the tracked .html files.
        """
        return _FILES['.html']
    @property
    def other_filepaths(self):
        """Return other filepaths.

        Returns:
            list(str). Filepaths whose extension has no dedicated bucket.
        """
        return _FILES['other']
    @property
    def css_filepaths(self):
        """Return css filepaths.

        Returns:
            list(str). Filepaths of the tracked .css files.
        """
        return _FILES['.css']
    @property
    def all_filepaths(self):
        """Return all filepaths.

        Returns:
            list(str). Concatenation of the CSS, HTML, other and Python
            filepaths handled by this manager.
        """
        return (
            self.css_filepaths + self.html_filepaths +
            self.other_filepaths + self.py_filepaths)
def _check_division_operator(self):
"""This function ensures that the division operator('/') is not used and
python_utils.divide() is used instead.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting division checks')
python_utils.PRINT('----------------------------------------')
summary_messages = []
files_to_check = [
filepath for filepath in self.py_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
ast_file = ast.walk(
ast.parse(
python_utils.convert_to_bytes(
FILE_CACHE.read(filepath))))
ast_divisions = [n for n in ast_file if isinstance(n, ast.Div)]
if ast_divisions:
python_utils.PRINT(
'Please use python_utils.divide() instead of the '
'"/" operator in --> %s' % filepath)
failed = True
python_utils.PRINT('')
if failed:
summary_message = (
'%s Division operator check failed' % _MESSAGE_TYPE_FAILED)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Division operator check passed' % _MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
self.process_manager['division'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_import_order(self):
"""This function is used to check that each file
has imports placed in alphabetical order.
"""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting import-order checks')
python_utils.PRINT('----------------------------------------')
summary_messages = []
files_to_check = [
filepath for filepath in self.py_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
failed = False
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
# This line prints the error message along with file path
# and returns True if it finds an error else returns False
# If check is set to True, isort simply checks the file and
# if check is set to False, it autocorrects import-order errors.
if (isort.SortImports(
filepath, check=True, show_diff=(
True)).incorrectly_sorted):
failed = True
python_utils.PRINT('')
python_utils.PRINT('')
if failed:
summary_message = (
'%s Import order checks failed' % _MESSAGE_TYPE_FAILED)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Import order checks passed' % _MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
self.process_manager['import'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_divide_and_import(self):
"""Run checks relates to division and import order."""
methods = [self._check_division_operator, self._check_import_order]
super(OtherLintChecksManager, self)._run_multiple_checks(*methods)
def _check_docstrings_and_comments(self):
"""Run checks relates to docstring and comments."""
methods = [self._check_docstrings, self._check_comments]
super(OtherLintChecksManager, self)._run_multiple_checks(*methods)
    def _check_docstrings(self):
        """Ensures that docstrings end in a period and that the arg order
        in each function definition matches the order in its docstring.

        The summary messages are stored in
        self.process_manager['docstrings'] (nothing is returned) and the
        captured output buffer is appended to _STDOUT_LIST.
        """
        if self.verbose_mode_enabled:
            python_utils.PRINT('Starting docstring checks')
            python_utils.PRINT('----------------------------------------')
        summary_messages = []
        files_to_check = [
            filepath for filepath in self.py_filepaths if not
            any(fnmatch.fnmatch(filepath, pattern) for pattern in
                EXCLUDED_PATHS)]
        missing_period_message = (
            'There should be a period at the end of the docstring.')
        multiline_docstring_message = (
            'Multiline docstring should end with a new line.')
        single_line_docstring_message = (
            'Single line docstring should not span two lines. '
            'If line length exceeds 80 characters, '
            'convert the single line docstring to a multiline docstring.')
        previous_line_message = (
            'There should not be any empty lines before the end of '
            'the multi-line docstring.')
        space_after_triple_quotes_in_docstring_message = (
            'There should be no space after """ in docstring.')
        failed = False
        # Scanner state: is_docstring is True while the current line is
        # believed to belong to a docstring; is_class_or_function is True
        # between a class/def header line and the end of its (possibly
        # multi-line) signature.
        is_docstring = False
        is_class_or_function = False
        stdout = python_utils.string_io()
        with _redirect_stdout(stdout):
            for filepath in files_to_check:
                file_content = FILE_CACHE.readlines(filepath)
                file_length = len(file_content)
                for line_num in python_utils.RANGE(file_length):
                    line = file_content[line_num].strip()
                    prev_line = ''
                    if line_num > 0:
                        prev_line = file_content[line_num - 1].strip()
                    # Check if it is a docstring and not some multi-line string.
                    if (prev_line.startswith('class ') or
                            prev_line.startswith('def ')) or (
                                is_class_or_function):
                        is_class_or_function = True
                        if prev_line.endswith('):') and (
                                line.startswith('"""')):
                            is_docstring = True
                            is_class_or_function = False
                    # Check for space after """ in docstring.
                    if re.match(r'^""".+$', line) and is_docstring and (
                            line[3] == ' '):
                        failed = True
                        python_utils.PRINT('%s --> Line %s: %s' % (
                            filepath, line_num + 1,
                            space_after_triple_quotes_in_docstring_message))
                        python_utils.PRINT('')
                        is_docstring = False
                    # Check if single line docstring span two lines.
                    if line == '"""' and prev_line.startswith('"""') and (
                            is_docstring):
                        failed = True
                        python_utils.PRINT('%s --> Line %s: %s' % (
                            filepath, line_num, single_line_docstring_message))
                        python_utils.PRINT('')
                        is_docstring = False
                    # Check for single line docstring.
                    elif re.match(r'^""".+"""$', line) and is_docstring:
                        # Check for punctuation at line[-4] since last three
                        # characters are double quotes.
                        if (len(line) > 6) and (
                                line[-4] not in
                                ALLOWED_TERMINATING_PUNCTUATIONS):
                            failed = True
                            python_utils.PRINT('%s --> Line %s: %s' % (
                                filepath, line_num + 1, missing_period_message))
                            python_utils.PRINT('')
                        is_docstring = False
                    # Check for multiline docstring.
                    elif line.endswith('"""') and is_docstring:
                        # Case 1: line is """. This is correct for multiline
                        # docstring.
                        if line == '"""':
                            # Check for empty line before the end of docstring.
                            if prev_line == '':
                                failed = True
                                python_utils.PRINT('%s --> Line %s: %s' % (
                                    filepath, line_num, previous_line_message))
                                python_utils.PRINT('')
                            # Check for punctuation at end of docstring.
                            else:
                                last_char_is_invalid = prev_line[-1] not in (
                                    ALLOWED_TERMINATING_PUNCTUATIONS)
                                no_word_is_present_in_excluded_phrases = (
                                    not any(
                                        word in prev_line for word in(
                                            EXCLUDED_PHRASES)))
                                if last_char_is_invalid and (
                                        no_word_is_present_in_excluded_phrases):
                                    failed = True
                                    python_utils.PRINT('%s --> Line %s: %s' % (
                                        filepath, line_num,
                                        missing_period_message))
                                    python_utils.PRINT('')
                        # Case 2: line contains some words before """. """
                        # should shift to next line.
                        elif not any(word in line for word in EXCLUDED_PHRASES):
                            failed = True
                            python_utils.PRINT('%s --> Line %s: %s' % (
                                filepath, line_num + 1,
                                multiline_docstring_message))
                            python_utils.PRINT('')
                        is_docstring = False
            docstring_checker = docstrings_checker.ASTDocStringChecker()
            for filepath in files_to_check:
                ast_file = ast.walk(
                    ast.parse(
                        python_utils.convert_to_bytes(
                            FILE_CACHE.read(filepath))))
                func_defs = [n for n in ast_file if isinstance(
                    n, ast.FunctionDef)]
                for func in func_defs:
                    # Check that the args in the docstring are listed in the
                    # same order as they appear in the function definition.
                    func_result = docstring_checker.check_docstrings_arg_order(
                        func)
                    for error_line in func_result:
                        python_utils.PRINT('%s --> Func %s: %s' % (
                            filepath, func.name, error_line))
                        python_utils.PRINT('')
                        failed = True
            python_utils.PRINT('')
            if failed:
                summary_message = (
                    '%s Docstring check failed' % _MESSAGE_TYPE_FAILED)
                python_utils.PRINT(summary_message)
                summary_messages.append(summary_message)
            else:
                summary_message = (
                    '%s Docstring check passed' % _MESSAGE_TYPE_SUCCESS)
                python_utils.PRINT(summary_message)
                summary_messages.append(summary_message)
        self.process_manager['docstrings'] = summary_messages
        _STDOUT_LIST.append(stdout)
def _check_comments(self):
"""This function ensures that comments follow correct style."""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting comment checks')
python_utils.PRINT('----------------------------------------')
summary_messages = []
files_to_check = [
filepath for filepath in self.py_filepaths if not
any(fnmatch.fnmatch(filepath, pattern) for pattern in
EXCLUDED_PATHS)]
message = 'There should be a period at the end of the comment.'
failed = False
space_regex = re.compile(r'^#[^\s].*$')
capital_regex = re.compile('^# [a-z][A-Za-z]* .*$')
stdout = python_utils.string_io()
with _redirect_stdout(stdout):
for filepath in files_to_check:
file_content = FILE_CACHE.readlines(filepath)
file_length = len(file_content)
for line_num in python_utils.RANGE(file_length):
line = file_content[line_num].strip()
next_line = ''
previous_line = ''
if line_num + 1 < file_length:
next_line = file_content[line_num + 1].strip()
if line_num > 0:
previous_line = file_content[line_num - 1].strip()
if line.startswith('#') and not next_line.startswith('#'):
# Check that the comment ends with the proper
# punctuation.
last_char_is_invalid = line[-1] not in (
ALLOWED_TERMINATING_PUNCTUATIONS)
no_word_is_present_in_excluded_phrases = not any(
word in line for word in EXCLUDED_PHRASES)
if last_char_is_invalid and (
no_word_is_present_in_excluded_phrases):
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num + 1, message))
python_utils.PRINT('')
# Check that comment starts with a space and is not a
# shebang expression at the start of a bash script which
# loses funtion when a space is added.
if space_regex.match(line) and not line.startswith('#!'):
message = (
'There should be a space at the beginning '
'of the comment.')
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num + 1, message))
python_utils.PRINT('')
# Check that comment starts with a capital letter.
if not previous_line.startswith('#') and (
capital_regex.match(line)):
message = (
'There should be a capital letter'
' to begin the content of the comment.')
failed = True
python_utils.PRINT('%s --> Line %s: %s' % (
filepath, line_num + 1, message))
python_utils.PRINT('')
python_utils.PRINT('')
if failed:
summary_message = (
'%s Comments check failed' % _MESSAGE_TYPE_FAILED)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = (
'%s Comments check passed' % _MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
self.process_manager['comments'] = summary_messages
_STDOUT_LIST.append(stdout)
def _check_html_tags_and_attributes(self, debug=False):
"""This function checks the indentation of lines in HTML files."""
if self.verbose_mode_enabled:
python_utils.PRINT('Starting HTML tag and attribute check')
python_utils.PRINT('----------------------------------------')
html_files_to_lint = self.html_filepaths
failed = False
summary_messages = []
with _redirect_stdout(_TARGET_STDOUT):
for filepath in html_files_to_lint:
file_content = FILE_CACHE.read(filepath)
file_lines = FILE_CACHE.readlines(filepath)
parser = CustomHTMLParser(filepath, file_lines, debug)
parser.feed(file_content)
if len(parser.tag_stack) != 0:
raise TagMismatchException('Error in file %s\n' % filepath)
if parser.failed:
failed = True
if failed:
summary_message = '%s HTML tag and attribute check failed' % (
_MESSAGE_TYPE_FAILED)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
else:
summary_message = '%s HTML tag and attribute check passed' % (
_MESSAGE_TYPE_SUCCESS)
python_utils.PRINT(summary_message)
summary_messages.append(summary_message)
python_utils.PRINT('')
return summary_messages
def _lint_html_files(self):
    """This function is used to check HTML files for linting errors.

    Runs the htmllint-cli tool through the bundled node binary once per
    HTML file, collects per-file error counts and prints a summary.

    Returns:
        list(str). Summary messages describing the outcome of the run.
    """
    # The node binary lives in oppia_tools, a sibling of the repo root.
    parent_dir = os.path.abspath(os.path.join(os.getcwd(), os.pardir))
    node_path = os.path.join(
        parent_dir, 'oppia_tools', 'node-10.15.3', 'bin', 'node')
    htmllint_path = os.path.join(
        'node_modules', 'htmllint-cli', 'bin', 'cli.js')

    error_summary = []
    total_error_count = 0
    summary_messages = []
    # Base command line; the file to lint is appended per iteration.
    htmllint_cmd_args = [node_path, htmllint_path, '--rc=.htmllintrc']
    html_files_to_lint = self.html_filepaths
    if self.verbose_mode_enabled:
        python_utils.PRINT('Starting HTML linter...')
        python_utils.PRINT('----------------------------------------')
    python_utils.PRINT('')
    if not self.verbose_mode_enabled:
        python_utils.PRINT('Linting HTML files.')

    for filepath in html_files_to_lint:
        proc_args = htmllint_cmd_args + [filepath]
        if self.verbose_mode_enabled:
            python_utils.PRINT('Linting %s file' % filepath)
        with _redirect_stdout(_TARGET_STDOUT):
            proc = subprocess.Popen(
                proc_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

            encoded_linter_stdout, _ = proc.communicate()
            linter_stdout = encoded_linter_stdout.decode(encoding='utf-8')
            # This line splits the output of the linter and extracts digits
            # from it. The digits are stored in a list. The second last
            # digit in the list represents the number of errors in the file.
            # NOTE(review): this assumes the htmllint output always contains
            # at least two standalone numbers; an IndexError is raised
            # otherwise -- confirm against the htmllint-cli output format.
            error_count = (
                [int(s) for s in linter_stdout.split() if s.isdigit()][-2])
            if error_count:
                error_summary.append(error_count)
                python_utils.PRINT(linter_stdout)

    with _redirect_stdout(_TARGET_STDOUT):
        if self.verbose_mode_enabled:
            python_utils.PRINT('----------------------------------------')
        for error_count in error_summary:
            total_error_count += error_count
        total_files_checked = len(html_files_to_lint)
        if total_error_count:
            python_utils.PRINT('(%s files checked, %s errors found)' % (
                total_files_checked, total_error_count))
            summary_message = '%s HTML linting failed' % (
                _MESSAGE_TYPE_FAILED)
            summary_messages.append(summary_message)
        else:
            summary_message = '%s HTML linting passed' % (
                _MESSAGE_TYPE_SUCCESS)
            summary_messages.append(summary_message)

        python_utils.PRINT('')
        python_utils.PRINT(summary_message)
        python_utils.PRINT('HTML linting finished.')
        python_utils.PRINT('')

    return summary_messages
def perform_all_lint_checks(self):
    """Perform all the lint checks and returns the messages returned by all
    the checks.

    Returns:
        all_messages: list(str). All the messages returned by the lint
            checks.
    """
    common_messages = super(
        OtherLintChecksManager, self).perform_all_lint_checks()
    # The division/import and docstring/comment checks run through
    # helpers that store their results in self.process_manager, which is
    # read back below.
    # division_operator_messages = self._check_division_operator()
    # import_order_messages = self._check_import_order()
    self._check_divide_and_import()
    self._check_docstrings_and_comments()
    docstring_messages = self.process_manager['docstrings']
    comment_messages = self.process_manager['comments']
    # The html tags and attributes check has an additional
    # debug mode which when enabled prints the tag_stack for each file.
    html_tag_and_attribute_messages = (
        self._check_html_tags_and_attributes())
    html_linter_messages = self._lint_html_files()
    import_order_messages = self.process_manager['import']
    division_operator_messages = self.process_manager['division']
    all_messages = (
        import_order_messages + common_messages +
        docstring_messages + comment_messages +
        html_tag_and_attribute_messages + html_linter_messages +
        division_operator_messages)
    return all_messages
def _print_complete_summary_of_errors():
    """Print a combined summary of every captured lint error message."""
    combined = _TARGET_STDOUT.getvalue() + ''.join(
        stream.getvalue() for stream in _STDOUT_LIST)
    # Print nothing at all when no check wrote an error message.
    if combined:
        python_utils.PRINT('Summary of Errors:')
        python_utils.PRINT('----------------------------------------')
        python_utils.PRINT(combined)
def read_files(file_paths):
    """Pre-load every file into the shared cache, one reader thread each.

    Args:
        file_paths: list(str). Paths of the files to cache.
    """
    readers = []
    for path in file_paths:
        reader = threading.Thread(target=FILE_CACHE.read, args=(path,))
        reader.start()
        readers.append(reader)
    # Block until every file has been cached.
    for reader in readers:
        reader.join()
def categorize_files(file_paths):
    """Bucket every file path by extension into the shared _FILES dict.

    Paths whose extension is not one of the tracked kinds go into the
    'other' bucket.

    Args:
        file_paths: list(str). Paths of the files to categorize.
    """
    buckets = {
        '.py': [], '.html': [], '.ts': [], '.js': [], 'other': [], '.css': []
    }
    for path in file_paths:
        extension = os.path.splitext(path)[1]
        key = extension if extension in buckets else 'other'
        buckets[key].append(path)
    _FILES.update(buckets)
def _join_linting_process(linting_processes, result_queues, result_stdouts):
    """Join process spawn off by _lint_all_files and capture the outputs.

    Args:
        linting_processes: list(Process). The processes to wait for.
        result_queues: list(Queue). Queues holding summary messages.
        result_stdouts: list(Queue). Queues holding captured stdout text.

    Returns:
        list. The collected summary messages from all queues.
    """
    for process in linting_processes:
        process.join()

    summary_messages = []

    # Drain every queue; the processes have already terminated so no new
    # items can arrive after the empty() check.
    for result_queue in result_queues:
        while not result_queue.empty():
            summary_messages.append(result_queue.get())

    for result_stdout in result_stdouts:
        while not result_stdout.empty():
            summary_messages.append(result_stdout.get())

    with _redirect_stdout(_TARGET_STDOUT):
        # NOTE(review): joining with b'\n' requires every message to be a
        # bytes object; confirm the queues really hold bytes, not str.
        python_utils.PRINT(b'\n'.join(summary_messages))
        python_utils.PRINT('')
        python_utils.PRINT('')

    return summary_messages
def main(args=None):
    """Main method for pre commit linter script that lints Python, JavaScript,
    HTML, and CSS files.

    Args:
        args: list(str)|None. Command line arguments; defaults to sys.argv
            when None.
    """
    parsed_args = _PARSER.parse_args(args=args)
    # Default mode is non-verbose mode, if arguments contains --verbose flag it
    # will be made True, which will represent verbose mode.
    verbose_mode_enabled = bool(parsed_args.verbose)
    all_filepaths = _get_all_filepaths(parsed_args.path, parsed_args.files)

    if len(all_filepaths) == 0:
        python_utils.PRINT('---------------------------')
        python_utils.PRINT('No files to check.')
        python_utils.PRINT('---------------------------')
        return

    # Warm the file cache and sort files by extension before any check runs.
    read_files(all_filepaths)
    categorize_files(all_filepaths)

    # Kick off the per-language lint sub-processes first so they run in
    # parallel with the in-process checks below.
    linting_processes, result_queues, result_stdout = _lint_all_files(
        _FILES['.js'], _FILES['.ts'], _FILES['.py'], _FILES['.html'],
        _FILES['.css'], verbose_mode_enabled)

    code_owner_message = _check_codeowner_file(verbose_mode_enabled)

    # Pylint requires to provide parameter "this_bases" and "d", guess due to
    # meta class.
    js_ts_lint_checks_manager = JsTsLintChecksManager(  # pylint: disable=no-value-for-parameter
        verbose_mode_enabled)
    other_lint_checks_manager = OtherLintChecksManager(  # pylint: disable=no-value-for-parameter
        verbose_mode_enabled)

    all_messages = code_owner_message
    js_message = js_ts_lint_checks_manager.perform_all_lint_checks()
    other_messages = other_lint_checks_manager.perform_all_lint_checks()
    all_messages += js_message + other_messages

    # Collect the results of the background lint processes.
    all_messages += _join_linting_process(
        linting_processes, result_queues, result_stdout)

    _print_complete_summary_of_errors()

    # Exit non-zero when any check reported a failure so that the pre-commit
    # hook blocks the commit.
    if any([message.startswith(_MESSAGE_TYPE_FAILED) for message in
            all_messages]):
        python_utils.PRINT('---------------------------')
        python_utils.PRINT('Checks Not Passed.')
        python_utils.PRINT('---------------------------')
        sys.exit(1)
    else:
        python_utils.PRINT('---------------------------')
        python_utils.PRINT('All Checks Passed.')
        python_utils.PRINT('---------------------------')
# Manager-backed namespace shared with the lint sub-processes.
NAME_SPACE = multiprocessing.Manager().Namespace()
# Manager-backed dict used to track the spawned lint processes.
PROCESSES = multiprocessing.Manager().dict()
# The shared file cache lives in the namespace so every process sees it.
NAME_SPACE.files = FileCache()
FILE_CACHE = NAME_SPACE.files


if __name__ == '__main__':
    main()
|
move_it_robot.py
|
"""
ROSWELD
Version 0.0.1, March 2019
http://rosin-project.eu/ftp/rosweld
Copyright (c) 2019 PPM Robotics AS
This library is part of ROSWELD project,
the Focused Technical Project ROSWELD - ROS based framework
for planning, monitoring and control of multi-pass robot welding
is co-financed by the EU project ROSIN (www.rosin-project.eu)
and ROS-Industrial initiative (www.rosindustrial.eu).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from math import degrees
from threading import Thread, currentThread
import moveit_commander
import moveit_msgs.msg
import numpy as np
import rospy
import rosweld_drivers.srv
from rosweld_drivers.msg import RobotState
import tf
from std_srvs.srv import Empty, EmptyResponse
from src.drivers.misc.status import STATE, status
# Background thread that publishes the robot state (robot_pose_publisher).
thread = None
# Global speed multiplier applied to trajectory timings in sendPlay.
speed = 10.0
# Last published RobotState; used to detect movement between polls.
lastState = None
# True while the robot pose changed between two consecutive polls.
isMoving = False
# Poses stored by store_poses/move_along for later playback.
allPositions = []
# ROS node name (set from the "robot_name" parameter in init()).
name = None
# Index of the last matched step along the stored path (-1 = none).
lastStep = -1
# MoveIt move group commander (created in init()).
group = None
# Publisher for the robot_state topic.
p_pose = None
# Minimum distance between consecutive stored points; used as the match
# threshold in get_robot_step.
min_d = 0
# Force a state publish even when the state has not changed.
do_update = True
def getPositions():
    """Return the list of poses currently stored for playback.

    Returns:
        list -- the module-level allPositions list
    """
    return allPositions
def sendPlay(start, end, d, poses=None):
    """Moves the robot between two given poses

    Plans a Cartesian path through the selected poses with MoveIt and
    executes it asynchronously, scaling the trajectory timing by the
    global speed.

    Arguments:
        start {int32} -- start index
        end {int32} -- end index
        d {int32} -- direction 1: forward, -1: backward

    Keyword Arguments:
        poses {Move[]} -- set of poses, if None, use allPositions (default: {None})
    """
    # The last step along the multiPath
    global lastStep
    global allPositions
    global speed
    global name

    if poses is None:
        poses = allPositions

    # Reset the step tracker so get_robot_step searches from scratch.
    lastStep = -1

    # Create plan
    plan = moveit_msgs.msg.RobotTrajectory()

    # Used to append plans with different speeds
    currentMove = 0

    # Going through all moves
    waypoints = []

    # Collect the poses between start and end; the [::d] slice reverses
    # the range when d == -1 so the path is traversed backwards.
    for currentMove in range(min(start, end), max(start, end) + 1)[::d]:
        p = poses[currentMove]
        waypoints.append(p)

    # Set planning start to the last waypoint - in case it exists
    group.set_start_state_to_current_state()

    # We want the Cartesian path to be interpolated at a resolution of 1 cm
    # which is why we will specify 0.01 as the eef_step in Cartesian
    # translation. We will disable the jump threshold by setting it to 0.0
    # disabling:
    (plan, fraction) = group.compute_cartesian_path(
        waypoints,  # waypoints to follow
        0.01,  # eef_step
        0.0)  # jump_threshold

    # fraction is the portion of the requested path that could be planned;
    # treat an (almost) zero fraction as a planning failure.
    if fraction < sys.float_info.epsilon:
        status(
            name,
            "Can't follow path (fraction: %d)" %
            (fraction),
            STATE.ERROR)
        return

    # Divide the timing using the given speed for this plan
    for p in plan.joint_trajectory.points:
        p.time_from_start /= speed

    # Going through the multiPlan sync; wait=False makes this asynchronous.
    group.execute(plan, wait=False)

    status(name, "Play sent from %d to %d with direction: %d" %
           (start, len(allPositions) if end == -1 else end, d))
def move_along(req):
    """Store the requested moves and play them from first to last.

    Arguments:
        req {MoveAlongRequest} -- set of poses

    Returns:
        MoveAlongResponse -- Empty
    """
    # Keep the poses around for the other services (move_between,
    # step lookup in get_robot_step).
    store_poses(req)
    if req.moves:
        sendPlay(0, len(req.moves) - 1, 1)
    return rosweld_drivers.srv.MoveAlongResponse()
def move_pose(req):
    """Move through the requested poses without storing them.

    Arguments:
        req {MoveAlongRequest} -- set of poses

    Returns:
        MoveAlongResponse -- Empty
    """
    if req.moves:
        sendPlay(0, len(req.moves) - 1, 1,
                 [move.pose for move in req.moves])
    return rosweld_drivers.srv.MoveAlongResponse()
def abort(req):
    """Stop the robot by aborting the currently executing trajectory.

    Arguments:
        req {EmptyRequest} -- Empty

    Returns:
        EmptyResponse -- Empty
    """
    group.stop()
    status(name, "Abort sent.")
    return EmptyResponse()
def robot_pose_publisher():
    """Publishes the robot pose

    Runs in a different thread, polling the robot pose five times a second
    and publishing it whenever it changed (or an update was forced via
    do_update).
    """
    rate = rospy.Rate(5)  # poll at 5 Hz
    t = currentThread()

    global lastState
    global isMoving
    global do_update

    # Keep polling until ROS shuts down or the owner clears t.do_run
    # (see init(), which sets do_run = False on shutdown).
    while not rospy.is_shutdown() and getattr(t, "do_run", True):
        state = RobotState()
        state.pose = group.get_current_pose().pose
        state.step = get_robot_step(state.pose.position)
        state.joints = group.get_current_joint_values()
        # Convert the orientation quaternion to roll/pitch/yaw.
        rpy = tf.transformations.euler_from_quaternion(
            np.array([
                state.pose.orientation.x,
                state.pose.orientation.y,
                state.pose.orientation.z,
                state.pose.orientation.w]))
        # The robot counts as moving when the pose changed between two
        # consecutive polls.
        isMoving = lastState is not None and state.pose != lastState.pose
        state.euler_angles.rx = degrees(rpy[0])
        state.euler_angles.ry = degrees(rpy[1])
        state.euler_angles.rz = degrees(rpy[2])
        state.isMoving = isMoving

        # Publish only on change, unless an update was explicitly requested.
        if lastState != state or do_update:
            p_pose.publish(state)
            do_update = False
            lastState = state

        rate.sleep()
def get_min_distance():
    """Return the smallest Euclidean distance between consecutive stored poses.

    Returns:
        float -- minimum distance between neighbouring points in
            allPositions, or 0 when no poses are stored. (With a single
            stored pose the loop never runs and sys.float_info.max is
            returned, matching the previous behaviour.)
    """
    global allPositions

    if len(allPositions) == 0:
        return 0

    min_d = sys.float_info.max
    for i in range(len(allPositions) - 1):
        pos1 = allPositions[i].position
        pos2 = allPositions[i + 1].position
        p1 = np.array([pos1.x, pos1.y, pos1.z])
        p2 = np.array([pos2.x, pos2.y, pos2.z])
        # Distance between the two points. The original summed the squared
        # coordinates (p1**2 + p2**2) instead of the squared difference,
        # which produced a value unrelated to the actual gap between the
        # poses and broke the matching threshold in get_robot_step.
        dist = np.sqrt(np.sum((p1 - p2) ** 2))
        min_d = min(min_d, dist)

    return min_d
def get_robot_step(currentPos):
    """Publishes the current approximate step on the path

    Searches for the closest point on the given path and publishes it's index
    The algorithm searches in a circle, trying to find a point after the current one

    Arguments:
        currentPos {Position} -- current position x,y,z

    Returns:
        int32 -- current pose index in the path, or -1 when no stored pose
            is within the acceptance threshold
    """
    global lastStep
    global allPositions
    global min_d

    try:
        minD = 9999
        minI = -1
        inThreshold = False  # NOTE(review): assigned but never used.

        if len(allPositions) != 0:
            posCount = len(allPositions)

            for i in range(0, posCount):
                # Go in a circle, starting from the current position
                i = (lastStep + i) % posCount
                p = allPositions[i].position

                # The distance of the current robot pose and the checked pose
                # (Manhattan distance -- cheaper than Euclidean).
                d = abs(p.x - currentPos.x) + \
                    abs(p.y - currentPos.y) + abs(p.z - currentPos.z)

                # Marking a new best match; a pose only qualifies when it is
                # closer than half the minimum gap between stored poses.
                if d < minD and d < min_d / 2:
                    minD = d
                    minI = i

    # Catches, if the allPositions is not yet defined
    except NameError as e:
        global name
        status(name, e, STATE.ERROR)

    # Remember the match so the next search starts from here.
    lastStep = minI
    return minI
def set_speed(req):
    """Service handler that updates the global playback speed.

    Arguments:
        req {SetSpeedRequest} -- request object, containing the value

    Returns:
        SetSpeedResponse -- response objet, empty
    """
    global speed

    speed = req.value
    status(name, "SetSpeed sent: %d." % (req.value))
    return rosweld_drivers.srv.SetSpeedResponse()
def move_between(req):
    """Handler for move between service request

    Moves the robot between two stored poses.

    Arguments:
        req {MoveBetweenRequest} -- request object, containing the start
            and stop index

    Returns:
        MoveBetweenResponse -- response object carrying the result message
    """
    resp = rosweld_drivers.srv.MoveBetweenResponse()

    # Both indices must address a stored pose.
    if req.start < 0 or req.start >= len(
            allPositions) or req.end < 0 or req.end >= len(allPositions):
        resp.result = "Invalid move"
        status(
            name,
            "Invalid move, the selected steps has to be between 0 and %d" %
            (len(allPositions)),
            STATE.ERROR)
        return resp

    # Fixed: the original rebound `resp` to the plain string "OK" here,
    # so the service returned a str instead of a MoveBetweenResponse.
    # (A stray Python-2 `print req, ...` debug statement was also removed.)
    resp.result = "OK"
    d = -1 if req.start > req.end else 1
    sendPlay(req.start, req.end, d)
    return resp
def store_poses(req):
    """Cache the requested poses for later playback.

    Arguments:
        req {MoveAlongRequest} -- request object, containing the moves

    Returns:
        MoveAlongResponse -- response object, empty
    """
    global allPositions
    global min_d

    allPositions = [move.pose for move in req.moves]
    # Refresh the matching threshold used by get_robot_step.
    min_d = get_min_distance()
    return rosweld_drivers.srv.MoveAlongResponse()
def update(req):
    """Force the state publisher to emit the next polled state.

    Arguments:
        req {EmptyRequest} -- Empty

    Returns:
        EmptyResponse -- Empty
    """
    global do_update

    # robot_pose_publisher clears this flag after the forced publish.
    do_update = True
    return EmptyResponse()
def init():
    """ Initalizes the robot controller

    Creates all variables required to control the robot.
    Moves the robot to a neutral position.
    """
    global thread
    global name

    name = rospy.get_param("robot_name", "move_it_robot")
    rospy.init_node(name, anonymous=True)

    # MoveIt has to be running before the commander can attach to it:
    # warn once after a 1 second timeout, then block until it appears.
    try:
        rospy.wait_for_service('move_group/load_map', 1)
    except rospy.exceptions.ROSException:
        status(name, "Please start MoveIt first", STATE.ERROR)
        rospy.wait_for_service('move_group/load_map')

    moveit_commander.roscpp_initialize(sys.argv)
    moveit_commander.RobotCommander()
    moveit_commander.PlanningSceneInterface()

    global group
    group_name = rospy.get_param("move_group_name", "welding")
    group = moveit_commander.MoveGroupCommander(group_name)

    # # Registering services
    rospy.Service('move_along', rosweld_drivers.srv.MoveAlong, move_along)
    rospy.Service('abort', Empty, abort)
    rospy.Service('update', Empty, update)
    rospy.Service('store_poses', rosweld_drivers.srv.MoveAlong, store_poses)
    rospy.Service('move_pose', rosweld_drivers.srv.MoveAlong, move_pose)
    rospy.Service('set_speed', rosweld_drivers.srv.SetSpeed, set_speed)
    rospy.Service(
        'move_between',
        rosweld_drivers.srv.MoveBetween,
        move_between)

    # Registering publishers
    global p_pose
    p_pose = rospy.Publisher(
        'robot_state',
        RobotState,
        queue_size=100,
        latch=True)

    status(name, "MoveIt Robot Driver - ready (%s)" % (name))

    # Starts the robot pose publisher on a new thread
    thread = Thread(target=robot_pose_publisher)
    thread.do_run = True
    thread.start()

    try:
        # Wait for service calls or user interruption
        rospy.spin()
    finally:
        # Always stop the publisher thread before exiting.
        thread.do_run = False
        thread.join()
        status(name, "MoveIt Robot Driver - stopped")


if __name__ == "__main__":
    init()
|
camera.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 24 20:46:25 2017
Thread that gets frames from a camera
It *is* OK to use one Camera in multiple Processors
"""
## NOTE: OpenCV interface to camera controls is sketchy
## use v4l2-ctl directly for explicit control
## example for dark picture: v4l2-ctl -c exposure_auto=1 -c exposure_absolute=10
import cv2
from subprocess import call
from threading import Thread
from threading import Lock
from framerate import FrameRate
from cubbyhole import Cubbyhole
import platform
class Camera:
    """Grabs frames from a capture device on a background thread and hands
    a private copy of each frame to every registered user.

    It *is* OK to use one Camera in multiple Processors: each user gets
    its own Cubbyhole mailbox.
    """

    def __init__(self, name, src, width, height, exposure):
        """Open the capture device and read back its current settings.

        Arguments:
            name: label used in console messages.
            src: device index (or URL) passed to cv2.VideoCapture.
            width, height: requested capture resolution.
            exposure: initial exposure value (see setExposure).
        """
        print("Creating Camera " + name)
        self.name = name
        self.src = src
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH,width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT,height)

        # Start from None so the first setExposure call always applies.
        self.exposure = None
        self.setExposure(exposure)

        self.fps = FrameRate()
        self.running = False

        # Dict maps user (client) to the Cubbyhole instance used to pass it frames
        self.userDict = {}
        self.userDictLock = Lock() # Protects shared access to userDict

        # Echo the device's effective settings for debugging.
        self.rate = self.stream.get(cv2.CAP_PROP_FPS)
        print("RATE = " + str(self.rate))
        self.brightness = self.stream.get(cv2.CAP_PROP_BRIGHTNESS)
        print("BRIGHT = " + str(self.brightness))
        self.contrast = self.stream.get(cv2.CAP_PROP_CONTRAST)
        print("CONTRAST = " + str(self.contrast))
        self.saturation = self.stream.get(cv2.CAP_PROP_SATURATION)
        print("SATURATION = " + str(self.saturation))
        print("EXPOSURE = " + str(self.exposure))

    def start(self):
        """Start the frame-grabbing loop on a daemon thread; returns self."""
        print("Camera " + self.name + " STARTING")
        t = Thread(target=self.run, args=())
        t.daemon = True
        t.start()
        return self

    def run(self):
        """Frame-grabbing loop: read frames forever and distribute copies."""
        print("Camera " + self.name + " RUNNING")
        self.running = True
        while True:
            (grabbed, frame) = self.stream.read()
            self.fps.start()
            # grabbed will be false if camera has been disconnected.
            # How to deal with that??
            # Should probably try to reconnect somehow? Don't know how...
            if grabbed:
                # Pass a copy of the frame to each user in userDict.
                # Snapshot the values under the lock, then put() outside it
                # so a slow consumer cannot block registration.
                self.userDictLock.acquire()
                values = self.userDict.values()
                self.userDictLock.release()
                for mb in values:
                    mb.put(frame.copy())
            self.fps.stop()

    def read(self, user):
        # See if this user already registered in userDict.
        # If not, create a new Cubbyhole instance to pass frames to user.
        # If so, just get the user's Cubbyhole instance.
        # Then get the frame from the Cubbyhole & return it.
        self.userDictLock.acquire()
        if not user in self.userDict:
            self.userDict[user] = Cubbyhole()
        mb = self.userDict[user]
        self.userDictLock.release()
        return mb.get()

    def processUserCommand(self, key):
        """Adjust brightness/contrast/saturation/exposure from a keypress
        (w/s, d/a, e/q, z/c respectively)."""
        if key == ord('w'):
            self.brightness+=1
            self.stream.set(cv2.CAP_PROP_BRIGHTNESS,self.brightness)
            print("BRIGHT = " + str(self.brightness))
        elif key == ord('s'):
            self.brightness-=1
            self.stream.set(cv2.CAP_PROP_BRIGHTNESS,self.brightness)
            print("BRIGHT = " + str(self.brightness))
        elif key == ord('d'):
            self.contrast+=1
            self.stream.set(cv2.CAP_PROP_CONTRAST,self.contrast)
            print("CONTRAST = " + str(self.contrast))
        elif key == ord('a'):
            self.contrast-=1
            self.stream.set(cv2.CAP_PROP_CONTRAST,self.contrast)
            print("CONTRAST = " + str(self.contrast))
        elif key == ord('e'):
            self.saturation+=1
            self.stream.set(cv2.CAP_PROP_SATURATION,self.saturation)
            print("SATURATION = " + str(self.saturation))
        elif key == ord('q'):
            self.saturation-=1
            self.stream.set(cv2.CAP_PROP_SATURATION,self.saturation)
            print("SATURATION = " + str(self.saturation))
        elif key == ord('z'):
            self.setExposure(self.exposure+1)
            print("EXPOSURE = " + str(self.exposure))
        elif key == ord('c'):
            self.setExposure(self.exposure-1)
            print("EXPOSURE = " + str(self.exposure))
##        elif key == ord('p'):
##            self.iso +=1
##            self.stream.set(cv2.CAP_PROP_ISO_SPEED, self.iso)
##        elif key == ord('i'):
##            self.iso -=1
##            self.stream.set(cv2.CAP_PROP_ISO_SPEED, self.iso)

    def setExposure(self, exposure):
        """Set camera exposure, skipping the call when unchanged."""
        if self.exposure == exposure :
            return
        self.exposure = exposure
        # cv2 exposure control DOES NOT WORK ON PI
        if (platform.system() == 'Windows' or platform.system() == 'Darwin'):
            self.stream.set(cv2.CAP_PROP_EXPOSURE, self.exposure)
        else:
            # Fall back to v4l2-ctl on Linux (e.g. Raspberry Pi).
            cmd = ['v4l2-ctl --device=' + str(self.src) + ' -c exposure_auto=1 -c exposure_absolute=' + str(self.exposure)]
            call(cmd,shell=True)
        return

    def isRunning(self):
        # NOTE(review): run() never resets this flag, so it stays True once
        # the grab loop has started -- confirm whether that is intended.
        return self.running
|
red_test.py
|
#!/usr/bin/env python
import logging
from redbot.resource import HttpResource
import redbot.speak as rs
import thor
import threading
from tornado import gen
from tornado.options import parse_command_line
from tornado.testing import AsyncHTTPTestCase
from tornado.web import RequestHandler, Application, asynchronous
import unittest
class HelloHandler(RequestHandler):
    """Returns a constant body; baseline case for the redbot checks."""
    def get(self):
        self.write("Hello world")
class RedirectHandler(RequestHandler):
    """Redirects to ``path`` using the status code given by the ``status``
    query argument (default 302)."""
    def get(self, path):
        self.redirect(path, status=int(self.get_argument('status', '302')))
class PostHandler(RequestHandler):
    """Accepts a form POST (expects foo=bar) and answers with a
    303 See Other redirect to /hello."""
    def post(self):
        assert self.get_argument('foo') == 'bar'
        self.redirect('/hello', status=303)
class ChunkedHandler(RequestHandler):
    """Sends the response in two explicitly flushed pieces so the
    transfer ends up chunk-encoded."""
    @asynchronous
    @gen.engine
    def get(self):
        self.write('hello ')
        # Each explicit flush pushes the buffered bytes as one chunk.
        yield gen.Task(self.flush)
        self.write('world')
        yield gen.Task(self.flush)
        self.finish()
class CacheHandler(RequestHandler):
    """Echoes the path segment and uses it verbatim as the response ETag,
    letting the cache tests control the computed etag exactly."""
    def get(self, computed_etag):
        self.write(computed_etag)

    def compute_etag(self):
        # The response body (first write-buffer entry) *is* the etag.
        return self._write_buffer[0]
class TestMixin(object):
    """Shared redbot-based HTTP compliance checks, mixed into the
    AsyncHTTPTestCase subclasses below."""

    def get_handlers(self):
        """URL routes for the test application."""
        return [
            ('/hello', HelloHandler),
            ('/redirect(/.*)', RedirectHandler),
            ('/post', PostHandler),
            ('/chunked', ChunkedHandler),
            ('/cache/(.*)', CacheHandler),
        ]

    def get_app_kwargs(self):
        # Serve this directory as /static for the static-file tests.
        return dict(static_path='.')

    def get_allowed_warnings(self):
        """redbot warnings that are expected and must not fail a test."""
        return [
            # We can't set a non-heuristic freshness at the framework level,
            # so just ignore this warning
            rs.FRESHNESS_HEURISTIC,
            # For our small test responses the Content-Encoding header
            # wipes out any gains from compression
            rs.CONNEG_GZIP_BAD,
        ]

    def get_allowed_errors(self):
        """redbot errors that are expected; none by default."""
        return []

    def check_url(self, path, method='GET', body=None, headers=None,
                  expected_status=200, allowed_warnings=None,
                  allowed_errors=None):
        """Fetch ``path`` through redbot; assert the status code and that
        no redbot notes outside the allowed lists were produced."""
        url = self.get_url(path)
        red = self.run_redbot(url, method, body, headers)
        if not red.response.complete:
            if isinstance(red.response.http_error, Exception):
                logging.warning((red.response.http_error.desc, vars(red.response.http_error), url))
                raise red.response.http_error.res_error
            else:
                raise Exception("unknown error; incomplete response")

        self.assertEqual(int(red.response.status_code), expected_status)

        allowed_warnings = (allowed_warnings or []) + self.get_allowed_warnings()
        allowed_errors = (allowed_errors or []) + self.get_allowed_errors()

        errors = []
        warnings = []
        for msg in red.response.notes:
            if msg.level == 'bad':
                logger = logging.error
                if not isinstance(msg, tuple(allowed_errors)):
                    errors.append(msg)
            elif msg.level == 'warning':
                logger = logging.warning
                if not isinstance(msg, tuple(allowed_warnings)):
                    warnings.append(msg)
            elif msg.level in ('good', 'info', 'uri'):
                logger = logging.info
            else:
                raise Exception('unknown level' + msg.level)
            # Every note is logged, allowed or not, to aid debugging.
            logger('%s: %s (%s)', msg.category, msg.show_summary('en'),
                   msg.__class__.__name__)
            logger(msg.show_text('en'))

        self.assertEqual(len(warnings) + len(errors), 0,
                         'Had %d unexpected warnings and %d errors' %
                         (len(warnings), len(errors)))

    def run_redbot(self, url, method, body, headers):
        """Run a redbot HttpResource against ``url`` on a worker thread,
        pumping the thor event loop until it completes."""
        red = HttpResource(url, method=method, req_body=body,
                           req_hdrs=headers)

        def work():
            red.run(thor.stop)
            thor.run()
            # Wake the tornado IOLoop so self.wait() below returns.
            self.io_loop.add_callback(self.stop)

        thread = threading.Thread(target=work)
        thread.start()
        self.wait()
        thread.join()
        return red

    def test_hello(self):
        self.check_url('/hello')

    def test_static(self):
        # TODO: 304 responses SHOULD return the same etag that a full
        # response would.  We currently do for If-None-Match, but not
        # for If-Modified-Since (because IMS does not otherwise
        # require us to read the file from disk)
        self.check_url('/static/red_test.py',
                       allowed_warnings=[rs.MISSING_HDRS_304])

    def test_static_versioned_url(self):
        self.check_url('/static/red_test.py?v=1234',
                       allowed_warnings=[rs.MISSING_HDRS_304])

    def test_redirect(self):
        self.check_url('/redirect/hello', expected_status=302)

    def test_permanent_redirect(self):
        self.check_url('/redirect/hello?status=301', expected_status=301)

    def test_404(self):
        self.check_url('/404', expected_status=404)

    def test_post(self):
        body = 'foo=bar'
        # Without an explicit Content-Length redbot will try to send the
        # request chunked.
        self.check_url(
            '/post', method='POST', body=body,
            headers=[('Content-Length', str(len(body))),
                     ('Content-Type', 'application/x-www-form-urlencoded')],
            expected_status=303)

    def test_chunked(self):
        self.check_url('/chunked')

    def test_strong_etag_match(self):
        computed_etag = '"xyzzy"'
        etags = '"xyzzy"'
        self.check_url(
            '/cache/' + computed_etag, method='GET',
            headers=[('If-None-Match', etags)],
            expected_status=304)

    def test_multiple_strong_etag_match(self):
        computed_etag = '"xyzzy1"'
        etags = '"xyzzy1", "xyzzy2"'
        self.check_url(
            '/cache/' + computed_etag, method='GET',
            headers=[('If-None-Match', etags)],
            expected_status=304)

    def test_strong_etag_not_match(self):
        computed_etag = '"xyzzy"'
        etags = '"xyzzy1"'
        self.check_url(
            '/cache/' + computed_etag, method='GET',
            headers=[('If-None-Match', etags)],
            expected_status=200)

    def test_multiple_strong_etag_not_match(self):
        computed_etag = '"xyzzy"'
        etags = '"xyzzy1", "xyzzy2"'
        self.check_url(
            '/cache/' + computed_etag, method='GET',
            headers=[('If-None-Match', etags)],
            expected_status=200)

    def test_wildcard_etag(self):
        computed_etag = '"xyzzy"'
        etags = '*'
        self.check_url(
            '/cache/' + computed_etag, method='GET',
            headers=[('If-None-Match', etags)],
            expected_status=304,
            allowed_warnings=[rs.MISSING_HDRS_304])

    def test_weak_etag_match(self):
        computed_etag = '"xyzzy1"'
        etags = 'W/"xyzzy1"'
        self.check_url(
            '/cache/' + computed_etag, method='GET',
            headers=[('If-None-Match', etags)],
            expected_status=304)

    def test_multiple_weak_etag_match(self):
        computed_etag = '"xyzzy2"'
        etags = 'W/"xyzzy1", W/"xyzzy2"'
        self.check_url(
            '/cache/' + computed_etag, method='GET',
            headers=[('If-None-Match', etags)],
            expected_status=304)

    def test_weak_etag_not_match(self):
        computed_etag = '"xyzzy2"'
        etags = 'W/"xyzzy1"'
        self.check_url(
            '/cache/' + computed_etag, method='GET',
            headers=[('If-None-Match', etags)],
            expected_status=200)

    def test_multiple_weak_etag_not_match(self):
        computed_etag = '"xyzzy3"'
        etags = 'W/"xyzzy1", W/"xyzzy2"'
        self.check_url(
            '/cache/' + computed_etag, method='GET',
            headers=[('If-None-Match', etags)],
            expected_status=200)
class DefaultHTTPTest(AsyncHTTPTestCase, TestMixin):
    """Runs the TestMixin checks against a plain tornado Application."""
    def get_app(self):
        return Application(self.get_handlers(), **self.get_app_kwargs())
class GzipHTTPTest(AsyncHTTPTestCase, TestMixin):
    """Runs the TestMixin checks with gzip content-encoding enabled."""
    def get_app(self):
        return Application(self.get_handlers(), gzip=True, **self.get_app_kwargs())

    def get_allowed_errors(self):
        return super(GzipHTTPTest, self).get_allowed_errors() + [
            # TODO: The Etag is supposed to change when Content-Encoding is
            # used. This should be fixed, but it's difficult to do with the
            # way GZipContentEncoding fits into the pipeline, and in practice
            # it doesn't seem likely to cause any problems as long as we're
            # using the correct Vary header.
            rs.VARY_ETAG_DOESNT_CHANGE,
        ]
if __name__ == '__main__':
    # Parse tornado command-line flags (e.g. logging) before running tests.
    parse_command_line()
    unittest.main()
|
test_threads.py
|
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Tests the h5py.File object.
"""
import threading
import h5py
from .common import ut, TestCase
class TestErrorPrinting(TestCase):

    """
    Verify the error printing is squashed in all threads.
    """

    def test_printing(self):
        """ No console messages should be shown from membership tests """
        # Unfortunately we can't have this test assert anything, as
        # HDF5 writes directly to stderr. But it will show up in the
        # console output.
        # (The redundant nested "import threading" was removed; the module
        # already imports threading at the top of the file.)
        def test():
            with h5py.File(self.mktemp(), 'w') as newfile:
                try:
                    doesnt_exist = newfile['doesnt_exist'].value
                except KeyError:
                    pass

        th = threading.Thread(target=test)
        th.start()
        th.join()

    def test_attr_printing(self):
        """ No console messages should be shown for non-existing attributes """
        def test():
            with h5py.File(self.mktemp(), 'w') as newfile:
                newfile['newdata'] = [1, 2, 3]
                try:
                    nonexistent_attr = newfile['newdata'].attrs['nonexistent_attr']
                except KeyError:
                    pass

        th = threading.Thread(target=test)
        th.start()
        th.join()
|
main_window.py
|
#!/usr/bin/env python
#
# Electrum - lightweight ParkByte client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys, time, threading
import os, json, traceback
import shutil
import socket
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import PyQt4
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtCore as QtCore
import icons_rc
from electrum.parkbyte import COIN, is_valid, TYPE_ADDRESS
from electrum.plugins import run_hook
from electrum.i18n import _
from electrum.util import (block_explorer, block_explorer_info, format_time,
block_explorer_URL, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds, StoreDict,
UserCancelled)
from electrum import Transaction, mnemonic
from electrum import util, parkbyte, commands, coinchooser
from electrum import SimpleConfig, paymentrequest
from electrum.wallet import Wallet, BIP32_RD_Wallet, Multisig_Wallet
from amountedit import BTCAmountEdit, MyLineEdit, BTCkBEdit
from network_dialog import NetworkDialog
from qrcodewidget import QRCodeWidget, QRDialog
from qrtextedit import ShowQRTextEdit
from transaction_dialog import show_transaction
from electrum import ELECTRUM_VERSION
import re
from util import *
class StatusBarButton(QPushButton):
    '''Flat, fixed-width status-bar button that invokes a callback on
    click or on the Return key.'''

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.func = func
        self.setToolTip(tooltip)
        self.setIconSize(QSize(25, 25))
        self.setMaximumWidth(25)
        self.setFlat(True)
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        '''Invoke the callback, dropping the unwanted PyQt4 "checked" argument.'''
        self.func()

    def keyPressEvent(self, e):
        # Treat Return like a click.
        if e.key() == QtCore.Qt.Key_Return:
            self.func()
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
# Icon resource path per payment-request status.  PR_UNKNOWN deliberately
# has no entry: callers look it up with dict.get() and show no icon.
pr_icons = {
    PR_UNPAID:":icons/unpaid.png",
    PR_PAID:":icons/confirmed.png",
    PR_EXPIRED:":icons/expired.png"
}
# Human-readable status label per payment-request status; again no entry
# for PR_UNKNOWN (callers fall back to an empty string).
pr_tooltips = {
    PR_UNPAID:_('Pending'),
    PR_PAID:_('Paid'),
    PR_EXPIRED:_('Expired')
}
# (label, lifetime-in-seconds) choices for the "Request expires" combo;
# None means the request never expires.
expiration_values = [
    (_('1 hour'), 60*60),
    (_('1 day'), 24*60*60),
    (_('1 week'), 7*24*60*60),
    (_('Never'), None)
]
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
    def __init__(self, gui_object, wallet):
        '''Build the main window (status bar, tabs, menus, shortcuts,
        network callbacks) and load the given wallet.'''
        QMainWindow.__init__(self)
        self.gui_object = gui_object
        self.config = config = gui_object.config
        self.network = gui_object.daemon.network
        self.invoices = gui_object.invoices
        self.contacts = gui_object.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self.cleaned_up = False
        self.create_status_bar()
        self.need_update = threading.Event()
        # Display units: decimal places (2/5/8, see base_unit) and the
        # number of zeros shown after the point.
        self.decimal_point = config.get('decimal_point', 5)
        self.num_zeros = int(config.get('num_zeros',0))
        self.completions = QStringListModel()
        self.tabs = tabs = QTabWidget(self)
        tabs.addTab(self.create_history_tab(), _('History') )
        tabs.addTab(self.create_send_tab(), _('Send') )
        tabs.addTab(self.create_receive_tab(), _('Receive') )
        tabs.addTab(self.create_addresses_tab(), _('Addresses') )
        tabs.addTab(self.create_contacts_tab(), _('Contacts') )
        tabs.addTab(self.create_console_tab(), _('Console') )
        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setCentralWidget(tabs)
        if self.config.get("is_maximized"):
            self.showMaximized()
        self.setWindowIcon(QIcon(":icons/electrum.png"))
        self.init_menubar()
        # Weak proxy so the shortcut lambdas below don't keep the tab
        # widget (and hence this window) alive after close.
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        for i in range(wrtabs.count()):
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
        self.connect(self, QtCore.SIGNAL('payment_request_ok'), self.payment_request_ok)
        self.connect(self, QtCore.SIGNAL('payment_request_error'), self.payment_request_error)
        self.history_list.setFocus(True)
        # network callbacks
        if self.network:
            self.connect(self, QtCore.SIGNAL('network'), self.on_network_qt)
            interests = ['updated', 'new_transaction', 'status',
                         'banner', 'verified']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            self.network.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
        self.payment_request = None
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.fetch_alias()
        self.require_fee_update = False
        self.tx_notifications = []
        self.tl_windows = []
        self.load_wallet(wallet)
        self.connect_slots(gui_object.timer)
    def push_top_level_window(self, window):
        '''Register *window* as the current top-level dialog so new dialogs
        (e.g. the tx dialog) are parented appropriately.  This used to be
        done by explicitly providing the parent window, but that isn't
        something hardware wallet prompts know.'''
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        '''Unregister *window*; counterpart of push_top_level_window.'''
        self.tl_windows.remove(window)
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
    def is_hidden(self):
        '''Return True when the window is minimized or not shown.'''
        return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
    def bring_to_top(self):
        '''Show the window and raise it above other windows.'''
        self.show()
        self.raise_()
    def on_error(self, exc_info):
        '''Report an exception to the user; user-cancelled operations are
        silently ignored.  *exc_info* is a sys.exc_info() triple.'''
        if not isinstance(exc_info[1], UserCancelled):
            traceback.print_exception(*exc_info)
            self.show_error(str(exc_info[1]))
    def on_network(self, event, *args):
        '''Network-thread callback: record work for the GUI thread or
        forward the event to it via a Qt signal.  Must not touch widgets
        directly since it does not run in the GUI thread.'''
        if event == 'updated':
            self.need_update.set()
        elif event == 'new_transaction':
            self.tx_notifications.append(args[0])
        elif event in ['status', 'banner', 'verified']:
            # Handle in GUI thread
            self.emit(QtCore.SIGNAL('network'), event, *args)
        else:
            self.print_error("unexpected network message:", event, args)
    def on_network_qt(self, event, *args):
        # Handle a network message in the GUI thread
        if event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            self.history_list.update_item(*args)
        else:
            self.print_error("unexpected network_qt signal:", event, args)
    def fetch_alias(self):
        '''Resolve the configured OpenAlias in a background daemon thread;
        emits 'alias_received' when self.alias_info has been set.'''
        self.alias_info = None
        alias = self.config.get('alias')
        if alias:
            alias = str(alias)
            def f():
                # runs on the worker thread; only assigns and emits
                self.alias_info = self.contacts.resolve_openalias(alias)
                self.emit(SIGNAL('alias_received'))
            t = threading.Thread(target=f)
            t.setDaemon(True)
            t.start()
    def update_account_selector(self):
        # account selector: only shown when the wallet has more than one
        # account; index 0 is the synthetic "All accounts" entry.
        accounts = self.wallet.get_account_names()
        self.account_selector.clear()
        if len(accounts) > 1:
            self.account_selector.addItems([_("All accounts")] + accounts.values())
            self.account_selector.setCurrentIndex(0)
            self.account_selector.show()
        else:
            self.account_selector.hide()
    def close_wallet(self):
        '''Persist per-wallet UI state and run plugin close hooks.'''
        if self.wallet:
            self.print_error('close_wallet', self.wallet.storage.path)
            self.wallet.storage.put('accounts_expanded', self.accounts_expanded)
        run_hook('close_wallet', self.wallet)
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.wallet = wallet
self.update_recently_visited(wallet.storage.path)
self.import_old_contacts()
# address used to create a dummy transaction and estimate transaction fee
self.accounts_expanded = self.wallet.storage.get('accounts_expanded',{})
self.current_account = self.wallet.storage.get("current_account", None)
self.history_list.update()
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
self.notify_transactions()
# update menus
self.update_new_account_menu()
self.seed_menu.setEnabled(self.wallet.has_seed())
self.mpk_menu.setEnabled(self.wallet.is_deterministic())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.receive_list.update()
self.tabs.show()
try:
self.setGeometry(*self.wallet.storage.get("winpos-qt"))
except:
self.setGeometry(100, 100, 840, 400)
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
    def watching_only_changed(self):
        '''Update the window title and menu availability to reflect the
        wallet's watching-only status.'''
        title = 'Electrum %s - %s' % (self.wallet.electrum_version,
                                      self.wallet.basename())
        if self.wallet.is_watching_only():
            self.warn_if_watching_only()
            title += ' [%s]' % (_('watching only'))
        self.setWindowTitle(title)
        self.password_menu.setEnabled(self.wallet.can_change_password())
        self.import_menu.setVisible(self.wallet.can_import())
        self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        '''Pop an informational dialog when the wallet cannot spend.'''
        if self.wallet.is_watching_only():
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend ParkBytes with it."),
                _("Make sure you own the seed phrase or the private keys, before you request ParkBytes to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Information'))
    def import_old_contacts(self):
        # backward compatibility: import contacts stored under the old
        # wallet-file key, then clear that key so this runs only once.
        old_contacts = self.wallet.storage.get('contacts', [])
        if old_contacts:
            for k in set(old_contacts):
                l = self.wallet.labels.get(k)
                # only labelled entries that are valid addresses migrate
                if parkbyte.is_address(k) and l:
                    self.contacts[l] = ('address', k)
            self.wallet.storage.put('contacts', None)
    def open_wallet(self):
        '''Ask the user for a wallet file and open it in a new window.'''
        wallet_folder = self.get_wallet_folder()
        filename = unicode(QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder))
        if not filename:
            return
        self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename = unicode( QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder) )
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error), reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
wallet_folder = self.get_wallet_folder()
i = 1
while True:
filename = "wallet_%d" % i
if filename in os.listdir(wallet_folder):
i += 1
else:
break
filename = line_dialog(self, _('New Wallet'), _('Enter file name')
+ ':', _('OK'), filename)
if not filename:
return
full_path = os.path.join(wallet_folder, filename)
if os.path.exists(full_path):
self.show_critical(_("File exists"))
return
self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        '''Create the File/Wallet/Tools/Help menu bar.  Stores the menu
        actions whose enabled/visible state is toggled elsewhere
        (seed_menu, mpk_menu, password_menu, import/export menus).'''
        menubar = QMenuBar()
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&New contact"), self.new_contact_dialog)
        self.new_account_menu = wallet_menu.addAction(_("&New account"), self.new_account_dialog)
        wallet_menu.addSeparator()
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.mpk_menu = wallet_menu.addAction(_("&Master Public Keys"), self.show_master_public_keys)
        wallet_menu.addSeparator()
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        wallet_menu.addAction(_("&Export History"), self.export_history_dialog)
        wallet_menu.addAction(_("Search"), self.toggle_search).setShortcut(QKeySequence("Ctrl+S"))
        tools_menu = menubar.addMenu(_("&Tools"))
        # Settings / Preferences are all reserved keywords in OSX using this as work around
        tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), self.run_network_dialog)
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Official ParkByte Website"), lambda: webbrowser.open("https://www.parkbyte.com"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Electrum Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
    def donate_to_server(self):
        '''Open the Send tab pre-filled with the connected server's
        donation address (no-op message if not connected).'''
        if self.network.is_connected():
            d = self.network.get_donation_address()
            host = self.network.get_parameters()[0]
            self.pay_to_URI('parkbyte:%s?message=donation for %s'%(d, host))
    def show_about(self):
        '''Show the About dialog with version and project description.'''
        QMessageBox.about(self, "Electrum",
            _("Version")+" %s" % (self.wallet.electrum_version) + "\n\n" + _("Electrum's focus is speed, with low resource usage and simplifying ParkByte. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the ParkByte system."))
    def show_report_bug(self):
        '''Show bug-reporting instructions with a link to the tracker.'''
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            "<a href=\"https://github.com/parkbyte/electrum-parkbyte/issues\">https://github.com/parkbyte/electrum-parkbyte/issues</a><br/><br/>",
            _("Before reporting a bug, upgrade to the most recent version of Electrum-ParkByte (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum - " + _("Reporting Bugs"))
def notify_transactions(self):
if not self.network or not self.network.is_connected():
return
self.print_error("Notifying GUI")
if len(self.tx_notifications) > 0:
# Combine the transactions if there are more then three
tx_amount = len(self.tx_notifications)
if(tx_amount >= 3):
total_amount = 0
for tx in self.tx_notifications:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
total_amount += v
self.notify(_("%(txs)s new transactions received. Total amount received in the new transactions %(amount)s") \
% { 'txs' : tx_amount, 'amount' : self.format_amount_and_units(total_amount)})
self.tx_notifications = []
else:
for tx in self.tx_notifications:
if tx:
self.tx_notifications.remove(tx)
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if(v > 0):
self.notify(_("New transaction received. %(amount)s") % { 'amount' : self.format_amount_and_units(v)})
def notify(self, message):
if self.tray:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
fileName = unicode( QFileDialog.getOpenFileName(self, title, directory, filter) )
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
    def getSaveFileName(self, title, filename, filter = ""):
        '''QFileDialog.getSaveFileName wrapper that remembers the last
        directory the user picked (config key "io_dir").'''
        directory = self.config.get('io_dir', unicode(os.path.expanduser('~')))
        path = os.path.join( directory, filename )
        fileName = unicode( QFileDialog.getSaveFileName(self, title, path, filter) )
        if fileName and directory != os.path.dirname(fileName):
            self.config.set_key('io_dir', os.path.dirname(fileName), True)
        return fileName
    def connect_slots(self, sender):
        '''Hook the shared GUI timer's signal to timer_actions.'''
        self.connect(sender, QtCore.SIGNAL('timersignal'), self.timer_actions)
    def timer_actions(self):
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        # resolve aliases
        self.payto_e.resolve()
        # update fee
        if self.require_fee_update:
            self.do_update_fee()
            self.require_fee_update = False
    def format_amount(self, x, is_diff=False, whitespaces=False):
        '''Format a satoshi amount using the configured decimal point and
        zero padding.'''
        return format_satoshis(x, is_diff, self.num_zeros, self.decimal_point, whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = run_hook('format_amount_and_units', amount)
if text and x:
text += ' (%s)'%x
return text
    def get_decimal_point(self):
        '''Accessor passed to amount-edit widgets.'''
        return self.decimal_point
def base_unit(self):
assert self.decimal_point in [2, 5, 8]
if self.decimal_point == 2:
return 'PKBits'
if self.decimal_point == 5:
return 'mPKB'
if self.decimal_point == 8:
return 'PKB'
raise Exception('Unknown base unit')
    def update_status(self):
        '''Refresh the status-bar text/icon and tray tooltip from the
        current network and wallet state.'''
        if not self.wallet:
            return
        if self.network is None or not self.network.is_running():
            text = _("Offline")
            icon = QIcon(":icons/status_disconnected.png")
        elif self.network.is_connected():
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                text = _("Synchronizing...")
                icon = QIcon(":icons/status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging (%d blocks)"%server_lag)
                icon = QIcon(":icons/status_lagging.png")
            else:
                # c/u/x = confirmed, unconfirmed, unmatured balances
                c, u, x = self.wallet.get_account_balance(self.current_account)
                text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, True).strip())
                # append fiat balance and price from exchange rate plugin
                rate = run_hook('get_fiat_status_text', c + u + x)
                if rate:
                    text += rate
                icon = QIcon(":icons/status_connected.png")
        else:
            text = _("Not connected")
            icon = QIcon(":icons/status_disconnected.png")
        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        self.status_button.setIcon( icon )
    def update_wallet(self):
        '''Refresh status and, when safe, all tab views.'''
        self.update_status()
        # avoid redrawing tabs mid-sync unless we're offline anyway
        if self.wallet.up_to_date or not self.network or not self.network.is_connected():
            self.update_tabs()
        if self.wallet.up_to_date:
            self.check_next_account()
def update_tabs(self):
self.history_list.update()
self.receive_list.update()
self.address_list.update()
self.contacts_list.update()
self.invoices_list.update()
self.update_completions()
    def create_history_tab(self):
        '''Build the History tab widget; also stored as self.history_list.'''
        from history_widget import HistoryWidget
        self.history_list = l = HistoryWidget(self)
        return l
    def show_address(self, addr):
        '''Open the modal address-details dialog for *addr*.'''
        import address_dialog
        d = address_dialog.AddressDialog(self, addr)
        d.exec_()
    def show_transaction(self, tx, tx_desc = None):
        '''tx_desc is set only for txs created in the Send tab'''
        show_transaction(tx, self, tx_desc)
    def create_receive_tab(self):
        '''Build the Receive tab: request form (address, description,
        amount, expiration), QR preview and saved-requests list.'''
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_address_e = ButtonsLineEdit()
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        msg = _('ParkByte address where the payment should be received. Note that each payment request uses a different ParkByte address.')
        self.receive_address_label = HelpLabel(_('Receiving address'), msg)
        self.receive_address_e.textChanged.connect(self.update_receive_qr)
        self.receive_address_e.setFocusPolicy(Qt.NoFocus)
        grid.addWidget(self.receive_address_label, 0, 0)
        grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 1, 0)
        grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 2, 0)
        grid.addWidget(self.receive_amount_e, 2, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        self.expires_combo = QComboBox()
        self.expires_combo.addItems(map(lambda x:x[0], expiration_values))
        self.expires_combo.setCurrentIndex(1)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        msg = ' '.join([
            _('Expiration date of your request.'),
            _('This information is seen by the recipient if you send them a signed payment request.'),
            _('Expired requests have to be deleted manually from your list, in order to free the corresponding ParkByte addresses.'),
            _('The parkbyte address never expires and will always be part of this electrum wallet.'),
        ])
        grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
        grid.addWidget(self.expires_combo, 3, 1)
        # expires_label replaces the combo (same grid cell) when an
        # existing saved request is selected
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 3, 1)
        self.save_request_button = QPushButton(_('Save'))
        self.save_request_button.clicked.connect(self.save_payment_request)
        self.new_request_button = QPushButton(_('New'))
        self.new_request_button.clicked.connect(self.new_payment_request)
        self.receive_qr = QRCodeWidget(fixedSize=200)
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.save_request_button)
        buttons.addWidget(self.new_request_button)
        grid.addLayout(buttons, 4, 1, 1, 2)
        self.receive_requests_label = QLabel(_('Requests'))
        self.receive_list = MyTreeWidget(self, self.receive_list_menu, [_('Date'), _('Account'), _('Address'), '', _('Description'), _('Amount'), _('Status')], 4)
        self.receive_list.currentItemChanged.connect(self.receive_item_changed)
        self.receive_list.itemClicked.connect(self.receive_item_changed)
        self.receive_list.setSortingEnabled(True)
        self.receive_list.setColumnWidth(0, 180)
        self.receive_list.hideColumn(1)
        self.receive_list.hideColumn(2)
        self.receive_list.on_update = self.update_receive_tab
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addWidget(self.receive_qr)
        w = QWidget()
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.receive_list)
        vbox.setStretchFactor(self.receive_list, 1000)
        return w
    def receive_item_changed(self, item):
        '''Populate the receive form from the request selected in the
        list (column 2 holds the address).'''
        if item is None:
            return
        if not self.receive_list.isItemSelected(item):
            return
        addr = str(item.text(2))
        req = self.wallet.receive_requests[addr]
        expires = util.age(req['time'] + req['exp']) if req.get('exp') else _('Never')
        amount = req['amount']
        message = self.wallet.labels.get(addr, '')
        self.receive_address_e.setText(addr)
        self.receive_message_e.setText(message)
        self.receive_amount_e.setAmount(amount)
        # show the fixed expiration text instead of the editable combo
        self.expires_combo.hide()
        self.expires_label.show()
        self.expires_label.setText(expires)
        self.new_request_button.setEnabled(True)
    def delete_payment_request(self, item):
        '''Remove the payment request shown in *item* and reset the form.'''
        addr = str(item.text(2))
        self.wallet.remove_payment_request(addr, self.config)
        self.receive_list.update()
        self.clear_receive_tab()
    def get_request_URI(self, addr):
        '''Build the payment URI for the stored request at *addr*,
        including optional time/exp/signature query parameters.'''
        req = self.wallet.receive_requests[addr]
        message = self.wallet.labels.get(addr, '')
        amount = req['amount']
        URI = util.create_URI(addr, amount, message)
        if req.get('time'):
            URI += "&time=%d"%req.get('time')
        if req.get('exp'):
            URI += "&exp=%d"%req.get('exp')
        if req.get('name') and req.get('sig'):
            # Python 2 hex-string codec; signature is base58-encoded
            sig = req.get('sig').decode('hex')
            sig = parkbyte.base_encode(sig, base=58)
            URI += "&name=" + req['name'] + "&sig="+sig
        return str(URI)
def receive_list_menu(self, position):
item = self.receive_list.itemAt(position)
addr = str(item.text(2))
req = self.wallet.receive_requests[addr]
menu = QMenu(self)
menu.addAction(_("Copy Address"), lambda: self.view_and_paste(_('Address'), '', addr))
menu.addAction(_("Copy URI"), lambda: self.view_and_paste('URI', '', self.get_request_URI(addr)))
menu.addAction(_("Save as BIP70 file"), lambda: self.export_payment_request(addr))
menu.addAction(_("Delete"), lambda: self.delete_payment_request(item))
run_hook('receive_list_menu', menu, addr)
menu.exec_(self.receive_list.viewport().mapToGlobal(position))
    def sign_payment_request(self, addr):
        '''Sign the payment request at *addr* with the configured OpenAlias
        key, if the alias address belongs to this wallet.  Silently
        returns when no alias is configured, the alias is not ours, or
        the user declines the password prompt.'''
        alias = self.config.get('alias')
        alias_privkey = None
        if alias and self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            if alias_addr:
                if self.wallet.is_mine(alias_addr):
                    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                    password = self.password_dialog(msg)
                    if password:
                        try:
                            self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                        except Exception as e:
                            self.show_error(str(e))
                            return
                    else:
                        return
                else:
                    return
    def save_payment_request(self):
        '''Persist the receive form as a payment request, sign it if an
        alias is configured, and refresh the views.  Returns False when
        neither a message nor an amount was entered.'''
        addr = str(self.receive_address_e.text())
        amount = self.receive_amount_e.get_amount()
        message = unicode(self.receive_message_e.text())
        if not message and not amount:
            self.show_error(_('No message or amount'))
            return False
        # map the combo index to its expiration in seconds (None = never)
        i = self.expires_combo.currentIndex()
        expiration = map(lambda x: x[1], expiration_values)[i]
        req = self.wallet.make_payment_request(addr, amount, message, expiration)
        self.wallet.add_payment_request(req, self.config)
        self.sign_payment_request(addr)
        self.receive_list.update()
        self.address_list.update()
        self.save_request_button.setEnabled(False)
    def view_and_paste(self, title, msg, data):
        '''Show *data* in a modal dialog with a QR view and a
        copy-to-clipboard button.'''
        dialog = WindowModalDialog(self, title)
        vbox = QVBoxLayout()
        label = QLabel(msg)
        label.setWordWrap(True)
        vbox.addWidget(label)
        pr_e = ShowQRTextEdit(text=data)
        vbox.addWidget(pr_e)
        vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
    def export_payment_request(self, addr):
        '''Serialize the request at *addr* as BIP70 and save it to a
        user-chosen file.'''
        r = self.wallet.receive_requests.get(addr)
        pr = paymentrequest.serialize_request(r).SerializeToString()
        name = r['id'] + '.bip70'
        fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
        if fileName:
            with open(fileName, "wb+") as f:
                f.write(str(pr))
            self.show_message(_("Request saved successfully"))
            self.saved = True
    def new_payment_request(self):
        '''Pick (or create) an unused address and reset the receive form
        for a new payment request.'''
        addr = self.wallet.get_unused_address(self.current_account)
        if addr is None:
            # NOTE(review): Imported_Wallet is not imported in this
            # module; this branch would raise NameError if reached --
            # confirm the intended import from electrum.wallet.
            if isinstance(self.wallet, Imported_Wallet):
                self.show_message(_('No more addresses in your wallet.'))
                return
            if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                return
            addr = self.wallet.create_new_address(self.current_account, False)
        self.set_receive_address(addr)
        self.expires_label.hide()
        self.expires_combo.show()
        self.new_request_button.setEnabled(False)
        self.receive_message_e.setFocus(1)
    def set_receive_address(self, addr):
        '''Show *addr* in the receive form, clearing message and amount.'''
        self.receive_address_e.setText(addr)
        self.receive_message_e.setText('')
        self.receive_amount_e.setAmount(None)
    def clear_receive_tab(self):
        '''Reset the receive form to a fresh unused address (blank when
        none is available) and restore the expiration combo.'''
        addr = self.wallet.get_unused_address(self.current_account)
        self.receive_address_e.setText(addr if addr else '')
        self.receive_message_e.setText('')
        self.receive_amount_e.setAmount(None)
        self.expires_label.hide()
        self.expires_combo.show()
    def toggle_qr_window(self):
        '''Show/hide the detached QR window, remembering its geometry
        across toggles.'''
        import qrwindow
        if not self.qr_window:
            self.qr_window = qrwindow.QR_Window(self)
            self.qr_window.setVisible(True)
            self.qr_window_geometry = self.qr_window.geometry()
        else:
            if not self.qr_window.isVisible():
                self.qr_window.setVisible(True)
                self.qr_window.setGeometry(self.qr_window_geometry)
            else:
                self.qr_window_geometry = self.qr_window.geometry()
                self.qr_window.setVisible(False)
        self.update_receive_qr()
    def receive_at(self, addr):
        '''Switch to the Receive tab showing *addr* (ignores invalid
        addresses).'''
        if not parkbyte.is_address(addr):
            return
        self.tabs.setCurrentIndex(2)
        self.receive_address_e.setText(addr)
        self.new_request_button.setEnabled(True)
def update_receive_tab(self):
# hide receive tab if no receive requests available
b = len(self.wallet.receive_requests) > 0
self.receive_list.setVisible(b)
self.receive_requests_label.setVisible(b)
if not b:
self.expires_label.hide()
self.expires_combo.show()
# check if it is necessary to show the account
self.receive_list.setColumnHidden(1, len(self.wallet.get_accounts()) == 1)
# update the receive address if necessary
current_address = self.receive_address_e.text()
domain = self.wallet.get_account_addresses(self.current_account, include_change=False)
addr = self.wallet.get_unused_address(self.current_account)
if not current_address in domain and addr:
self.set_receive_address(addr)
self.new_request_button.setEnabled(addr != current_address)
# clear the list and fill it again
self.receive_list.clear()
for req in self.wallet.get_sorted_requests(self.config):
address = req['address']
if address not in domain:
continue
timestamp = req.get('time', 0)
amount = req.get('amount')
expiration = req.get('exp', None)
message = req.get('memo', '')
date = format_time(timestamp)
status = req.get('status')
signature = req.get('sig')
requestor = req.get('name', '')
amount_str = self.format_amount(amount) if amount else ""
account = ''
item = QTreeWidgetItem([date, account, address, '', message, amount_str, pr_tooltips.get(status,'')])
if signature is not None:
item.setIcon(3, QIcon(":icons/seal.png"))
item.setToolTip(3, 'signed by '+ requestor)
if status is not PR_UNKNOWN:
item.setIcon(6, QIcon(pr_icons.get(status)))
self.receive_list.addTopLevelItem(item)
    def update_receive_qr(self):
        '''Regenerate the receive-tab QR code (and the detached QR window,
        if visible) from the current form contents.'''
        addr = str(self.receive_address_e.text())
        amount = self.receive_amount_e.get_amount()
        message = unicode(self.receive_message_e.text()).encode('utf8')
        self.save_request_button.setEnabled((amount is not None) or (message != ""))
        uri = util.create_URI(addr, amount, message)
        self.receive_qr.setData(uri)
        if self.qr_window and self.qr_window.isVisible():
            self.qr_window.set_content(addr, amount, message, uri)
    def show_before_broadcast(self):
        '''Return True when transactions should be previewed before
        broadcasting (config key "show_before_broadcast").'''
        return self.config.get('show_before_broadcast', False)
    def set_show_before_broadcast(self, show):
        '''Persist the preview-before-broadcast preference and refresh
        the send-button label.'''
        self.config.set_key('show_before_broadcast', bool(show))
        self.set_send_button_text()
def set_send_button_text(self):
if self.show_before_broadcast():
text = _("Send...")
elif self.wallet and self.wallet.is_watching_only():
text = _("Send...")
else:
text = _("Send")
self.send_button.setText(text)
    def create_send_tab(self):
        """Build and return the Send tab widget.

        Lays out the pay-to / description / from / amount / fee grid, wires
        the editing signals that drive fee recalculation and field colouring,
        and attaches the invoices list underneath.
        """
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.send_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        from paytoedit import PayToEdit
        self.amount_e = BTCAmountEdit(self.get_decimal_point)
        self.payto_e = PayToEdit(self)
        msg = _('Recipient of the funds.') + '\n\n'\
              + _('You may enter a ParkByte address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a ParkByte address)')
        payto_label = HelpLabel(_('Pay to'), msg)
        grid.addWidget(payto_label, 1, 0)
        grid.addWidget(self.payto_e, 1, 1, 1, -1)
        # Auto-complete contact names/aliases while typing in the pay-to field.
        completer = QCompleter()
        completer.setCaseSensitivity(False)
        self.payto_e.setCompleter(completer)
        completer.setModel(self.completions)
        msg = _('Description of the transaction (not mandatory).') + '\n\n'\
              + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
        description_label = HelpLabel(_('Description'), msg)
        grid.addWidget(description_label, 2, 0)
        self.message_e = MyLineEdit()
        grid.addWidget(self.message_e, 2, 1, 1, -1)
        # "From" row: only visible when an explicit coin selection is active.
        self.from_label = QLabel(_('From'))
        grid.addWidget(self.from_label, 3, 0)
        self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
        self.from_list.setHeaderHidden(True)
        self.from_list.setMaximumHeight(80)
        grid.addWidget(self.from_list, 3, 1, 1, -1)
        self.set_pay_from([])
        msg = _('Amount to be sent.') + '\n\n' \
              + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
              + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
              + _('Keyboard shortcut: type "!" to send all your coins.')
        amount_label = HelpLabel(_('Amount'), msg)
        grid.addWidget(amount_label, 4, 0)
        grid.addWidget(self.amount_e, 4, 1)
        msg = _('ParkByte transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
              + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
              + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
        self.fee_e_label = HelpLabel(_('Fee'), msg)
        self.fee_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(self.fee_e_label, 5, 0)
        grid.addWidget(self.fee_e, 5, 1)
        self.send_button = EnterButton(_("Send"), self.do_send)
        self.clear_button = EnterButton(_("Clear"), self.do_clear)
        buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.send_button)
        buttons.addWidget(self.clear_button)
        grid.addLayout(buttons, 6, 1, 1, 2)
        def on_shortcut():
            # Handler for the "!" shortcut: spend the maximum available.
            inputs = self.get_coins()
            sendable = sum(map(lambda x:x['value'], inputs))
            fee = self.fee_e.get_amount() if self.fee_e.isModified() else None
            addr = self.get_payto_or_dummy()
            amount, fee = self.wallet.get_max_amount(self.config, inputs, addr, fee)
            if not self.fee_e.isModified():
                self.fee_e.setAmount(fee)
            self.amount_e.setAmount(amount)
            self.not_enough_funds = (fee + amount > sendable)
            # emit signal for fiat_amount update
            self.amount_e.textEdited.emit("")
        self.amount_e.shortcut.connect(on_shortcut)
        self.payto_e.textChanged.connect(self.update_fee)
        self.amount_e.textEdited.connect(self.update_fee)
        self.fee_e.textEdited.connect(self.update_fee)
        # This is so that when the user blanks the fee and moves on,
        # we go back to auto-calculate mode and put a fee back.
        self.fee_e.editingFinished.connect(self.update_fee)
        def entry_changed():
            # Recolour the amount/fee fields and show a status-bar warning
            # according to fund availability and which field was edited.
            text = ""
            if self.not_enough_funds:
                amt_color, fee_color = RED_FG, RED_FG
                text = _( "Not enough funds" )
                c, u, x = self.wallet.get_frozen_balance()
                if c+u+x:
                    text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
            elif self.fee_e.isModified():
                amt_color, fee_color = BLACK_FG, BLACK_FG
            elif self.amount_e.isModified():
                amt_color, fee_color = BLACK_FG, BLUE_FG
            else:
                amt_color, fee_color = BLUE_FG, BLUE_FG
            self.statusBar().showMessage(text)
            self.amount_e.setStyleSheet(amt_color)
            self.fee_e.setStyleSheet(fee_color)
        self.amount_e.textChanged.connect(entry_changed)
        self.fee_e.textChanged.connect(entry_changed)
        # Invoices list lives below the send grid.
        self.invoices_label = QLabel(_('Invoices'))
        self.invoices_list = MyTreeWidget(self, self.invoices_list_menu,
                                          [_('Expires'), _('Requestor'), _('Description'), _('Amount'), _('Status')], 2)
        self.invoices_list.setSortingEnabled(True)
        self.invoices_list.header().setResizeMode(1, QHeaderView.Interactive)
        self.invoices_list.setColumnWidth(1, 200)
        self.invoices_list.on_update = self.update_invoices_list
        vbox0 = QVBoxLayout()
        vbox0.addLayout(grid)
        hbox = QHBoxLayout()
        hbox.addLayout(vbox0)
        w = QWidget()
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.invoices_label)
        vbox.addWidget(self.invoices_list)
        vbox.setStretchFactor(self.invoices_list, 1000)
        # Defer this until grid is parented to avoid ugly flash during startup
        self.update_fee_edit()
        run_hook('create_send_tab', grid)
        return w
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
return self.payto_e.payto_address if self.payto_e.payto_address else self.wallet.dummy_address()
    def do_update_fee(self):
        '''Recalculate the fee. If the fee was manually input, retain it, but
        still build the TX to see if there are enough funds.
        '''
        # The fee is "frozen" when the user edited it and it is either
        # non-empty or still has focus.
        freeze_fee = (self.fee_e.isModified()
                      and (self.fee_e.text() or self.fee_e.hasFocus()))
        amount = self.amount_e.get_amount()
        if amount is None:
            if not freeze_fee:
                self.fee_e.setAmount(None)
            self.not_enough_funds = False
        else:
            fee = self.fee_e.get_amount() if freeze_fee else None
            outputs = self.payto_e.get_outputs()
            if not outputs:
                # Nothing parseable in the pay-to field yet: size the tx
                # with a stand-in single output instead.
                addr = self.get_payto_or_dummy()
                outputs = [(TYPE_ADDRESS, addr, amount)]
            try:
                tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
                self.not_enough_funds = False
            except NotEnoughFunds:
                self.not_enough_funds = True
            if not freeze_fee:
                # When funds ran short `tx` is unbound, but the conditional
                # short-circuits to None before touching it.
                fee = None if self.not_enough_funds else self.wallet.get_tx_fee(tx)
                self.fee_e.setAmount(fee)
def update_fee_edit(self):
b = self.config.get('can_edit_fees', False)
self.fee_e.setVisible(b)
self.fee_e_label.setVisible(b)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
    def set_pay_from(self, domain = None):
        # Set the explicit coin selection for the send tab.  The empty list
        # is special-cased deliberately: it clears the selection, whereas any
        # other value (including None) is forwarded to the wallet, where None
        # means "all addresses".
        self.pay_from = [] if domain == [] else self.wallet.get_spendable_coins(domain)
        self.redraw_from_list()
    def redraw_from_list(self):
        """Repaint the 'pay from' coin list; hide it when no coins are selected."""
        self.from_list.clear()
        self.from_label.setHidden(len(self.pay_from) == 0)
        self.from_list.setHidden(len(self.pay_from) == 0)
        def format(x):
            # Render a coin as "abcdefgh...ijklmnop:n <tab> address".
            h = x.get('prevout_hash')
            return h[0:8] + '...' + h[-8:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
        for item in self.pay_from:
            self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, value = self.contacts.get(key)
return key + ' <' + value + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
    def protected(func):
        '''Password request wrapper. The password is passed to the function
        as the 'password' named argument. "None" indicates either an
        unencrypted wallet, or the user cancelled the password request.
        An empty input is passed as the empty string.'''
        def request_password(self, *args, **kwargs):
            parent = self.top_level_window()
            password = None
            # Prompt until the password verifies.  Note the `break` sits
            # outside the `if`, so a cancelled dialog (falsy password) also
            # exits the loop and passes the falsy value through.
            while self.wallet.use_encryption:
                password = self.password_dialog(parent=parent)
                try:
                    if password:
                        self.wallet.check_password(password)
                    break
                except Exception as e:
                    # Wrong password: report and prompt again.
                    self.show_error(str(e), parent=parent)
                    continue
            kwargs['password'] = password
            return func(self, *args, **kwargs)
        return request_password
def read_send_tab(self):
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = unicode( self.message_e.text() )
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs()
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "%s" could not be validated via an additional security check, DNSSEC, and thus may not be correct.'%alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if addr is None:
self.show_error(_('ParkByte Address is None'))
return
if _type == TYPE_ADDRESS and not parkbyte.is_address(addr):
self.show_error(_('Invalid ParkByte Address'))
return
if amount is None:
self.show_error(_('Invalid Amount'))
return
fee = self.fee_e.get_amount()
if fee is None:
self.show_error(_('Invalid Fee'))
return
coins = self.get_coins()
return outputs, fee, label, coins
    def do_send(self):
        """Read the send tab, build the transaction, then sign and broadcast.

        Honours the 'show before broadcast' preference, enforces the relay
        fee, warns on unusually high fees, and asks for the wallet password
        when encryption is enabled.
        """
        if run_hook('abort_send', self):
            return
        r = self.read_send_tab()
        if not r:
            return
        outputs, fee, tx_desc, coins = r
        amount = sum(map(lambda x:x[2], outputs))
        try:
            tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
        except NotEnoughFunds:
            self.show_message(_("Insufficient funds"))
            return
        except BaseException as e:
            traceback.print_exc(file=sys.stdout)
            self.show_message(str(e))
            return
        # Reject fees below the relay threshold for txs that require a fee.
        if tx.get_fee() < self.wallet.relayfee() and tx.requires_fee(self.wallet):
            self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
            return
        if self.show_before_broadcast():
            # Preview mode: open the transaction dialog instead of sending.
            self.show_transaction(tx, tx_desc)
            return
        # confirmation dialog
        confirm_amount = self.config.get('confirm_amount', COIN)  # NOTE(review): read but unused here
        msg = [
            _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
            _("Mining fee") + ": " + self.format_amount_and_units(fee),
        ]
        extra_fee = run_hook('get_additional_fee', self.wallet, tx)
        if extra_fee:
            msg.append( _("Additional fees") + ": " + self.format_amount_and_units(extra_fee) )
        if tx.get_fee() >= self.config.get('confirm_fee', 100000):
            msg.append(_('Warning')+ ': ' + _("The fee for this transaction seems unusually high."))
        if self.wallet.use_encryption:
            msg.append("")
            msg.append(_("Enter your password to proceed"))
            password = self.password_dialog('\n'.join(msg))
            if not password:
                return
        else:
            msg.append(_('Proceed?'))
            password = None
            if not self.question('\n'.join(msg)):
                return
        def sign_done(success):
            # Incomplete (e.g. multisig) txs are shown for further signing;
            # complete ones are broadcast straight away.
            if success:
                if not tx.is_complete():
                    self.show_transaction(tx)
                    self.do_clear()
                else:
                    self.broadcast_transaction(tx, tx_desc)
        self.sign_tx_with_password(tx, sign_done, password)
    @protected
    def sign_tx(self, tx, callback, password):
        # Thin wrapper: @protected prompts for the wallet password and passes
        # it in; the actual work is delegated to sign_tx_with_password.
        self.sign_tx_with_password(tx, callback, password)
    def sign_tx_with_password(self, tx, callback, password):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        if self.wallet.use_encryption and not password:
            callback(False) # User cancelled password input
            return
        # call hook to see if plugin needs gui interaction
        run_hook('sign_tx', self, tx)
        def on_signed(result):
            # Success path of the background task.
            callback(True)
        def on_failed(exc_info):
            # Report the error, then signal failure to the caller.
            self.on_error(exc_info)
            callback(False)
        task = partial(self.wallet.sign_transaction, tx, password)
        # WaitingDialog runs `task` off the GUI thread and shows a spinner.
        WaitingDialog(self, _('Signing transaction...'), task,
                      on_signed, on_failed)
    def broadcast_transaction(self, tx, tx_desc):
        """Broadcast tx on a background thread and report the outcome.

        When a BIP70 payment request is pending, marks it paid and sends
        the payment ACK with a refund address after a successful broadcast.
        """
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Payment request has expired")
            status, msg = self.network.broadcast(tx)
            if pr and status is True:
                pr.set_paid(tx.hash())
                self.invoices.save()
                self.payment_request = None
                refund_address = self.wallet.addresses()[0]
                ack_status, ack_msg = pr.send_ack(str(tx), refund_address)
                if ack_status:
                    # Prefer the merchant's ACK message over the node reply.
                    msg = ack_msg
            return status, msg
        # Capture current TL window; override might be removed on return
        parent = self.top_level_window()
        def broadcast_done(result):
            # GUI thread
            if result:
                status, msg = result
                if status:
                    if tx_desc is not None and tx.is_complete():
                        self.wallet.set_label(tx.hash(), tx_desc)
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoices_list.update()
                    self.do_clear()
                else:
                    parent.show_error(msg)
        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
    def query_choice(self, msg, choices):
        """Show a modal list of choices and return the selected index.

        NOTE(review): the dialog has only an OK button; closing it still
        returns the current selection (no cancel distinction).
        """
        # Needed by QtHandler for hardware wallets
        dialog = WindowModalDialog(self.top_level_window())
        clayout = ChoicesLayout(msg, choices)
        vbox = QVBoxLayout(dialog)
        vbox.addLayout(clayout.layout())
        vbox.addLayout(Buttons(OkButton(dialog)))
        dialog.exec_()
        return clayout.selected_index()
def prepare_for_payment_request(self):
self.tabs.setCurrentIndex(1)
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.payto_e.setText(_("please wait..."))
return True
    def payment_request_ok(self):
        """Populate the send tab from a verified payment request.

        Stores the request in the invoice list; already-paid invoices are
        reported and the tab is cleared instead.
        """
        pr = self.payment_request
        key = self.invoices.add(pr)
        status = self.invoices.get_status(key)
        self.invoices_list.update()
        if status == PR_PAID:
            self.show_message("invoice already paid")
            self.do_clear()
            self.payment_request = None
            return
        self.payto_e.is_pr = True
        # Colour the pay-to field: green = valid request, red-ish = expired.
        if not pr.has_expired():
            self.payto_e.setGreen()
        else:
            self.payto_e.setExpired()
        self.payto_e.setText(pr.get_requestor())
        self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
        self.message_e.setText(pr.get_memo())
        # signal to set fee
        self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
    def on_pr(self, request):
        # Callback invoked with a fetched BIP70 payment request (passed to
        # util.parse_URI in pay_to_URI); presumably called off the GUI
        # thread, so the verification outcome is relayed via Qt signals —
        # confirm against the network layer.
        self.payment_request = request
        if self.payment_request.verify(self.contacts):
            self.emit(SIGNAL('payment_request_ok'))
        else:
            self.emit(SIGNAL('payment_request_error'))
    def pay_to_URI(self, URI):
        """Fill the send tab from a 'parkbyte:' URI.

        A URI carrying a request URL ('r') or a signed request ('name'+'sig')
        switches to payment-request mode; otherwise address/amount/message
        are copied into the send fields directly.
        """
        if not URI:
            return
        try:
            # on_pr is invoked later if the URI references a payment request.
            out = util.parse_URI(unicode(URI), self.on_pr)
        except BaseException as e:
            self.show_error(_('Invalid parkbyte URI:') + '\n' + str(e))
            return
        self.tabs.setCurrentIndex(1)
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            # Payment-request flow: freeze fields and wait for the fetch.
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            # Trigger the fee recalculation/fiat update signal chain.
            self.amount_e.textEdited.emit("")
def do_clear(self):
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fee_e]:
e.setText('')
e.setFrozen(False)
self.set_pay_from([])
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.update_fee()
def create_list_tab(self, l):
w = QWidget()
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setMargin(0)
vbox.setSpacing(0)
vbox.addWidget(l)
buttons = QWidget()
vbox.addWidget(buttons)
return w
def create_addresses_tab(self):
l = MyTreeWidget(self, self.create_receive_menu, [ _('Address'), _('Label'), _('Balance'), _('Tx')], 1)
l.setSelectionMode(QAbstractItemView.ExtendedSelection)
l.on_update = self.update_address_tab
self.address_list = l
return self.create_list_tab(l)
def create_contacts_tab(self):
l = MyTreeWidget(self, self.create_contact_menu, [_('Name'), _('Value'), _('Type')], 1, [0, 1])
l.setSelectionMode(QAbstractItemView.ExtendedSelection)
l.setSortingEnabled(True)
l.on_edited = self.on_contact_edited
l.on_permit_edit = self.on_permit_contact_edit
l.on_update = self.update_contacts_tab
self.contacts_list = l
return self.create_list_tab(l)
def update_invoices_list(self):
inv_list = self.invoices.sorted_list()
l = self.invoices_list
l.clear()
for pr in inv_list:
key = pr.get_id()
status = self.invoices.get_status(key)
requestor = pr.get_requestor()
exp = pr.get_expiration_date()
date_str = util.format_time(exp) if exp else _('Never')
item = QTreeWidgetItem([date_str, requestor, pr.memo, self.format_amount(pr.get_amount(), whitespaces=True), pr_tooltips.get(status,'')])
item.setIcon(4, QIcon(pr_icons.get(status)))
item.setData(0, Qt.UserRole, key)
item.setFont(1, QFont(MONOSPACE_FONT))
item.setFont(3, QFont(MONOSPACE_FONT))
l.addTopLevelItem(item)
l.setCurrentItem(l.topLevelItem(0))
self.invoices_list.setVisible(len(inv_list))
self.invoices_label.setVisible(len(inv_list))
def delete_imported_key(self, addr):
if self.question(_("Do you want to remove")+" %s "%addr +_("from your wallet?")):
self.wallet.delete_imported_key(addr)
self.address_list.update()
self.history_list.update()
def edit_account_label(self, k):
text, ok = QInputDialog.getText(self, _('Rename account'), _('Name') + ':', text = self.wallet.labels.get(k,''))
if ok:
label = unicode(text)
self.wallet.set_label(k,label)
self.address_list.update()
def account_set_expanded(self, item, k, b):
item.setExpanded(b)
self.accounts_expanded[k] = b
    def create_account_menu(self, position, k, item):
        """Context menu for an account header row in the address list."""
        menu = QMenu()
        exp = item.isExpanded()
        menu.addAction(_("Minimize") if exp else _("Maximize"), lambda: self.account_set_expanded(item, k, not exp))
        menu.addAction(_("Rename"), lambda: self.edit_account_label(k))
        # Older seed versions presumably lack per-account details — confirm.
        if self.wallet.seed_version > 4:
            menu.addAction(_("View details"), lambda: self.show_account_details(k))
        menu.exec_(self.address_list.viewport().mapToGlobal(position))
    def create_receive_menu(self, position):
        """Context menu for the address list.

        Single selection on a non-address row (an account header) delegates
        to the account menu; otherwise offers per-address and bulk
        (freeze/unfreeze/send-from) actions.
        """
        selected = self.address_list.selectedItems()
        multi_select = len(selected) > 1
        addrs = [unicode(item.text(0)) for item in selected]
        if not multi_select:
            item = self.address_list.itemAt(position)
            if not item:
                return
            addr = addrs[0]
            if not is_valid(addr):
                # Not an address row: either an account header (has data in
                # role 32) or a grouping row that just toggles expansion.
                k = str(item.data(0,32).toString())
                if k:
                    self.create_account_menu(position, k, item)
                else:
                    item.setExpanded(not item.isExpanded())
                return
        menu = QMenu()
        if not multi_select:
            # Single-address actions.
            menu.addAction(_("Copy to clipboard"), lambda: self.app.clipboard().setText(addr))
            menu.addAction(_("Request payment"), lambda: self.receive_at(addr))
            menu.addAction(_("Edit label"), lambda: self.address_list.editItem(item, self.address_list.editable_columns[0]))
            menu.addAction(_('History'), lambda: self.show_address(addr))
            menu.addAction(_('Public Keys'), lambda: self.show_public_keys(addr))
            if self.wallet.can_export():
                menu.addAction(_("Private key"), lambda: self.show_private_key(addr))
            if not self.wallet.is_watching_only():
                menu.addAction(_("Sign/verify message"), lambda: self.sign_verify_message(addr))
                menu.addAction(_("Encrypt/decrypt message"), lambda: self.encrypt_message(addr))
            if self.wallet.is_imported(addr):
                menu.addAction(_("Remove from wallet"), lambda: self.delete_imported_key(addr))
            addr_URL = block_explorer_URL(self.config, 'addr', addr)
            if addr_URL:
                menu.addAction(_("View on block explorer"), lambda: webbrowser.open(addr_URL))
        # Bulk actions apply to however many addresses are selected.
        if any(not self.wallet.is_frozen(addr) for addr in addrs):
            menu.addAction(_("Freeze"), lambda: self.set_frozen_state(addrs, True))
        if any(self.wallet.is_frozen(addr) for addr in addrs):
            menu.addAction(_("Unfreeze"), lambda: self.set_frozen_state(addrs, False))
        def can_send(addr):
            # Spendable = not frozen and has confirmed+unconfirmed balance.
            return not self.wallet.is_frozen(addr) and sum(self.wallet.get_addr_balance(addr)[:2])
        if any(can_send(addr) for addr in addrs):
            menu.addAction(_("Send From"), lambda: self.send_from_addresses(addrs))
        run_hook('receive_menu', menu, addrs, self.wallet)
        menu.exec_(self.address_list.viewport().mapToGlobal(position))
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
domain = self.wallet.get_account_addresses(self.current_account)
return self.wallet.get_spendable_coins(domain)
def send_from_addresses(self, addrs):
self.set_pay_from(addrs)
self.tabs.setCurrentIndex(1)
self.update_fee()
    def paytomany(self):
        """Switch the send tab into multi-output mode and explain the format."""
        self.tabs.setCurrentIndex(1)
        self.payto_e.paytomany()
        msg = '\n'.join([
            _('Enter a list of outputs in the \'Pay to\' field.'),
            _('One output per line.'),
            _('Format: address, amount'),
            _('You may load a CSV file using the file icon.')
        ])
        self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.tabs.setCurrentIndex(1)
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def on_permit_contact_edit(self, item, column):
# openalias items shouldn't be editable
return item.text(2) != "openalias"
    def on_contact_edited(self, item, column, prior):
        # Commit an in-place edit of a contact row.  A rename (column 0)
        # first drops the entry stored under the old name.
        if column == 0: # Remove old contact if renamed
            self.contacts.pop(prior)
        self.set_contact(unicode(item.text(0)), unicode(item.text(1)))
    def set_contact(self, label, address):
        """Store or overwrite a contact; return False for an invalid address."""
        if not is_valid(address):
            self.show_error(_('Invalid Address'))
            self.contacts_list.update() # Displays original unchanged value
            return False
        self.contacts[label] = ('address', address)
        # Refresh every view that renders contact names.
        self.contacts_list.update()
        self.history_list.update()
        self.update_completions()
        return True
def delete_contacts(self, labels):
if not self.question(_("Remove %s from your list of contacts?")
% " + ".join(labels)):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contacts_list.update()
self.update_completions()
    def create_contact_menu(self, position):
        """Context menu for the contacts list.

        With no selection, offers only 'New contact'; otherwise copy / pay /
        delete actions for the selected rows, plus a block-explorer link for
        plain-address contacts.
        """
        menu = QMenu()
        selected = self.contacts_list.selectedItems()
        if not selected:
            menu.addAction(_("New contact"), lambda: self.new_contact_dialog())
        else:
            # Columns: 0 = name, 1 = value (address), 2 = type.
            labels = [unicode(item.text(0)) for item in selected]
            addrs = [unicode(item.text(1)) for item in selected]
            types = [unicode(item.text(2)) for item in selected]
            menu.addAction(_("Copy to Clipboard"), lambda:
                           self.app.clipboard().setText('\n'.join(labels)))
            menu.addAction(_("Pay to"), lambda: self.payto_contacts(labels))
            menu.addAction(_("Delete"), lambda: self.delete_contacts(labels))
            URLs = []
            for (addr, _type) in zip(addrs, types):
                if _type == 'address':
                    URLs.append(block_explorer_URL(self.config, 'addr', addr))
            if URLs:
                menu.addAction(_("View on block explorer"),
                               lambda: map(webbrowser.open, URLs))
        run_hook('create_contact_menu', menu, selected)
        menu.exec_(self.contacts_list.viewport().mapToGlobal(position))
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
    def show_pr_details(self, pr):
        """Show a read-only modal dialog with the payment request's details."""
        d = WindowModalDialog(self, _("Invoice"))
        vbox = QVBoxLayout(d)
        grid = QGridLayout()
        grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
        grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
        grid.addWidget(QLabel(_("Expires") + ':'), 1, 0)
        grid.addWidget(QLabel(format_time(pr.get_expiration_date())), 1, 1)
        grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
        grid.addWidget(QLabel(pr.get_memo()), 2, 1)
        grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
        grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
        grid.addWidget(QLabel(_("Payment URL") + ':'), 4, 0)
        grid.addWidget(QLabel(pr.payment_url), 4, 1)
        grid.addWidget(QLabel(_("Outputs") + ':'), 5, 0)
        # One "address amount unit" line per output.
        outputs_str = '\n'.join(map(lambda x: x[1] + ' ' + self.format_amount(x[2])+ self.base_unit(), pr.get_outputs()))
        grid.addWidget(QLabel(outputs_str), 5, 1)
        if pr.tx:
            # Paid requests carry the id of the paying transaction.
            grid.addWidget(QLabel(_("Transaction ID") + ':'), 6, 0)
            l = QLineEdit(pr.tx)
            l.setReadOnly(True)
            grid.addWidget(l, 6, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()
        return
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
    def invoices_list_menu(self, position):
        """Context menu for the invoices list: details / pay / delete."""
        item = self.invoices_list.itemAt(position)
        if not item:
            return
        # The invoice id was stashed in data role 32 (Qt.UserRole).
        key = str(item.data(0, 32).toString())
        pr = self.invoices.get(key)
        status = self.invoices.get_status(key)
        menu = QMenu()
        menu.addAction(_("Details"), lambda: self.show_invoice(key))
        if status == PR_UNPAID:
            menu.addAction(_("Pay Now"), lambda: self.do_pay_invoice(key))
        def delete_invoice(key):
            self.invoices.remove(key)
            self.invoices_list.update()
        menu.addAction(_("Delete"), lambda: delete_invoice(key))
        menu.exec_(self.invoices_list.viewport().mapToGlobal(position))
    def update_address_tab(self):
        """Rebuild the Addresses tab tree.

        Shows all accounts or only the selected one; inside each account,
        receiving/change branches, with used addresses grouped under a
        'Used' node.  The previously selected address is restored.
        """
        l = self.address_list
        item = l.currentItem()
        # Remember the selection so it can be restored after the rebuild.
        current_address = item.data(0, Qt.UserRole).toString() if item else None
        l.clear()
        accounts = self.wallet.get_accounts()
        if self.current_account is None:
            account_items = sorted(accounts.items())
        else:
            account_items = [(self.current_account, accounts.get(self.current_account))]
        for k, account in account_items:
            if len(accounts) > 1:
                # Top-level per-account row with the aggregate balance.
                name = self.wallet.get_account_name(k)
                c, u, x = self.wallet.get_account_balance(k)
                account_item = QTreeWidgetItem([ name, '', self.format_amount(c + u + x), ''])
                account_item.setExpanded(self.accounts_expanded.get(k, True))
                account_item.setData(0, Qt.UserRole, k)
                l.addTopLevelItem(account_item)
            else:
                # Single account: attach rows directly to the tree root.
                account_item = l
            sequences = [0,1] if account.has_change() else [0]
            for is_change in sequences:
                if len(sequences) > 1:
                    name = _("Receiving") if not is_change else _("Change")
                    seq_item = QTreeWidgetItem( [ name, '', '', '', ''] )
                    account_item.addChild(seq_item)
                    if not is_change:
                        seq_item.setExpanded(True)
                else:
                    seq_item = account_item
                used_item = QTreeWidgetItem( [ _("Used"), '', '', '', ''] )
                used_flag = False
                addr_list = account.get_addresses(is_change)
                for address in addr_list:
                    num = len(self.wallet.history.get(address,[]))
                    is_used = self.wallet.is_used(address)
                    label = self.wallet.labels.get(address,'')
                    c, u, x = self.wallet.get_addr_balance(address)
                    balance = self.format_amount(c + u + x)
                    item = QTreeWidgetItem([address, label, balance, "%d"%num])
                    item.setFont(0, QFont(MONOSPACE_FONT))
                    item.setData(0, Qt.UserRole, address)
                    item.setData(0, Qt.UserRole+1, True) # label can be edited
                    # Frozen = light blue, beyond gap limit = red.
                    if self.wallet.is_frozen(address):
                        item.setBackgroundColor(0, QColor('lightblue'))
                    if self.wallet.is_beyond_limit(address, account, is_change):
                        item.setBackgroundColor(0, QColor('red'))
                    if is_used:
                        # Insert the 'Used' group lazily, on the first hit.
                        if not used_flag:
                            seq_item.insertChild(0, used_item)
                            used_flag = True
                        used_item.addChild(item)
                    else:
                        seq_item.addChild(item)
                    if address == current_address:
                        l.setCurrentItem(item)
    def update_contacts_tab(self):
        """Rebuild the contacts list, preserving the current selection."""
        l = self.contacts_list
        item = l.currentItem()
        current_key = item.data(0, Qt.UserRole).toString() if item else None
        l.clear()
        for key in sorted(self.contacts.keys()):
            _type, value = self.contacts[key]
            item = QTreeWidgetItem([key, value, _type])
            item.setData(0, Qt.UserRole, key)
            l.addTopLevelItem(item)
            if key == current_key:
                l.setCurrentItem(item)
        # Let plugins append/alter rows.
        run_hook('update_contacts_tab', l)
def create_console_tab(self):
from console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'parkbyte':parkbyte})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: apply( f, (method, args, self.password_dialog ))
for m in dir(c):
if m[0]=='_' or m in ['network','wallet']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
    def change_account(self,s):
        """Handle account-selector changes; `s` is the displayed account name."""
        if s == _("All accounts"):
            self.current_account = None
        else:
            # Reverse-lookup the account key from its display name.
            accounts = self.wallet.get_account_names()
            for k, v in accounts.items():
                if v == s:
                    self.current_account = k
        # Refresh every account-scoped view.
        self.history_list.update()
        self.update_status()
        self.address_list.update()
        self.receive_list.update()
    def create_status_bar(self):
        """Assemble the status bar: balance label, account selector, search
        box, and the password / preferences / seed / network buttons."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        qtVersion = qVersion()  # NOTE(review): assigned but unused here
        self.balance_label = QLabel("")
        sb.addWidget(self.balance_label)
        self.account_selector = QComboBox()
        self.account_selector.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self.connect(self.account_selector, SIGNAL("activated(QString)"), self.change_account)
        sb.addPermanentWidget(self.account_selector)
        # Search box starts hidden; toggled via toggle_search().
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        # Icon is filled in later by update_lock_icon().
        self.lock_icon = QIcon()
        self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
        sb.addPermanentWidget(self.seed_button)
        self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), self.run_network_dialog )
        sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.use_encryption else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.set_send_button_text()
    def change_password_dialog(self):
        """Run the change-password dialog and update the wallet encryption.

        An empty new password disables encryption; the current password is
        verified before the update is attempted.
        """
        from password_dialog import PasswordDialog, PW_CHANGE
        msg = (_('Your wallet is encrypted. Use this dialog to change your '
                 'password. To disable wallet encryption, enter an empty new '
                 'password.') if self.wallet.use_encryption
               else _('Your wallet keys are not encrypted'))
        d = PasswordDialog(self, self.wallet, msg, PW_CHANGE)
        ok, password, new_password = d.run()
        if not ok:
            return
        try:
            # Verify the old password first so the failure mode is clear.
            self.wallet.check_password(password)
        except BaseException as e:
            self.show_error(str(e))
            return
        try:
            self.wallet.update_password(password, new_password)
        except:
            traceback.print_exc(file=sys.stdout)
            self.show_error(_('Failed to update password'))
            return
        if new_password:
            msg = _('Password was updated successfully')
        else:
            msg = _('This wallet is not encrypted')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
i = self.tabs.currentIndex()
if i == 0:
self.history_list.filter(t, [2, 3, 4]) # Date, Description, Amount
elif i == 1:
self.invoices_list.filter(t, [0, 1, 2, 3]) # Date, Requestor, Description, Amount
elif i == 2:
self.receive_list.filter(t, [0, 1, 2, 3, 4]) # Date, Account, Address, Description, Amount
elif i == 3:
self.address_list.filter(t, [0,1, 2]) # Address, Label, Balance
elif i == 4:
self.contacts_list.filter(t, [0, 1]) # Key, Value
    def new_contact_dialog(self):
        """Modal dialog asking for a new contact's address and name; on OK,
        stores the contact and switches to the Contacts tab."""
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        line1 = QLineEdit()  # address field
        line1.setFixedWidth(280)
        line2 = QLineEdit()  # name field
        line2.setFixedWidth(280)
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        # set_contact validates the address and returns False on failure.
        if self.set_contact(unicode(line2.text()), str(line1.text())):
            self.tabs.setCurrentIndex(4)
def update_new_account_menu(self):
self.new_account_menu.setVisible(self.wallet.can_create_accounts())
self.new_account_menu.setEnabled(self.wallet.permit_account_naming())
self.update_account_selector()
    def new_account_dialog(self):
        """Ask for a name for the most recently created account and label it."""
        dialog = WindowModalDialog(self, _("New Account Name"))
        vbox = QVBoxLayout()
        msg = _("Enter a name to give the account. You will not be "
                "permitted to create further accounts until the new account "
                "receives at least one transaction.") + "\n"
        label = QLabel(msg)
        label.setWordWrap(True)
        vbox.addWidget(label)
        e = QLineEdit()
        vbox.addWidget(e)
        vbox.addLayout(Buttons(CancelButton(dialog), OkButton(dialog)))
        dialog.setLayout(vbox)
        if dialog.exec_():
            # The name is attached to the wallet's newest account id.
            self.wallet.set_label(self.wallet.last_account_id(), str(e.text()))
            self.address_list.update()
            self.tabs.setCurrentIndex(3)
            self.update_new_account_menu()
def check_next_account(self):
    """If every account has seen transactions, kick off discovery of the next one.

    self.checking_accounts guards against starting a second check while one
    is already in progress.
    """
    if self.wallet.needs_next_account() and not self.checking_accounts:
        self.checking_accounts = True
        msg = _("All the accounts in your wallet have received "
                "transactions. Electrum must check whether more "
                "accounts exist; one will only be shown if "
                "it has been used or you give it a name.")
        self.show_message(msg, title=_("Check Accounts"))
        self.create_next_account()
@protected
def create_next_account(self, password):
    """Create the wallet's next account on the wallet worker thread.

    `password` is injected by the @protected decorator (it prompts the user).
    The on_done callback clears the in-progress flag set by check_next_account().
    """
    def on_done():
        self.checking_accounts = False
        self.update_new_account_menu()
    task = partial(self.wallet.create_next_account, password)
    self.wallet.thread.add(task, on_done=on_done)
def show_master_public_keys(self):
    """Display the wallet's master public key(s) in a modal dialog.

    When the wallet has several MPKs (e.g. multisig) a radio-button chooser
    is shown; selecting an entry updates the QR/text widget below it.
    """
    dialog = WindowModalDialog(self, "Master Public Keys")
    mpk_dict = self.wallet.get_master_public_keys()
    vbox = QVBoxLayout()
    mpk_text = ShowQRTextEdit()
    mpk_text.setMaximumHeight(100)
    mpk_text.addCopyButton(self.app)
    sorted_keys = sorted(mpk_dict.keys())
    def show_mpk(index):
        # Fill the text widget with the MPK selected by the chooser.
        mpk_text.setText(mpk_dict[sorted_keys[index]])
    # only show the combobox in case multiple accounts are available
    if len(mpk_dict) > 1:
        def label(key):
            if isinstance(self.wallet, Multisig_Wallet):
                # dict.has_key() is deprecated (and removed in Python 3);
                # `in` is the equivalent membership test. True indexes to
                # "self", False to "cosigner".
                is_mine = key in self.wallet.master_private_keys
                mine_text = [_("cosigner"), _("self")]
                return "%s (%s)" % (key, mine_text[is_mine])
            return key
        labels = list(map(label, sorted_keys))
        on_click = lambda clayout: show_mpk(clayout.selected_index())
        labels_clayout = ChoicesLayout(_("Master Public Keys"), labels,
                                       on_click)
        vbox.addLayout(labels_clayout.layout())
    show_mpk(0)
    vbox.addWidget(mpk_text)
    vbox.addLayout(Buttons(CloseButton(dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
@protected
def show_seed_dialog(self, password):
    """Show the wallet's mnemonic seed after password verification.

    `password` is injected by @protected; it is None when the user cancelled
    the password prompt on an encrypted wallet.
    """
    if self.wallet.use_encryption and password is None:
        return  # User cancelled password input
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    try:
        mnemonic = self.wallet.get_mnemonic(password)
    except BaseException as e:
        # e.g. wrong password
        self.show_error(str(e))
        return
    from seed_dialog import SeedDialog
    d = SeedDialog(self, mnemonic, self.wallet.has_imported_keys())
    d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
    """Render *data* as a QR code in a modal dialog; no-op on empty data.

    NOTE: the default `title` is translated once at import time (def-time
    default evaluation), not per call.
    """
    if not data:
        return
    d = QRDialog(data, parent or self, title)
    d.exec_()
def show_public_keys(self, address):
    """Show the public key(s) for *address* (plus its derivation path, if any)."""
    if not address: return
    try:
        pubkey_list = self.wallet.get_public_keys(address)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        self.show_message(str(e))
        return
    d = WindowModalDialog(self, _("Public key"))
    d.setMinimumSize(600, 200)
    vbox = QVBoxLayout()
    vbox.addWidget( QLabel(_("Address") + ': ' + address))
    # Deterministic wallets can also report the BIP32 derivation of the address.
    if isinstance(self.wallet, BIP32_RD_Wallet):
        derivation = self.wallet.address_id(address)
        vbox.addWidget(QLabel(_("Derivation") + ': ' + derivation))
    vbox.addWidget(QLabel(_("Public key") + ':'))
    keys_e = ShowQRTextEdit(text='\n'.join(pubkey_list))
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
@protected
def show_private_key(self, address, password):
    """Show the private key(s) for *address*; `password` injected by @protected."""
    if not address: return
    try:
        pk_list = self.wallet.get_private_key(address, password)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        self.show_message(str(e))
        return
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 200)
    vbox = QVBoxLayout()
    vbox.addWidget( QLabel(_("Address") + ': ' + address))
    vbox.addWidget( QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text='\n'.join(pk_list))
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
@protected
def do_sign(self, address, message, signature, password):
    """Sign the message widget's text with the key of `address` (async).

    `address`/`message`/`signature` are the Qt input widgets from the
    sign/verify dialog; `password` is injected by @protected. Signing runs
    on the wallet thread; the base64 result is written into `signature`.
    """
    message = unicode(message.toPlainText()).encode('utf-8')
    task = partial(self.wallet.sign_message, str(address.text()),
                   message, password)
    def show_signed_message(sig):
        signature.setText(base64.b64encode(sig))
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify a signed message and report the result to the user.

    `address`/`message`/`signature` are the Qt input widgets from the
    sign/verify dialog. Any decoding or verification failure is reported
    as "Wrong signature".
    """
    message = unicode(message.toPlainText())
    message = message.encode('utf-8')
    try:
        # This can throw on invalid base64
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = parkbyte.verify_message(address.text(), sig, message)
    except Exception:
        # Treat malformed input as a failed verification, but no longer
        # swallow SystemExit/KeyboardInterrupt (the original bare except did).
        verified = False
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Open the message sign/verify dialog, optionally pre-filled with *address*."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(410, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Sign"))
    # do_sign is @protected: it prompts for the password itself.
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext widget's text on the wallet thread.

    `password` is injected by @protected; the plaintext result is written
    into `message_e` via the on_success callback.
    """
    cyphertext = str(encrypted_e.toPlainText())
    task = partial(self.wallet.decrypt_message, str(pubkey_e.text()),
                   cyphertext, password)
    self.wallet.thread.add(task, on_success=message_e.setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the message widget's text to the given public key.

    The result is written into `encrypted_e`; failures are shown to the
    user and the traceback dumped to stdout.
    """
    message = unicode(message_e.toPlainText())
    message = message.encode('utf-8')
    try:
        encrypted = parkbyte.encrypt_message(message, str(pubkey_e.text()))
        encrypted_e.setText(encrypted)
    except BaseException as e:
        traceback.print_exc(file=sys.stdout)
        self.show_warning(str(e))
def encrypt_message(self, address = ''):
    """Open the message encrypt/decrypt dialog.

    If *address* is given, its first public key pre-fills the pubkey field.
    """
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_keys(address)[0]
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    # do_decrypt is @protected: it prompts for the password itself.
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Prompt for the wallet password.

    Returns the entered password as unicode, or None if the user cancelled.
    Plugins may extend the dialog via the 'password_dialog' hook.
    """
    parent = parent or self
    d = WindowModalDialog(parent, _("Enter Password"))
    pw = QLineEdit()
    pw.setEchoMode(2)  # 2 == QLineEdit.Password: mask typed characters
    vbox = QVBoxLayout()
    if not msg:
        msg = _('Please enter your password')
    vbox.addWidget(QLabel(msg))
    grid = QGridLayout()
    grid.setSpacing(8)
    grid.addWidget(QLabel(_('Password')), 1, 0)
    grid.addWidget(pw, 1, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    d.setLayout(vbox)
    run_hook('password_dialog', pw, grid, 1)
    if not d.exec_(): return
    return unicode(pw.text())
def tx_from_text(self, txt):
    """Parse *txt* (raw hexadecimal or JSON with a "hex" field) into a Transaction.

    Returns the Transaction, or None after notifying the user on parse failure.
    """
    txt = txt.strip()
    # Probe whether the input is valid hex (Python 2 str.decode('hex')
    # raises on non-hex input); used to pick the parse path below.
    try:
        txt.decode('hex')
        is_hex = True
    except:
        is_hex = False
    try:
        if is_hex:
            return Transaction(txt)
        # Otherwise expect a JSON object that carries the raw tx in "hex".
        tx_dict = json.loads(str(txt))
        assert "hex" in tx_dict.keys()
        tx = Transaction(tx_dict["hex"])
        return tx
    except:
        traceback.print_exc(file=sys.stdout)
        self.show_critical(_("Electrum was unable to parse your transaction"))
        return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_qr(self.config)
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a parkbyte URI
if data.startswith("parkbyte:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
# transactions are binary, but qrcode seems to return utf8...
data = data.decode('utf8')
z = parkbyte.base_decode(data, length=None, base=43)
data = ''.join(chr(ord(b)) for b in z).encode('hex')
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
    """Let the user pick a .txn file and parse its contents into a transaction.

    Returns the parsed Transaction, or None when the user cancelled, the
    file could not be read, or parsing failed.
    """
    fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    if not fileName:
        return
    try:
        with open(fileName, "r") as f:
            file_content = f.read()
    except (ValueError, IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
        # Must bail out here: falling through would raise a NameError,
        # since file_content was never assigned when open()/read() failed.
        return
    return self.tx_from_text(file_content)
def do_process_from_text(self):
    """Ask the user to paste a raw transaction and display it if it parses."""
    raw = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
    if not raw:
        return
    parsed = self.tx_from_text(raw)
    if parsed:
        self.show_transaction(parsed)
def do_process_from_file(self):
    """Load a transaction from a user-selected file and display it if valid."""
    loaded = self.read_tx_from_file()
    if loaded:
        self.show_transaction(loaded)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
r = self.network.synchronous_get(('blockchain.transaction.get',[txid]))
except BaseException as e:
self.show_message(str(e))
return
tx = transaction.Transaction(r)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Export every private key in the wallet to a CSV or JSON file.

    `password` is injected by @protected. Keys are derived on a background
    thread (one per 0.1s) while a modal dialog shows progress; closing the
    dialog with Cancel sets `done` so the thread stops early.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    try:
        self.wallet.check_password(password)
    except Exception as e:
        self.show_error(str(e))
        return
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(850, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()  # shows progress, then the final key listing
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)  # enabled once all keys have been derived
    vbox.addLayout(Buttons(CancelButton(d), b))
    private_keys = {}
    addresses = self.wallet.addresses(True)
    done = False  # set True when the dialog is cancelled, to stop the thread
    def privkeys_thread():
        for addr in addresses:
            time.sleep(0.1)
            if done:
                break
            private_keys[addr] = "\n".join(self.wallet.get_private_key(addr, password))
            # Signals marshal UI updates back onto the Qt main thread.
            d.emit(SIGNAL('computing_privkeys'))
        d.emit(SIGNAL('show_privkeys'))
    def show_privkeys():
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
    d.connect(d, QtCore.SIGNAL('computing_privkeys'), lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    d.connect(d, QtCore.SIGNAL('show_privkeys'), show_privkeys)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write the address -> private-key mapping *pklist* to *fileName*.

    CSV output has an "address,private_key" header with addresses padded
    to 34 characters; otherwise the mapping is dumped as indented JSON.
    """
    with open(fileName, "w+") as f:
        if not is_csv:
            import json
            f.write(json.dumps(pklist, indent = 4))
            return
        writer = csv.writer(f)
        writer.writerow(["address", "private_key"])
        for addr, pk in pklist.items():
            writer.writerow(["%34s" % addr, pk])
def do_import_labels(self):
    """Import wallet labels from a user-selected JSON file.

    The file must contain a JSON object mapping keys (addresses/txids) to
    label strings; each pair is stored via wallet.set_label().
    """
    labelsFile = self.getOpenFileName(_("Open labels file"), "*.dat")
    if not labelsFile: return
    try:
        # `with` guarantees the handle is closed even when reading raises
        # (the original open()/read()/close() leaked it on error).
        with open(labelsFile, 'r') as f:
            data = f.read()
        for key, value in json.loads(data).items():
            self.wallet.set_label(key, value)
        self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
    except (IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to import your labels.") + "\n" + str(reason))
def do_export_labels(self):
    """Export all wallet labels to a user-selected JSON file."""
    labels = self.wallet.labels
    try:
        fileName = self.getSaveFileName(_("Select file to save your labels"), 'electrum_labels.dat', "*.dat")
        if fileName:
            with open(fileName, 'w+') as f:
                json.dump(labels, f)
            # message typo fixed: "where exported" -> "were exported"
            self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
    # modern `except ... as` syntax, consistent with the rest of the file
    except (IOError, os.error) as reason:
        self.show_critical(_("Electrum was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
    """Ask for a target file and export the wallet's history to it.

    The actual formatting/writing is delegated to do_export_history();
    plugins may extend the dialog via the 'export_history_dialog' hook.
    """
    d = WindowModalDialog(self, _('Export History'))
    d.setMinimumSize(400, 200)
    vbox = QVBoxLayout(d)
    defaultname = os.path.expanduser('~/electrum-history.csv')
    select_msg = _('Select file to export your wallet transactions to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
    vbox.addLayout(hbox)
    run_hook('export_history_dialog', self, hbox)
    self.update()
    if not d.exec_():
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_history(self.wallet, filename, csv_button.isChecked())
    # modern `except ... as` syntax, consistent with the rest of the file
    except (IOError, os.error) as reason:
        export_error_label = _("Electrum was unable to produce a transaction export.")
        self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
        return
    self.show_message(_("Your wallet history has been successfully exported."))
def do_export_history(self, wallet, fileName, is_csv):
    """Write *wallet*'s transaction history to *fileName* as CSV or JSON.

    Each history item is a (tx_hash, confirmations, value, timestamp,
    balance) tuple; unconfirmed or timestamp-less entries get placeholder
    time strings.
    """
    history = wallet.get_history()
    lines = []
    for item in history:
        tx_hash, confirmations, value, timestamp, balance = item
        if confirmations:
            if timestamp is not None:
                time_string = format_time(timestamp)
            else:
                time_string = "unknown"
        else:
            time_string = "unconfirmed"
        if value is not None:
            value_string = format_satoshis(value, True)
        else:
            value_string = '--'
        if tx_hash:
            label = wallet.get_label(tx_hash)
            label = label.encode('utf-8')
        else:
            label = ""
        if is_csv:
            lines.append([tx_hash, label, confirmations, value_string, time_string])
        else:
            lines.append({'txid':tx_hash, 'date':"%16s"%time_string, 'label':label, 'value':value_string})
    with open(fileName, "w+") as f:
        if is_csv:
            transaction = csv.writer(f, lineterminator='\n')
            transaction.writerow(["transaction_hash","label", "confirmations", "value", "timestamp"])
            for line in lines:
                transaction.writerow(line)
        else:
            import json
            f.write(json.dumps(lines, indent = 4))
def sweep_key_dialog(self):
    """Sweep the funds of user-supplied private keys into a wallet address.

    The Sweep button stays disabled until both the destination address and
    the pasted key(s) validate; the resulting transaction is shown for review.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_("Enter private keys:")))
    keys_e = QTextEdit()
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    addresses = self.wallet.get_unused_addresses(self.current_account)
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)
    def get_address():
        # Returns the destination address, or None if invalid.
        addr = str(address_e.text())
        if parkbyte.is_address(addr):
            return addr
    def get_pk():
        # Returns the list of pasted keys, or None if they don't validate.
        pk = str(keys_e.toPlainText()).strip()
        if Wallet.is_private_key(pk):
            return pk.split()
    f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
    keys_e.textChanged.connect(f)
    address_e.textChanged.connect(f)
    if not d.exec_():
        return
    fee = self.wallet.fee_per_kb(self.config)
    tx = Transaction.sweep(get_pk(), self.network, get_address(), fee)
    if not tx:
        self.show_message(_('No inputs found. (Note that inputs need to be confirmed)'))
        return
    self.warn_if_watching_only()
    self.show_transaction(tx)
@protected
def do_import_privkey(self, password):
    """Import pasted private keys into the wallet; `password` injected by @protected.

    Warns first-time importers that imported keys are not covered by the
    seed, then reports which keys were added and which failed.
    """
    if not self.wallet.has_imported_keys():
        if not self.question('<b>'+_('Warning') +':\n</b><br/>'+ _('Imported keys are not recoverable from seed.') + ' ' \
                             + _('If you ever need to restore your wallet from its seed, these keys will be lost.') + '<p>' \
                             + _('Are you sure you understand what you are doing?'), title=_('Warning')):
            return
    text = text_dialog(self, _('Import private keys'), _("Enter private keys")+':', _("Import"))
    if not text: return
    text = str(text).split()
    badkeys = []
    addrlist = []
    for key in text:
        try:
            addr = self.wallet.import_key(key, password)
        except Exception as e:
            badkeys.append(key)
            continue
        if not addr:
            badkeys.append(key)
        else:
            addrlist.append(addr)
    if addrlist:
        self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(addrlist))
    if badkeys:
        self.show_critical(_("The following inputs could not be imported") + ':\n'+ '\n'.join(badkeys))
    self.address_list.update()
    self.history_list.update()
def settings_dialog(self):
    """Show the Preferences dialog (Transactions / Appearance / Identity tabs).

    Each setting is a (label, widget) pair appended to one of the per-tab
    widget lists; the closures below write changes straight to self.config
    or the wallet as the user interacts with the widgets. Some changes
    (language) only take effect after restart — tracked in self.need_restart.
    """
    self.need_restart = False
    d = WindowModalDialog(self, _('Preferences'))
    vbox = QVBoxLayout()
    tabs = QTabWidget()
    gui_widgets = []
    tx_widgets = []
    id_widgets = []
    # language
    lang_help = _('Select which language is used in the GUI (after restart).')
    lang_label = HelpLabel(_('Language') + ':', lang_help)
    lang_combo = QComboBox()
    from electrum.i18n import languages
    lang_combo.addItems(languages.values())
    try:
        index = languages.keys().index(self.config.get("language",''))
    except Exception:
        index = 0
    lang_combo.setCurrentIndex(index)
    if not self.config.is_modifiable('language'):
        for w in [lang_combo, lang_label]: w.setEnabled(False)
    def on_lang(x):
        lang_request = languages.keys()[lang_combo.currentIndex()]
        if lang_request != self.config.get('language'):
            self.config.set_key("language", lang_request, True)
            self.need_restart = True
    lang_combo.currentIndexChanged.connect(on_lang)
    gui_widgets.append((lang_label, lang_combo))
    # number of zeros after the decimal point
    nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
    nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
    nz = QSpinBox()
    nz.setMinimum(0)
    nz.setMaximum(self.decimal_point)
    nz.setValue(self.num_zeros)
    if not self.config.is_modifiable('num_zeros'):
        for w in [nz, nz_label]: w.setEnabled(False)
    def on_nz():
        value = nz.value()
        if self.num_zeros != value:
            self.num_zeros = value
            self.config.set_key('num_zeros', value, True)
            self.history_list.update()
            self.address_list.update()
    nz.valueChanged.connect(on_nz)
    gui_widgets.append((nz_label, nz))
    # static fee-per-kb (disabled while dynamic fees are on)
    msg = '\n'.join([
        _('Fee per kilobyte of transaction.')
    ])
    fee_label = HelpLabel(_('Transaction fee per kb') + ':', msg)
    fee_e = BTCkBEdit(self.get_decimal_point)
    def on_fee(is_done):
        if self.config.get('dynamic_fees'):
            return
        v = fee_e.get_amount() or 0
        self.config.set_key('fee_per_kb', v, is_done)
        self.update_fee()
    fee_e.editingFinished.connect(lambda: on_fee(True))
    fee_e.textEdited.connect(lambda: on_fee(False))
    tx_widgets.append((fee_label, fee_e))
    # dynamic fees + multiplier slider
    dynfee_cb = QCheckBox(_('Dynamic fees'))
    dynfee_cb.setChecked(self.config.get('dynamic_fees', False))
    dynfee_cb.setToolTip(_("Use a fee per kB value recommended by the server."))
    dynfee_sl = QSlider(Qt.Horizontal, self)
    # The pref is from 0 to 100; add 50 to get the factor from 50% to 150%
    dynfee_sl.setRange(0, 100)
    dynfee_sl.setTickInterval(10)
    dynfee_sl.setTickPosition(QSlider.TicksBelow)
    dynfee_sl.setValue(self.config.get('fee_factor', 50))
    dynfee_sl.setToolTip("Min = 50%, Max = 150%")
    multiplier_label = HelpLabel("", _("Multiply the recommended fee/kb value by a constant factor. Min = 50%, Max = 150%"))
    tx_widgets.append((dynfee_cb, dynfee_sl))
    tx_widgets.append((None, multiplier_label))
    def update_feeperkb():
        # Refresh the fee field and enable/disable widgets per fee mode.
        fee_e.setAmount(self.wallet.fee_per_kb(self.config))
        b = self.config.get('dynamic_fees', False)
        dynfee_sl.setEnabled(b)
        multiplier_label.setEnabled(b)
        fee_e.setEnabled(not b)
    def slider_moved():
        multiplier_label.setText(_('Fee multiplier: %3d%%')
                                 % (dynfee_sl.sliderPosition() + 50))
    def slider_released():
        self.config.set_key('fee_factor', dynfee_sl.sliderPosition(), False)
        update_feeperkb()
    def on_dynfee(x):
        dynfee = x == Qt.Checked
        self.config.set_key('dynamic_fees', dynfee)
        update_feeperkb()
    dynfee_cb.stateChanged.connect(on_dynfee)
    dynfee_sl.valueChanged.connect(slider_moved)
    dynfee_sl.sliderReleased.connect(slider_released)
    update_feeperkb()
    slider_moved()
    # OpenAlias identity
    msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
          + _('The following alias providers are available:') + '\n'\
          + '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
          + 'For more information, see http://openalias.org'
    alias_label = HelpLabel(_('OpenAlias') + ':', msg)
    alias = self.config.get('alias','')
    alias_e = QLineEdit(alias)
    def set_alias_color():
        # Green when the alias validated, red when lookup/validation failed.
        if not self.config.get('alias'):
            alias_e.setStyleSheet("")
            return
        if self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            alias_e.setStyleSheet(GREEN_BG if validated else RED_BG)
        else:
            alias_e.setStyleSheet(RED_BG)
    def on_alias_edit():
        alias_e.setStyleSheet("")
        alias = str(alias_e.text())
        self.config.set_key('alias', alias, True)
        if alias:
            self.fetch_alias()
    set_alias_color()
    self.connect(self, SIGNAL('alias_received'), set_alias_color)
    alias_e.editingFinished.connect(on_alias_edit)
    id_widgets.append((alias_label, alias_e))
    # SSL certificate
    msg = ' '.join([
        _('SSL certificate used to sign payment requests.'),
        _('Use setconfig to set ssl_chain and ssl_privkey.'),
    ])
    if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
        try:
            SSL_identity = paymentrequest.check_ssl_config(self.config)
            SSL_error = None
        except BaseException as e:
            SSL_identity = "error"
            SSL_error = str(e)
    else:
        SSL_identity = ""
        SSL_error = None
    SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
    SSL_id_e = QLineEdit(SSL_identity)
    SSL_id_e.setStyleSheet(RED_BG if SSL_error else GREEN_BG if SSL_identity else '')
    if SSL_error:
        SSL_id_e.setToolTip(SSL_error)
    SSL_id_e.setReadOnly(True)
    id_widgets.append((SSL_id_label, SSL_id_e))
    # base display unit
    units = ['PKB', 'mPKB', 'PKBits']
    msg = _('Base unit of your wallet.')\
          + '\n1PKB=1000mPKB.\n' \
          + _(' These settings affects the fields in the Send tab')+' '
    unit_label = HelpLabel(_('Base unit') + ':', msg)
    unit_combo = QComboBox()
    unit_combo.addItems(units)
    unit_combo.setCurrentIndex(units.index(self.base_unit()))
    def on_unit(x):
        unit_result = units[unit_combo.currentIndex()]
        if self.base_unit() == unit_result:
            return
        # Preserve the amounts currently typed into the edits across the
        # decimal-point change, then re-render them in the new unit.
        edits = self.amount_e, self.fee_e, self.receive_amount_e, fee_e
        amounts = [edit.get_amount() for edit in edits]
        if unit_result == 'PKB':
            self.decimal_point = 8
        elif unit_result == 'mPKB':
            self.decimal_point = 5
        elif unit_result == 'PKBits':
            self.decimal_point = 2
        else:
            raise Exception('Unknown base unit')
        self.config.set_key('decimal_point', self.decimal_point, True)
        self.history_list.update()
        self.receive_list.update()
        self.address_list.update()
        for edit, amount in zip(edits, amounts):
            edit.setAmount(amount)
        self.update_status()
    unit_combo.currentIndexChanged.connect(on_unit)
    gui_widgets.append((unit_label, unit_combo))
    # block explorer
    block_explorers = sorted(block_explorer_info.keys())
    msg = _('Choose which online block explorer to use for functions that open a web browser')
    block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
    block_ex_combo = QComboBox()
    block_ex_combo.addItems(block_explorers)
    block_ex_combo.setCurrentIndex(block_explorers.index(block_explorer(self.config)))
    def on_be(x):
        be_result = block_explorers[block_ex_combo.currentIndex()]
        self.config.set_key('block_explorer', be_result, True)
    block_ex_combo.currentIndexChanged.connect(on_be)
    gui_widgets.append((block_ex_label, block_ex_combo))
    # QR-scanner video device
    from electrum import qrscanner
    system_cameras = qrscanner._find_system_cameras()
    qr_combo = QComboBox()
    qr_combo.addItem("Default","default")
    for camera, device in system_cameras.items():
        qr_combo.addItem(camera, device)
    #combo.addItem("Manually specify a device", config.get("video_device"))
    index = qr_combo.findData(self.config.get("video_device"))
    qr_combo.setCurrentIndex(index)
    msg = _("Install the zbar package to enable this.\nOn linux, type: 'apt-get install python-zbar'")
    qr_label = HelpLabel(_('Video Device') + ':', msg)
    qr_combo.setEnabled(qrscanner.zbar is not None)
    on_video_device = lambda x: self.config.set_key("video_device", str(qr_combo.itemData(x).toString()), True)
    qr_combo.currentIndexChanged.connect(on_video_device)
    gui_widgets.append((qr_label, qr_combo))
    # change-address behaviour
    usechange_cb = QCheckBox(_('Use change addresses'))
    usechange_cb.setChecked(self.wallet.use_change)
    if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
    def on_usechange(x):
        usechange_result = x == Qt.Checked
        if self.wallet.use_change != usechange_result:
            self.wallet.use_change = usechange_result
            self.wallet.storage.put('use_change', self.wallet.use_change)
            multiple_cb.setEnabled(self.wallet.use_change)
    usechange_cb.stateChanged.connect(on_usechange)
    usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
    def on_multiple(x):
        multiple = x == Qt.Checked
        if self.wallet.multiple_change != multiple:
            self.wallet.multiple_change = multiple
            self.wallet.storage.put('multiple_change', multiple)
    multiple_change = self.wallet.multiple_change
    multiple_cb = QCheckBox(_('Use multiple change addresses'))
    multiple_cb.setEnabled(self.wallet.use_change)
    multiple_cb.setToolTip('\n'.join([
        _('In some cases, use up to 3 change addresses in order to break '
          'up large coin amounts and obfuscate the recipient address.'),
        _('This may result in higher transactions fees.')
    ]))
    multiple_cb.setChecked(multiple_change)
    multiple_cb.stateChanged.connect(on_multiple)
    tx_widgets.append((usechange_cb, None))
    tx_widgets.append((multiple_cb, None))
    # preview-before-broadcast
    showtx_cb = QCheckBox(_('View transaction before signing'))
    showtx_cb.setChecked(self.show_before_broadcast())
    showtx_cb.stateChanged.connect(lambda x: self.set_show_before_broadcast(showtx_cb.isChecked()))
    showtx_cb.setToolTip(_('Display the details of your transactions before signing it.'))
    tx_widgets.append((showtx_cb, None))
    # manual fee editing
    can_edit_fees_cb = QCheckBox(_('Set transaction fees manually'))
    can_edit_fees_cb.setChecked(self.config.get('can_edit_fees', False))
    def on_editfees(x):
        self.config.set_key('can_edit_fees', x == Qt.Checked)
        self.update_fee_edit()
    can_edit_fees_cb.stateChanged.connect(on_editfees)
    can_edit_fees_cb.setToolTip(_('This option lets you edit fees in the send tab.'))
    tx_widgets.append((can_edit_fees_cb, None))
    # coin (UTXO) chooser
    def fmt_docs(key, klass):
        # Flatten the chooser class docstring into the help text.
        lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
        return '\n'.join([key, "", " ".join(lines)])
    choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
    chooser_name = coinchooser.get_name(self.config)
    msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
    msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
    chooser_label = HelpLabel(_('Coin selection') + ':', msg)
    chooser_combo = QComboBox()
    chooser_combo.addItems(choosers)
    i = choosers.index(chooser_name) if chooser_name in choosers else 0
    chooser_combo.setCurrentIndex(i)
    def on_chooser(x):
        chooser_name = choosers[chooser_combo.currentIndex()]
        self.config.set_key('coin_chooser', chooser_name)
    chooser_combo.currentIndexChanged.connect(on_chooser)
    tx_widgets.append((chooser_label, chooser_combo))
    # lay the collected (label, widget) pairs out on their tabs
    tabs_info = [
        (tx_widgets, _('Transactions')),
        (gui_widgets, _('Appearance')),
        (id_widgets, _('Identity')),
    ]
    for widgets, name in tabs_info:
        tab = QWidget()
        grid = QGridLayout(tab)
        grid.setColumnStretch(0,1)
        for a,b in widgets:
            i = grid.rowCount()
            if b:
                if a:
                    grid.addWidget(a, i, 0)
                grid.addWidget(b, i, 1)
            else:
                grid.addWidget(a, i, 0, 1, 2)
        tabs.addTab(tab, name)
    vbox.addWidget(tabs)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    # run the dialog
    d.exec_()
    self.disconnect(self, SIGNAL('alias_received'), set_alias_color)
    run_hook('close_settings_dialog')
    if self.need_restart:
        self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def run_network_dialog(self):
    """Open the network settings dialog, or warn if running in offline mode."""
    if not self.network:
        self.show_warning(_('You are using Electrum in offline mode; restart Electrum if you want to get connected'), title=_('Offline'))
        return
    # Use self.network (the object the guard above checked) rather than
    # self.wallet.network, so the guard and the dialog refer to the same
    # network instance.
    NetworkDialog(self.network, self.config, self).do_exec()
def closeEvent(self, event):
    """Qt close handler: run clean_up() exactly once, then accept the close."""
    # It seems in some rare cases this closeEvent() is called twice
    if not self.cleaned_up:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Persist window state and release resources before the window closes.

    Stops the wallet worker thread, unregisters network callbacks, saves
    geometry/console history, closes the QR window and the wallet itself.
    """
    self.wallet.thread.stop()
    if self.network:
        self.network.unregister_callback(self.on_network)
    self.config.set_key("is_maximized", self.isMaximized())
    # Only persist the geometry when not maximized, so un-maximizing
    # later restores a sensible size.
    if not self.isMaximized():
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
                                              g.width(),g.height()])
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show the plugins dialog: one checkbox (and optional settings widget) per plugin."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400,250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0,1)
    w.setLayout(grid)
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # Lazily create the plugin's settings widget; keep its enabled
        # state in sync with the plugin's enabled state.
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        name = descr['__name__']
        p = plugins.get(name)
        # Hardware-wallet-style plugins register wallet types instead of
        # being user-toggleable here.
        if descr.get('registers_wallet_type'):
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            cb.setEnabled(plugins.is_available(name, self.wallet))
            cb.setChecked(p is not None and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            self.print_msg("error: cannot display plugin", name)
            traceback.print_exc(file=sys.stdout)
    grid.setRowStretch(i+1,1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def show_account_details(self, k):
    """Show name, type, derivation and master public keys of account *k*."""
    account = self.wallet.accounts[k]
    d = WindowModalDialog(self, _('Account Details'))
    vbox = QVBoxLayout(d)
    name = self.wallet.get_account_name(k)
    label = QLabel('Name: ' + name)
    vbox.addWidget(label)
    vbox.addWidget(QLabel(_('Address type') + ': ' + account.get_type()))
    vbox.addWidget(QLabel(_('Derivation') + ': ' + k))
    vbox.addWidget(QLabel(_('Master Public Key:')))
    text = QTextEdit()
    text.setReadOnly(True)
    text.setMaximumHeight(170)
    vbox.addWidget(text)
    mpk_text = '\n'.join( account.get_master_pubkeys() )
    text.setText(mpk_text)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
# ---------------------------------------------------------------------------
# downloader.py — a second, unrelated source file was concatenated below this
# marker; the code above belongs to a different (Python 2 / PyQt4) project.
# ---------------------------------------------------------------------------
#!/usr/bin/python3 -OO
# Copyright 2007-2021 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.downloader - download engine
"""
import time
import select
import logging
from math import ceil
from threading import Thread, RLock
from nntplib import NNTPPermanentError
import socket
import random
import sys
import ssl
from typing import List, Dict, Optional, Union
import sabnzbd
from sabnzbd.decorators import synchronized, NzbQueueLocker, DOWNLOADER_CV
from sabnzbd.newswrapper import NewsWrapper
import sabnzbd.notifier
import sabnzbd.config as config
import sabnzbd.cfg as cfg
from sabnzbd.misc import from_units, nntp_to_msg, int_conv, get_server_addrinfo
from sabnzbd.utils.happyeyeballs import happyeyeballs
# Timeout penalty in minutes for each cause; used as the argument to
# Downloader.plan_server() when a server is temporarily blocked.
_PENALTY_UNKNOWN = 3  # Unknown cause
_PENALTY_502 = 5  # Unknown 502
_PENALTY_TIMEOUT = 10  # Server doesn't give an answer (multiple times)
_PENALTY_SHARE = 10  # Account sharing detected
_PENALTY_TOOMANY = 10  # Too many connections
_PENALTY_PERM = 10  # Permanent error, like bad username/password
_PENALTY_SHORT = 1  # Minimal penalty when no_penalties is set
_PENALTY_VERYSHORT = 0.1  # Error 400 without cause clues

# Wait this many seconds between checking idle servers for new articles or busy threads for timeout
_SERVER_CHECK_DELAY = 0.5
# Wait this many seconds between updates of the BPSMeter
_BPSMETER_UPDATE_DELAY = 0.05

# Protects the planned-restart timer administration (see Downloader.plan_server)
TIMER_LOCK = RLock()
class Server:
    """A single Usenet server: its configuration, connection pool and article queue."""

    # Pre-define attributes to save memory and improve get/set performance
    __slots__ = (
        "id",
        "newid",
        "restart",
        "displayname",
        "host",
        "port",
        "timeout",
        "threads",
        "priority",
        "ssl",
        "ssl_verify",
        "ssl_ciphers",
        "ssl_context",
        "required",
        "optional",
        "retention",
        "send_group",
        "username",
        "password",
        "busy_threads",
        "next_busy_threads_check",
        "idle_threads",
        "next_article_search",
        "active",
        "bad_cons",
        "errormsg",
        "warning",
        "info",
        "ssl_info",
        "request",
        "have_body",
        "have_stat",
        "article_queue",
    )

    def __init__(
        self,
        server_id,
        displayname,
        host,
        port,
        timeout,
        threads,
        priority,
        use_ssl,
        ssl_verify,
        ssl_ciphers,
        send_group,
        username=None,
        password=None,
        required=False,
        optional=False,
        retention=0,
    ):
        self.id: str = server_id
        # newid/restart are used by Downloader when server settings change at runtime
        self.newid: Optional[str] = None
        self.restart: bool = False
        self.displayname: str = displayname
        self.host: str = host
        self.port: int = port
        self.timeout: int = timeout
        self.threads: int = threads
        self.priority: int = priority
        self.ssl: bool = use_ssl
        self.ssl_verify: int = ssl_verify
        self.ssl_ciphers: str = ssl_ciphers
        self.ssl_context: Optional[ssl.SSLContext] = None
        self.required: bool = required
        self.optional: bool = optional
        self.retention: int = retention  # seconds (converted from days by the caller)
        self.send_group: bool = send_group

        self.username: Optional[str] = username
        self.password: Optional[str] = password

        # Connection pool: every NewsWrapper is either busy or idle
        self.busy_threads: List[NewsWrapper] = []
        self.next_busy_threads_check: float = 0
        self.idle_threads: List[NewsWrapper] = []
        self.next_article_search: float = 0
        self.active: bool = True
        self.bad_cons: int = 0  # Count of consecutive connection failures
        self.errormsg: str = ""
        self.warning: str = ""
        self.info: Optional[List] = None  # Will hold getaddrinfo() list
        self.ssl_info: str = ""  # Will hold the type and cipher of SSL connection
        self.request: bool = False  # True if a getaddrinfo() request is pending
        self.have_body: bool = True  # Assume server has "BODY", until proven otherwise
        self.have_stat: bool = True  # Assume server has "STAT", until proven otherwise

        # Pre-fetched articles waiting to be assigned to a connection
        self.article_queue: List[sabnzbd.nzbstuff.Article] = []

        # Initialize threads
        for i in range(threads):
            self.idle_threads.append(NewsWrapper(self, i + 1))

        # Tell the BPSMeter about this server
        sabnzbd.BPSMeter.init_server_stats(self.id)

    @property
    def hostip(self) -> str:
        """In case a server still has active connections, we use the same IP again
        If new connection then based on value of load_balancing() and self.info:
        0 - return the first entry, so all threads use the same IP
        1 - and self.info has more than 1 entry (read: IP address): Return a random entry from the possible IPs
        2 - and self.info has more than 1 entry (read: IP address): Return the quickest IP based on the happyeyeballs algorithm
        In case of problems: return the host name itself
        """
        # Check if already a successful ongoing connection
        if self.busy_threads and self.busy_threads[0].nntp:
            # Re-use that IP
            logging.debug("%s: Re-using address %s", self.host, self.busy_threads[0].nntp.host)
            return self.busy_threads[0].nntp.host

        # Determine IP
        ip = self.host
        if self.info:
            if cfg.load_balancing() == 0 or len(self.info) == 1:
                # Just return the first one, so all next threads use the same IP
                ip = self.info[0][4][0]
                logging.debug("%s: Connecting to address %s", self.host, ip)
            elif cfg.load_balancing() == 1:
                # Return a random entry from the possible IPs
                rnd = random.randint(0, len(self.info) - 1)
                ip = self.info[rnd][4][0]
                logging.debug("%s: Connecting to address %s", self.host, ip)
            elif cfg.load_balancing() == 2:
                # RFC6555 / Happy Eyeballs:
                ip = happyeyeballs(self.host, port=self.port)
                if ip:
                    logging.debug("%s: Connecting to address %s", self.host, ip)
                else:
                    # nothing returned, so there was a connection problem
                    # NOTE(review): this path returns None, although the docstring
                    # promises the host name as fallback — confirm callers accept None
                    logging.debug("%s: No successful IP connection was possible", self.host)
        return ip

    def deactivate(self):
        """Deactive server and reset queued articles"""
        self.active = False
        self.reset_article_queue()

    def stop(self):
        """Remove all connections from server"""
        for nw in self.idle_threads:
            sabnzbd.Downloader.remove_socket(nw)
            nw.hard_reset(send_quit=True)
        self.idle_threads = []

    def request_info(self):
        """Launch async request to resolve server address.
        getaddrinfo() can be very slow. In some situations this can lead
        to delayed starts and timeouts on connections.
        Because of this, the results will be cached in the server object."""
        if not self.request:
            self.request = True
            Thread(target=self._request_info_internal).start()

    def reset_article_queue(self):
        """Return all pre-fetched articles to the queue so other servers can try them."""
        logging.debug("Resetting article queue for %s", self)
        for article in self.article_queue:
            sabnzbd.NzbQueue.reset_try_lists(article, remove_fetcher_from_trylist=False)
        self.article_queue = []

    def _request_info_internal(self):
        """Async attempt to run getaddrinfo() for specified server"""
        logging.debug("Retrieving server address information for %s", self.host)
        self.info = get_server_addrinfo(self.host, self.port)
        if not self.info:
            # Resolution failed: treat as if every thread had a bad connection
            self.bad_cons += self.threads
        else:
            self.bad_cons = 0
        self.request = False
        sabnzbd.Downloader.wakeup()

    def __repr__(self):
        return "<Server: %s:%s>" % (self.host, self.port)
class Downloader(Thread):
    """Singleton Downloader Thread"""

    # Improves get/set performance, even though it's inherited from Thread
    # Due to the huge number of get-calls in run(), it can actually make a difference
    __slots__ = (
        "paused",
        "bandwidth_limit",
        "bandwidth_perc",
        "sleep_time",
        "paused_for_postproc",
        "shutdown",
        "server_restarts",
        "force_disconnect",
        "read_fds",
        "servers",
        "timers",
    )

    def __init__(self, paused=False):
        super().__init__()
        logging.debug("Initializing downloader")

        # Used for scheduled pausing
        self.paused: bool = paused

        # Used for reducing speed
        self.bandwidth_limit: int = 0
        self.bandwidth_perc: int = 0
        cfg.bandwidth_perc.callback(self.speed_set)
        cfg.bandwidth_max.callback(self.speed_set)
        self.speed_set()

        # Used to see if we can add a slowdown to the Downloader-loop
        self.sleep_time: float = 0.0
        self.sleep_time_set()
        cfg.downloader_sleep_time.callback(self.sleep_time_set)

        self.paused_for_postproc: bool = False
        self.shutdown: bool = False

        # A user might change server parms again before server restart is ready.
        # Keep a counter to prevent multiple restarts
        self.server_restarts: int = 0
        self.force_disconnect: bool = False

        self.read_fds: Dict[int, NewsWrapper] = {}

        self.servers: List[Server] = []
        self.timers: Dict[str, List[float]] = {}

        for server in config.get_servers():
            self.init_server(None, server)

    def init_server(self, oldserver: Optional[str], newserver: str):
        """Setup or re-setup single server
        When oldserver is defined and in use, delay startup.
        Note that the server names are "host:port" strings!
        """
        create = False
        servers = config.get_servers()
        if newserver in servers:
            srv = servers[newserver]
            enabled = srv.enable()
            displayname = srv.displayname()
            host = srv.host()
            port = srv.port()
            timeout = srv.timeout()
            threads = srv.connections()
            priority = srv.priority()
            ssl = srv.ssl()
            ssl_verify = srv.ssl_verify()
            ssl_ciphers = srv.ssl_ciphers()
            username = srv.username()
            password = srv.password()
            required = srv.required()
            optional = srv.optional()
            retention = int(srv.retention() * 24 * 3600)  # days ==> seconds
            send_group = srv.send_group()
            create = True

        if oldserver:
            for server in self.servers:
                if server.id == oldserver:
                    # Server exists, do re-init later
                    create = False
                    server.newid = newserver
                    server.restart = True
                    server.reset_article_queue()
                    self.server_restarts += 1
                    break

        if create and enabled and host and port and threads:
            self.servers.append(
                Server(
                    newserver,
                    displayname,
                    host,
                    port,
                    timeout,
                    threads,
                    priority,
                    ssl,
                    ssl_verify,
                    ssl_ciphers,
                    send_group,
                    username,
                    password,
                    required,
                    optional,
                    retention,
                )
            )

        # Sort the servers for performance
        self.servers.sort(key=lambda svr: "%02d%s" % (svr.priority, svr.displayname.lower()))

    def add_socket(self, fileno: int, nw: NewsWrapper):
        """Add a socket ready to be used to the list to be watched"""
        self.read_fds[fileno] = nw

    def remove_socket(self, nw: NewsWrapper):
        """Remove a socket to be watched"""
        if nw.nntp:
            self.read_fds.pop(nw.nntp.fileno, None)

    @NzbQueueLocker
    def set_paused_state(self, state: bool):
        """Set downloader to new paused state if it is changed"""
        if self.paused != state:
            if cfg.preserve_paused_state():
                cfg.start_paused.set(state)
            self.paused = state

    @NzbQueueLocker
    def resume(self):
        """Resume downloading and notify the user"""
        # Do not notify when SABnzbd is still starting
        if self.paused and sabnzbd.WEB_DIR:
            logging.info("Resuming")
            sabnzbd.notifier.send_notification("SABnzbd", T("Resuming"), "pause_resume")
        if cfg.preserve_paused_state():
            cfg.start_paused.set(False)
        self.paused = False

    @NzbQueueLocker
    def pause(self):
        """Pause the downloader, optionally saving admin"""
        if not self.paused:
            self.paused = True
            logging.info("Pausing")
            sabnzbd.notifier.send_notification("SABnzbd", T("Paused"), "pause_resume")
            if cfg.preserve_paused_state():
                cfg.start_paused.set(True)
            if self.is_paused():
                sabnzbd.BPSMeter.reset()
            if cfg.autodisconnect():
                self.disconnect()

    def wait_for_postproc(self):
        """Block downloading while post-processing runs"""
        logging.info("Waiting for post-processing to finish")
        self.paused_for_postproc = True

    @NzbQueueLocker
    def resume_from_postproc(self):
        """Allow downloading again after post-processing finished"""
        logging.info("Post-processing finished, resuming download")
        self.paused_for_postproc = False

    @NzbQueueLocker
    def disconnect(self):
        """Request that all connections be dropped by the main loop"""
        logging.info("Forcing disconnect")
        self.force_disconnect = True

    def limit_speed(self, value: Union[str, int]):
        """Set the actual download speed in Bytes/sec
        When 'value' ends with a '%' sign or is within 1-100, it is interpreted as a percentage of the maximum bandwidth
        When no '%' is found, it is interpreted as an absolute speed (including KMGT notation).
        """
        if value:
            mx = cfg.bandwidth_max.get_int()
            if "%" in str(value) or (0 < from_units(value) < 101):
                limit = value.strip(" %")
                self.bandwidth_perc = from_units(limit)
                if mx:
                    self.bandwidth_limit = mx * self.bandwidth_perc / 100
                else:
                    # warning_helpful is SABnzbd's extension of the logging module
                    logging.warning_helpful(T("You must set a maximum bandwidth before you can set a bandwidth limit"))
            else:
                self.bandwidth_limit = from_units(value)
                if mx:
                    self.bandwidth_perc = self.bandwidth_limit / mx * 100
                else:
                    self.bandwidth_perc = 100
        else:
            self.speed_set()
        logging.info("Speed limit set to %s B/s", self.bandwidth_limit)

    def get_limit(self):
        """Return the speed limit as a percentage of the maximum bandwidth"""
        return self.bandwidth_perc

    def get_limit_abs(self):
        """Return the speed limit in Bytes/sec"""
        return self.bandwidth_limit

    def speed_set(self):
        """Re-read the speed limit from the configuration"""
        limit = cfg.bandwidth_max.get_int()
        perc = cfg.bandwidth_perc()
        if limit and perc:
            self.bandwidth_perc = perc
            self.bandwidth_limit = limit * perc / 100
        else:
            self.bandwidth_perc = 0
            self.bandwidth_limit = 0

    def sleep_time_set(self):
        """Re-read the loop slowdown time from the configuration (stored in 0.1 ms units)"""
        self.sleep_time = cfg.downloader_sleep_time() * 0.0001
        logging.debug("Sleep time: %f seconds", self.sleep_time)

    def is_paused(self):
        """True when paused, unless the queue contains forced items"""
        if not self.paused:
            return False
        else:
            if sabnzbd.NzbQueue.has_forced_items():
                return False
            else:
                return True

    def highest_server(self, me: Server):
        """Return True when this server has the highest priority of the active ones
        0 is the highest priority
        """
        for server in self.servers:
            if server is not me and server.active and server.priority < me.priority:
                return False
        return True

    def nzo_servers(self, nzo):
        """Return the servers that are in this nzo's try-list"""
        return list(filter(nzo.server_in_try_list, self.servers))

    def maybe_block_server(self, server: Server):
        """Check for address resolution failures or too many bad connections
        and temporarily block the server if needed"""
        # Was it resolving problem?
        if server.info is False:
            # Warn about resolving issues
            errormsg = T("Cannot connect to server %s [%s]") % (server.host, T("Server name does not resolve"))
            if server.errormsg != errormsg:
                server.errormsg = errormsg
                logging.warning(errormsg)
                if not server.required:
                    logging.warning(T("Server %s will be ignored for %s minutes"), server.host, _PENALTY_TIMEOUT)

            # Not fully the same as the code below for optional servers
            server.bad_cons = 0
            if server.required:
                sabnzbd.Scheduler.plan_required_server_resume()
            else:
                server.deactivate()
                self.plan_server(server, _PENALTY_TIMEOUT)

        # Optional and active server had too many problems.
        # Disable it now and send a re-enable plan to the scheduler
        if server.optional and server.active and (server.bad_cons / server.threads) > 3:
            # Deactivate server
            server.bad_cons = 0
            server.deactivate()
            logging.warning(T("Server %s will be ignored for %s minutes"), server.host, _PENALTY_TIMEOUT)
            self.plan_server(server, _PENALTY_TIMEOUT)

            # Remove all connections to server
            for nw in server.idle_threads + server.busy_threads:
                self.__reset_nw(nw, "forcing disconnect", warn=False, wait=False, retry_article=False, send_quit=False)

            # Make sure server address resolution is refreshed
            server.info = None

    def decode(self, article, raw_data: Optional[List[bytes]]):
        """Decode article and check the status of
        the decoder and the assembler
        """
        # Article was requested and fetched, update article stats for the server
        sabnzbd.BPSMeter.register_server_article_tried(article.fetcher.id)

        # Handle broken articles directly
        if not raw_data:
            if not article.search_new_server():
                sabnzbd.NzbQueue.register_article(article, success=False)
                article.nzf.nzo.increase_bad_articles_counter("missing_articles")
            return

        # Send to decoder-queue
        sabnzbd.Decoder.process(article, raw_data)

        # See if we need to delay because the queues are full
        logged = False
        while not self.shutdown and (sabnzbd.Decoder.queue_full() or sabnzbd.Assembler.queue_full()):
            if not logged:
                # Only log once, to not waste any CPU-cycles
                logging.debug(
                    "Delaying - Decoder queue: %s - Assembler queue: %s",
                    sabnzbd.Decoder.decoder_queue.qsize(),
                    sabnzbd.Assembler.queue.qsize(),
                )
                logged = True
            time.sleep(0.01)

    def run(self):
        """Main download loop: feed articles to connections, select() on the
        sockets, handle NNTP responses and pass fetched data to the decoder"""
        # First check IPv6 connectivity
        sabnzbd.EXTERNAL_IPV6 = sabnzbd.test_ipv6()
        logging.debug("External IPv6 test result: %s", sabnzbd.EXTERNAL_IPV6)

        # Then we check SSL certificate checking
        sabnzbd.CERTIFICATE_VALIDATION = sabnzbd.test_cert_checking()
        logging.debug("SSL verification test: %s", sabnzbd.CERTIFICATE_VALIDATION)

        # Kick BPS-Meter to check quota
        BPSMeter = sabnzbd.BPSMeter
        BPSMeter.update()
        next_bpsmeter_update = 0

        # can_be_slowed variables
        can_be_slowed: Optional[float] = None
        can_be_slowed_timer: float = 0.0
        next_stable_speed_check: float = 0.0

        # Check server expiration dates
        check_server_expiration()

        while 1:
            now = time.time()

            # Set Article to None so references from this
            # thread do not keep the parent objects alive (see #1628)
            article = None

            for server in self.servers:
                # Skip this server if there's no point searching for new stuff to do
                if not server.busy_threads and server.next_article_search > now:
                    continue

                if server.next_busy_threads_check < now:
                    server.next_busy_threads_check = now + _SERVER_CHECK_DELAY
                    for nw in server.busy_threads[:]:
                        if (nw.nntp and nw.nntp.error_msg) or (nw.timeout and now > nw.timeout):
                            if nw.nntp and nw.nntp.error_msg:
                                # Already showed error
                                self.__reset_nw(nw)
                            else:
                                self.__reset_nw(nw, "timed out", warn=True)
                            server.bad_cons += 1
                            self.maybe_block_server(server)

                if server.restart:
                    if not server.busy_threads:
                        newid = server.newid
                        server.stop()
                        self.servers.remove(server)
                        if newid:
                            self.init_server(None, newid)
                        self.server_restarts -= 1
                        # Have to leave this loop, because we removed element
                        break
                    else:
                        # Restart pending, don't add new articles
                        continue

                if (
                    not server.idle_threads
                    or self.is_paused()
                    or self.shutdown
                    or self.paused_for_postproc
                    or not server.active
                ):
                    continue

                for nw in server.idle_threads[:]:
                    if nw.timeout:
                        if now < nw.timeout:
                            continue
                        else:
                            nw.timeout = None

                    if not server.info:
                        # Only request info if there's stuff in the queue
                        if not sabnzbd.NzbQueue.is_empty():
                            self.maybe_block_server(server)
                            server.request_info()
                        break

                    # Get article from pre-fetched ones or fetch new ones
                    if server.article_queue:
                        article = server.article_queue.pop(0)
                    else:
                        # Pre-fetch new articles
                        server.article_queue = sabnzbd.NzbQueue.get_articles(
                            server, self.servers, max(1, server.threads // 4)
                        )
                        if server.article_queue:
                            article = server.article_queue.pop(0)

                            # Mark expired articles as tried on this server
                            if server.retention and article.nzf.nzo.avg_stamp < now - server.retention:
                                self.decode(article, None)
                                while server.article_queue:
                                    self.decode(server.article_queue.pop(), None)
                                # Move to the next server, allowing the next server to already start
                                # fetching the articles that were too old for this server
                                break
                        else:
                            # Skip this server for a short time
                            server.next_article_search = now + _SERVER_CHECK_DELAY
                            break

                    server.idle_threads.remove(nw)
                    server.busy_threads.append(nw)
                    nw.article = article

                    if nw.connected:
                        self.__request_article(nw)
                    else:
                        try:
                            logging.info("%s@%s: Initiating connection", nw.thrdnum, server.host)
                            nw.init_connect()
                        except:
                            logging.error(
                                T("Failed to initialize %s@%s with reason: %s"),
                                nw.thrdnum,
                                server.host,
                                sys.exc_info()[1],
                            )
                            self.__reset_nw(nw, "failed to initialize", warn=True)

            if self.force_disconnect or self.shutdown:
                for server in self.servers:
                    for nw in server.idle_threads + server.busy_threads:
                        # Send goodbye if we have open socket
                        if nw.nntp:
                            self.__reset_nw(
                                nw, "forcing disconnect", wait=False, count_article_try=False, send_quit=True
                            )
                    # Make sure server address resolution is refreshed
                    server.info = None
                    server.reset_article_queue()
                self.force_disconnect = False

                # Make sure we update the stats
                BPSMeter.update()

                # Exit-point
                if self.shutdown:
                    logging.info("Shutting down")
                    break

            # Use select to find sockets ready for reading/writing
            readkeys = self.read_fds.keys()
            if readkeys:
                read, _, _ = select.select(readkeys, (), (), 1.0)

                # Add a sleep if there are too few results compared to the number of active connections
                if self.sleep_time:
                    if can_be_slowed and len(read) < 1 + len(readkeys) / 10:
                        time.sleep(self.sleep_time)

                    # Initialize by waiting for stable speed and then enable sleep
                    if can_be_slowed is None or can_be_slowed_timer:
                        # Wait for stable speed to start testing
                        if not can_be_slowed_timer and now > next_stable_speed_check:
                            if BPSMeter.get_stable_speed(timespan=10):
                                can_be_slowed_timer = now + 8
                                can_be_slowed = 1
                            else:
                                next_stable_speed_check = now + _BPSMETER_UPDATE_DELAY

                        # Check 10 seconds after enabling slowdown
                        if can_be_slowed_timer and now > can_be_slowed_timer:
                            # Now let's check if it was stable in the last 10 seconds
                            can_be_slowed = BPSMeter.get_stable_speed(timespan=10)
                            can_be_slowed_timer = 0
                            if not can_be_slowed:
                                self.sleep_time = 0
                            logging.debug("Downloader-slowdown: %r", can_be_slowed)
            else:
                read = []
                BPSMeter.reset()

                time.sleep(1.0)

                # Nothing to read: wait until there is work again
                with DOWNLOADER_CV:
                    while (
                        (sabnzbd.NzbQueue.is_empty() or self.is_paused() or self.paused_for_postproc)
                        and not self.shutdown
                        and not self.force_disconnect
                        and not self.server_restarts
                    ):
                        DOWNLOADER_CV.wait()

            if now > next_bpsmeter_update:
                BPSMeter.update()
                next_bpsmeter_update = now + _BPSMETER_UPDATE_DELAY

            if not read:
                continue

            for selected in read:
                nw = self.read_fds[selected]
                article = nw.article
                server = nw.server

                try:
                    bytes_received, done, skip = nw.recv_chunk()
                except:
                    bytes_received, done, skip = (0, False, False)

                if skip:
                    continue

                if bytes_received < 1:
                    self.__reset_nw(nw, "server closed connection", wait=False)
                    continue
                else:
                    try:
                        article.nzf.nzo.update_download_stats(BPSMeter.bps, server.id, bytes_received)
                    except AttributeError:
                        # In case nzf has disappeared because the file was deleted before the update could happen
                        pass

                BPSMeter.update(server.id, bytes_received)

                if self.bandwidth_limit:
                    if BPSMeter.bps + BPSMeter.sum_cached_amount > self.bandwidth_limit:
                        BPSMeter.update()
                        while BPSMeter.bps > self.bandwidth_limit:
                            time.sleep(0.01)
                            BPSMeter.update()

                if nw.status_code != 222 and not done:
                    if not nw.connected or nw.status_code == 480:
                        try:
                            nw.finish_connect(nw.status_code)
                            if sabnzbd.LOG_ALL:
                                logging.debug(
                                    "%s@%s last message -> %s", nw.thrdnum, nw.server.host, nntp_to_msg(nw.data)
                                )
                            nw.clear_data()
                        except NNTPPermanentError as error:
                            # Handle login problems
                            block = False
                            penalty = 0
                            msg = error.response
                            ecode = int_conv(msg[:3])
                            display_msg = " [%s]" % msg
                            logging.debug("Server login problem: %s, %s", ecode, msg)
                            if ecode in (502, 400, 481, 482) and clues_too_many(msg):
                                # Too many connections: remove this thread and reduce thread-setting for server
                                # Plan to go back to the full number after a penalty timeout
                                if server.active:
                                    errormsg = T("Too many connections to server %s") % display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T("Too many connections to server %s"), server.host)
                                    # Don't count this for the tries (max_art_tries) on this server
                                    self.__reset_nw(nw, send_quit=True)
                                    self.plan_server(server, _PENALTY_TOOMANY)
                                    server.threads -= 1
                            elif ecode in (502, 481, 482) and clues_too_many_ip(msg):
                                # Account sharing?
                                if server.active:
                                    errormsg = T("Probable account sharing") + display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        name = " (%s)" % server.host
                                        logging.warning(T("Probable account sharing") + name)
                                penalty = _PENALTY_SHARE
                                block = True
                            elif ecode in (452, 481, 482, 381) or (ecode == 502 and clues_login(msg)):
                                # Cannot login, block this server
                                if server.active:
                                    errormsg = T("Failed login for server %s") % display_msg
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.error(T("Failed login for server %s"), server.host)
                                penalty = _PENALTY_PERM
                                block = True
                            elif ecode in (502, 482):
                                # Cannot connect (other reasons), block this server
                                if server.active:
                                    errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T("Cannot connect to server %s [%s]"), server.host, msg)
                                if clues_pay(msg):
                                    penalty = _PENALTY_PERM
                                else:
                                    penalty = _PENALTY_502
                                block = True
                            elif ecode == 400:
                                # Temp connection problem?
                                if server.active:
                                    logging.debug("Unspecified error 400 from server %s", server.host)
                                penalty = _PENALTY_VERYSHORT
                                block = True
                            else:
                                # Unknown error, just keep trying
                                if server.active:
                                    errormsg = T("Cannot connect to server %s [%s]") % ("", display_msg)
                                    if server.errormsg != errormsg:
                                        server.errormsg = errormsg
                                        logging.warning(T("Cannot connect to server %s [%s]"), server.host, msg)
                                penalty = _PENALTY_UNKNOWN
                                block = True

                            if block or (penalty and server.optional):
                                retry_article = False
                                if server.active:
                                    if server.required:
                                        sabnzbd.Scheduler.plan_required_server_resume()
                                        retry_article = True
                                    else:
                                        server.deactivate()
                                    if penalty and (block or server.optional):
                                        self.plan_server(server, penalty)
                                # Note that the article is discarded for this server if the server is not required
                                self.__reset_nw(nw, retry_article=retry_article, send_quit=True)
                            continue
                        except:
                            logging.error(
                                T("Connecting %s@%s failed, message=%s"),
                                nw.thrdnum,
                                nw.server.host,
                                nntp_to_msg(nw.data),
                            )
                            # No reset-warning needed, above logging is sufficient
                            self.__reset_nw(nw, retry_article=False)

                        if nw.connected:
                            logging.info("Connecting %s@%s finished", nw.thrdnum, nw.server.host)
                            self.__request_article(nw)
                    elif nw.status_code == 223:
                        done = True
                        logging.debug("Article <%s> is present", article.article)
                    elif nw.status_code == 211:
                        logging.debug("group command ok -> %s", nntp_to_msg(nw.data))
                        nw.group = nw.article.nzf.nzo.group
                        nw.clear_data()
                        self.__request_article(nw)
                    elif nw.status_code in (411, 423, 430):
                        done = True
                        logging.debug(
                            "Thread %s@%s: Article %s missing (error=%s)",
                            nw.thrdnum,
                            nw.server.host,
                            article.article,
                            nw.status_code,
                        )
                        nw.clear_data()
                    elif nw.status_code == 500:
                        if article.nzf.nzo.precheck:
                            # Assume "STAT" command is not supported
                            server.have_stat = False
                            logging.debug("Server %s does not support STAT", server.host)
                        else:
                            # Assume "BODY" command is not supported
                            server.have_body = False
                            logging.debug("Server %s does not support BODY", server.host)
                        nw.clear_data()
                        self.__request_article(nw)

                if done:
                    # Successful data, clear "bad" counter
                    server.bad_cons = 0
                    server.errormsg = server.warning = ""
                    if sabnzbd.LOG_ALL:
                        logging.debug("Thread %s@%s: %s done", nw.thrdnum, server.host, article.article)
                    self.decode(article, nw.data)

                    # Reset connection for new activity
                    nw.soft_reset()
                    server.busy_threads.remove(nw)
                    server.idle_threads.append(nw)
                    self.remove_socket(nw)

    def __reset_nw(
        self,
        nw: NewsWrapper,
        reset_msg: Optional[str] = None,
        warn: bool = False,
        wait: bool = True,
        count_article_try: bool = True,
        retry_article: bool = True,
        send_quit: bool = False,
    ):
        """Return the connection to the idle pool and decide the fate of its article"""
        # Some warnings are errors, and not added as server.warning
        if warn and reset_msg:
            nw.server.warning = reset_msg
            logging.info("Thread %s@%s: %s", nw.thrdnum, nw.server.host, reset_msg)
        elif reset_msg:
            logging.debug("Thread %s@%s: %s", nw.thrdnum, nw.server.host, reset_msg)

        # Make sure this NewsWrapper is in the idle threads
        if nw in nw.server.busy_threads:
            nw.server.busy_threads.remove(nw)
        if nw not in nw.server.idle_threads:
            nw.server.idle_threads.append(nw)

        # Make sure it is not in the readable sockets
        self.remove_socket(nw)

        if nw.article:
            # Only some errors should count towards the total tries for each server
            if count_article_try:
                nw.article.tries += 1

            # Do we discard, or try again for this server
            if not retry_article or nw.article.tries > cfg.max_art_tries():
                # Too many tries on this server, consider article missing
                self.decode(nw.article, None)
                nw.article.tries = 0
            else:
                # Retry again with the same server
                logging.debug(
                    "Re-adding article %s from %s to server %s",
                    nw.article.article,
                    nw.article.nzf.filename,
                    nw.article.fetcher,
                )
                nw.article.fetcher.article_queue.append(nw.article)

        # Reset connection object
        nw.hard_reset(wait, send_quit=send_quit)

        # Empty SSL info, it might change on next connect
        nw.server.ssl_info = ""

    def __request_article(self, nw: NewsWrapper):
        """Send the next command (GROUP or BODY/STAT) for the article on this connection"""
        try:
            nzo = nw.article.nzf.nzo
            if nw.server.send_group and nzo.group != nw.group:
                group = nzo.group
                if sabnzbd.LOG_ALL:
                    logging.debug("Thread %s@%s: GROUP <%s>", nw.thrdnum, nw.server.host, group)
                nw.send_group(group)
            else:
                if sabnzbd.LOG_ALL:
                    logging.debug("Thread %s@%s: BODY %s", nw.thrdnum, nw.server.host, nw.article.article)
                nw.body()
            # Mark as ready to be read
            self.read_fds[nw.nntp.fileno] = nw
        except socket.error as err:
            logging.info("Looks like server closed connection: %s", err)
            self.__reset_nw(nw, "server broke off connection", warn=True, send_quit=False)
        except:
            logging.error(T("Suspect error in downloader"))
            logging.info("Traceback: ", exc_info=True)
            self.__reset_nw(nw, "server broke off connection", warn=True, send_quit=False)

    # ------------------------------------------------------------------------------
    # Timed restart of servers admin.
    # For each server all planned events are kept in a list.
    # When the first timer of a server fires, all other existing timers
    # are neutralized.
    # Each server has a dictionary entry, consisting of a list of timestamps.

    @synchronized(TIMER_LOCK)
    def plan_server(self, server: Server, interval: int):
        """Plan the restart of a server in 'interval' minutes"""
        if cfg.no_penalties() and interval > _PENALTY_SHORT:
            # Overwrite in case of no_penalties
            interval = _PENALTY_SHORT

        logging.debug("Set planned server resume %s in %s mins", server.host, interval)
        if server.id not in self.timers:
            self.timers[server.id] = []
        stamp = time.time() + 60.0 * interval
        self.timers[server.id].append(stamp)
        if interval:
            sabnzbd.Scheduler.plan_server(self.trigger_server, [server.id, stamp], interval)

    @synchronized(TIMER_LOCK)
    def trigger_server(self, server_id: str, timestamp: float):
        """Called by scheduler, start server if timer still valid"""
        logging.debug("Trigger planned server resume for server-id %s", server_id)
        if server_id in self.timers:
            if timestamp in self.timers[server_id]:
                del self.timers[server_id]
                self.init_server(server_id, server_id)

    @NzbQueueLocker
    @synchronized(TIMER_LOCK)
    def unblock(self, server_id: str):
        """Remove pending timers for this server and re-activate it if needed"""
        # Remove timer
        try:
            del self.timers[server_id]
        except KeyError:
            pass

        # Activate server if it was inactive
        for server in self.servers:
            if server.id == server_id and not server.active:
                logging.debug("Unblock server %s", server.host)
                self.init_server(server_id, server_id)
                break

    def unblock_all(self):
        """Unblock all servers that have pending timers"""
        # Iterate over a copy: unblock() deletes entries from self.timers and
        # mutating a dict while iterating its keys view raises RuntimeError
        for server_id in list(self.timers):
            self.unblock(server_id)

    @NzbQueueLocker
    @synchronized(TIMER_LOCK)
    def check_timers(self):
        """Make sure every server without a non-expired timer is active"""
        # Clean expired timers
        now = time.time()
        kicked = []
        # Create a copy so we can remove during iteration
        for server_id in list(self.timers):
            if not [stamp for stamp in self.timers[server_id] if stamp >= now]:
                logging.debug("Forcing re-evaluation of server-id %s", server_id)
                del self.timers[server_id]
                self.init_server(server_id, server_id)
                kicked.append(server_id)

        # Activate every inactive server without an active timer
        for server in self.servers:
            if server.id not in self.timers:
                if server.id not in kicked and not server.active:
                    logging.debug("Forcing activation of server %s", server.host)
                    self.init_server(server.id, server.id)

    def update_server(self, oldserver: str, newserver: Optional[str]):
        """Update the server and make sure we trigger
        the update in the loop to do housekeeping"""
        self.init_server(oldserver, newserver)
        self.wakeup()

    @NzbQueueLocker
    def wakeup(self):
        """Just rattle the semaphore"""
        pass

    @NzbQueueLocker
    def stop(self):
        """Shutdown, wrapped so the semaphore is notified"""
        self.shutdown = True
        sabnzbd.notifier.send_notification("SABnzbd", T("Shutting down"), "startup")
def clues_login(text: str) -> bool:
    """Return True when the server response hints at a login/authentication failure."""
    lowered = text.lower()
    return any(clue in lowered for clue in ("username", "password", "invalid", "authen", "access denied"))
def clues_too_many(text: str) -> bool:
    """Return True when the response hints at a connection-count limit.

    Responses mentioning 'download' or 'byte' are excluded, so a
    'download limit exceeded' error is not mistaken for a thread limit.
    """
    lowered = text.lower()
    if "download" in lowered or "byte" in lowered:
        # Not a connection limit, but a data/quota limit
        return False
    return any(clue in lowered for clue in ("exceed", "connections", "too many", "threads", "limit"))
def clues_too_many_ip(text: str) -> bool:
    """Return True when the response hints that the account is used from multiple IPs."""
    lowered = text.lower()
    return any(clue in lowered for clue in ("simultaneous ip", "multiple ip"))
def clues_pay(text: str) -> bool:
    """Return True when the response hints at a payment, credit or quota problem."""
    lowered = text.lower()
    return any(clue in lowered for clue in ("credits", "paym", "expired", "exceeded"))
def check_server_expiration():
    """Check if user should get warning about server date expiration"""
    for server in config.get_servers().values():
        expire_date = server.expire_date()
        if not expire_date:
            continue
        # Days until expiration, rounded up
        expire_stamp = time.mktime(time.strptime(expire_date, "%Y-%m-%d"))
        days_to_expire = ceil((expire_stamp - time.time()) / (60 * 60 * 24))
        # Notify from 5 days in advance
        if days_to_expire < 6:
            logging.warning(T("Server %s is expiring in %s day(s)"), server.displayname(), days_to_expire)
            # Reset on the day of expiration
            if days_to_expire <= 0:
                server.expire_date.set("")
                config.save_config()
def check_server_quota():
    """Check quota on servers"""
    for srv, server in config.get_servers().items():
        if not server.quota():
            continue
        # Usage before this session counts towards the quota as well
        available = server.quota.get_int() + server.usage_at_start()
        if available < sabnzbd.BPSMeter.grand_total.get(srv, 0):
            logging.warning(T("Server %s has used the specified quota"), server.displayname())
            server.quota.set("")
            config.save_config()
|
multithreading_tut.py
|
import threading
def threadsafe_generator(f):
    """Decorator: make generator function f return a lock-protected iterator."""
    def wrapper(*args, **kwargs):
        return threadsafe_iter(f(*args, **kwargs))
    return wrapper
class threadsafe_iter:
    """Serialize access to an iterator so multiple threads can share it.

    Each advance holds a lock, so two threads never run the wrapped
    generator's body concurrently.
    """
    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def next(self):
        with self.lock:
            # Use builtin next() so this works on Python 3 iterators too,
            # which expose __next__ instead of the Python 2 .next() method.
            return next(self.it)

    # Python 3 iterator protocol alias (keeps .next() for Python 2 callers).
    __next__ = next
@threadsafe_generator
def count():
    """Yield 1, 2, 3, ... forever; the decorator makes iteration thread-safe."""
    n = 0
    while True:
        n += 1
        yield n
class Counter:
    """Lock-protected incrementing counter usable as an iterator."""
    def __init__(self):
        self.i = 0
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def next(self):
        with self.lock:
            self.i += 1
            return self.i

    # Python 3 iterator protocol alias (keeps .next() for Python 2 callers).
    __next__ = next
def loop(func, n):
    """Call func() exactly n times."""
    for _ in range(n):
        func()
def run(f, repeats=1000, nthreads=10):
    """Execute f() `repeats` times in each of `nthreads` threads; wait for all."""
    workers = [
        threading.Thread(target=loop, args=(f, repeats))
        for _ in range(nthreads)
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def main():
    """Hammer both counter implementations from two threads, then print them."""
    gen_counter = count()
    obj_counter = Counter()
    run(gen_counter.next, repeats=100000, nthreads=2)
    print("c1", gen_counter.next())
    run(obj_counter.next, repeats=100000, nthreads=2)
    print("c2", obj_counter.next())
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
|
TCPServer.py
|
import socket
import threading
# Listen on all interfaces on port 9999.
bind_ip = "0.0.0.0"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Passing in the IP address and port we want to listen on
server.bind((bind_ip, bind_port))
#Tell the server to start listening with a maximum backlog of 5 connections
server.listen(5)
# print as a function call: valid on both Python 2 and Python 3
# (the original Python 2 print statement is a SyntaxError on Python 3).
print("[*] Listening on %s:%d" % (bind_ip, bind_port))
# This is our our client-handling thread
def handle_client(client_socket):
    """Handle one client: read a single request, reply with ACK, close."""
    #Performs the recv function (reads up to 1024 bytes)
    request = client_socket.recv(1024)
    #Print out what the client sends (print() works on Python 2 and 3)
    print("[*] Received %s" % request)
    #Send back a packet; bytes literal so this also works on Python 3,
    #where socket.send() rejects str.
    client_socket.send(b"ACK!")
    client_socket.close()
while True:
    #When the client connects, save the client socket into client variable
    #Save the remote connection details into the addr variable
    client, addr = server.accept()
    # print as a function call so this also runs on Python 3
    print("[*] Accepted connection from: %s:%d" % (addr[0], addr[1]))
    #Spin up our client thread to handle incoming data
    #Then our server is ready to handle another connection
    client_handler = threading.Thread(target=handle_client, args=(client,))
    #Start the client handler
    client_handler.start()
|
security.py
|
from threading import Thread
from mail import send_email
import datetime
import time
import cv2
class Security(Thread):
    """Background thread that watches a video source, records clips while an
    object is detected, and periodically emails a snapshot.

    NOTE(review): assumes `video` exposes `models` (name -> classifier path)
    and `get_object(classifier, flip)` -> (frame, jpeg, found) — confirm
    against the video helper class.
    """
    def __init__(self, video):
        super().__init__()
        self.video = video
        self.classifier = None  # cv2.CascadeClassifier; set via set_classifier()
        self.flip = False  # whether frames are flipped before detection
        self.save_video = True  # record clips when something is found
        self.out = None  # cv2.VideoWriter while recording, else None
        self.recording = False
        self.record_time_after_found = 10.0  # seconds to keep recording after last hit
        self.email_update = True  # email a snapshot on detection
        self.email_update_interval = 600.0  # min seconds between emails
        self.running = False  # run() loop flag; cleared by stop()

    def set_classifier(self, name):
        """Select a cascade classifier by model name; unknown names are ignored."""
        if name in self.video.models.keys():
            self.classifier = cv2.CascadeClassifier(self.video.models.get(name))

    def stop(self):
        """Ask the run() loop to exit after its current iteration."""
        self.running = False

    def run(self):
        """Main surveillance loop: detect, record, and email until stop()."""
        self.running = True
        last_epoch = 0  # time the last email was sent
        last_found = 0  # time of the last positive detection
        while self.running:
            frame, jpeg, found_obj = self.video.get_object(self.classifier, self.flip)
            if self.save_video:
                if found_obj:
                    last_found = time.time()
                    if not self.recording:
                        # Clip file named after the current timestamp.
                        self.start_recording(datetime.datetime.today().strftime('%Y-%m-%d-%H:%M:%S.avi'))
                elif self.recording and (time.time() - last_found) > self.record_time_after_found:
                    # Nothing seen for a while: close the current clip.
                    self.stop_recording()
                if self.recording:
                    self.record_frame(frame)
            if self.email_update:
                if found_obj and (time.time() - last_epoch) > self.email_update_interval:
                    last_epoch = time.time()
                    # Send in a separate thread so the capture loop is not blocked.
                    Thread(target=Security.send_mail, args=(jpeg,)).start()

    def change_classifier(self, classifier):
        """Replace the classifier with one loaded from a file path."""
        self.classifier = cv2.CascadeClassifier(classifier)

    def record_frame(self, frame):
        """Append a frame to the open video writer, if any."""
        if self.out:
            self.out.write(frame)

    def start_recording(self, file, frames=5, size=(640, 480)):
        """Open a video writer for `file` and mark recording as active."""
        self.recording = True
        self.out = cv2.VideoWriter(file, cv2.VideoWriter_fourcc(*"MJPG"), frames, size)
        print("Started recording to {} at {} frames with {} resolution".format(file, frames, size))

    def stop_recording(self):
        """Close the video writer and mark recording as stopped."""
        if self.recording:
            self.recording = False
            self.out.release()
            print("Stopped recording")

    @staticmethod
    def send_mail(frame):
        """Email the given jpeg snapshot; failures are printed, not raised."""
        try:
            print("Sending email...")
            send_email(frame)
            print("done!")
        except Exception as e:
            print("Error sending email: ", str(e))
|
Threader.py
|
import subprocess
import multiprocessing
import time
import atexit
import os
import sys
#from __future__ import print_function
def run(fs):
    """Execute each callable in fs sequentially, logging thread bookkeeping."""
    print("Running Thread ID: " + repr(len(threads) + 1))
    print(("Total Running Threads: " + repr(threads.n_alive())))
    for fn in fs:
        fn()
    # Recompute the id here on purpose: `threads` may have grown while running.
    print("Thread ID " + repr(len(threads) + 1) + " done!")
#Need to do more with thread safety
class Threads:
    """
    Class for managing threads, killing them on program exit.

    Tracks Threader instances (each may own a subprocess.Popen in `sub_p`
    and/or a multiprocessing.Process in `multi_p`) and caps the number of
    concurrently running workers.
    """
    def __init__(self):
        self.thread_classes = []
        # Leave one core free for the main process by default.
        self.total_allowed_threads = multiprocessing.cpu_count() - 1
        #atexit.register(self.kill_all); Disable default use of this for app-specific uses.

    def set_allowed_threads(self, n):
        """Override the maximum number of concurrently running workers."""
        self.total_allowed_threads = n

    def kill_all(self):
        """Terminate every worker that is still running."""
        for th in self.thread_classes:
            # poll()/exitcode are None while still running. The previous
            # `poll() != 0` check also "killed" processes that had already
            # exited with a nonzero code (risking a signal to a reused pid).
            if th.sub_p and th.sub_p.poll() is None:
                th.sub_p.kill()
            if th.multi_p and th.multi_p.exitcode is None:
                th.multi_p.terminate()

    def append(self, thread):
        """Register a Threader instance for tracking."""
        self.thread_classes.append(thread)

    def __len__(self):
        return len(self.thread_classes)

    def n_alive(self):
        """Number of tracked processes (sub and multi) still running."""
        alive = 0
        for th in self.thread_classes:
            if th.sub_p and th.sub_p.poll() is None:
                alive += 1
            if th.multi_p and th.multi_p.exitcode is None:
                alive += 1
        return alive

    def is_alive(self, pid):
        """True if the worker at index pid still has a running process."""
        th = self.thread_classes[pid]
        if th.sub_p and th.sub_p.poll() is None:
            return True
        elif th.multi_p and th.multi_p.exitcode is None:
            return True
        else:
            return False

    def get_exitcode(self, pid):
        """Exit code of the worker at index pid (None while running)."""
        th = self.thread_classes[pid]
        if th.sub_p:
            return th.sub_p.poll()
        if th.multi_p:
            return th.multi_p.exitcode

    def new_thread_allowed(self):
        """True while the running-worker count is below the allowed cap.

        Uses `<` instead of the previous `==` so an over-subscribed registry
        can never spuriously allow more work.
        """
        return self.n_alive() < self.total_allowed_threads
# Module-level registry shared by all Threader instances.
# (The previous `global threads` statement was a no-op at module scope.)
threads = Threads()
class Threader(object):
    """
    Class for starting 2 new threads. One that runs a system process and one that waits and prints info to std::out or whatever you currently have set as std::out.
    Use print interval to set the wait time between prints.
    Useful for GUI subprocessing.
    """
    def __init__(self, print_interval = 0):
        self.print_interval = print_interval
        self.sub_p = None; #A subprocess.Popen object (set by run_system_command)
        self.multi_p = None; #A multiprocessing.Process object (set by either run method)
    def run_system_command(self, command):
        """
        Run a system command using Popen. Prints out at end. Probably should remove this.
        :param command: shell command string, run with shell=True
        :return: (subprocess.Popen, multiprocessing.Process) pair
        """
        def start():
            ##You absolutely NEED to use PIPE for standard out or you will hang.
            # NOTE(review): shell=True with a raw command string — only safe
            # for trusted, locally-built commands.
            p = subprocess.Popen(command, shell = True, stdout=subprocess.PIPE,stderr=subprocess.PIPE)
            # Companion process that waits on p and prints its output at the end.
            wait_p = multiprocessing.Process(target=print_loop, args=(p, self.print_interval))
            wait_p.start()
            self.sub_p = p; self.multi_p = wait_p
            threads.append(self)
            print(("Running Thread ID: "+repr(len(threads)+1)+" "+command))
            print(("Total Running Threads: "+repr(threads.n_alive())))
            return p, wait_p
        if threads.new_thread_allowed():
            return start()
        else:
            print("Too many threads running. Waiting to start next...")
            # NOTE(review): busy-wait burns a full core until a slot frees up.
            while not threads.new_thread_allowed():
                pass
            return start()
    def run_functions(self, functions):
        """
        Run a bunch of lambda functions together with multiprocessing
        :param functions: iterable of zero-argument callables
        :return: None
        """
        if threads.new_thread_allowed():
            p = multiprocessing.Process(target=run, args = (functions,))
        else:
            # NOTE(review): busy-wait; the freed slot may also be taken again
            # before p.start() below.
            while not threads.new_thread_allowed():
                pass
            p = multiprocessing.Process(target=run, args = (functions,))
        p.start()
        self.multi_p = p
        threads.append(self)
def print_loop(p, print_interval=0):
    """Wait for subprocess p to finish, then print its stdout and stderr.

    :param p: subprocess.Popen with stdout/stderr=PIPE, or None
    :param print_interval: optional delay (seconds) before waiting on p
    """
    if p:
        if print_interval:
            time.sleep(print_interval)
        try:
            # communicate() blocks until the process exits and returns
            # (stdout, stderr) — the original unpacked the pair swapped.
            # It may only be called once, so the old re-entrant while-loop
            # (which kept looping on exit code 0) triggered the ValueError
            # it then had to paper over.
            out, err = p.communicate()
            if out:
                print(out)
            if err:
                print(err)
        except ValueError:
            # communicate() on an already-reaped process: treat as finished.
            p.returncode = 0
    print("Process done!")
    print(repr(threads.n_alive())+" still running...")
def test_function(i, extra = ""):
print(extra+" "+repr(i)+"\n")
time.sleep(3+i)
print(extra+" "+repr(i)+"done\n")
def test_threading():
    """Smoke-test Threader.run_functions with three batches of callables."""
    batch_one = [
        lambda: test_function(1, "one: "),
        lambda: os.system("echo This works baby!; echo $PATH"),
    ]
    batch_two = [lambda: test_function(3, "three: ")]
    batch_three = [lambda: test_function(5, "five: ")]
    threader = Threader()
    threader.run_functions(batch_one)
    threader.run_functions(batch_two)
    threader.run_functions(batch_three)
if __name__ == "__main__":
    # The body previously duplicated test_threading() statement-for-statement;
    # call it instead of keeping two copies in sync.
    test_threading()
|
__init__.py
|
from threading import Thread
from flask import Flask
from flask_migrate import Migrate
from flask_sqlalchemy import SQLAlchemy
# Shared extension instances; bound to the app inside create_app().
db = SQLAlchemy()
migrate = Migrate()
def create_app():
    """Application factory: build, configure, and return the Flask app."""
    app = Flask(__name__)
    from . import config
    app.config.from_object(config)
    # database init
    # Imported for side effects: model classes must register with SQLAlchemy
    # before init_app/migrate run.
    __import__("app.models")
    db.init_app(app)
    migrate.init_app(app, db)
    # blueprint init
    from . import views
    for view in views.__all__:
        # __import__("app.views.X") returns the top-level "app" package, so
        # walk down app -> views -> <view module> -> bp to get the blueprint.
        app.register_blueprint(getattr(getattr(getattr(__import__(f"app.views.{view}"), "views"), view), "bp"))
    # background task (daemon so it cannot keep the process alive on shutdown)
    from . import task
    Thread(target=task.core, args=(app,), daemon=True).start()
    # register error handler
    from .error import error_map
    for code in error_map:
        app.register_error_handler(code, error_map[code])
    return app
|
chooser.py
|
import argparse
try:
import Tkinter
except ImportError:
import tkinter as Tkinter
import yaml
import os
import subprocess
import re
import time
import threading
if __name__ == '__main__':
    # Command-line setup: where the yaml configs and firmware images live.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--configpath",
        type=str,
        default=os.path.join(os.path.dirname(os.path.realpath(__file__)), "../launch/"),
        help="Path to the configuration *.yaml files")
    parser.add_argument(
        "--stm32Fw",
        type=str,
        default=os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../crazyflie-firmware/cf2.bin"),
        help="Path to cf2.bin")
    parser.add_argument(
        "--nrf51Fw",
        type=str,
        default=os.path.join(os.path.dirname(os.path.realpath(__file__)), "../../../../crazyflie2-nrf-firmware/cf2_nrf.bin"),
        help="Path to cf2_nrf.bin")
    args = parser.parse_args()
    # All three yaml files are required; missing firmware only matters for flashing.
    if not os.path.exists(os.path.join(args.configpath, "allCrazyflies.yaml")) or \
       not os.path.exists(os.path.join(args.configpath, "crazyflieTypes.yaml")) or \
       not os.path.exists(os.path.join(args.configpath, "crazyflies.yaml")):
        print("ERROR: Could not find all yaml configuration files in configpath ({}).".format(args.configpath))
        exit()
    if not os.path.exists(args.stm32Fw):
        print("WARNING: Could not find STM32 firmware ({}).".format(args.stm32Fw))
    if not os.path.exists(args.nrf51Fw):
        print("WARNING: Could not find NRF51 firmware ({}).".format(args.nrf51Fw))
# read a yaml file
def read_by_id(path):
by_id = {}
with open(path, 'r') as ymlfile:
root = yaml.load(ymlfile)
for node in root["crazyflies"]:
id = int(node["id"])
by_id[id] = node
return by_id
def selected_cfs():
nodes = [node for id, node in allCrazyflies.items() if widgets[id].checked.get()]
return nodes
def save():
nodes = selected_cfs()
with open(os.path.join(args.configpath, "crazyflies.yaml"), 'w') as outfile:
yaml.dump({"crazyflies": nodes}, outfile)
allCrazyflies = read_by_id(os.path.join(args.configpath, "allCrazyflies.yaml"))
enabled = read_by_id(os.path.join(args.configpath, "crazyflies.yaml")).keys()
with open(os.path.join(args.configpath, "crazyflieTypes.yaml"), 'r') as ymlfile:
data = yaml.load(ymlfile)
cfTypes = data["crazyflieTypes"]
    # compute absolute pixel coordinates from the initial positions
    positions = [node["initialPosition"] for node in allCrazyflies.values()]
    # Screen-space basis vectors: world axes are negated so the layout on
    # screen mirrors the flight-space arrangement.
    DOWN_DIR = [-1, 0]
    RIGHT_DIR = [0, -1]
    def dot(a, b):
        """2-D dot product of sequences a and b."""
        return a[0] * b[0] + a[1] * b[1]
    # 120 px per position unit — presumably meters; TODO confirm scale.
    pixel_x = [120 * dot(pos, RIGHT_DIR) for pos in positions]
    pixel_y = [120 * dot(pos, DOWN_DIR) for pos in positions]
    xmin, ymin = min(pixel_x), min(pixel_y)
    xmax, ymax = max(pixel_x), max(pixel_y)
    # construct the main window
    top = Tkinter.Tk()
    top.title('Crazyflie Chooser')
    # construct the frame containing the absolute-positioned checkboxes
    width = xmax - xmin + 50 # account for checkbox + text width
    height = ymax - ymin + 50 # account for checkbox + text height
    frame = Tkinter.Frame(top, width=width, height=height)
    class CFWidget(Tkinter.Frame):
        """Checkbox + name + battery/version labels for one Crazyflie."""
        def __init__(self, parent, name):
            Tkinter.Frame.__init__(self, parent)
            self.checked = Tkinter.BooleanVar()
            # Saving on every toggle keeps crazyflies.yaml in sync with the UI.
            checkbox = Tkinter.Checkbutton(self, variable=self.checked, command=save,
                padx=0, pady=0)
            checkbox.grid(row=0, column=0, sticky='E')
            nameLabel = Tkinter.Label(self, text=name, padx=0, pady=0)
            nameLabel.grid(row=0, column=1, sticky='W')
            self.batteryLabel = Tkinter.Label(self, text="", fg="#999999", padx=0, pady=0)
            self.batteryLabel.grid(row=1, column=0, columnspan=2, sticky='E')
            self.versionLabel = Tkinter.Label(self, text="", fg="#999999", padx=0, pady=0)
            self.versionLabel.grid(row=2, column=0, columnspan=2, sticky='E')
    # construct all the checkboxes, placed at their computed pixel positions
    widgets = {}
    for (id, node), x, y in zip(allCrazyflies.items(), pixel_x, pixel_y):
        w = CFWidget(frame, str(id))
        w.place(x = x - xmin, y = y - ymin)
        # Pre-check the box when the CF is already listed in crazyflies.yaml.
        w.checked.set(id in enabled)
        widgets[id] = w
    # dragging functionality - TODO alt-drag to deselect
    drag_start = None       # (x_root, y_root) where the current drag began
    drag_startstate = None  # checkbox states captured at drag start
def minmax(a, b):
return min(a, b), max(a, b)
    def mouseDown(event):
        """Record drag origin and snapshot checkbox states at drag start."""
        global drag_start, drag_startstate
        drag_start = (event.x_root, event.y_root)
        drag_startstate = [cf.checked.get() for cf in widgets.values()]
    def mouseUp(event):
        """Persist the selection when a drag (or plain click) ends."""
        save()
    def drag(event, select):
        """Set checkboxes inside the drag rectangle to `select`; restore the
        rest to their state captured at drag start."""
        x, y = event.x_root, event.y_root
        dragx0, dragx1 = minmax(drag_start[0], x)
        dragy0, dragy1 = minmax(drag_start[1], y)
        def dragcontains(widget):
            # Rectangle-overlap test in root (screen) coordinates.
            x0 = widget.winfo_rootx()
            y0 = widget.winfo_rooty()
            x1 = x0 + widget.winfo_width()
            y1 = y0 + widget.winfo_height()
            return not (x0 > dragx1 or x1 < dragx0 or y0 > dragy1 or y1 < dragy0)
        # depending on iteration over dicts being consistent (insertion order)
        for initial, cf in zip(drag_startstate, widgets.values()):
            if dragcontains(cf):
                cf.checked.set(select)
            else:
                cf.checked.set(initial)
    # Left drag selects, right drag deselects; release persists the result.
    top.bind('<ButtonPress-1>', mouseDown)
    top.bind('<ButtonPress-3>', mouseDown)
    top.bind('<B1-Motion>', lambda event: drag(event, True))
    top.bind('<B3-Motion>', lambda event: drag(event, False))
    top.bind('<ButtonRelease-1>', mouseUp)
    top.bind('<ButtonRelease-3>', mouseUp)
    # buttons for clearing/filling all checkboxes
    def clear():
        """Uncheck every Crazyflie and save."""
        for box in widgets.values():
            box.checked.set(False)
        save()
    def fill():
        """Check every Crazyflie and save."""
        for box in widgets.values():
            box.checked.set(True)
        save()
    def mkbutton(parent, name, command):
        """Add a left-packed button to parent."""
        button = Tkinter.Button(parent, text=name, command=command)
        button.pack(side='left')
    buttons = Tkinter.Frame(top)
    mkbutton(buttons, "Clear", clear)
    mkbutton(buttons, "Fill", fill)
    # construct bottom buttons for utility scripts
    # NOTE(review): commands below are built by string concatenation and run
    # with shell=True; inputs come from the local yaml config, so acceptable
    # here, but prefer argument lists if this ever takes untrusted input.
    def sysOff():
        """Power off (sysoff) every selected Crazyflie via crazyflie_tools."""
        nodes = selected_cfs()
        for crazyflie in nodes:
            id = "{0:02X}".format(crazyflie["id"])
            uri = "radio://0/{}/2M/E7E7E7E7{}".format(crazyflie["channel"], id)
            subprocess.call(["rosrun crazyflie_tools reboot --uri " + uri + " --mode sysoff"], shell=True)
    def reboot():
        """Reboot every selected Crazyflie."""
        nodes = selected_cfs()
        for crazyflie in nodes:
            id = "{0:02X}".format(crazyflie["id"])
            uri = "radio://0/{}/2M/E7E7E7E7{}".format(crazyflie["channel"], id)
            print(crazyflie["id"])
            subprocess.call(["rosrun crazyflie_tools reboot --uri " + uri], shell=True)
    def flashSTM():
        """Flash the STM32 firmware image to every selected Crazyflie."""
        nodes = selected_cfs()
        for crazyflie in nodes:
            id = "{0:02X}".format(crazyflie["id"])
            uri = "radio://0/{}/2M/E7E7E7E7{}".format(crazyflie["channel"], id)
            print("Flash STM32 FW to {}".format(uri))
            subprocess.call(["rosrun crazyflie_tools flash --uri " + uri + " --target stm32 --filename " + args.stm32Fw], shell=True)
    def flashNRF():
        """Flash the NRF51 firmware image to every selected Crazyflie."""
        nodes = selected_cfs()
        for crazyflie in nodes:
            id = "{0:02X}".format(crazyflie["id"])
            uri = "radio://0/{}/2M/E7E7E7E7{}".format(crazyflie["channel"], id)
            print("Flash NRF51 FW to {}".format(uri))
            subprocess.call(["rosrun crazyflie_tools flash --uri " + uri + " --target nrf51 --filename " + args.nrf51Fw], shell=True)
    def checkBattery():
        """Query each selected CF's battery voltage and color its label."""
        # reset color
        for id, w in widgets.items():
            w.batteryLabel.config(foreground='#999999')
        # query each CF
        nodes = selected_cfs()
        for crazyflie in nodes:
            id = "{0:02X}".format(crazyflie["id"])
            uri = "radio://0/{}/2M/E7E7E7E7{}".format(crazyflie["channel"], id)
            cfType = crazyflie["type"]
            bigQuad = cfTypes[cfType]["bigQuad"]
            # Big quads use an external battery, measured separately.
            if not bigQuad:
                voltage = subprocess.check_output(["rosrun crazyflie_tools battery --uri " + uri], shell=True)
            else:
                voltage = subprocess.check_output(["rosrun crazyflie_tools battery --uri " + uri + " --external 1"], shell=True)
            voltage = float(voltage)
            # Black = ok, orange = warning, red = critical.
            color = '#000000'
            if voltage < cfTypes[cfType]["batteryVoltageWarning"]:
                color = '#FF8800'
            # NOTE(review): "batteryVoltateCritical" (sic) must match the key
            # spelling used in crazyflieTypes.yaml.
            if voltage < cfTypes[cfType]["batteryVoltateCritical"]:
                color = '#FF0000'
            widgets[crazyflie["id"]].batteryLabel.config(foreground=color, text="{:.2f} v".format(voltage))
# def checkVersion():
# for id, w in widgets.items():
# w.versionLabel.config(foreground='#999999')
# proc = subprocess.Popen(
# ['python3', SCRIPTDIR + 'version.py'], stdout=subprocess.PIPE)
# versions = dict()
# versionsCount = dict()
# versionForMost = None
# versionForMostCount = 0
# for line in iter(proc.stdout.readline, ''):
# print(line)
# match = re.search("(\d+): ([0-9a-fA-F]+),(\d),([0-9a-fA-F]+)", line)
# if match:
# addr = int(match.group(1))
# v1 = match.group(2)
# modified = int(match.group(3)) == 1
# v2 = match.group(4)
# v = (v1,v2)
# versions[addr] = v
# if v in versionsCount:
# versionsCount[v] += 1
# else:
# versionsCount[v] = 1
# if versionsCount[v] > versionForMostCount:
# versionForMostCount = versionsCount[v]
# versionForMost = v
# for addr, v in versions.items():
# color = '#000000'
# if v != versionForMost:
# color = '#FF0000'
# widgets[addr].versionLabel.config(foreground=color, text=str(v[0])[0:3] + "," + str(v[1])[0:3])
    scriptButtons = Tkinter.Frame(top)
    mkbutton(scriptButtons, "battery", checkBattery)
    # currently not supported
    # mkbutton(scriptButtons, "version", checkVersion)
    mkbutton(scriptButtons, "sysOff", sysOff)
    mkbutton(scriptButtons, "reboot", reboot)
    mkbutton(scriptButtons, "flash (STM)", flashSTM)
    mkbutton(scriptButtons, "flash (NRF)", flashNRF)
    # start background threads
    def checkBatteryLoop():
        """Poll batteries every 10 s (unused: the thread start is commented out)."""
        while True:
            # rely on GIL
            checkBattery()
            time.sleep(10.0) # seconds
    # checkBatteryThread = threading.Thread(target=checkBatteryLoop)
    # checkBatteryThread.daemon = True # so it exits when the main thread exit
    # checkBatteryThread.start()
    # place the widgets in the window and start
    buttons.pack()
    frame.pack(padx=10, pady=10)
    scriptButtons.pack()
    top.mainloop()
|
Lab_5.py
|
import random
import threading
from tree import AVLTree, GraphicalTree
# Build an AVL tree from a shuffled sample of 0..9 and attach a visualizer.
tree = AVLTree()
random.seed(0)  # fixed seed keeps the demo tree reproducible
arr = random.sample(range(10), 10)
for i in arr:
    tree.add(i)
g_tree = GraphicalTree(tree)
def deleter():
    """Interactively delete user-chosen values until the tree is empty."""
    while tree:
        print("Обход слева направо:", tree.in_order())
        target = int(input("Введите элемент, который следует удалить: "))
        del tree[target]
        g_tree.update()
# Run the interactive deleter off the main thread; the GUI owns the main loop.
threading.Thread(target=deleter).start()
g_tree.start()
|
client.py
|
# Copyright 2018 Vincent Deuschle. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import random
import os
import logging
import pickle
from pathlib import Path
from datetime import datetime
from threading import Thread
from contextlib import ExitStack
import numpy as np
from .utils.misc import create_blueprint_string, set_framework_seed
from .utils.factory import get_model, get_monitor
from .utils.aws import (EnvironmentManager,
s3_upload_blueprint,
s3_download_results,
register_and_execute_job)
from .utils.ec2_instances import instances
def run_on_aws(bp,
               framework: str,
               instance: str) -> Thread:
    """Prepare result folders and upload the blueprint, then return an
    (unstarted) Thread that provisions an AWS Batch environment, runs the
    job, and downloads results.

    NOTE(review): `bp` appears to be a blueprint module (has __file__,
    job_name, bucket_name, ...) — confirm against the blueprint format.
    """
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    instance_list = instance.split('.')
    result_folder_path = Path(bp.job_name, framework, *instance_list)
    local_result_folder_path = Path(bp.result_folder_path) / result_folder_path
    s3_result_folder_path = Path('dl-benchmarking', 'results') / result_folder_path
    # Next free experiment index: one above the highest numbered directory.
    try:
        experiment_idx = max(map(int, os.listdir(local_result_folder_path))) + 1
    except (FileNotFoundError, ValueError):
        experiment_idx = 1
    finally:
        local_result_folder_path /= str(experiment_idx)
        s3_result_folder_path /= str(experiment_idx)
        os.makedirs(local_result_folder_path)
    logging.info(f'Local result folder {local_result_folder_path} created')
    # Snapshot the blueprint (stamped with framework/time/instance) next to
    # the results so every run is reproducible.
    bp_file_name = Path(bp.__file__).parts[-1]
    with open(bp.__file__, 'r') as bp_input, open(local_result_folder_path / bp_file_name, 'w') as bp_output:
        blueprint_string = create_blueprint_string(bp_input.read(),
                                                   framework,
                                                   timestamp,
                                                   instance)
        bp_output.write(blueprint_string)
    env_name = instance.replace('.', '_')
    # "p*" instance families carry GPUs: prefer the second (GPU) container
    # image / job definition when the blueprint provides one.
    if instance.startswith('p'):
        try:
            container_image = bp.container_images[1]
            job_def_name = bp.job_def_names[1]
        except IndexError:
            container_image = bp.container_images[0]
            job_def_name = bp.job_def_names[0]
    else:
        container_image = bp.container_images[0]
        job_def_name = bp.job_def_names[0]
    job_name = f'{bp.job_name}_{framework}_{env_name}_{experiment_idx}'
    # Fall back to the instance type's capacity (minus 2000 MB memory
    # headroom) when the blueprint does not pin resources.
    job_num_vcpu = bp.job_num_vcpu or instances[instance]['cpu']
    job_memory_size = bp.job_memory_size or instances[instance]['memory'] - 2000
    s3_upload_blueprint(bp.bucket_name,
                        s3_result_folder_path,
                        blueprint_string)
    def run():
        # Runs inside the returned Thread: create the compute environment,
        # submit the job, and fetch results once it succeeds.
        env_manager = EnvironmentManager(bp.region_name,
                                         env_name, instance,
                                         bp.ami_id,
                                         bp.env_min_cpu,
                                         bp.env_desired_cpu,
                                         bp.env_max_cpu,
                                         bp.subnets,
                                         bp.security_group_ids,
                                         bp.instance_role,
                                         bp.service_role,
                                         job_name,
                                         bp.tear_down_comp_env)
        with env_manager:
            job_succesfull = register_and_execute_job(bp.region_name,
                                                      job_name,
                                                      job_def_name,
                                                      env_name,
                                                      bp.bucket_name,
                                                      s3_result_folder_path,
                                                      bp.account_id,
                                                      container_image,
                                                      job_num_vcpu,
                                                      job_memory_size)
            if job_succesfull:
                s3_download_results(bp.bucket_name,
                                    local_result_folder_path,
                                    s3_result_folder_path)
    return Thread(target=run, name=job_name)
def run_locally(bp, framework: str) -> None:
    """Run the blueprint's training or inference job in the current process,
    writing results and a blueprint snapshot into a fresh local folder."""
    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    result_folder_path = Path(bp.result_folder_path, bp.job_name, framework, 'local')
    # Next free experiment index: one above the highest numbered directory.
    try:
        experiment_idx = max(map(int, os.listdir(result_folder_path))) + 1
    except (FileNotFoundError, ValueError):
        experiment_idx = 1
    finally:
        result_folder_path /= str(experiment_idx)
    os.makedirs(result_folder_path)
    logging.info(f'Local result folder {result_folder_path} created')
    # Snapshot the blueprint (stamped with framework + timestamp) alongside results.
    bp_file_name = Path(bp.__file__).parts[-1]
    with open(bp.__file__, 'r') as bp_input, open(result_folder_path / bp_file_name, 'w') as bp_output:
        blueprint_string = create_blueprint_string(bp_input.read(), framework, timestamp)
        bp_output.write(blueprint_string)
    # Seed every RNG involved so runs are reproducible.
    random.seed(bp.python_seed)
    np.random.seed(bp.numpy_seed)
    set_framework_seed(framework, bp.framework_seed)
    train_data = np.load(Path(bp.train_data_path)).reshape(*bp.reshape_input_data)
    train_label = np.load(Path(bp.train_label_path))
    # Test data is optional: blueprints may leave the paths unset.
    try:
        test_data = np.load(Path(bp.test_data_path)).reshape(*bp.reshape_input_data)
        test_label = np.load(Path(bp.test_label_path))
    except (TypeError, AttributeError):
        test_data = None
        test_label = None
    # Pre-trained parameters are optional as well.
    try:
        with open(bp.model_params_path, 'rb') as f:
            params = pickle.load(f)
    except (TypeError, FileNotFoundError):
        params = None
    if framework in ['pytorch', 'mxnet']:  # Use NCHW format for image data in pytorch and mxnet
        if len(train_data.shape) > 3:
            train_data = train_data.swapaxes(-3, -1)
        if isinstance(test_data, np.ndarray):
            test_data = test_data.swapaxes(-3, -1)
    model = get_model(bp.architecture, framework, params)
    if bp.mode == 'training':
        with ExitStack() as stack:
            # Monitors stay active (as context managers) for the whole run.
            for monitor in bp.monitors:
                stack.enter_context(get_monitor(monitor, result_folder_path))
            model.training(train_data,
                           train_label,
                           test_data,
                           test_label,
                           bp.loss_function,
                           bp.optimizer,
                           bp.optimizer_mode,
                           bp.epochs,
                           bp.learning_rate,
                           result_folder_path,
                           bp.batch_size,
                           bp.train_seq_length,
                           bp.test_seq_length,
                           bp.state_sizes,
                           bp.truncated_backprop_length)
    elif bp.mode == 'inference':
        with ExitStack() as stack:
            for monitor in bp.monitors:
                stack.enter_context(get_monitor(monitor, result_folder_path))
            model.inference(train_data,
                            bp.batch_size,
                            result_folder_path,
                            bp.mode,
                            bp.optimizer_mode,
                            bp.state_sizes)
|
test_comm.py
|
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import multiprocessing
import sys
from typing import Union, List, Tuple, Type, Dict
import numpy as np
import pandas as pd
import pytest
from .....lib.aio import AioEvent
from .....tests.core import require_cudf, require_cupy
from .....utils import get_next_port
from .. import (
SocketChannel,
SocketServer,
UnixSocketServer,
DummyChannel,
DummyServer,
get_client_type,
SocketClient,
UnixSocketClient,
DummyClient,
Server,
)
# 10x10 payload shared by all tests; fixed seed keeps runs deterministic.
test_data = np.random.RandomState(0).rand(10, 10)
port = get_next_port()
# server_type, config, con
params: List[Tuple[Type[Server], Dict, str]] = [
    (SocketServer, dict(host="127.0.0.1", port=port), f"127.0.0.1:{port}"),
]
if sys.platform != "win32":
    # Unix domain sockets are unavailable on Windows.
    params.append((UnixSocketServer, dict(process_index="0"), f"unixsocket:///0"))
local_params = params.copy()
# DummyServer only works in-process, so it is kept out of the multiprocess params.
local_params.append((DummyServer, dict(), "dummy://0"))
@pytest.mark.parametrize("server_type, config, con", local_params)
@pytest.mark.asyncio
async def test_comm(server_type, config, con):
    """End-to-end echo round-trip for each server/client pair, in-process."""
    async def check_data(chan: Union[SocketChannel, DummyChannel]):
        # Server-side handler: expect the payload, acknowledge it.
        np.testing.assert_array_equal(test_data, await chan.recv())
        await chan.send("success")
    config = config.copy()
    config["handle_channel"] = check_data
    # create server
    server = await server_type.create(config)
    await server.start()
    assert isinstance(server.info, dict)
    # create client
    client = await server_type.client_type.connect(con)
    assert isinstance(client.info, dict)
    assert isinstance(client.channel.info, dict)
    await client.send(test_data)
    assert "success" == await client.recv()
    await client.close()
    assert client.closed
    # create client2 (exercise the async context-manager close path)
    async with await server_type.client_type.connect(con) as client2:
        assert not client2.closed
    assert client2.closed
    await server.join(0.001)
    await server.stop()
    assert server.stopped
    # Server also supports async-with lifecycle management.
    async with await server_type.create(config) as server2:
        assert not server2.stopped
    assert server2.stopped
def _wrap_test(server_started_event, conf, tp):
    """Subprocess entry point: run an echo server of type tp until killed."""
    async def _test():
        async def check_data(chan: SocketChannel):
            np.testing.assert_array_equal(test_data, await chan.recv())
            await chan.send("success")
        nonlocal conf
        conf = conf.copy()
        conf["handle_channel"] = check_data
        # create server
        server = await tp.create(conf)
        await server.start()
        # Signal the parent process that it may start connecting.
        server_started_event.set()
        await server.join()
    asyncio.run(_test())
@pytest.mark.parametrize("server_type, config, con", params)
@pytest.mark.asyncio
async def test_multiprocess_comm(server_type, config, con):
    """Echo round-trip against a server running in a separate process."""
    server_started = multiprocessing.Event()
    p = multiprocessing.Process(
        target=_wrap_test, args=(server_started, config, server_type)
    )
    p.daemon = True
    p.start()
    try:
        await AioEvent(server_started).wait()
        # create client
        client = await server_type.client_type.connect(con)
        await client.channel.send(test_data)
        assert "success" == await client.recv()
        await client.close()
        assert client.closed
    finally:
        # Always reap the server process, even when assertions fail.
        p.kill()
# Payloads for the CUDA round-trip test (converted to cupy/cudf before send).
cupy_data = np.arange(100).reshape((10, 10))
cudf_data = pd.DataFrame({"col1": np.arange(10), "col2": [f"s{i}" for i in range(10)]})
def _wrap_cuda_test(server_started_event, conf, tp):
    """Subprocess entry point: echo server that validates cupy/cudf payloads."""
    async def _test():
        async def check_data(chan: SocketChannel):
            import cupy
            r = await chan.recv()
            # The test sends either a cupy array or a cudf DataFrame.
            if isinstance(r, cupy.ndarray):
                np.testing.assert_array_equal(cupy.asnumpy(r), cupy_data)
            else:
                pd.testing.assert_frame_equal(r.to_pandas(), cudf_data)
            await chan.send("success")
        conf["handle_channel"] = check_data
        # create server
        server = await tp.create(conf)
        await server.start()
        # Signal the parent process that it may start connecting.
        server_started_event.set()
        await server.join()
    asyncio.run(_test())
@require_cupy
@require_cudf
@pytest.mark.asyncio
async def test_multiprocess_cuda_comm():
    """Send cupy and cudf payloads to a spawned server process."""
    import cupy
    import cudf
    # spawn context: CUDA state must not be inherited via fork.
    mp_ctx = multiprocessing.get_context("spawn")
    server_started = mp_ctx.Event()
    port = get_next_port()
    p = mp_ctx.Process(
        target=_wrap_cuda_test,
        args=(server_started, dict(host="127.0.0.1", port=port), SocketServer),
    )
    p.daemon = True
    p.start()
    await AioEvent(server_started).wait()
    # create client
    client = await SocketServer.client_type.connect(f"127.0.0.1:{port}")
    await client.channel.send(cupy.asarray(cupy_data))
    assert "success" == await client.recv()
    # NOTE(review): the first client is replaced without close(); fine for a
    # test, but it leaks a connection until process exit.
    client = await SocketServer.client_type.connect(f"127.0.0.1:{port}")
    await client.channel.send(cudf.DataFrame(cudf_data))
    assert "success" == await client.recv()
    await client.close()
def test_get_client_type():
    """The address scheme prefix determines which client class is selected."""
    assert issubclass(get_client_type("127.0.0.1"), SocketClient)
    assert issubclass(get_client_type("unixsocket:///1"), UnixSocketClient)
    assert issubclass(get_client_type("dummy://"), DummyClient)
|
utils.py
|
"""Utility functions."""
import json
import os
import threading
import datetime
from pytz import timezone
from queue import Queue
from requests_oauthlib import OAuth1Session
def merge_two_dicts(a, b):
    """Merge 2 dictionaries.

    Returns a new dict with b's entries overriding a's; neither input is
    mutated.
    """
    merged = dict(a)
    merged.update(b)
    return merged
class TwitterClient:
    """Client to get data from Twitter."""
    def __init__(self, user_social=None):
        """Return client instance with tokens.

        Access tokens come from *user_social* (social-auth user) when given,
        otherwise from the environment variables `tw_at` / `tw_as`.
        """
        if user_social:
            self.AT = user_social.access_token['oauth_token']
            self.AS = user_social.access_token['oauth_token_secret']
        else:
            self.AT = os.environ['tw_at']
            self.AS = os.environ['tw_as']
        self.CK = os.environ['SOCIAL_AUTH_TWITTER_KEY']
        self.CS = os.environ['SOCIAL_AUTH_TWITTER_SECRET']
        self.session = OAuth1Session(self.CK, self.CS, self.AT, self.AS)
        # Twitter API v1.1 endpoints used by the methods below.
        self.urls = {
            'timeline':
            'https://api.twitter.com/1.1/statuses/home_timeline.json',
            'favlist': 'https://api.twitter.com/1.1/favorites/list.json',
            'user': 'https://api.twitter.com/1.1/users/show.json',
            'oembed': 'https://publish.twitter.com/oembed',
            'request_token': 'https://twitter.com/oauth/request_token',
            'access_token': 'https://twitter.com/oauth/access_token',
            'authorize': 'https://twitter.com/oauth/authorize',
            'account_verified':
            'https://api.twitter.com/1.1/account/verify_credentials.json',
            'tweet': 'https://api.twitter.com/1.1/statuses/show.json',
            'update': 'https://api.twitter.com/1.1/statuses/update.json'
        }

    def timeline(self):
        """Show self timeline."""
        res = self.session.get(self.urls['timeline'], params={})
        if res.status_code != 200:
            raise Exception()
        return json.loads(res.text)

    def favlist(self, user_id, page=1, count=100):
        """Show someone's favorite list."""
        params = {
            'user_id': user_id,
            'count': count,
            'page': page,
        }
        res = self.session.get(self.urls['favlist'], params=params)
        if res.status_code != 200:
            raise Exception()
        return json.loads(res.text)

    def user_from_screen_name(self, screen_name):
        """Show user's profile from screen_name."""
        params = {
            'screen_name': screen_name,
        }
        res = self.session.get(self.urls['user'], params=params)
        if res.status_code != 200:
            raise Exception()
        return json.loads(res.text)

    def show_tweets(self, tweets):
        """Print given tweets."""
        for item in tweets:
            print(item['text'])

    def show_user(self, user):
        """Print given user's profile."""
        print('User ID: {}'.format(user['id_str']))
        print('Screen Name: {}'.format(user['screen_name']))
        print('Name: {}'.format(user['name']))

    def user_id_from_screen_name(self, screen_name):
        """Show user's id from screen_name."""
        try:
            user = self.user_from_screen_name(screen_name)
        except Exception:
            # Narrowed from a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit.
            raise Exception()
        return user['id_str']

    def html_embedded(self, tweet, q):
        """Add HTML data for Twitter widget on single tweet.

        Always puts exactly one item on `q`: {} for protected accounts,
        '' on oembed failure, or the widget HTML on success.
        """
        # Remove private account
        if tweet['user']['protected']:
            q.put({})
            return
        url = 'https://twitter.com/{screen_name}/status/{tweet_id}'.format(
            screen_name=tweet['user']['screen_name'], tweet_id=tweet['id_str'])
        params = {
            'url': url,
            'maxwidth': 300,
        }
        res = self.session.get(self.urls['oembed'], params=params)
        if res.status_code != 200:
            # Must still put a value: the consumer in add_htmls_embedded
            # blocks on q.get(), so returning without putting (as the old
            # code did) would deadlock it.
            q.put('')
            return
        q.put(json.loads(res.text)['html'])

    def add_htmls_embedded(self, tweets):
        """Add HTML data for Twitter widget on tweets (one thread per tweet)."""
        threads = []
        queues = []
        for tweet in tweets:
            q = Queue()
            queues.append(q)
            th = threading.Thread(target=self.html_embedded, args=(tweet, q))
            th.start()
            threads.append(th)
        tweets_add = []
        for th, q, tweet in zip(threads, queues, tweets):
            th.join()
            # Protected accounts are skipped (their queue holds a {} sentinel).
            if tweet['user']['protected']:
                continue
            tweet_add = merge_two_dicts(tweet, {'html_embedded': q.get()})
            tweets_add.append(tweet_add)
        return tweets_add

    def tweet_from_id(self, tweet_id):
        """Get tweet from id_str."""
        params = {
            'id': tweet_id,
        }
        res = self.session.get(self.urls['tweet'], params=params)
        if res.status_code != 200:
            raise Exception()
        return json.loads(res.text)

    def status_update(self, text):
        """Update status."""
        params = {"status": text}
        res = self.session.post(self.urls['update'], params=params)
        print(res)
        if res.status_code != 200:
            raise Exception()
        return True
def is_pc(request):
    """Whether the requesting user agent is a PC (i.e. not mobile)."""
    from user_agents import parse
    agent = parse(request.META['HTTP_USER_AGENT'])
    return not agent.is_mobile
def ignore_exceptions(func, items):
    """Apply *func* to each item in its own thread, ignoring failures.

    Returns the successful results in input order.  A call that raises
    contributes nothing to the result.  Fixes over the previous version:
    the worker catches Exception instead of a bare except (so SystemExit
    and KeyboardInterrupt propagate), legitimate falsy results (0, '',
    False) are no longer dropped, and the debug print was removed.
    """
    def carry_out(func, item, q):
        """Run one call; put its result (or None on failure) on *q*."""
        try:
            q.put(func(item))
        except Exception:
            q.put(None)
    threads = []
    queues = []
    for item in items:
        q = Queue()
        queues.append(q)
        th = threading.Thread(target=carry_out, args=(func, item, q))
        th.start()
        threads.append(th)
    result = []
    for th, q in zip(threads, queues):
        th.join()
        res = q.get()
        if res is not None:
            result.append(res)
    return result
def parse_datetime(string):
    """Convert a Twitter created_at string to an Asia/Tokyo datetime.

    The input looks like 'Wed Aug 27 13:08:45 +0000 2008'.  Parsing the
    offset with %z yields a timezone-aware datetime; the previous format
    string matched '+0000' literally, producing a naive datetime that
    astimezone() then misinterpreted as local time.
    """
    dt = datetime.datetime.strptime(string, '%a %b %d %H:%M:%S %z %Y')
    return dt.astimezone(timezone('Asia/Tokyo'))
if __name__ == '__main__':
    # Ad-hoc smoke test: fetch and print data for a hard-coded account.
    # Requires the tw_at / tw_as / SOCIAL_AUTH_TWITTER_* environment
    # variables and network access to the Twitter API.
    user_id = '1212759744'
    screen_name = 'kemomimi_oukoku'
    twitter = TwitterClient()
    # user = twitter.user_from_screen_name(screen_name)
    # user_id = user['id_str']
    # twitter.show_user(user)
    # tweets = twitter.timeline()
    tweets = twitter.favlist(user_id)
    # twitter.show_tweets(tweets)
    # tweets = twitter.add_htmls_embedded(tweets)
    print(tweets[0]["favorite_count"])
    # print(twitter.issue_request_url())
|
thread.py
|
#!/usr/bin/env python3
import urllib.request,json,pymysql,sys,datetime,time,threading
sys.path.append("/root/Python/Video_Monitor")
from model.mariadb import insert
def hqxrdb(ztaddr,ztname):
    """Probe one video URL and record its HTTP status in the database.

    Thread worker body.  The module-level shared index ``i`` (guarded by
    the module-level ``lock``) selects which entry of *ztaddr*/*ztname*
    this thread handles; each thread bumps it by one.

    ztstatus: 0 if never reached, 404 on HTTP/URL errors, otherwise the
    HTTP status code returned by the server.
    """
    lock.acquire()
    global i
    url=ztaddr[i]
    name=ztname[i]
    ztstatus=0
    i=i+1
    lock.release()
    try:
        req=urllib.request.urlopen(url)
    # catch exceptions from the probe
    except TypeError as e:
        return e
    except urllib.error.HTTPError:
        ztstatus=404
    except urllib.error.URLError:
        ztstatus=404
    else:
        ztstatus=req.getcode()
    tick = datetime.datetime.now()
    ticks=tick.strftime('%Y-%m-%d %H:%M')
    # Map the first letter of the camera name to a hall/building code
    # (presumably a site identifier -- TODO confirm with the DB schema).
    if name[0]=="B" or name[0]=="T":
        tingfang="AGQJ"
    elif name[0]=="C" or name[0]=="V":
        tingfang="AGGB"
    elif name[0]=="D":
        tingfang="AGEU"
    else:
        tingfang="NW"
    insert(tingfang,name,url,ztstatus,ticks)
def Readjson(path):
    """Read the video-list JSON file at *path*.

    The file holds a list of [name, url] pairs.  Returns a tuple
    (error, data): on success error is None and data is
    [names, addresses]; on failure data is None and error is the
    caught exception (missing file or malformed JSON).
    """
    try:
        with open(path, 'r') as f:
            entries = json.load(f)  # don't shadow the builtin 'list'
    except FileNotFoundError as e:
        return e,None
    except json.decoder.JSONDecodeError as e:
        return e,None
    # Split the [name, url] rows into two parallel lists.
    ztname=[]
    ztaddr=[]
    for row in entries:
        ztname.append(row[0])
        ztaddr.append(row[1])
    return None,[ztname,ztaddr]
if __name__ == "__main__":
    # timestamp marking the start, used to report total runtime
    tick = datetime.datetime.now()
    ticks=tick.strftime('%Y-%m-%d %H:%M')
    # shared index into the URL/name lists, consumed by the hqxrdb() workers
    i=0
    lock = threading.Lock()
    path="/root/Python/Video_Monitor/video_list.json"
    req=Readjson(path)
    # NOTE(review): prefer 'req[0] is None' -- comparing an exception with ==
    if req[0] == None:
        ztaddr=req[1][1]
        ztname=req[1][0]
        threads=[]
        # one thread per URL; each worker picks its own entry via the
        # shared, lock-protected index 'i'
        for j in range(len(ztaddr)):
            t=threading.Thread(target=hqxrdb,args=(ztaddr,ztname))
            threads.append(t)
        for thr in threads:
            thr.start()
        for thr in threads:
            if thr.is_alive():
                thr.join()
        print(("总耗时 %s 秒" % (datetime.datetime.now()-tick).seconds))
    else:
        # Readjson failed: print the exception it returned
        print(req[0])
|
evaluation.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pickle
from queue import Queue
import os
import sys
from threading import Thread, stack_size as set_thread_stack_size
from mathics import settings
from mathics.core.expression import ensure_context, KeyComparable
FORMATS = ['StandardForm', 'FullForm', 'TraditionalForm',
'OutputForm', 'InputForm',
'TeXForm', 'MathMLForm',
'MatrixForm', 'TableForm']
class EvaluationInterrupt(Exception):
    """Base class for exceptions that interrupt a Mathics evaluation."""
    pass
class AbortInterrupt(EvaluationInterrupt):
    """Aborts the current evaluation; Evaluation.evaluate() turns it into $Aborted."""
    pass
class TimeoutInterrupt(EvaluationInterrupt):
    """Raised when an evaluation exceeds its time limit or was stopped."""
    pass
class ReturnInterrupt(EvaluationInterrupt):
    """Unwinds the evaluation, carrying the returned expression."""
    def __init__(self, expr):
        self.expr = expr
class BreakInterrupt(EvaluationInterrupt):
    """Break signal; caught by evaluate() (messages 'Break::nofdw' at top level)."""
    pass
class ContinueInterrupt(EvaluationInterrupt):
    """Continue signal; caught by evaluate() (messages 'Continue::nofdw' at top level)."""
    pass
def _thread_target(request, queue):
try:
result = request()
queue.put((True, result))
except BaseException:
exc_info = sys.exc_info()
queue.put((False, exc_info))
# MAX_RECURSION_DEPTH gives the maximum value allowed for $RecursionLimit. it's usually set to its
# default settings.DEFAULT_MAX_RECURSION_DEPTH.
MAX_RECURSION_DEPTH = max(settings.DEFAULT_MAX_RECURSION_DEPTH, int(os.getenv(
'MATHICS_MAX_RECURSION_DEPTH', settings.DEFAULT_MAX_RECURSION_DEPTH)))
def python_recursion_depth(n):
    """Convert a Mathics recursion depth *n* into a worst-case Python
    recursion depth: a fixed base of frames plus a per-level cost."""
    base_frames = 200
    frames_per_level = 30
    return base_frames + frames_per_level * n
def python_stack_size(n):  # n is a Mathics recursion depth
    """Bytes of thread stack needed for *n* Mathics recursions.

    Multiplies the estimated Python frame count by a per-frame stack
    cost of 512 bytes (value estimated experimentally).
    """
    bytes_per_frame = 512
    return bytes_per_frame * python_recursion_depth(n)
def set_python_recursion_limit(n):
    """Set the Python recursion limit needed for $RecursionLimit == n.

    Raises OverflowError if the interpreter did not accept the value.
    """
    required = python_recursion_depth(n)
    sys.setrecursionlimit(required)
    if sys.getrecursionlimit() != required:
        raise OverflowError
def run_with_timeout_and_stack(request, timeout):
    """Run *request*, interrupting it after *timeout* seconds.

    Provides a suitable stack environment: when the maximum recursion
    depth was raised via MATHICS_MAX_RECURSION_DEPTH, the work always
    runs on a dedicated thread so its stack size can be enlarged --
    even when no timeout was requested.  Otherwise, with no timeout,
    the request runs inline.
    """
    if MAX_RECURSION_DEPTH > settings.DEFAULT_MAX_RECURSION_DEPTH:
        set_thread_stack_size(python_stack_size(MAX_RECURSION_DEPTH))
    elif timeout is None:
        return request()
    queue = Queue(maxsize=1)  # receives (success, payload) from the worker
    thread = Thread(target=_thread_target, args=(request, queue))
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        # NOTE: the worker keeps running in the background after timeout
        raise TimeoutInterrupt()
    success, payload = queue.get()
    if success:
        return payload
    # payload is a sys.exc_info() triple; re-raise preserving the traceback
    exc_type, exc_value, exc_tb = payload
    raise exc_value.with_traceback(exc_tb)
class Out(KeyComparable):
    """Base class for captured output items (messages and prints)."""

    def __init__(self):
        self.is_message = False  # True for Message instances
        self.is_print = False    # True for Print instances
        self.text = ''           # rendered text content

    def get_sort_key(self):
        """Sort key consumed by KeyComparable.

        BUG FIX: the tuple was previously computed but never returned,
        so this method always yielded None.
        """
        return (self.is_message, self.is_print, self.text)
class Message(Out):
    """A captured Mathics message, identified by symbol::tag."""
    def __init__(self, symbol, tag, text):
        # text is the already-formatted message body
        super(Message, self).__init__()
        self.is_message = True
        self.symbol = symbol
        self.tag = tag
        self.text = text
    def __str__(self):
        return '{}::{}: {}'.format(self.symbol, self.tag, self.text)
    def __eq__(self, other):
        # NOTE(review): equality ignores symbol/tag, and defining __eq__
        # without __hash__ makes instances unhashable -- confirm no
        # caller relies on hashing or on symbol-sensitive comparison.
        return self.is_message == other.is_message and self.text == other.text
    def get_data(self):
        """Return a JSON-friendly dict representation for front ends."""
        return {
            'message': True,
            'symbol': self.symbol,
            'tag': self.tag,
            'prefix': '%s::%s' % (self.symbol, self.tag),
            'text': self.text,
        }
class Print(Out):
    """A captured Print[] output line."""
    def __init__(self, text):
        super(Print, self).__init__()
        self.is_print = True
        self.text = text
    def __str__(self):
        return self.text
    def __eq__(self, other):
        # compares only the kind flag and text (mirrors Message.__eq__)
        return self.is_message == other.is_message and self.text == other.text
    def get_data(self):
        """Return a JSON-friendly dict representation for front ends."""
        return {
            'message': False,
            'text': self.text,
        }
class Result(object):
    """Bundle of one evaluation's outputs, formatted result and line number."""
    def __init__(self, out, result, line_no):
        self.out = out          # list of Out (Message/Print) objects
        self.result = result    # formatted result text, or None
        self.line_no = line_no  # the In[n]/Out[n] line number
    def get_data(self):
        """Return a JSON-friendly dict of this result."""
        return {
            'out': [item.get_data() for item in self.out],
            'result': self.result,
            'line': self.line_no,
        }
class Output(object):
    """Abstract sink for evaluation output; front ends subclass this."""
    def max_stored_size(self, settings):
        # maximum pickled size allowed for storing Out[n] (None = unlimited)
        return settings.MAX_STORED_SIZE
    def out(self, out):
        # called once per produced Message/Print; default: discard
        pass
    def clear(self, wait):
        raise NotImplementedError
    def display(self, data, metadata):
        raise NotImplementedError
class Evaluation(object):
def __init__(self, definitions=None,
output=None, format='text', catch_interrupt=True):
from mathics.core.definitions import Definitions
if definitions is None:
definitions = Definitions()
self.definitions = definitions
self.recursion_depth = 0
self.timeout = False
self.stopped = False
self.out = []
self.output = output if output else Output()
self.listeners = {}
self.options = None
self.predetermined_out = None
self.quiet_all = False
self.format = format
self.catch_interrupt = catch_interrupt
def parse(self, query):
'Parse a single expression and print the messages.'
from mathics.core.parser import SingleLineFeeder
return self.parse_feeder(SingleLineFeeder(query))
def parse_evaluate(self, query, timeout=None):
expr = self.parse(query)
if expr is not None:
return self.evaluate(expr, timeout)
def parse_feeder(self, feeder):
'Parse a single expression from feeder and print the messages.'
from mathics.core.parser import parse, TranslateError
try:
result = parse(self.definitions, feeder)
except TranslateError as exc:
self.recursion_depth = 0
self.stopped = False
result = None
feeder.send_messages(self)
return result
def evaluate(self, query, timeout=None):
'Evaluate an expression.'
from mathics.core.expression import Symbol, Expression
from mathics.core.rules import Rule
self.recursion_depth = 0
self.timeout = False
self.stopped = False
line_no = self.definitions.get_line_no()
line_no += 1
self.definitions.set_line_no(line_no)
history_length = self.definitions.get_history_length()
result = None
exc_result = None
def check_io_hook(hook):
return len(self.definitions.get_ownvalues(hook))>0
def evaluate():
if history_length > 0:
self.definitions.add_rule('In', Rule(
Expression('In', line_no), query))
if check_io_hook('System`$Pre'):
result = Expression('System`$Pre', query).evaluate(self)
else:
result = query.evaluate(self)
if check_io_hook('System`$Post'):
result = Expression('System`$Post', result).evaluate(self)
if history_length > 0:
if self.predetermined_out is not None:
out_result = self.predetermined_out
self.predetermined_out = None
else:
out_result = result
stored_result = self.get_stored_result(out_result)
self.definitions.add_rule('Out', Rule(
Expression('Out', line_no), stored_result))
if result != Symbol('Null'):
if check_io_hook('System`$PrePrint'):
result = Expression('System`$PrePrint', result).evaluate(self)
return self.format_output(result, self.format)
else:
return None
try:
try:
result = run_with_timeout_and_stack(evaluate, timeout)
except KeyboardInterrupt:
if self.catch_interrupt:
exc_result = Symbol('$Aborted')
else:
raise
except ValueError as exc:
text = str(exc)
if (text == 'mpz.pow outrageous exponent' or # noqa
text == 'mpq.pow outrageous exp num'):
self.message('General', 'ovfl')
exc_result = Expression('Overflow')
else:
raise
except OverflowError:
self.message('General', 'ovfl')
exc_result = Expression('Overflow')
except BreakInterrupt:
self.message('Break', 'nofdw')
exc_result = Expression('Hold', Expression('Break'))
except ContinueInterrupt:
self.message('Continue', 'nofdw')
exc_result = Expression('Hold', Expression('Continue'))
except TimeoutInterrupt:
self.stopped = False
self.timeout = True
self.message('General', 'timeout')
exc_result = Symbol('$Aborted')
except AbortInterrupt: # , error:
exc_result = Symbol('$Aborted')
except ReturnInterrupt as ret:
exc_result = ret.expr
if exc_result is not None:
self.recursion_depth = 0
if exc_result != Symbol('Null'):
result = self.format_output(exc_result, self.format)
result = Result(self.out, result, line_no)
self.out = []
finally:
self.stop()
history_length = self.definitions.get_history_length()
line = line_no - history_length
while line > 0:
unset_in = self.definitions.unset('In', Expression('In', line))
unset_out = self.definitions.unset('Out', Expression('Out', line))
if not (unset_in or unset_out):
break
line -= 1
return result
def get_stored_result(self, result):
from mathics.core.expression import Symbol
# Remove outer format
if result.has_form(FORMATS, 1):
result = result.leaves[0]
# Prevent too large results from being stored, as this can exceed the
# DB's max_allowed_packet size
max_stored_size = self.output.max_stored_size(settings)
if max_stored_size is not None:
try:
data = pickle.dumps(result)
if len(data) > max_stored_size:
return Symbol('Null')
except (ValueError, pickle.PicklingError):
return Symbol('Null')
return result
    def stop(self):
        # Flag polled by check_stopped(); makes the evaluation raise
        # TimeoutInterrupt at the next recursion-depth check.
        self.stopped = True
def format_output(self, expr, format=None):
if format is None:
format = self.format
if isinstance(format, dict):
return dict((k, self.format_output(expr, f)) for k, f in format.items())
from mathics.core.expression import Expression, BoxError
if format == 'text':
result = expr.format(self, 'System`OutputForm')
elif format == 'xml':
result = Expression(
'StandardForm', expr).format(self, 'System`MathMLForm')
elif format == 'tex':
result = Expression('StandardForm', expr).format(
self, 'System`TeXForm')
else:
raise ValueError
try:
boxes = result.boxes_to_text(evaluation=self)
except BoxError:
self.message('General', 'notboxes',
Expression('FullForm', result).evaluate(self))
boxes = None
return boxes
def set_quiet_messages(self, messages):
from mathics.core.expression import Expression, String
value = Expression('List', *messages)
self.definitions.set_ownvalue('Internal`$QuietMessages', value)
def get_quiet_messages(self):
from mathics.core.expression import Expression
value = self.definitions.get_definition('Internal`$QuietMessages').ownvalues
if value:
try:
value = value[0].replace
except AttributeError:
return []
if not isinstance(value, Expression):
return []
return value.leaves
def message(self, symbol, tag, *args):
from mathics.core.expression import (String, Symbol, Expression,
from_python)
# Allow evaluation.message('MyBuiltin', ...) (assume
# System`MyBuiltin)
symbol = ensure_context(symbol)
quiet_messages = set(self.get_quiet_messages())
pattern = Expression('MessageName', Symbol(symbol), String(tag))
if pattern in quiet_messages or self.quiet_all:
return
# Shorten the symbol's name according to the current context
# settings. This makes sure we print the context, if it would
# be necessary to find the symbol that this message is
# attached to.
symbol_shortname = self.definitions.shorten_name(symbol)
if settings.DEBUG_PRINT:
print('MESSAGE: %s::%s (%s)' % (symbol_shortname, tag, args))
text = self.definitions.get_value(
symbol, 'System`Messages', pattern, self)
if text is None:
pattern = Expression('MessageName', Symbol('General'), String(tag))
text = self.definitions.get_value(
'System`General', 'System`Messages', pattern, self)
if text is None:
text = String("Message %s::%s not found." % (symbol_shortname, tag))
text = self.format_output(Expression(
'StringForm', text, *(from_python(arg) for arg in args)), 'text')
self.out.append(Message(symbol_shortname, tag, text))
self.output.out(self.out[-1])
def print_out(self, text):
from mathics.core.expression import from_python
text = self.format_output(from_python(text), 'text')
self.out.append(Print(text))
self.output.out(self.out[-1])
if settings.DEBUG_PRINT:
print('OUT: ' + text)
def error(self, symbol, tag, *args):
# Temporarily reset the recursion limit, to allow the message being
# formatted
self.recursion_depth, depth = 0, self.recursion_depth
try:
self.message(symbol, tag, *args)
finally:
self.recursion_depth = depth
raise AbortInterrupt
def error_args(self, symbol, given, *needed):
self.message_args(symbol, given, *needed)
raise AbortInterrupt
def message_args(self, symbol, given, *needed):
from mathics.core.expression import Symbol
if len(needed) == 1:
needed = needed[0]
if given > 1 and needed > 1:
self.message(symbol, 'argrx', Symbol(symbol), given, needed)
elif given == 1:
self.message(symbol, 'argr', Symbol(symbol), needed)
elif needed == 1:
self.message(symbol, 'argx', Symbol(symbol), given)
elif len(needed) == 2:
if given == 1:
self.message(symbol, 'argtu', Symbol(symbol), *needed)
else:
self.message(symbol, 'argt', Symbol(symbol), *needed)
else:
raise NotImplementedError
    def check_stopped(self):
        # Abort the evaluation if stop() was called; TimeoutInterrupt is
        # caught in evaluate() and turned into $Aborted.
        if self.stopped:
            raise TimeoutInterrupt
    def inc_recursion_depth(self):
        """Bump the recursion counter, erroring once $RecursionLimit is hit."""
        self.check_stopped()
        limit = self.definitions.get_config_value(
            '$RecursionLimit', MAX_RECURSION_DEPTH)
        if limit is not None:
            if limit < 20:
                limit = 20  # clamp very small limits to a minimum of 20
            self.recursion_depth += 1
            # error() raises AbortInterrupt after emitting the message
            if self.recursion_depth > limit:
                self.error('$RecursionLimit', 'reclim', limit)
    def dec_recursion_depth(self):
        # inverse of inc_recursion_depth(); not clamped at zero
        self.recursion_depth -= 1
def add_listener(self, tag, listener):
existing = self.listeners.get(tag)
if existing is None:
existing = self.listeners[tag] = []
existing.insert(0, listener)
def remove_listener(self, tag, listener):
self.listeners.get(tag).remove(listener)
def publish(self, tag, *args, **kwargs):
listeners = self.listeners.get(tag, [])
for listener in listeners:
if listener(*args, **kwargs):
break
|
bbuild.py
|
import sys
import os
import json
import subprocess
from queue import Queue
from threading import Thread
from threading import Lock
import locale
import traceback
#run %comspec% /k "D:\ProgramFiles\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvars64.bat" first!!
source_dirs=[]
bin_search_dirs=[]
root_modules=[]
compiler_path=""
outpath=""
bd_home=''
link_path=[]
prepared_mod=dict()
link_target=None
link_executable=False
runtime_lib_path=""
max_bin_timestamp =0
num_worker_threads = 1
thread_worker=None
link_cmd = ""
exe_postfix = ""
obj_postfix = '.o'
if os.name == 'nt':
exe_postfix = ".exe"
obj_postfix = '.obj'
class compile_worker:
    """Thread pool that compiles modules in dependency order.

    States: RUNNING while tasks may still execute, DONE after a clean
    drain, ABORT after a compile failure (remaining queued tasks are
    still drained), ABORT_DONE once the queue has emptied after an abort.
    """
    RUNNING=0
    DONE=1
    ABORT=2
    ABORT_DONE=3
    def __init__(self):
        self.q = Queue()
        def worker():
            # Daemon loop: compile one unit, then release any dependents
            # whose remaining dependency count drops to zero.
            while True:
                cu = self.q.get()
                try:
                    compile_module(cu.modu,cu.source_path,cu.is_main)
                    for dep_cu in cu.reverse_dependency:
                        if dep_cu.source_path and dep_cu.dependency_done():
                            #the module dep_cu depends on cu and it is a source code module,
                            #mark the current cu as done, and check if there are other dependencies
                            self.put(dep_cu)
                    cu.done=True
                except Exception as e:
                    print("\n"+str(e))
                    if num_worker_threads == 1:
                        print(traceback.format_exc())
                    self.state=compile_worker.ABORT
                finally:
                    self.q.task_done()
        for i in range(num_worker_threads):
            t = Thread(target=worker)
            t.daemon = True
            t.start()
        # separate thread waits for the queue to drain so join() can poll
        self.joiner=Thread(target=lambda:self.q.join())
        self.state=compile_worker.RUNNING
    def start_joiner(self):
        """Start the thread that waits for the work queue to drain."""
        self.joiner.start()
    def join(self,timeout):
        """Wait up to *timeout* seconds for completion; return current state."""
        self.joiner.join(timeout)
        # BUG FIX: Thread.isAlive() was removed in Python 3.9; use is_alive().
        if self.joiner.is_alive():
            if self.state==compile_worker.RUNNING:
                return compile_worker.RUNNING
            if self.state==compile_worker.ABORT:
                return compile_worker.ABORT
            raise RuntimeError("illegal state of compile_worker")
        else:
            if self.state==compile_worker.RUNNING:
                self.state=compile_worker.DONE
            if self.state==compile_worker.ABORT:
                self.state=compile_worker.ABORT_DONE
            return self.state
    def put(self,cu):
        """Queue *cu* for compilation unless an abort is in progress."""
        if self.state!=compile_worker.ABORT and self.state!=compile_worker.ABORT_DONE:
            self.q.put(cu)
class compile_unit:
    """One module to build, plus its dependency bookkeeping."""
    def __init__(self,is_main,source_path,modu):
        self.is_main = is_main            # compile as the entry point?
        self.source_path = source_path    # None for binary-only modules
        self.modu = modu                  # module path as a tuple
        self.reverse_dependency = set()   # units waiting on this one
        self.dependency_cnt = 0           # unresolved source dependencies
        self.lock = Lock()
        self.done = source_path is None   # binary modules need no compile
    def add_reverse_dependency(self,cu):
        self.reverse_dependency.add(cu)
    def dependency_done(self):
        """Mark one dependency finished; True when ready to compile."""
        with self.lock:
            self.dependency_cnt -= 1
            if self.dependency_cnt < 0:
                raise RuntimeError("Bad dependency_cnt")
            return self.dependency_cnt == 0
    def add_dependency(self,cu):
        """Record a dependency on *cu*.

        Only source-file units are tracked; a binary module needs no
        compilation, so depending on it never blocks this unit.
        """
        if cu.source_path:
            cu.add_reverse_dependency(self)
            self.dependency_cnt += 1
#if cu is a binary module, we don't need to wait for it, so just ignore the dependency
def update_max_bin_timestamp(fn):
    """Track the newest mtime seen among build inputs.

    Used later to decide whether the link target is already up to date.
    """
    global max_bin_timestamp
    ts=os.path.getmtime(fn)
    if ts>max_bin_timestamp:
        max_bin_timestamp=ts
def get_next(idx,args):
    """Return (idx+1, args[idx+1]); error when no argument follows."""
    nxt = idx + 1
    if nxt >= len(args):
        raise RuntimeError("Expecting an argument after "+args[idx])
    return nxt, args[nxt]
def parse_args(args):
i=1
global source_dirs,bin_search_dirs,root_modules,outpath,link_target,link_executable,num_worker_threads,link_cmd
while i<len(args):
if args[i]=='-i' or args[i]=='--in-source':
i,v = get_next(i,args)
source_dirs.append(v)
elif args[i]=='-o' or args[i]=='--out':
i,outpath = get_next(i,args)
elif args[i]=='-bs' or args[i]=='--bin-search-path':
i,v = get_next(i,args)
bin_search_dirs.append(v)
elif args[i]=='-le' or args[i]=='--link-executable':
i,link_target = get_next(i,args)
link_executable=True
elif args[i]=='-lc' or args[i]=='--link-cmd':
i,link_cmd = get_next(i,args)
elif args[i]=='-j':
i,v = get_next(i,args)
num_worker_threads = int(v)
if num_worker_threads<1:
raise RuntimeError("Bad number of threads")
else:
if(args[i][0]=='-'):
raise RuntimeError("Unknown command "+args[i])
root_modules.append(args[i].split('.'))
i+=1
if len(root_modules)==0 :
raise RuntimeError("No root modules specified")
if len(outpath)==0 :
outpath='.'
bin_search_dirs.append(os.path.join(bd_home,"blib"))
if '.' not in source_dirs: source_dirs.append('.')
if '.' not in bin_search_dirs: bin_search_dirs.append('.')
def search_bin(modu):
    """Find an up-to-date compiled module (.bmm) for *modu*.

    Searches bin_search_dirs, then the output directory.  Returns the
    path without the .bmm extension, or None when no binary exists.
    As before, a binary that is older than its source anywhere stops
    the whole search (the module will be recompiled from source).
    The two previously duplicated search bodies are now one helper.
    """
    _STALE = object()  # sentinel: binary found but older than its source
    def check(path):
        raw_path=os.path.join(path,*modu)
        p = raw_path +".bmm"
        if os.path.exists(p) and os.path.isfile(p):
            src=search_src(modu)
            if src: #if it is in the source, check if source is newer than binary
                if os.path.getmtime(src) > os.path.getmtime(p):
                    return _STALE
            return raw_path
        return None
    for path in bin_search_dirs:
        found = check(path)
        if found is _STALE:
            return None  # act as if we did not find the binary file
        if found:
            return found
    found = check(outpath)
    return None if found is _STALE else found
def search_src(modu):
for path in source_dirs:
raw_path=os.path.join(path,*modu)
p = raw_path +".bdm"
if os.path.exists(p) and os.path.isfile(p) :
return p
p = raw_path +".txt"
if os.path.exists(p) and os.path.isfile(p) :
return p
def parse_bmm_dependency(bmm_path,self_cu):
with open(bmm_path) as f:
data = json.load(f)
dependencies=data['Imports']
for dep in dependencies:
if dep[-1][0]==':' or dep[-1][0]=='*': #if it is a "name import"
dep.pop() #pop the imported name
self_cu.add_dependency(prepare_module(dep,False))
def create_dirs(root,modu):
    """Ensure the output directory for module *modu* exists under *root*.

    Only the package part (all but the last component) becomes a
    directory; the final component is the module file itself.
    """
    target = os.path.join(root, *modu[:-1])
    if not os.path.exists(target):
        os.makedirs(target)
def prepare_module(modu,is_main):
tuple_modu=tuple(modu)
if tuple_modu in prepared_mod:
return prepared_mod[tuple_modu]
bmm=search_bin(modu)
cu=None
if bmm:
cu = compile_unit(False,None,tuple_modu)
prepared_mod[tuple_modu] = cu
update_max_bin_timestamp(bmm+".bmm")
link_path.append(bmm)
parse_bmm_dependency(bmm+".bmm", cu)
else:
src=search_src(modu)
if not src:
raise RuntimeError("Cannot resolve module dependency: " + ".".join(modu))
cu = compile_unit(is_main,src,tuple_modu)
prepared_mod[tuple_modu] = cu
update_max_bin_timestamp(src)
cmdarr=[compiler_path,'-i',src, "--print-import"]
cmd=" ".join(cmdarr)
print("Running command " + cmd)
proc = subprocess.Popen(cmdarr,stdout=subprocess.PIPE)
dependencies_list=[]
while True:
line = proc.stdout.readline().decode(locale.getpreferredencoding())
if line != '':
dependencies_list.append(line.rstrip())
else:
break
dependencies_list.pop() #delete the last element, which is the package name of the source itself
if proc.wait()!=0:
raise RuntimeError("Compile failed, exit code: "+ str(proc.returncode))
proc=None #hope to release resource for the process pipe
for depstr in dependencies_list:
dep=depstr.split(".")
if dep[-1][0]==':' or dep[-1][0]=='*': #if it is a "name import"
dep.pop() #delete the imported name
cu.add_dependency(prepare_module(dep,False))
create_dirs(outpath,modu)
return cu
def compile_module(modu,src,is_main):
outfile=os.path.join(outpath,*modu)
cmdarr=[compiler_path,'-i',src, "-o", outfile+ obj_postfix]
for bpath in bin_search_dirs:
cmdarr.append("-l")
cmdarr.append(bpath)
cmdarr.append("-l")
cmdarr.append(outpath)
if is_main:
cmdarr.append("-e")
print("Running command " + " ".join(cmdarr))
ret=subprocess.run(cmdarr)
if ret.returncode!=0:
raise RuntimeError("Compile failed")
link_path.append(outfile)
def init_path():
global bd_home,compiler_path
bd_home=os.environ.get('BIRDEE_HOME')
if not bd_home:
raise RuntimeError("The environment variable BIRDEE_HOME is not set")
compiler_path=os.path.join(bd_home,"bin","birdeec"+exe_postfix)
if not os.path.exists(compiler_path) or not os.path.isfile(compiler_path):
raise RuntimeError("Cannot find birdee compiler")
def link_msvc():
linker_path='link.exe'
msvc_command='''{} /OUT:"{}" /MANIFEST /NXCOMPAT /PDB:"{}" {} /DYNAMICBASE {} "kernel32.lib" "user32.lib" "gdi32.lib" "winspool.lib" "comdlg32.lib" "advapi32.lib" "shell32.lib" "ole32.lib" "oleaut32.lib" "uuid.lib" "odbc32.lib" "odbccp32.lib" /DEBUG /MACHINE:X64 /INCREMENTAL /SUBSYSTEM:CONSOLE /MANIFESTUAC:"level='asInvoker' uiAccess='false'" /ManifestFile:"{}" /ERRORREPORT:PROMPT /NOLOGO /TLBID:1 '''
runtime_lib_path = os.path.join(bd_home,"bin","BirdeeRuntime.lib")
pdb_path= os.path.splitext(link_target)[0]+".pdb"
obj_files='"{runtime_lib_path}"'.format(runtime_lib_path=runtime_lib_path)
for lpath in link_path:
lpath += obj_postfix
obj_files += ' "{lpath}"'.format(lpath=lpath)
cmd=msvc_command.format(linker_path,link_target,pdb_path,link_cmd,obj_files,link_target+".manifest")
print("Running command " + cmd)
ret=subprocess.run(cmd)
if ret.returncode!=0:
raise RuntimeError("Compile failed")
def link_gcc():
linker_path='gcc'
cmdarr = [linker_path,'-o',link_target, "-Wl,--start-group"]
runtime_lib_path = os.path.join(bd_home,"lib","libBirdeeRuntime.a")
cmdarr.append(runtime_lib_path)
for lpath in link_path:
lpath += obj_postfix
cmdarr.append(lpath)
cmdarr.append("-lgc")
cmdarr.append("-Wl,--end-group")
if len(link_cmd):
cmdarr.append(link_cmd)
print("Running command " + ' '.join(cmdarr))
ret=subprocess.run(cmdarr)
if ret.returncode!=0:
raise RuntimeError("Compile failed")
init_path()
parse_args(sys.argv)
root_modules.append(['birdee'])
file_cnt=0
for modu in root_modules:
prepare_module(modu,file_cnt==0)
file_cnt += 1
thread_worker=compile_worker()
for modu,cu in prepared_mod.items():
if cu.source_path and cu.dependency_cnt==0: #if a module is waiting for compiling and all dependencies are resolved
thread_worker.put(cu)
thread_worker.start_joiner()
while True:
status = thread_worker.join(0.1)
if status == compile_worker.ABORT:
print("Aborted, waiting for tasks completion")
while thread_worker.join(0.1)!=compile_worker.ABORT_DONE: pass
break
if status == compile_worker.ABORT_DONE:
print("Aborted")
break
if status == compile_worker.DONE:
break
for modu,cu in prepared_mod.items():
if not cu.done:
raise RuntimeError("The compile unit " + ".".join(cu.modu) + " is not compiled")
if link_executable and link_target:
if os.path.exists(link_target) and os.path.isfile(link_target) and os.path.getmtime(link_target)>max_bin_timestamp:
print("The link target is up to date")
else:
if os.name=='nt':
link_msvc()
else:
link_gcc()
|
MonoTime.py
|
# Copyright (c) 2006-2014 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from time import strftime, gmtime, localtime
from sippy_lite.Math.recfilter import recfilter
from sippy_lite.Time.clock_dtime import clock_getdtime, CLOCK_REALTIME, CLOCK_MONOTONIC
from threading import local
class MonoGlobals(local):
    """Per-thread clock state (subclasses threading.local, so each
    thread gets its own __init__ call and its own attribute values).

    realt_flt: recursive filter smoothing the REALTIME - MONOTONIC
               clock offset (recfilter with coefficient 0.99).
    monot_max: largest monotonic reading seen so far in this thread.
    """
    realt_flt = None
    monot_max = None
    def __init__(self):
        realt = clock_getdtime(CLOCK_REALTIME)
        self.monot_max = clock_getdtime(CLOCK_MONOTONIC)
        # seed the filter with the current realtime/monotonic offset
        self.realt_flt = recfilter(0.99, realt - self.monot_max)
class MonoTime(object):
    """Timestamp pairing a CLOCK_MONOTONIC reading (monot) with an
    estimated CLOCK_REALTIME value (realt).

    realt is smoothed through the process-wide MonoGlobals.realt_flt
    filter, so wall-clock jumps do not disturb monotonic arithmetic.
    Instances serialize via str() as "<realt>-<monot>" and can be
    re-created with MonoTime(s=...).

    NOTE(review): Python 2 code (__cmp__/cmp below); keep py2-compatible.
    """
    # Class-level defaults so attribute access before init yields None.
    monot = None
    realt = None
    # Process-wide shared filter / high-water-mark state (shared by all
    # instances and all threads).
    globals = MonoGlobals()
    def __init__(self, s = None, monot = None, realt = None, trust_realt = False):
        # s: "<realt>[-<monot>]" string as produced by __str__().
        if s != None:
            parts = s.split('-', 1)
            self.realt = float(parts[0])
            if len(parts) == 1:
                # Only realt present: estimate monot via the shared filter.
                self.__initFromRealt()
            else:
                self.monot = float(parts[1])
            return
        if monot == None and realt == None:
            if trust_realt:
                # trust_realt without an explicit realt is contradictory.
                raise TypeError('MonoTime.__init__: realt could not be None when trust_realt is set')
            # Sample both clocks "now" and feed the offset into the filter.
            realt = clock_getdtime(CLOCK_REALTIME)
            self.monot = clock_getdtime(CLOCK_MONOTONIC)
            diff_flt = self.globals.realt_flt.apply(realt - self.monot)
            if self.globals.monot_max < self.monot:
                self.globals.monot_max = self.monot
            # Use the filtered offset, not the raw CLOCK_REALTIME sample.
            self.realt = self.monot + diff_flt
            return
        if monot != None:
            self.monot = monot
            if realt != None:
                self.realt = realt
            else:
                # Derive realt from the last filtered offset.
                self.realt = monot + self.globals.realt_flt.lastval
            return
        # Only realt was supplied.
        self.realt = realt
        self.__initFromRealt(trust_realt)
    def __initFromRealt(self, trust_realt = False):
        # Estimate monot from realt via the last filtered offset; unless the
        # caller trusts realt, clamp to the highest monotonic value seen so
        # the estimate never runs ahead of the monotonic clock.
        self.monot = self.realt - self.globals.realt_flt.lastval
        if not trust_realt and self.monot > self.globals.monot_max:
            monot_now = clock_getdtime(CLOCK_MONOTONIC)
            if monot_now > self.globals.monot_max:
                self.globals.monot_max = monot_now
            self.monot = self.globals.monot_max
    def getdiff(self):
        # Current realtime-monotonic offset of this instance.
        return (self.realt - self.monot)
    def __str__(self):
        # Serialized form parseable by __init__(s=...).
        rstr = '%.6f-%.6f' % (self.realt, self.monot)
        return (rstr)
    def ftime(self, base = None):
        # GMT timestamp; with base given, rebase onto base's wall clock
        # using the monotonic difference between the two instances.
        if base != None:
            realt = base.realt - (base.monot - self.monot)
        else:
            realt = self.realt
        return strftime('%Y-%m-%d %H:%M:%S+00', gmtime(round(realt)))
    def fptime(self, base = None):
        # Local-time timestamp with millisecond precision.
        if base != None:
            realt = base.realt - (base.monot - self.monot)
        else:
            realt = self.realt
        return '%s.%.3d' % (strftime('%d %b %H:%M:%S', localtime(realt)), \
          (realt % 1) * 1000)
    def frtime(self, base = None):
        # GMT timestamp in "HH:MM:SS.000 GMT Www Mmm D YYYY" form with the
        # day-of-month zero-padding stripped.
        if base != None:
            realt = base.realt - (base.monot - self.monot)
        else:
            realt = self.realt
        gt = gmtime(realt)
        day = strftime('%d', gt)
        if day[0] == '0':
            day = day[1]
        return strftime('%%H:%%M:%%S.000 GMT %%a %%b %s %%Y' % day, gt)
    # Arithmetic and comparison act on the monotonic component only and
    # return plain floats, not MonoTime instances.
    def __add__(self, x):
        if isinstance(x, MonoTime):
            return (self.monot + x.monot)
        return (self.monot + x)
    def __sub__(self, x):
        if isinstance(x, MonoTime):
            return (self.monot - x.monot)
        return (self.monot - x)
    def __radd__(self, x):
        if isinstance(x, MonoTime):
            return (self.monot + x.monot)
        return (self.monot + x)
    def __rsub__(self, x):
        if isinstance(x, MonoTime):
            return (x.monot - self.monot)
        return (x - self.monot)
    def __cmp__(self, other):
        # Python 2 rich ordering: any MonoTime sorts after None; ints and
        # floats compare directly against the monotonic component.
        if other == None:
            return (1)
        if isinstance(other, int):
            otime = float(other)
        elif isinstance(other, float):
            otime = other
        else:
            otime = other.monot
        return cmp(self.monot, otime)
    def offsetFromNow(self):
        # Seconds elapsed (monotonic) since this timestamp; negative when
        # the timestamp lies in the future.
        now = clock_getdtime(CLOCK_MONOTONIC)
        return (now - self.monot)
    def getOffsetCopy(self, offst):
        # New instance shifted by offst seconds on both clocks.
        return self.__class__(monot = self.monot + offst, realt = self.realt + offst)
    def offset(self, offst):
        # Shift this instance in place by offst seconds on both clocks.
        self.monot += offst
        self.realt += offst
    def getCopy(self):
        return self.__class__(monot = self.monot, realt = self.realt)
class selftest(object):
    """Manual smoke test for MonoTime (Python 2 print syntax; run via the
    __main__ guard below)."""
    # Filter objects observed from two worker threads, for comparison.
    mg1 = None
    mg2 = None
    def run_t1(self):
        # Capture the shared realt filter as seen from thread 1.
        m = MonoTime()
        self.mg1 = m.globals.realt_flt
    def run_t2(self):
        # Capture the shared realt filter as seen from thread 2.
        m = MonoTime()
        self.mg2 = m.globals.realt_flt
    def run(self):
        # Warm up the shared realt filter with many samples.
        for x in range (0, 100000):
            m1 = MonoTime()
            m2 = MonoTime()
            if x == 0:
                print m1, m2
                print m1.ftime(), m2.ftime()
        #print m1.getdiff() - m2.getdiff()
        print m1, m2
        print m1.ftime(), m2.ftime()
        # Round-trip through the "<realt>-<monot>" string form.
        ms1 = str(m1)
        ms2 = str(m2)
        m3 = MonoTime(s = ms1)
        m4 = MonoTime(s = ms2)
        print m3, m4
        print m3.ftime(), m4.ftime()
        # Reconstruct from only one clock component each.
        m5 = MonoTime(realt = m3.realt)
        m6 = MonoTime(monot = m4.monot)
        print m5.ftime(), m6.ftime()
        # The filter is class-level state, so this should print True.
        print m5.globals.realt_flt == m1.globals.realt_flt
        from threading import Thread
        t1 = Thread(target = self.run_t1)
        t2 = Thread(target = self.run_t2)
        t1.start()
        t2.start()
        t1.join()
        t2.join()
        # NOTE(review): both threads should see the same MonoGlobals
        # instance, so this prints whether the filters differ -- confirm
        # the expected output is False.
        print(self.mg1 != self.mg2)
# Run the MonoTime smoke test when this module is executed directly.
if __name__ == '__main__':
    selftest().run()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum_ltc as electrum
from electrum_ltc.bitcoin import TYPE_ADDRESS
from electrum_ltc import WalletStorage, Wallet
from electrum_ltc_gui.kivy.i18n import _
from electrum_ltc.paymentrequest import InvoiceStore
from electrum_ltc.util import profiler, InvalidPassword
from electrum_ltc.plugins import run_hook
from electrum_ltc.util import format_satoshis, format_satoshis_plain
from electrum_ltc.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_ltc_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_ltc_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_ltc_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_ltc_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('garlium_ltc_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='garlium_ltc_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum_ltc.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum_ltc import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == b.get_name():
self.network.follow_chain(index)
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'garlicoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', 8)
return decimal_point_to_base_unit_name(decimal_point)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
    def decimal_point(self):
        # Decimal precision implied by the selected base unit.
        # NOTE(review): this indexes base_units by unit name, i.e. assumes
        # base_units maps name -> decimal point (cf. _get_bu/_set_bu, which
        # convert via decimal_point_to_base_unit_name) -- confirm against
        # electrum_ltc.util.
        return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Defines tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Garlium App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum_ltc.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('garlicoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum_ltc.transaction import Transaction
from electrum_ltc.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum_ltc.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum_ltc.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.garlium.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, wallet):
if wallet: # wizard returned a wallet
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
Logger.debug('Garlium: Wallet not found or action needed. Launching install wizard')
def launch_wizard():
storage = WalletStorage(path, manual_upgrades=True)
wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize The Ux part of garlium. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='garlium_ltc_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='garlium_ltc_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_ltc_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_ltc_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/garlium.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Garlium', message,
app_icon=icon, app_name='Garlium')
except ImportError:
Logger.Error('Notification: needs plyer; `sudo pip install plyer`')
    def on_pause(self):
        """Kivy lifecycle hook: the app is being sent to the background."""
        # Remember when we paused so on_resume can decide to re-lock.
        self.pause_time = time.time()
        # pause nfc
        if self.nfcscanner:
            self.nfcscanner.nfc_disable()
        # Returning True tells Kivy the pause request is accepted.
        return True
    def on_resume(self):
        """Kivy lifecycle hook: the app returned to the foreground."""
        now = time.time()
        # Require the PIN again after more than 60s in the background.
        if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
            self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
        if self.nfcscanner:
            self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast_transaction(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
replication.py
|
from threading import Thread
from queue import Empty
# from posix_ipc import Semaphore, ExistentialError, O_CREAT
from time import sleep
from datetime import datetime
from ujson import loads, dumps
from bson import ObjectId
# from posix_ipc import BusyError
from .database import Database
#from server_protocol import Protocol
from .utils import size_gb, semaphore_path
# Single-character codes used in replication protocol messages.
SYNC_CMD = '@'      # message field: command code
SYNC_KEY = 'K'      # message field: binlog key / high-water mark
SYNC_DATA = 'D'     # message field: record payload
SYNC_NODE = 'N'     # message field: originating node name
SYNC_TABLES = 'T'   # message field: table count (sent with HELLO)
SYNC_HELLO = 'H'    # command: initial handshake
SYNC_ACK = 'A'      # command: positive acknowledgement
SYNC_NAK = 'Z'      # command: sync broken / refused
SYNC_HWM = 'M'      # command: high-water-mark update
# NOTE(review): SYNC_RECORD_NEW shares the value 'N' with SYNC_NODE. In the
# commented-out protocol code below they appear in different positions
# (command value vs. field key), but confirm the overlap is intentional.
SYNC_RECORD_NEW = 'N'     # command: new record
SYNC_RECORD_UPDATE = 'U'  # command: update to an existing record
# TODO: sync db structure (attributes and indexes)
# TODO: changes to attributes and indexes should be cause a sync
class ReplicationSync(object):
    """Replicate database changes between this node and a single peer.

    NOTE(review): most of this file's implementation is commented out.
    ``__init__`` reads ``self._protocol`` when creating the listener and
    syncer threads, but ``_protocol`` is never assigned anywhere in this
    file (the Protocol import above is commented out), so instantiation
    will raise AttributeError.  ``__init__`` also joins the listener thread
    immediately, which blocks the constructor until the listener exits.
    Confirm both before using this class.
    """
    def __init__(self, node, peer, incoming, outgoing, path, logger):
        """
        Setup replication between this node and our peer for the chosen database
        :param node: name of this node
        :param peer: peer descriptor (has a ``name`` attribute)
        :param incoming: incoming messages from other peers
        :param outgoing: outgoing messages to other peers
        :param path: filesystem path used to derive semaphore names
        :param logger: callable used for log output
        """
        self._debug = True
        self._node = node
        self._path = path
        self._database = None          # no database is opened here
        self._peer = peer
        self._log = logger
        self._incoming = incoming
        self._outgoing = outgoing
        self._syncing = False
        self._hwm = None               # high-water mark into the binlog
        self._finished = False
        #self._handlers = {
        #    SYNC_HELLO: self.hello,
        #    SYNC_ACK: self.ack,
        #    SYNC_RECORD_NEW: self.record_new,
        #    SYNC_RECORD_UPDATE: self.record_update
        #}
        # NOTE(review): self._protocol is never assigned in this file.
        self._listener = Thread(target=self._protocol.start)
        self._syncer = Thread(target=self._protocol.rsync)
        self._listener.start()
        # Joining here blocks the constructor until the listener terminates.
        self._listener.join()
        if self._syncer.is_alive():
            self._syncer.join()
    def debug(self, text):
        """Emit *text* through the logger supplied at construction."""
        self._log('{}'.format(text))
# def listener(self):
# """
# Listen for new messages and dispatch via handler
# """
# self._protocol.start()
# self.debug('* Listener thread running for "{}:{}"'.format(self._node, self._peer.name))
# #self.send_hello()
# while True:
# try:
# obj = self._incoming.get(block=True, timeout=1.0)
# #print(">>", obj)
# if not obj:
# self.debug('* Incoming break signal')
# break
# self._protocol.handle(obj)
# # if obj[SYNC_CMD] == SYNC_HELLO:
# # self.hello(obj)
# # continue
# # elif obj[SYNC_CMD] == SYNC_ACK:
# # self.ack(obj)
# # continue
# # elif obj[SYNC_CMD] == SYNC_NAK:
# # self.debug('* DB sync is broken')
# # sleep(10)
# # break
# # elif obj[SYNC_CMD] == SYNC_HWM:
# # self.hwm(obj)
# # continue
# # batch = 0
# # beg = datetime.now()
# # with self._database.begin() as txn:
# # last = obj
# # while obj[SYNC_CMD] == SYNC_RECORD_NEW:
# # self.record_new(obj, txn)
# # batch += 1
# # if batch > 5000: break
# # try:
# # last = obj
# # obj = self._incoming.get(block=True, timeout=0.01)
# # except Empty:
# # break
# # except ConnectionResetError:
# # break
# # self.update_hwm(last, txn)
#
# except KeyboardInterrupt:
# self._finished = True
# except (EOFError, BrokenPipeError):
# break
# except Empty:
# continue
# # end = datetime.now()
# # span = end - beg
# # self.debug('+ Completed batch of "{}" @ {}/sec'.format(batch, int(batch/span.total_seconds())))
#
# #print("Batch=", batch, span, span.total_seconds(), 10*batch/span.total_seconds(), "/sec")
# #except Exception as e:
# ## self.debug('Receiver error> {} - {}'.format(e, obj))
# # break
#
# self._finished = True
# self.debug('* Listener thread terminated for "{}:{}"'.format(self._node, self._peer.name))
# def resync(self):
#
# with self._database.env.begin(write=True) as txn:
#
# with txn.cursor(db=self._database._binlog) as cursor:
# if cursor.last():
# self._hwm = cursor.key()
# else:
# self._hwm = str(ObjectId()).encode()
# txn.put(self._hwm, dumps({'txn': []}).encode(), db=self._database.binlog, append=False)
#
# for table_name in self._database.tables:
# table = self._database.table(table_name, txn=txn)
# self.debug('+ Begin Sync for table "{}"'.format(table_name))
# count = 0
# with txn.cursor(db=table._db) as cursor:
# move = cursor.first()
# while move:
# key, val = cursor.item()
# rec = {'txn': [{'tab': table_name, 'doc': loads(val.decode())}]}
# self.send_new(self._hwm, rec)
# move = cursor.next()
# count += 1
# self.debug('+ End Sync, copied {} records'.format(count))
# self.send_hwm(self._hwm)
# def syncer(self):
# """
# Continually push out updates to our peer when they happen
#
# Syncer Startup Logic
#
# Is incoming HWM == None?
# If it is, remote wants to do a full sync with this, which is valid only if we're empty ...
# If not len(db.tables) -> Resync
# Else, go into sleep mode until recovery complete
# Otherwise, set HWM ..
# If we can't, then the binlog is out of date and we need a resync so
# go into sleep mode until recovery complete
#
# Flush Semaphores
# Sync HWM to current
# Sync on Semaphore
#
# Recovery modes;
#
# a. Delete one db and retry
# b. Compare all records and update one from the other
# - this requires a last updated timestamp
#
# - Resync required a host level lock to prevent full sync from two peers ...
#
# """
#
# self.debug('* Syncer thread running for "{}:{}"'.format(self._node, self._peer.name))
# self.debug('* Syncer HWM = "{}"'.format(self._hwm))
#
# if not self._hwm:
# self.debug('* Initial synchronisation')
# self.resync()
# with self._database.env.begin() as txn:
# with txn.cursor(self._database._binlog) as cursor:
# if not cursor.set_key(self._hwm):
# self.debug('* Synchronisation lost - recovering')
# self.resync()
#
# semaphore = Semaphore(semaphore_path(self._path, self._peer.name))
# count = 0
# while semaphore.value:
# semaphore.acquire()
# count += 1
#
# self.debug('* Cleared "{}" old semaphores'.format(count))
#
# with self._database.env.begin() as txn:
# with txn.cursor(self._database._binlog) as cursor:
# if not cursor.set_key(self._hwm):
# self.debug('* Failed to sync BINLOG')
# return
# count = 0
# while cursor.next():
# key, val = cursor.item()
# self.send_new(key, loads(val.decode()))
# count += 1
# self._hwm = key
# self.debug('* Sync "{}" new items'.format(count))
#
# try:
# while not self._finished:
# try:
# semaphore.acquire(1.0)
# except BusyError:
# continue
#
# count = 0
# while True:
# with self._database.env.begin() as txn:
# with txn.cursor(self._database._binlog) as cursor:
# if not cursor.set_key(self._hwm):
# raise PyMambaBinlogCorrupt
# if not cursor.next():
# self.debug('* Semaphore was too quick!')
# break
# key, val = cursor.item()
# self.send_new(key, loads(val.decode()))
# count += 1
# if not self._database._semaphore.value:
# break
# self._database._semaphore.acquire()
# self._hwm = key
#
# except KeyboardInterrupt:
# self.debug('* Ctrl+c in syncer')
# self._finished = True
# except EOFError:
# self.debug('* Exit replication syncer')
# raise
#
# self._outgoing.put({})
# self.debug('* Syncer thread terminated for "{}:{}"'.format(self._node, self._peer.name))
# def send_hello(self):
# doc = {
# SYNC_CMD: SYNC_HELLO,
# SYNC_NODE: self._node,
# SYNC_TABLES: len(self._database.tables)
# }
# self._outgoing.put(doc)
# #print("HELLO:", doc)
#
# def hello(self, obj, txn=None):
# """
# Handle a hello request
# :param obj: incoming HELLO packet
# """
# with self._database.env.begin() as txn:
# key = '__hwm__{}__'.format(self._peer.name).encode()
# value = txn.get(key, default=None, db=self._database._metadata)
#
# #if obj[SYNC_TABLES] and not value:
# # self.send_nak()
# #else:
# self.debug('* Table length = {} and hwm = {}'.format(obj[SYNC_TABLES], value))
#
# self.send_ack(value)
#
# def send_new(self, key, val):
# doc = {
# SYNC_CMD: SYNC_RECORD_NEW,
# SYNC_KEY: key,
# SYNC_DATA: val,
# SYNC_NODE: self._node
# }
# self._outgoing.put(doc)
#
# def send_ack(self, value):
# doc = {
# SYNC_CMD: SYNC_ACK,
# SYNC_NODE: self._node,
# SYNC_KEY: value
# }
# self._outgoing.put(doc)
# #print("ACK:", doc)
#
# def send_nak(self):
# doc = {
# SYNC_CMD: SYNC_NAK,
# SYNC_NODE: self._node,
# }
# self._outgoing.put(doc)
#
# def send_hwm(self, value):
# doc = {
# SYNC_CMD: SYNC_HWM,
# SYNC_NODE: self._node,
# SYNC_KEY: value
# }
# self._outgoing.put(doc)
# #print("HWM:", doc)
#
#
# def ack(self, obj, txn=None):
# """
# (re)Initialise the syncer
# :param obj: an incoming RECORD_INIT packet
# """
# self._hwm = obj[SYNC_KEY]
# #self.debug('* Setting HWM to [{}]'.format(self._hwm))
# self._syncer.start()
#
# def hwm(self, obj):
# with self._database.env.begin(write=True) as txn:
# key = '__hwm__{}__'.format(self._peer.name).encode()
# txn.put(key, obj[SYNC_KEY], db=self._database._metadata)
#
# def record_new(self, obj, txn=None):
# """
# Handle a new incoming record
# """
# transactions = obj[SYNC_DATA]['txn']
# #
# # Make sure all the tables are open
# #
# for transaction in transactions:
# self._database.table(transaction['tab'], txn._txn)
#
# for transaction in transactions:
# table = self._database.table(transaction['tab'])
# doc = transaction['doc']
# txn.append(table, doc)
#
# def update_hwm(self, obj, txn):
# txn._replicated = True
# key = '__hwm__{}__'.format(self._peer.name).encode()
# txn._txn.put(key, obj[SYNC_KEY], db=self._database._metadata)
#
# def record_update(self, obj, txn):
# """
# Handle and update to a pre-existing record
# """
# pass
#
# class PyMambaBinlogCorrupt(Exception):
# pass
# def make_pass():
# """
# Make a single pass through this transaction
# """
# with self._database.env.begin() as txn:
# with txn.cursor(self._database._binlog) as cursor:
# if not self._hwm:
# move = cursor.first()
# else:
# if not cursor.set_key(self._hwm):
# raise PyMambaBinlogCorrupt
# move = cursor.next()
#
# count = 0
# if move:
# while True:
# key, val = cursor.item()
# print("<{}> <{}>".format(key, val))
# self.send_new(key, loads(val.decode()))
# count += 1
# if not cursor.next():
# break
# self._database._semaphore.acquire()
# self._hwm = key
# print("Batch=", count, move)
# return move
#
# We listen for _events until _finished
#
#
# last_transaction = None
# retry = 0
# semaphore = Semaphore(semaphore_path(self._path, self._peer.name))
# try:
# while not self._finished:
# try:
# semaphore.acquire(1.0)
# except BusyError:
# continue
# while True:
# with self._database.env.begin() as txn:
# with txn.cursor(self._database._binlog) as cursor:
# if not self._hwm:
# move = cursor.first()
# else:
# if not cursor.set_key(self._hwm):
# raise PyMambaBinlogCorrupt
# move = cursor.next()
# if move:
# key, val = cursor.item()
# self.send_new(key, loads(val.decode()))
# self._hwm = key
# break
# else:
# retry += 1
# print("Retry=", retry, "Value=", self._database._semaphore.value,self._database._semaphore.name)
# last_txnid = self._database._env.info()['last_txnid']
# if last_txnid != last_transaction:
# make_pass()
# last_transaction = last_txnid
# else:
# sleep(0.01)
# if self._outgoing.qsize():
# print("OLen=", self._outgoing.qsize())
#
# After we've quit, make sure we flush the pending queue
#
# while make_pass():
# sleep(0.01)
#
# Shut down the listener too ...
#
# self._outgoing.put({})
# self.debug('* Syncer thread terminated for "{}:{}"'.format(self._node, self._peer.name))
# except (KeyboardInterrupt, EOFError) as e:
# self.debug('* Exit replication syncer')
# raise
"""
if a.binlog.empty:
create dummy entry
if a.empty and b.empty:
if a.binlog.empty:
"""
# if not hwm:
# if not len(self._rs._database.tables) and siz:
# self.debug('Request for a full resync')
# else:
# self.debug('Everyone is empty, sync and start')
# with self._rs._database.env.begin(write=True) as txn:
# with txn.cursor(self._rs._database._binlog) as cursor:
# cursor.last()
# hwm = cursor.key()
# # set hwm to new entry
# self.send({
# PROTOCOL_CMD: PROTOCOL_HWM,
# PROTOCOL_VAL: hwm
# })
# # start syncer
# self._hwm = hwm
# self._rs._syncer.start()
#
# else:
# with self._rs._database.env.begin() as self._txn:
# with self._txn.cursor(self._rs._database._binlog) as cursor:
# if not cursor.set_key(hwm):
# self.debug('Binlog is out of sync - manual recovery required')
# else:
# count = 0
# self.flush()
# while cursor.next():
# count += 1
# hwm, val = cursor.item()
# self.send({
# PROTOCOL_CMD: PROTOCOL_NEW,
# PROTOCOL_KEY: hwm,
# PROTOCOL_VAL: loads(val.decode())
# })
# if count:
# self.debug('Sync "{}" new items'.format(count))
# self._hwm = hwm
# self._rs._syncer.start()
|
stellar_tidal_evolution.py
|
# Equations for tidal evolution of binary/planetary orbits
# From Hansen 2010, based on Eggleton et al. 1998
import threading
import numpy
import math
from scipy import integrate
from amuse.lab import *
from amuse.support import literature
#2010ApJ...723..285H
def sigma_planet():
    """Tidal dissipation constant sigma for the planet (Hansen 2010,
    2010ApJ...723..285H), in AMUSE units of g**-1 cm**-2 s**-1."""
    sig_p = 3.4e-7
    return 5.9e-54 * sig_p | units.g**-1 *units.cm**-2 * units.s**-1
#2010ApJ...723..285H
def sigma_star():
sig_s = 7.8e-8
#sig_s = 1e+9 # Earth
return 6.4e-59 * sig_s | units.g**-1 *units.cm**-2 * units.s**-1
def angular_frequency(Ms, Mp, a):
    """Keplerian orbital angular frequency for masses Ms, Mp at separation a."""
    return (constants.G*(Ms+Mp)/(a**3))**(0.5)
def J_orb(Ms, Mp, a, e):
    """Orbital angular momentum of the binary (masses Ms, Mp, semi-major
    axis a, eccentricity e)."""
    return Ms*Mp*(constants.G*a*(1-e**2)/(Ms + Mp))**0.5
def interp(x, *args, **kwds):
    """Wrapper around numpy.interp that preserves scalar inputs.

    numpy.interp always returns an array; for a scalar *x* this returns a
    plain Python float instead.  Array-like *x* is passed straight through.
    """
    if isinstance(x, (float, int)):
        # isinstance (rather than ``type(x) in (float, int)``) also accepts
        # int/float subclasses such as bool.
        return numpy.interp([x], *args, **kwds).item()
    return numpy.interp(x, *args, **kwds)
class TidalEvolution(literature.LiteratureReferencesMixIn):
    """
    Tidal evolution between a planet(esimal) and a star.
    Based on
    .. [#] ** 2010ApJ...723..285H
    .. [#] **
    """
    def __init__(self, central_particle=Particles()):
        # NOTE(review): the mutable default Particles() is shared between
        # all instances constructed without an explicit central_particle;
        # confirm this is intended.
        literature.LiteratureReferencesMixIn.__init__(self)
        self.current_time = 0 | units.s
        # Orbiters with pericenter within this many stellar radii are
        # treated as tidally interacting.
        self.pericenter_interaction_factor = 4
        self.central_particle = central_particle
        if not hasattr(self.central_particle, "gyration_radius_sq"):
            self.central_particle.gyration_radius_sq = 0.2
        if hasattr(self.central_particle, "radius"):
            self.central_particle.old_radius = self.central_particle.radius
        self.orbiters = Particles(0)
        if not hasattr(self.central_particle, "Omega"):
            # default stellar spin angular velocity
            self.central_particle.Omega = 2.6e-6|units.s**-1
        self.all_merged_orbiters = Particles()
    @property
    def particles(self):
        """All particles: the central star plus every orbiter."""
        return ParticlesSuperset([self.central_particle, self.orbiters])
    def set_current_time(self, time):
        """Set the model clock without evolving anything."""
        self.current_time = time
    def get_semimajor_axis(self):
        # NOTE(review): nothing in this class assigns self.semimajor_axis
        # (orbiters carry that attribute), so as written this raises
        # AttributeError - confirm the intended data source.
        return self.semimajor_axis
    def get_eccentricity(self):
        # NOTE(review): nothing in this class assigns self.eccentricity
        # (orbiters carry that attribute), so as written this raises
        # AttributeError - confirm the intended data source.
        return self.eccentricity
    def get_pericenter_interaction_factor(self):
        """Pericenter threshold, in stellar radii, for tidal interaction."""
        return self.pericenter_interaction_factor
    def orbital_evolution_time_scale(self):
        """Circularisation timescale |e / edot| for the first orbiter.

        Used by callers as an initial integration step estimate.
        """
        e = self.orbiters[0].eccentricity
        a = self.orbiters[0].semimajor_axis
        m = self.orbiters[0].mass
        r = self.orbiters[0].radius
        M = self.central_particle[0].mass
        R = self.central_particle[0].radius
        O = self.central_particle[0].Omega
        Op = angular_frequency(M, m, a)  # orbital (Keplerian) frequency
        dt = abs(e/self.edot_star(a, M, R, m, r, O, Op, e))
        return dt
    def add_particles(self, p):
        """Add the particles in *p* to the orbiter set."""
        self.orbiters.add_particles(p)
def delete_particles(self, p):
self.orbiters.removel_particles(p)
    def evolve_model(self, time):
        """Advance the tidal evolution of all close-in orbiters to *time*.

        Populates self.orbiters_with_error (integration failures) and
        self.all_merged_orbiters (pericenter inside the star) for the
        caller to clean up.
        """
        M = self.central_particle.mass
        R = self.central_particle.radius
        Os = self.central_particle.Omega
        # bodies whose pericenter lies within the interaction zone
        interacting_bodies = self.orbiters.select(lambda a, e: a*(1-e)<self.pericenter_interaction_factor*R,["semimajor_axis", "eccentricity"])
        if len(interacting_bodies):
            print "N tidal:", len(self.orbiters), "of which N=", len(interacting_bodies), "tidally interacting"
        self.orbiters_with_error = Particles()
        """
        #Interestingly enough, integrate.quadpack is not thread-safe.
        # AvanE and SPZ, 7 Jan 2014
        #
        threads = []
        nproc = 3
        Nper_proc = max(1, len(interacting_bodies)/nproc)
        print "npp=", Nper_proc
        for offset in range(0, len(interacting_bodies), Nper_proc):
            subset = interacting_bodies[offset:offset+Nper_proc].copy()
            print "lss=", len(subset)
            thread = threading.Thread(target=self.evolve_multiple_orbiters, args=[subset, time, M, R, Os])
            threads.append(thread)
        for ti in threads:
            ti.start()
        for ti in threads:
            ti.join()
        """
        for pi in interacting_bodies:
            self.evolve_individual_orbiter(pi, time, M, R, Os)
        print "Central_particle:", self.central_particle.Omega
        self.current_time = time
        if len(interacting_bodies):
            print "Post tidal interaction", len(interacting_bodies)
        if len(self.orbiters_with_error)>0:
            print "Error in N=", len(self.orbiters_with_error), "orbiters."
            print self.orbiters_with_error
        # orbiters whose pericenter now lies inside the star have merged
        merged_orbiters = interacting_bodies.select(lambda a, e, r: a*(1-e)<R+r,["semimajor_axis", "eccentricity", "radius"])
        self.all_merged_orbiters = Particles()
        if len(merged_orbiters)>0:
            print "Merged orbiters N= ", len(merged_orbiters)
            print merged_orbiters
            self.all_merged_orbiters.add_particles(merged_orbiters-self.orbiters_with_error)
def contains_nan(self, dO):
has_nan = False
for xi in dO:
if math.isnan(xi):
has_nan = True
return has_nan
    def evolve_multiple_orbiters(self, interacting_bodies, time, M, R, Os):
        """Evolve each orbiter in *interacting_bodies* in turn.

        Kept as a worker entry point for the (disabled) threaded path above.
        """
        for pi in interacting_bodies:
            print "ev=", pi.key
            self.evolve_individual_orbiter(pi, time, M, R, Os)
    def evolve_individual_orbiter(self, oi, time, M, R, Os):
        """Integrate one orbiter's (a, e) and accumulate stellar spin change
        up to *time*.

        Steps are limited to the circularisation timescale |e/edot|; on any
        NaN from the quadrature the orbiter is added to orbiters_with_error
        and integration of this body stops.
        """
        dOmega = zero
        m = oi.mass
        r = oi.radius
        current_time = self.current_time
        while current_time<time:
            #print "Time=", oi.name, current_time, oi.semi_major_axis, oi.eccentricity, self.central_particle.Omega
            a = oi.semimajor_axis
            e = oi.eccentricity
            Op = angular_frequency(M, m, a)
            dt = time-current_time
            # limit the step to the eccentricity-damping timescale
            dt = min(dt, abs(e/self.edot_star(a, M, R, m, r, Os, Op, e)) )
            t_end = current_time + dt
            # NOTE(review): the integrands ignore the integration variable x,
            # so each quad() amounts to rate * dt - confirm this is intended.
            da = integrate.quad(lambda x: self.adot_star(a, M, R, m, r, Os, Op, e).value_in(units.RSun/units.s), current_time.value_in(units.s), t_end.value_in(units.s))
            de = integrate.quad(lambda x: self.edot_star(a, M, R, m, r, Os, Op, e).value_in(units.s**-1), current_time.value_in(units.s), t_end.value_in(units.s))
            dO = integrate.quad(lambda x: self.Omegadot_star(a, M, R, m, r, Os, Op, e).value_in(units.s**-2), current_time.value_in(units.s), t_end.value_in(units.s))
            if self.contains_nan(da):
                print "NAN's detected in da", da
                self.orbiters_with_error.add_particles(oi.as_set())
                break
            if self.contains_nan(de):
                print "NAN's detected, de", de
                self.orbiters_with_error.add_particles(oi.as_set())
                break
            if self.contains_nan(dO):
                print "NAN's detectedm dO", dO
                self.orbiters_with_error.add_particles(oi.as_set())
                break
            oi.semimajor_axis += da[0] | units.RSun
            oi.eccentricity += de[0]
            if oi.eccentricity<0:
                oi.eccentricity = 0
            dOmega += dO[0] | units.s**-1
            ######oi.age += dt
            current_time += dt
            #print "Time=", oi.name, current_time, oi.semi_major_axis, oi.eccentricity, self.central_particle.Omega
        self.central_particle.Omega += dOmega
        # self.central_particle.Omega = self.central_particle.Omega * (self.central_particle.old_radius/self.central_particle.radius)**2
# self.central_particle.Omega = self.central_particle.Omega * (self.central_particle.old_radius/self.central_particle.radius)**2
def J_star(self, Ms, Rs, Omega_s):
k2s = self.central_particle[0].gyration_radius_sq
Is = k2s*Ms*Rs*Rs
return Is*Omega_s
def J_planet(self, Mp, Rp, k2p, Omega_p):
k2p = 0.2 # depends on the planet
Ip = k2p*Mp*Rp*Rp
return Ip*Omega_p
# timescale for particle with mass Mb
def tidal_timescale(self, Ma, Mb, Rb, a, sigma):
denominator = (9*Ma*(Ma + Mb)*(Rb**10)*sigma)
t_tidal = float("infinity") | units.s
if not denominator==zero:
t_tidal = Mb*(a**8) / denominator
return t_tidal
    def Tp(self, Ms, Mp, Rp, a):
        """Tidal timescale for dissipation in the planet."""
        return self.tidal_timescale(Ms, Mp, Rp, a, sigma_planet())
    def Ts(self, Ms, Mp, Rs, a):
        """Tidal timescale for dissipation in the star.

        Note the swapped argument roles: here the star (Ms, Rs) is the
        distorted body and the planet (Mp) raises the tide.
        """
        return self.tidal_timescale(Mp, Ms, Rs, a, sigma_star())
    def adot_planet(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """da/dt due to dissipation in the planet (Hansen 2010)."""
        T_p = self.Tp(Ms, Mp, Rp, a)
        omega = angular_frequency(Ms, Mp, a)
        return -(a/T_p) * (self.f1(e) - Omega_p/omega * self.f2(e))
    def adot_star(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """da/dt due to dissipation in the star (Hansen 2010)."""
        T_s = self.Ts(Ms, Mp, Rs, a)
        omega = angular_frequency(Ms, Mp, a)
        adot = -(a/T_s) * (self.f1(e) - Omega_s/omega * self.f2(e))
        return adot
    def edot_planet(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """de/dt due to dissipation in the planet (Hansen 2010)."""
        T_p = self.Tp(Ms, Mp, Rp, a)
        omega = angular_frequency(Ms, Mp, a)
        return -9./2. * e/T_p * (self.f3(e) - 11./18. * Omega_p/omega * self.f4(e))
    def edot_star(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """de/dt due to dissipation in the star (Hansen 2010)."""
        T_s = self.Ts(Ms, Mp, Rs, a)
        omega = angular_frequency(Ms, Mp, a)
        edot = -9./2. * e/T_s * (self.f3(e) - 11./18. * Omega_s/omega * self.f4(e))
        return edot
    def Omegadot_planet(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """Planet spin-rate change due to dissipation in the planet.

        NOTE(review): this calls self.J_planet with three arguments
        (Mp, Rp, Omega_p); verify J_planet's signature accepts that call.
        """
        T_p = self.Tp(Ms, Mp, Rp, a)
        omega = angular_frequency(Ms, Mp, a)
        gamma = J_orb(Ms, Mp, a, e)/self.J_planet(Mp, Rp, Omega_p)
        return gamma/2. * Omega_p/T_p * (self.f5(e) - Omega_p/omega * self.f6(e))
    def Omegadot_star(self, a, Ms, Rs, Mp, Rp, Omega_s, Omega_p, e):
        """Stellar spin-rate change due to dissipation in the star."""
        T_s = self.Ts(Ms, Mp, Rs, a)
        omega = angular_frequency(Ms, Mp, a)
        gamma = J_orb(Ms, Mp, a, e)/self.J_star(Ms, Rs, Omega_s)
        return gamma/2. * Omega_s/T_s * (self.f5(e) - Omega_s/omega * self.f6(e))
def f1(self, e):
res = 1
if (e > 0):
res = (1 + 31./2. *e*e + 255./8. *(e**4) + 185./16. *(e**6) + 25./64. *(e**8)) \
/math.pow((1 - e*e), 7.5)
return res
def f2(self, e):
res = 1
if (e > 0):
res = (1 + 15./2. *e*e + 45./8. *math.pow(e,4) + 5./16. *math.pow(e, 6)) \
/math.pow((1 - e*e), 6)
return res
def f3(self, e):
res = 1
if (e > 0):
res = (1 + 15./4. * e*e + 15./8. *math.pow(e,4) + 5./64. *math.pow(e, 6)) \
/math.pow((1 - e*e), 6.5)
return res
def f4(self, e):
res = 1
if (e > 0):
res = (1 + 3./2. *e*e + 1./8. *math.pow(e,4)) /math.pow((1 - e*e), 5)
return res
def f5(self, e):
res = 1
if (e > 0):
res = (1 + 15./2. * e*e + 45./8. *math.pow(e,4) + 5./16. *math.pow(e, 6)) \
/math.pow((1 - e*e), 6.5)
return res
def f6(self, e):
res = 1
if (e > 0):
res = (1 + 3 * e*e + 3./8. *math.pow(e,4)) /math.pow((1 - e*e), 5)
return res
#import unittest
from amuse.test.amusetest import TestCase
class TestTidalInteraction(TestCase):
    """Integration test: errored and merged orbiters get removed.

    NOTE(review): this drives a full SeBa stellar-evolution run to the
    helium white dwarf stage, so it is slow and requires the AMUSE/SeBa
    environment.
    """
    def test_remove_orbiters(self):
        # A star with two planets; the run should end with one orbiter left.
        M = 1 | units.MSun
        m = 1 | units.MJupiter
        a = 1 | units.AU
        e = 0.99
        Omega_s = 2.6e-6|units.s**-1
        star = Particles(1)
        star.mass = M
        star.Omega = Omega_s
        star.stellar_type = 1|units.stellar_type
        stellar = SeBa()
        stellar.particles.add_particles(star)
        channel_from_se_to_framework = stellar.particles.new_channel_to(star)
        channel_from_se_to_framework.copy_attributes(["age", "mass", "radius", "luminosity", "temperature", "stellar_type"])
        tidal = TidalEvolution(star)
        planet = Particles(2)
        planet.mass = [1, 100] * m
        planet.radius = [0.001, 0.001] |units.RSun
        planet.semimajor_axis = [1, 5.2] * a
        planet.eccentricity = [e, 0.1]
        tidal.add_particles(planet)
        channel_from_tc_to_framework = tidal.central_particle.new_channel_to(star)
        channel_from_to_to_framework = tidal.orbiters.new_channel_to(planet)
        channel_from_framework_to_tc = star.new_channel_to(tidal.central_particle)
        channel_from_framework_to_to = planet.new_channel_to(tidal.orbiters)
        dt = 1|units.Myr
        time = 0*dt
        He_WD = 10 | units.stellar_type
        while star.stellar_type<He_WD:
            print "T=", stellar.model_time
            stellar.particles.evolve_one_step()
            time = stellar.particles.age
            # orbits expand adiabatically as the star loses mass
            adiabatic_expansion_factor = star[0].mass/stellar.particles[0].mass
            tidal.central_particle.mass = stellar.particles[0].mass
            # spin changes with R**-2 (angular momentum conservation)
            star[0].Omega = star[0].Omega * (star[0].radius/stellar.particles[0].radius)**2
            planet.semimajor_axis *= adiabatic_expansion_factor
            channel_from_se_to_framework.copy_attributes(["age", "mass", "radius", "luminosity", "temperature", "stellar_type"])
            channel_from_framework_to_tc.copy_attributes(["mass", "radius", "Omega"])
            channel_from_framework_to_to.copy_attributes(["semimajor_axis"])
            tidal.central_particle.Omega = star[0].Omega
            tidal.central_particle.radius = star[0].radius
            tidal.evolve_model(time)
            channel_from_to_to_framework.copy_attributes(["semimajor_axis", "eccentricity"])
            if len(tidal.orbiters_with_error)>0:
                print "Remove orbiter with error:", len(tidal.orbiters_with_error)
                tidal.orbiters.remove_particles(tidal.orbiters_with_error)
                # NOTE(review): remove_particle (singular) is called with a
                # particle set here - confirm the AMUSE API accepts that.
                planet.remove_particle(tidal.orbiters_with_error)
            if len(tidal.all_merged_orbiters)>0:
                print "Merged planets/asteroids: N=", len(tidal.all_merged_orbiters)
                print "removed:", tidal.all_merged_orbiters
                tidal.orbiters.remove_particles(tidal.all_merged_orbiters)
                planet.remove_particle(tidal.all_merged_orbiters)
        print "Remaining orbiters:", tidal.orbiters
        print "N=", len(tidal.orbiters)
        self.assertEquals(len(tidal.orbiters), 1)
def tidal_interaction(M, m, a, e, Omega_s, tend):
    """Co-evolve a star (via SeBa) and one planet under tides until *tend*.

    :param M: stellar mass
    :param m: planet mass
    :param a: initial planet semi-major axis
    :param e: initial planet eccentricity
    :param Omega_s: initial stellar spin angular velocity
    :param tend: end time of the integration
    """
    star = Particles(1)
    star.mass = M
    star.Omega = Omega_s
    star.stellar_type = 1|units.stellar_type
    stellar = SeBa()
    stellar.particles.add_particles(star)
    channel_from_se_to_framework = stellar.particles.new_channel_to(star)
    channel_from_se_to_framework.copy_attributes(["age", "mass", "radius", "luminosity", "temperature", "stellar_type"])
    tidal = TidalEvolution(star)
    planet = Particles(1)
    planet.mass = 1*m
    planet.radius = 0.001 |units.RSun
    planet.semimajor_axis = 1.*a
    planet.eccentricity = e
    tidal.add_particles(planet)
    channel_from_tc_to_framework = tidal.central_particle.new_channel_to(star)
    channel_from_to_to_framework = tidal.orbiters.new_channel_to(planet)
    channel_from_framework_to_tc = star.new_channel_to(tidal.central_particle)
    channel_from_framework_to_to = planet.new_channel_to(tidal.orbiters)
    # bodies = ParticlesSuperset([star, planet])
    # dt = 1|units.Myr
    dt = tidal.orbital_evolution_time_scale()
    time = zero
    while time<tend:
        # step with the smaller of the tidal and stellar timescales,
        # but never below 1 Myr
        dt_se = stellar.particles[0].time_step
        dt = min(dt, dt_se)
        dt = max(1|units.Myr, dt)
        print "dt_tidal=", dt
        time += dt
        stellar.evolve_model(time)
        adiabatic_expansion_factor = star[0].mass/stellar.particles[0].mass
        tidal.central_particle.mass = stellar.particles[0].mass
        # spin changes with R**-2 (angular momentum conservation)
        star[0].Omega = star[0].Omega * (star[0].radius/stellar.particles[0].radius)**2
        #expand planetary orbit due to stellar mass loss
        planet.semimajor_axis *= adiabatic_expansion_factor
        channel_from_se_to_framework.copy_attributes(["age", "mass", "radius", "luminosity", "temperature", "stellar_type"])
        channel_from_framework_to_tc.copy_attributes(["mass", "radius", "Omega"])
        channel_from_framework_to_to.copy_attributes(["semimajor_axis"])
        tidal.central_particle.Omega = star[0].Omega
        tidal.central_particle.radius = star[0].radius
        tidal.evolve_model(time)
        channel_from_to_to_framework.copy_attributes(["semimajor_axis", "eccentricity"])#, "merged_with_central_star"])
        if len(tidal.all_merged_orbiters)>0:
            print "Merged planets/asteroids: N=", len(tidal.all_merged_orbiters)
            print "removed:", tidal.all_merged_orbiters
            tidal.orbiters.remove_particles(tidal.all_merged_orbiters)
            planet.remove_particle(tidal.all_merged_orbiters)
        print "Remaining orbiters:", tidal.orbiters
        print "N=", len(tidal.orbiters)
        if len(tidal.orbiters)==0:
            return
        print "current time=", tidal.current_time, tidal.orbiters.semimajor_axis, tidal.orbiters.eccentricity
def new_option_parser():
    """Build the command-line parser for the tidal-interaction run.

    Uses the AMUSE unit-aware OptionParser so defaults carry units.
    """
    # from optparse import OptionParser
    from amuse.units.optparse import OptionParser
    result = OptionParser()
    result.add_option("-M", unit=units.MSun,
                      dest="M", type="float", default = 1|units.MSun,
                      help="stellar mass [%default]")
    result.add_option("-m", unit=units.MJupiter,
                      dest="m", type="float", default = 0.001|units.MJupiter,
                      help="planet mass [%default]")
    result.add_option("-a", unit=units.RSun,
                      dest="a", type="float", default = 1|units.RSun,
                      help="planet semi major axis [%default]")
    result.add_option("-e",
                      dest="e", type="float", default = 0.6,
                      help="planet eccentricity [%default]")
    result.add_option("-t", unit=units.Myr,
                      dest="tend", type="float", default = 1|units.Myr,
                      help="end time of integration [%default]")
    # BUG FIX: help text was the placeholder "Stellar angular something";
    # Omega_sun is used throughout as the stellar spin angular velocity.
    result.add_option("-O", unit=units.s**-1,
                      dest="Omega_sun", type="float", default = 2.6e-6|units.s**-1,
                      help="stellar angular velocity [%default]")
    return result
if __name__ in ('__main__', '__plot__'):
    # Pretty-print AMUSE quantities in solar units before running.
    set_printing_strategy("custom", #nbody_converter = converter,
                          preferred_units = [units.MSun, units.RSun, units.Myr],
                          precision = 11, prefix = "",
                          separator = " [", suffix = "]")
    o, arguments = new_option_parser().parse_args()
    tidal_interaction(o.M, o.m, o.a, o.e, o.Omega_sun, o.tend)
|
sawppy.py
|
import datetime
import json
import logging
import sys
import threading
import time
if (sys.version_info > (3, 0)):
import urllib.request as urllib2
from urllib.parse import urlencode
else:
import urllib2 # pylint: disable=import-error
from urllib import urlencode
# Module logger for this hardware driver.
log = logging.getLogger('RemoTV.hardware.sawppy')
# Module-level controller instance; created by setup().
sawppy = None
def setup(robot_config):
    """Create the module-level Sawppy controller from the robot config."""
    global sawppy
    sawppy = Sawppy(robot_config)
def move(args):
    """Dispatch one movement command to the Sawppy controller.

    Errors are logged rather than raised so a bad command cannot kill the
    caller's handler loop.
    """
    try:
        sawppy.handle_input(args)
    except Exception as e:
        log.critical("Sawppy Error: " + str(e))
class Sawppy:
def __init__(self, robot_config):
self.motor_time = 0.5
if robot_config.has_option('sawppy', 'motor_time'):
self.motor_time = robot_config.getfloat('sawppy', 'motor_time')
self.driving_speed = 0.6
if robot_config.has_option('sawppy', 'driving_speed'):
self.driving_speed = robot_config.getfloat('sawppy', 'driving_speed')
self.turn_angle = 0.4
if robot_config.has_option('sawppy', 'turn_angle'):
self.turn_angle = robot_config.getfloat('sawppy', 'turn_angle')
self.user_slice = 60
if robot_config.has_option('sawppy', 'user_slice'):
self.user_slice = robot_config.getfloat('sawppy', 'user_slice')
log.critical("Sawppy: speed= %f angle= %f motor_time= %f user_slice= %f" % (self.driving_speed, self.turn_angle, self.motor_time, self.user_slice))
self.owners = robot_config.get('robot', 'owner').split(',')
self.last_command_time = datetime.datetime.utcnow()
self.last_command_user = None
self.prev_command_user = None
self.last_user_time = datetime.datetime.utcnow()
self.stopped = False
self.lock = threading.Lock()
self.magnitude = 0.0
self.angle = 0.0
# Start the main_loop() in a separate thread
self.tsk = threading.Thread(target = self.main_loop)
self.tsk.daemon = True
self.tsk.start()
log.debug("Sawppy Activated")
def handle_input(self, args):
t = datetime.datetime.utcnow()
user = args['user']['username']
if (t - self.last_user_time).total_seconds > self.user_slice:
self.prev_command_user = self.last_command_user
self.last_command_user = None
if user in self.owners:
log.debug("Sawppy owner (%s) took over" % user)
self.prev_command_user = None
self.last_command_user = user
self.last_user_time = t
if self.last_command_user == None:
if user == self.prev_command_user:
if (t - self.last_user_time).total_seconds <= self.user_slice / 3:
return
self.last_command_user = user
self.last_user_time = t
if self.last_command_user == user:
command = args['button']['command']
log.debug("Sawppy Got command %s from %s" % (command, user))
with self.lock:
if command == 'forward':
self.magnitude = self.driving_speed
self.angle = 0.0
elif command == 'forwardleft':
self.magnitude = self.driving_speed
self.angle = -self.turn_angle
elif command == 'forwardright':
self.magnitude = self.driving_speed
self.angle = self.turn_angle
elif command == 'left':
self.magnitude = 0.15
self.angle = -1.0
elif command == 'right':
self.magnitude = 0.15
self.angle = 1.0
elif command == 'reverse':
self.magnitude = -self.driving_speed
self.angle = 0.0
elif command == 'reverseleft':
self.magnitude = -self.driving_speed
self.angle = -self.turn_angle
elif command == 'reverseright':
self.magnitude = -self.driving_speed
self.angle = self.turn_angle
elif command == 'stop':
self.magnitude = 0.0
self.angle = 0.0
self.last_command_time = datetime.datetime.utcnow()
    def main_loop(self):
        """Background thread body: relay the latest drive command to the rover.

        Runs until self.running is cleared.  Each ~50ms tick it snapshots
        the shared command state under the lock, then (outside the lock,
        since drive() re-acquires it):
          * re-sends the current magnitude/angle whenever a command newer
            than the last transmission exists;
          * sends a stop once self.motor_time seconds pass with no new
            command and the rover is not already stopped (dead-man safety);
          * releases the controlling user after self.user_slice seconds
            of inactivity.
        """
        self.running = True
        log.debug("Sawppy Loop starting!")
        # Timestamp of the most recent transmission to the rover.
        send_last_command = datetime.datetime.utcnow()
        try:
            while self.running:
                # Snapshot shared state; drive() takes the same lock, so
                # it must be called after this block, not inside it.
                with self.lock:
                    lc = self.last_command_time
                    m = self.magnitude
                    a = self.angle
                    s = self.stopped
                t = datetime.datetime.utcnow()
                # log.critical("main_loop send_last_command= %s lc = %s (%f), m= %f, a= %f, s= %d, t= %s" % (str(send_last_command), str(lc), (send_last_command - lc).total_seconds(), m, a, s, str(t)))
                # Negative means a command arrived after our last send.
                if (send_last_command - lc).total_seconds() < 0.0:
                    # log.critical("main_loop ===> m= %f, a= %f" % (m, a))
                    self.drive(m, a)
                    send_last_command = t
                elif (t - send_last_command).total_seconds() > self.motor_time and not s:
                    # log.critical("main_loop timeout !! stop")
                    # No fresh command within motor_time: stop the motors.
                    self.drive(0, 0)
                if self.last_command_user != None and (t - self.last_user_time).total_seconds() > self.user_slice:
                    # log.critical("user switch")
                    # Controlling user idled past their slice; free the seat.
                    self.last_command_user = None
                time.sleep(0.05)
        except Exception as e:
            print("Sawppy Loop Error: " + str(e))
            log.critical("Sawppy Loop Error: " + str(e))
def drive(self, speed, angle):
data = urlencode({ 'magnitude': int(speed * 100), 'pct_angle': int(angle * 100)})
# log.critical("mw data= " + data)
req = urllib2.Request('http://127.0.0.1:5000/drive_command', data = data)
try:
f = urllib2.urlopen(req)
except Exception as e:
print("Sawppy drive Error: " + str(e))
with self.lock:
if speed == 0.0:
self.stopped = True
else:
self.stopped = False
|
pyshell.py
|
#! /usr/bin/env python3
import sys

# When run as a script, register this module as idlelib.pyshell so that
# later "import idlelib.pyshell" statements resolve to the running instance
# instead of importing a second copy.
if __name__ == "__main__":
    sys.modules['idlelib.pyshell'] = sys.modules['__main__']

try:
    from tkinter import *
except ImportError:
    print("** IDLE can't import Tkinter.\n"
          "Your Python may not be configured for Tk. **", file=sys.__stderr__)
    raise SystemExit(1)

# Valid arguments for the ...Awareness call below are defined in the following.
# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
if sys.platform == 'win32':
    try:
        import ctypes
        PROCESS_SYSTEM_DPI_AWARE = 1
        # Opt in to system DPI awareness so Tk renders crisply on
        # high-DPI Windows displays.
        ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
    except (ImportError, AttributeError, OSError):
        pass

import tkinter.messagebox as tkMessageBox

# IDLE requires tcl/tk 8.5+; show a GUI error and bail out early if older.
if TkVersion < 8.5:
    root = Tk()  # otherwise create root in main
    root.withdraw()
    from idlelib.run import fix_scaling
    fix_scaling(root)
    tkMessageBox.showerror("Idle Cannot Start",
            "Idle requires tcl/tk 8.5+, not %s." % TkVersion,
            parent=root)
    raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
from textwrap import TextWrapper
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, PseudoInputFile, PseudoOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0  # someday pass in host, port for remote debug capability

# Override warnings module to write to warning_stream.  Initialize to send IDLE
# internal warnings to the console.  ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__  # None, at least on Windows, if no console.
def idle_showwarning(
        message, category, filename, lineno, file=None, line=None):
    """Show Idle-format warning (after replacing warnings.showwarning).

    The differences are the formatter called, the file=None replacement,
    which can be None, the capture of the consequence AttributeError,
    and the output of a hard-coded prompt.
    """
    # Fall back to the module-level warning stream when no file is given.
    out = warning_stream if file is None else file
    try:
        out.write(idle_formatwarning(
            message, category, filename, lineno, line=line))
        out.write(">>> ")
    except (AttributeError, OSError):
        # out (probably __stderr__) is invalid -- drop the warning.
        pass
# Saved original warnings.showwarning; None while capture is inactive.
_warnings_showwarning = None

def capture_warnings(capture):
    "Replace warning.showwarning with idle_showwarning, or reverse."
    global _warnings_showwarning
    if capture and _warnings_showwarning is None:
        # Install: remember the stock hook, then divert to IDLE's.
        _warnings_showwarning = warnings.showwarning
        warnings.showwarning = idle_showwarning
    elif not capture and _warnings_showwarning is not None:
        # Uninstall: restore the stock hook and forget it.
        warnings.showwarning = _warnings_showwarning
        _warnings_showwarning = None

capture_warnings(True)
def extended_linecache_checkcache(filename=None,
                                  orig_checkcache=linecache.checkcache):
    """Extend linecache.checkcache to preserve the <pyshell#...> entries

    Temporarily pull the <pyshell#...> pseudo-file entries out of the
    cache, run the original linecache.checkcache() (orig_checkcache is
    bound at definition time, so the patch below does not recurse), and
    finally put the saved entries back.
    """
    cache = linecache.cache
    shell_keys = [k for k in list(cache) if k.startswith('<') and k.endswith('>')]
    saved = {k: cache.pop(k) for k in shell_keys}
    orig_checkcache(filename)
    cache.update(saved)

# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
    "Regular text edit window in IDLE, supports breakpoints"

    def __init__(self, *args):
        # Line numbers with an active breakpoint in this window.
        self.breakpoints = []
        EditorWindow.__init__(self, *args)
        self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
        self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
        self.text.bind("<<open-python-shell>>", self.flist.open_shell)

        # Persistent breakpoint store, shared by all editor windows.
        self.breakpointPath = os.path.join(
                idleConf.userdir, 'breakpoints.lst')
        # whenever a file is changed, restore breakpoints
        def filename_changed_hook(old_hook=self.io.filename_change_hook,
                                  self=self):
            self.restore_file_breaks()
            old_hook()
        self.io.set_filename_change_hook(filename_changed_hook)
        if self.io.filename:
            self.restore_file_breaks()
        self.color_breakpoint_text()

    # Right-click context-menu entries (label, virtual event, check method).
    rmenu_specs = [
        ("Cut", "<<cut>>", "rmenu_check_cut"),
        ("Copy", "<<copy>>", "rmenu_check_copy"),
        ("Paste", "<<paste>>", "rmenu_check_paste"),
        (None, None, None),
        ("Set Breakpoint", "<<set-breakpoint-here>>", None),
        ("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
    ]

    def color_breakpoint_text(self, color=True):
        "Turn colorizing of breakpoint text on or off"
        if self.io is None:
            # possible due to update in restore_file_breaks
            return
        if color:
            theme = idleConf.CurrentTheme()
            cfg = idleConf.GetHighlight(theme, "break")
        else:
            cfg = {'foreground': '', 'background': ''}
        self.text.tag_config('BREAK', cfg)

    def set_breakpoint(self, lineno):
        # Tag the line, record it, and notify the active debugger (if any).
        text = self.text
        filename = self.io.filename
        text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
        try:
            self.breakpoints.index(lineno)
        except ValueError:  # only add if missing, i.e. do once
            self.breakpoints.append(lineno)
        try:    # update the subprocess debugger
            debug = self.flist.pyshell.interp.debugger
            debug.set_breakpoint_here(filename, lineno)
        except: # but debugger may not be active right now....
            # NOTE(review): bare except also hides KeyboardInterrupt /
            # SystemExit; except Exception would be safer.
            pass

    def set_breakpoint_here(self, event=None):
        # Event handler: set a breakpoint on the line under the cursor.
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        self.set_breakpoint(lineno)

    def clear_breakpoint_here(self, event=None):
        # Event handler: remove the breakpoint on the line under the cursor.
        text = self.text
        filename = self.io.filename
        if not filename:
            text.bell()
            return
        lineno = int(float(text.index("insert")))
        try:
            self.breakpoints.remove(lineno)
        except:
            # Line had no recorded breakpoint; nothing to remove.
            pass
        text.tag_remove("BREAK", "insert linestart",\
                        "insert lineend +1char")
        try:
            debug = self.flist.pyshell.interp.debugger
            debug.clear_breakpoint_here(filename, lineno)
        except:
            # Debugger may not be active right now.
            pass

    def clear_file_breaks(self):
        # Remove every breakpoint in this window (and in the debugger).
        if self.breakpoints:
            text = self.text
            filename = self.io.filename
            if not filename:
                text.bell()
                return
            self.breakpoints = []
            text.tag_remove("BREAK", "1.0", END)
            try:
                debug = self.flist.pyshell.interp.debugger
                debug.clear_file_breaks(filename)
            except:
                # Debugger may not be active right now.
                pass

    def store_file_breaks(self):
        "Save breakpoints when file is saved"
        # XXX 13 Dec 2002 KBK Currently the file must be saved before it can
        #     be run.  The breaks are saved at that time.  If we introduce
        #     a temporary file save feature the save breaks functionality
        #     needs to be re-verified, since the breaks at the time the
        #     temp file is created may differ from the breaks at the last
        #     permanent save of the file.  Currently, a break introduced
        #     after a save will be effective, but not persistent.
        #     This is necessary to keep the saved breaks synched with the
        #     saved file.
        #
        #     Breakpoints are set as tagged ranges in the text.
        #     Since a modified file has to be saved before it is
        #     run, and since self.breakpoints (from which the subprocess
        #     debugger is loaded) is updated during the save, the visible
        #     breaks stay synched with the subprocess even if one of these
        #     unexpected breakpoint deletions occurs.
        breaks = self.breakpoints
        filename = self.io.filename
        # Rewrite breakpoints.lst: keep every other file's line, then
        # append this file's (refreshed) breakpoint list.
        try:
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
        except OSError:
            lines = []
        try:
            with open(self.breakpointPath, "w") as new_file:
                for line in lines:
                    if not line.startswith(filename + '='):
                        new_file.write(line)
                self.update_breakpoints()
                breaks = self.breakpoints
                if breaks:
                    new_file.write(filename + '=' + str(breaks) + '\n')
        except OSError as err:
            # Warn only once per session about an unwritable store.
            if not getattr(self.root, "breakpoint_error_displayed", False):
                self.root.breakpoint_error_displayed = True
                tkMessageBox.showerror(title='IDLE Error',
                        message='Unable to update breakpoint list:\n%s'
                            % str(err),
                        parent=self.text)

    def restore_file_breaks(self):
        self.text.update()   # this enables setting "BREAK" tags to be visible
        if self.io is None:
            # can happen if IDLE closes due to the .update() call
            return
        filename = self.io.filename
        if filename is None:
            return
        if os.path.isfile(self.breakpointPath):
            with open(self.breakpointPath, "r") as fp:
                lines = fp.readlines()
            for line in lines:
                if line.startswith(filename + '='):
                    # NOTE(review): eval() of file contents -- tolerable for
                    # a user-owned config file, but literal_eval would be
                    # safer.
                    breakpoint_linenumbers = eval(line[len(filename)+1:])
                    for breakpoint_linenumber in breakpoint_linenumbers:
                        self.set_breakpoint(breakpoint_linenumber)

    def update_breakpoints(self):
        "Retrieves all the breakpoints in the current window"
        text = self.text
        ranges = text.tag_ranges("BREAK")
        linenumber_list = self.ranges_to_linenumbers(ranges)
        self.breakpoints = linenumber_list

    def ranges_to_linenumbers(self, ranges):
        # ranges is a flat (start, end, start, end, ...) sequence of Tk
        # text indices; expand each pair into its covered line numbers.
        lines = []
        for index in range(0, len(ranges), 2):
            lineno = int(float(ranges[index].string))
            end = int(float(ranges[index+1].string))
            while lineno < end:
                lines.append(lineno)
                lineno += 1
        return lines

# XXX 13 Dec 2002 KBK Not used currently
#     def saved_change_hook(self):
#         "Extend base method - clear breaks if module is modified"
#         if not self.get_saved():
#             self.clear_file_breaks()
#         EditorWindow.saved_change_hook(self)

    def _close(self):
        "Extend base method - clear breaks when module is closed"
        self.clear_file_breaks()
        EditorWindow._close(self)
class PyShellFileList(FileList):
    "Extend base class: IDLE supports a shell and breakpoints"

    # Override FileList's class variable: new edit windows are the
    # breakpoint-aware PyShellEditorWindow, not the plain EditorWindow.
    EditorWindow = PyShellEditorWindow

    # The single shared shell window, created lazily by open_shell().
    pyshell = None

    def open_shell(self, event=None):
        """Raise the existing shell window, or create and start one."""
        if self.pyshell:
            self.pyshell.top.wakeup()
            return self.pyshell
        self.pyshell = PyShell(self)
        if self.pyshell and not self.pyshell.begin():
            return None
        return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
    "Extend base class: colorizer for the shell window itself"

    def __init__(self):
        ColorDelegator.__init__(self)
        self.LoadTagDefs()

    def recolorize_main(self):
        # Everything before the I/O mark is immutable shell history:
        # mark it processed before the base class recolorizes.
        self.tag_remove("TODO", "1.0", "iomark")
        self.tag_add("SYNC", "1.0", "iomark")
        ColorDelegator.recolorize_main(self)

    def LoadTagDefs(self):
        """Add the shell-specific stream tags to the base tag definitions."""
        ColorDelegator.LoadTagDefs(self)
        theme = idleConf.CurrentTheme()
        shell_tags = {
            "stdin": {'background': None, 'foreground': None},
            "stdout": idleConf.GetHighlight(theme, "stdout"),
            "stderr": idleConf.GetHighlight(theme, "stderr"),
            "console": idleConf.GetHighlight(theme, "console"),
        }
        self.tagdefs.update(shell_tags)

    def removecolors(self):
        # Don't remove shell color tags before "iomark"
        for tag_name in self.tagdefs:
            self.tag_remove(tag_name, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
    "Extend base class: forbid insert/delete before the I/O mark"

    def _blocked(self, index):
        """Return True (after ringing the bell) if *index* lies in the
        protected region before the I/O mark."""
        try:
            if self.delegate.compare(index, "<", "iomark"):
                self.delegate.bell()
                return True
        except TclError:
            pass
        return False

    def insert(self, index, chars, tags=None):
        if not self._blocked(index):
            UndoDelegator.insert(self, index, chars, tags)

    def delete(self, index1, index2=None):
        if not self._blocked(index1):
            UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
    # RPC client whose EOF handling propagates to the caller instead of
    # being handled internally; ModifiedInterpreter catches the EOFError
    # and restarts the subprocess.

    def handle_EOF(self):
        "Override the base class - just re-raise EOFError"
        raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
    """Interpreter that normally delegates execution to an RPC subprocess.

    Manages the life cycle of the python execution server (spawn, restart,
    interrupt, kill), ships user code to it over rpc, and routes results
    and tracebacks back into the shell window (tkconsole).  When IDLE runs
    without a subprocess, code is exec'd in-process instead.
    """

    def __init__(self, tkconsole):
        self.tkconsole = tkconsole
        # Share the __main__ namespace for in-process execution.
        locals = sys.modules['__main__'].__dict__
        InteractiveInterpreter.__init__(self, locals=locals)
        self.save_warnings_filters = None
        self.restarting = False
        self.subprocess_arglist = None
        self.port = PORT
        self.original_compiler_flags = self.compile.compiler.flags

    _afterid = None    # Tk after() id for the pending poll_subprocess call
    rpcclt = None      # MyRPCClient connection to the execution server
    rpcsubproc = None  # subprocess.Popen handle of the execution server

    def spawn_subprocess(self):
        # Build the command line lazily, once, then launch the server.
        if self.subprocess_arglist is None:
            self.subprocess_arglist = self.build_subprocess_arglist()
        self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)

    def build_subprocess_arglist(self):
        """Return argv for the execution server: same interpreter, same -W
        options, running idlelib.run.main() with our listening port."""
        assert (self.port!=0), (
            "Socket should have been assigned a port number.")
        w = ['-W' + s for s in sys.warnoptions]
        # Maybe IDLE is installed and is being accessed via sys.path,
        # or maybe it's not installed and the idle.py script is being
        # run from the IDLE source directory.
        del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
                                       default=False, type='bool')
        command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
        return [sys.executable] + w + ["-c", command, str(self.port)]

    def start_subprocess(self):
        """Bind a socket, spawn the execution server, and accept its
        connection.  Returns the rpc client, or None on failure."""
        addr = (HOST, self.port)
        # GUI makes several attempts to acquire socket, listens for connection
        for i in range(3):
            time.sleep(i)
            try:
                self.rpcclt = MyRPCClient(addr)
                break
            except OSError:
                pass
        else:
            self.display_port_binding_error()
            return None
        # if PORT was 0, system will assign an 'ephemeral' port. Find it out:
        self.port = self.rpcclt.listening_sock.getsockname()[1]
        # if PORT was not 0, probably working with a remote execution server
        if PORT != 0:
            # To allow reconnection within the 2MSL wait (cf. Stevens TCP
            # V1, 18.6),  set SO_REUSEADDR.  Note that this can be problematic
            # on Windows since the implementation allows two active sockets on
            # the same address!
            self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
                                           socket.SO_REUSEADDR, 1)
        self.spawn_subprocess()
        #time.sleep(20) # test to simulate GUI not accepting connection
        # Accept the connection from the Python execution server
        self.rpcclt.listening_sock.settimeout(10)
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        # Register the objects the subprocess may call back into.
        self.rpcclt.register("console", self.tkconsole)
        self.rpcclt.register("stdin", self.tkconsole.stdin)
        self.rpcclt.register("stdout", self.tkconsole.stdout)
        self.rpcclt.register("stderr", self.tkconsole.stderr)
        self.rpcclt.register("flist", self.tkconsole.flist)
        self.rpcclt.register("linecache", linecache)
        self.rpcclt.register("interp", self)
        self.transfer_path(with_cwd=True)
        self.poll_subprocess()
        return self.rpcclt

    def restart_subprocess(self, with_cwd=False, filename=''):
        """Kill and relaunch the execution server, preserving debugger
        state and annotating the restart in the shell window."""
        if self.restarting:
            return self.rpcclt
        self.restarting = True
        # close only the subprocess debugger
        debug = self.getdebugger()
        if debug:
            try:
                # Only close subprocess debugger, don't unregister gui_adap!
                debugger_r.close_subprocess_debugger(self.rpcclt)
            except:
                pass
        # Kill subprocess, spawn a new one, accept connection.
        self.rpcclt.close()
        self.terminate_subprocess()
        console = self.tkconsole
        was_executing = console.executing
        console.executing = False
        self.spawn_subprocess()
        try:
            self.rpcclt.accept()
        except socket.timeout:
            self.display_no_subprocess_error()
            return None
        self.transfer_path(with_cwd=with_cwd)
        console.stop_readline()
        # annotate restart in shell window and mark it
        console.text.delete("iomark", "end-1c")
        tag = 'RESTART: ' + (filename if filename else 'Shell')
        halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
        console.write("\n{0} {1} {0}".format(halfbar, tag))
        console.text.mark_set("restart", "end-1c")
        console.text.mark_gravity("restart", "left")
        if not filename:
            console.showprompt()
        # restart subprocess debugger
        if debug:
            # Restarted debugger connects to current instance of debug GUI
            debugger_r.restart_subprocess_debugger(self.rpcclt)
            # reload remote debugger breakpoints for all PyShellEditWindows
            debug.load_breakpoints()
        self.compile.compiler.flags = self.original_compiler_flags
        self.restarting = False
        return self.rpcclt

    def __request_interrupt(self):
        # Runs on a worker thread; asks the server to raise KeyboardInterrupt.
        self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})

    def interrupt_subprocess(self):
        # Use a thread so the GUI doesn't block on the remote call.
        threading.Thread(target=self.__request_interrupt).start()

    def kill_subprocess(self):
        """Tear down polling, sockets, and the server process."""
        if self._afterid is not None:
            self.tkconsole.text.after_cancel(self._afterid)
        try:
            self.rpcclt.listening_sock.close()
        except AttributeError:  # no socket
            pass
        try:
            self.rpcclt.close()
        except AttributeError:  # no socket
            pass
        self.terminate_subprocess()
        self.tkconsole.executing = False
        self.rpcclt = None

    def terminate_subprocess(self):
        "Make sure subprocess is terminated"
        try:
            self.rpcsubproc.kill()
        except OSError:
            # process already terminated
            return
        else:
            try:
                self.rpcsubproc.wait()
            except OSError:
                return

    def transfer_path(self, with_cwd=False):
        # Mirror this process's sys.path into the subprocess.
        if with_cwd:        # Issue 13506
            path = ['']     # include Current Working Directory
            path.extend(sys.path)
        else:
            path = sys.path

        self.runcommand("""if 1:
        import sys as _sys
        _sys.path = %r
        del _sys
        \n""" % (path,))

    active_seq = None  # rpc sequence number of the code currently executing

    def poll_subprocess(self):
        """Poll the execution server for a response and display it; then
        reschedule itself via Tk after()."""
        clt = self.rpcclt
        if clt is None:
            return
        try:
            response = clt.pollresponse(self.active_seq, wait=0.05)
        except (EOFError, OSError, KeyboardInterrupt):
            # lost connection or subprocess terminated itself, restart
            # [the KBI is from rpc.SocketIO.handle_EOF()]
            if self.tkconsole.closing:
                return
            response = None
            self.restart_subprocess()
        if response:
            self.tkconsole.resetoutput()
            self.active_seq = None
            how, what = response
            console = self.tkconsole.console
            if how == "OK":
                if what is not None:
                    print(repr(what), file=console)
            elif how == "EXCEPTION":
                if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
                    self.remote_stack_viewer()
            elif how == "ERROR":
                errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
                print(errmsg, what, file=sys.__stderr__)
                print(errmsg, what, file=console)
            # we received a response to the currently active seq number:
            try:
                self.tkconsole.endexecuting()
            except AttributeError:  # shell may have closed
                pass
        # Reschedule myself
        if not self.tkconsole.closing:
            self._afterid = self.tkconsole.text.after(
                self.tkconsole.pollinterval, self.poll_subprocess)

    debugger = None  # active debugger GUI adapter, or None

    def setdebugger(self, debugger):
        self.debugger = debugger

    def getdebugger(self):
        return self.debugger

    def open_remote_stack_viewer(self):
        """Initiate the remote stack viewer from a separate thread.

        This method is called from the subprocess, and by returning from this
        method we allow the subprocess to unblock.  After a bit the shell
        requests the subprocess to open the remote stack viewer which returns a
        static object looking at the last exception.  It is queried through
        the RPC mechanism.
        """
        self.tkconsole.text.after(300, self.remote_stack_viewer)
        return

    def remote_stack_viewer(self):
        # Build a Toplevel tree view over the subprocess's last traceback.
        from idlelib import debugobj_r
        oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
        if oid is None:
            self.tkconsole.root.bell()
            return
        item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
        from idlelib.tree import ScrolledCanvas, TreeNode
        top = Toplevel(self.tkconsole.root)
        theme = idleConf.CurrentTheme()
        background = idleConf.GetHighlight(theme, 'normal')['background']
        sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
        sc.frame.pack(expand=1, fill="both")
        node = TreeNode(sc.canvas, None, item)
        node.expand()
        # XXX Should GC the remote tree when closing the window

    gid = 0  # counter for generated <pyshell#N> pseudo-filenames

    def execsource(self, source):
        "Like runsource() but assumes complete exec source"
        filename = self.stuffsource(source)
        self.execfile(filename, source)

    def execfile(self, filename, source=None):
        "Execute an existing file"
        if source is None:
            with tokenize.open(filename) as fp:
                source = fp.read()
            if use_subprocess:
                # Give the script a proper __file__, removed after it runs.
                source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
                          + source + "\ndel __file__")
        try:
            code = compile(source, filename, "exec")
        except (OverflowError, SyntaxError):
            self.tkconsole.resetoutput()
            print('*** Error in script or command!\n'
                  'Traceback (most recent call last):',
                  file=self.tkconsole.stderr)
            InteractiveInterpreter.showsyntaxerror(self, filename)
            self.tkconsole.showprompt()
        else:
            self.runcode(code)

    def runsource(self, source):
        "Extend base class method: Stuff the source in the line cache first"
        filename = self.stuffsource(source)
        self.more = 0
        # Treat SyntaxWarning as an error while compiling user input;
        # the original filters are restored in the finally clause.
        self.save_warnings_filters = warnings.filters[:]
        warnings.filterwarnings(action="error", category=SyntaxWarning)
        # at the moment, InteractiveInterpreter expects str
        assert isinstance(source, str)
        #if isinstance(source, str):
        #    from idlelib import iomenu
        #    try:
        #        source = source.encode(iomenu.encoding)
        #    except UnicodeError:
        #        self.tkconsole.resetoutput()
        #        self.write("Unsupported characters in input\n")
        #        return
        try:
            # InteractiveInterpreter.runsource() calls its runcode() method,
            # which is overridden (see below)
            return InteractiveInterpreter.runsource(self, source, filename)
        finally:
            if self.save_warnings_filters is not None:
                warnings.filters[:] = self.save_warnings_filters
                self.save_warnings_filters = None

    def stuffsource(self, source):
        "Stuff source in the filename cache"
        filename = "<pyshell#%d>" % self.gid
        self.gid = self.gid + 1
        lines = source.split("\n")
        linecache.cache[filename] = len(source)+1, 0, lines, filename
        return filename

    def prepend_syspath(self, filename):
        "Prepend sys.path with file's directory if not already included"
        self.runcommand("""if 1:
            _filename = %r
            import sys as _sys
            from os.path import dirname as _dirname
            _dir = _dirname(_filename)
            if not _dir in _sys.path:
                _sys.path.insert(0, _dir)
            del _filename, _sys, _dirname, _dir
            \n""" % (filename,))

    def showsyntaxerror(self, filename=None):
        """Override Interactive Interpreter method: Use Colorizing

        Color the offending position instead of printing it and pointing at it
        with a caret.
        """
        tkconsole = self.tkconsole
        text = tkconsole.text
        text.tag_remove("ERROR", "1.0", "end")
        type, value, tb = sys.exc_info()
        msg = getattr(value, 'msg', '') or value or "<no detail available>"
        lineno = getattr(value, 'lineno', '') or 1
        offset = getattr(value, 'offset', '') or 0
        if offset == 0:
            lineno += 1 #mark end of offending line
        if lineno == 1:
            pos = "iomark + %d chars" % (offset-1)
        else:
            pos = "iomark linestart + %d lines + %d chars" % \
                  (lineno-1, offset-1)
        tkconsole.colorize_syntax_error(text, pos)
        tkconsole.resetoutput()
        self.write("SyntaxError: %s\n" % msg)
        tkconsole.showprompt()

    def showtraceback(self):
        "Extend base class method to reset output properly"
        self.tkconsole.resetoutput()
        self.checklinecache()
        InteractiveInterpreter.showtraceback(self)
        if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
            self.tkconsole.open_stack_viewer()

    def checklinecache(self):
        # Drop every linecache entry except the <pyshell#...> pseudo-files.
        c = linecache.cache
        for key in list(c.keys()):
            if key[:1] + key[-1:] != "<>":
                del c[key]

    def runcommand(self, code):
        "Run the code without invoking the debugger"
        # The code better not raise an exception!
        if self.tkconsole.executing:
            self.display_executing_dialog()
            return 0
        if self.rpcclt:
            self.rpcclt.remotequeue("exec", "runcode", (code,), {})
        else:
            exec(code, self.locals)
        return 1

    def runcode(self, code):
        "Override base class method"
        if self.tkconsole.executing:
            # NOTE(review): ModifiedInterpreter has no 'interp' attribute;
            # this looks like it should be self.restart_subprocess() --
            # confirm before relying on this path.
            self.interp.restart_subprocess()
        self.checklinecache()
        if self.save_warnings_filters is not None:
            warnings.filters[:] = self.save_warnings_filters
            self.save_warnings_filters = None
        debugger = self.debugger
        try:
            self.tkconsole.beginexecuting()
            if not debugger and self.rpcclt is not None:
                # Normal case: ship the code to the execution server.
                self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
                                                        (code,), {})
            elif debugger:
                debugger.run(code, self.locals)
            else:
                exec(code, self.locals)
        except SystemExit:
            if not self.tkconsole.closing:
                if tkMessageBox.askyesno(
                        "Exit?",
                        "Do you want to exit altogether?",
                        default="yes",
                        parent=self.tkconsole.text):
                    raise
                else:
                    self.showtraceback()
            else:
                raise
        except:
            if use_subprocess:
                print("IDLE internal error in runcode()",
                      file=self.tkconsole.stderr)
                self.showtraceback()
                self.tkconsole.endexecuting()
            else:
                if self.tkconsole.canceled:
                    self.tkconsole.canceled = False
                    print("KeyboardInterrupt", file=self.tkconsole.stderr)
                else:
                    self.showtraceback()
        finally:
            if not use_subprocess:
                try:
                    self.tkconsole.endexecuting()
                except AttributeError:  # shell may have closed
                    pass

    def write(self, s):
        "Override base class method"
        return self.tkconsole.stderr.write(s)

    def display_port_binding_error(self):
        tkMessageBox.showerror(
            "Port Binding Error",
            "IDLE can't bind to a TCP/IP port, which is necessary to "
            "communicate with its Python execution server.  This might be "
            "because no networking is installed on this computer.  "
            "Run IDLE with the -n command line switch to start without a "
            "subprocess and refer to Help/IDLE Help 'Running without a "
            "subprocess' for further details.",
            parent=self.tkconsole.text)

    def display_no_subprocess_error(self):
        tkMessageBox.showerror(
            "Subprocess Connection Error",
            "IDLE's subprocess didn't make connection.\n"
            "See the 'Startup failure' section of the IDLE doc, online at\n"
            "https://docs.python.org/3/library/idle.html#startup-failure",
            parent=self.tkconsole.text)

    def display_executing_dialog(self):
        tkMessageBox.showerror(
            "Already executing",
            "The Python Shell window is already executing a command; "
            "please wait until it is finished.",
            parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("window", "_Window"),
("help", "_Help"),
]
# Extend right-click context menu
rmenu_specs = OutputWindow.rmenu_specs + [
("Squeeze", "<<squeeze-current-text>>"),
]
# New classes
from idlelib.history import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
OutputWindow.__init__(self, flist, None, None)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>> '
self.prompt_last_line = self.sys_ps1.split('\n')[-1]
self.prompt = self.sys_ps1 # Changes when debug active
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
squeezer = self.Squeezer(self)
text.bind("<<squeeze-current-text>>",
squeezer.squeeze_current_text_event)
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import iomenu
self.stdin = PseudoInputFile(self, "stdin", iomenu.encoding)
self.stdout = PseudoOutputFile(self, "stdout", iomenu.encoding)
self.stderr = PseudoOutputFile(self, "stderr", iomenu.encoding)
self.console = PseudoOutputFile(self, "console", iomenu.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
parent=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
debugger_r.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
self.prompt = self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
self.prompt = "[DEBUG ON]\n" + self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"Your program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "help", "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
    """Blocking read of one line of user input (file-like stdin API).

    Runs a nested Tk mainloop until enter_callback()/cancel_callback()/
    stop_readline() quits it.  Returns the text between iomark and end,
    or the empty string on EOF / forced stop.
    """
    save = self.reading
    try:
        self.reading = 1
        self.top.mainloop()  # nested mainloop()
    finally:
        # Restore the outer reading state even if the mainloop raises.
        self.reading = save
    if self._stop_readline_flag:
        self._stop_readline_flag = False
        return ""
    line = self.text.get("iomark", "end-1c")
    if len(line) == 0:  # may be EOF if we quit our mainloop with Ctrl-C
        line = "\n"
    self.resetoutput()
    if self.canceled:
        self.canceled = 0
        if not use_subprocess:
            raise KeyboardInterrupt
    if self.endoffile:
        self.endoffile = 0
        line = ""
    return line
def isatty(self):
    """Report this pseudo-stream as interactive.

    The shell stands in for sys.stdin/stdout/stderr, and interactive
    behavior (e.g. prompting) is always wanted, so this is always True.
    """
    return True
def cancel_callback(self, event=None):
    """Handle Ctrl-C: interrupt running code or cancel pending input."""
    try:
        if self.text.compare("sel.first", "!=", "sel.last"):
            return  # Active selection -- always use default binding
    except:
        # No selection: 'sel.first' doesn't exist; fall through.
        pass
    if not (self.executing or self.reading):
        # Idle shell: just print a KeyboardInterrupt and a new prompt.
        self.resetoutput()
        self.interp.write("KeyboardInterrupt\n")
        self.showprompt()
        return "break"
    self.endoffile = 0
    self.canceled = 1
    if (self.executing and self.interp.rpcclt):
        # With a debugger active, interrupting isn't enough: restart.
        if self.interp.getdebugger():
            self.interp.restart_subprocess()
        else:
            self.interp.interrupt_subprocess()
    if self.reading:
        self.top.quit()  # exit the nested mainloop() in readline()
    return "break"
def eof_callback(self, event):
    """Handle Ctrl-D: close the shell, or signal EOF to readline()."""
    if self.executing and not self.reading:
        return  # Let the default binding (delete next char) take over
    if not (self.text.compare("iomark", "==", "insert") and
            self.text.compare("insert", "==", "end-1c")):
        # Cursor is not at the very end of the input area.
        return  # Let the default binding (delete next char) take over
    if not self.executing:
        self.resetoutput()
        self.close()
    else:
        # readline() is waiting: mark EOF and exit its nested mainloop.
        self.canceled = 0
        self.endoffile = 1
        self.top.quit()
    return "break"
def linefeed_callback(self, event):
    # Insert a linefeed without entering anything (still autoindented)
    if self.reading:
        # During readline() a raw newline is inserted directly.
        self.text.insert("insert", "\n")
        self.text.see("insert")
    else:
        self.newline_and_indent_event(event)
    return "break"
def enter_callback(self, event):
    """Handle Return: recall previous input, or submit the current command."""
    if self.executing and not self.reading:
        return  # Let the default binding (insert '\n') take over
    # If some text is selected, recall the selection
    # (but only if this before the I/O mark)
    try:
        sel = self.text.get("sel.first", "sel.last")
        if sel:
            if self.text.compare("sel.last", "<=", "iomark"):
                self.recall(sel, event)
                return "break"
    except:
        # No selection ('sel.first' doesn't exist): fall through.
        pass
    # If we're strictly before the line containing iomark, recall
    # the current line, less a leading prompt, less leading or
    # trailing whitespace
    if self.text.compare("insert", "<", "iomark linestart"):
        # Check if there's a relevant stdin range -- if so, use it
        prev = self.text.tag_prevrange("stdin", "insert")
        if prev and self.text.compare("insert", "<", prev[1]):
            self.recall(self.text.get(prev[0], prev[1]), event)
            return "break"
        next = self.text.tag_nextrange("stdin", "insert")
        if next and self.text.compare("insert lineend", ">=", next[0]):
            self.recall(self.text.get(next[0], next[1]), event)
            return "break"
        # No stdin mark -- just get the current line, less any prompt
        indices = self.text.tag_nextrange("console", "insert linestart")
        if indices and \
           self.text.compare(indices[0], "<=", "insert linestart"):
            self.recall(self.text.get(indices[1], "insert lineend"), event)
        else:
            self.recall(self.text.get("insert linestart", "insert lineend"), event)
        return "break"
    # If we're between the beginning of the line and the iomark, i.e.
    # in the prompt area, move to the end of the prompt
    if self.text.compare("insert", "<", "iomark"):
        self.text.mark_set("insert", "iomark")
    # If we're in the current input and there's only whitespace
    # beyond the cursor, erase that whitespace first
    s = self.text.get("insert", "end-1c")
    if s and not s.strip():
        self.text.delete("insert", "end-1c")
    # If we're in the current input before its last line,
    # insert a newline right at the insert point
    if self.text.compare("insert", "<", "end-1c linestart"):
        self.newline_and_indent_event(event)
        return "break"
    # We're in the last line; append a newline and submit it
    self.text.mark_set("insert", "end-1c")
    if self.reading:
        self.text.insert("insert", "\n")
        self.text.see("insert")
    else:
        self.newline_and_indent_event(event)
    self.text.tag_add("stdin", "iomark", "end-1c")
    self.text.update_idletasks()
    if self.reading:
        self.top.quit()  # Break out of recursive mainloop()
    else:
        self.runit()
    return "break"
def recall(self, s, event):
    """Re-insert previously entered text *s* at the input area, re-indented."""
    # remove leading and trailing empty or whitespace lines
    s = re.sub(r'^\s*\n', '', s)
    s = re.sub(r'\n\s*$', '', s)
    lines = s.split('\n')
    self.text.undo_block_start()
    try:
        self.text.tag_remove("sel", "1.0", "end")
        self.text.mark_set("insert", "end-1c")
        prefix = self.text.get("insert linestart", "insert")
        if prefix.rstrip().endswith(':'):
            # Current line opens a block: start recalled code on a new,
            # auto-indented line.
            self.newline_and_indent_event(event)
            prefix = self.text.get("insert linestart", "insert")
        self.text.insert("insert", lines[0].strip())
        if len(lines) > 1:
            orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
            new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
            for line in lines[1:]:
                if line.startswith(orig_base_indent):
                    # replace orig base indentation with new indentation
                    line = new_base_indent + line[len(orig_base_indent):]
                self.text.insert('insert', '\n' + line.rstrip())
    finally:
        self.text.see("insert")
        self.text.undo_block_stop()
def runit(self):
    """Fetch the current input, strip the trailing newline area, run it.

    Trailing spaces/tabs, at most one trailing newline, and any
    spaces/tabs before that newline are removed, so hitting Return
    twice still ends a multi-line statement.
    """
    source = self.text.get("iomark", "end-1c")
    source = source.rstrip(" \t")
    if source.endswith("\n"):
        source = source[:-1].rstrip(" \t")
    self.interp.runsource(source)
def open_stack_viewer(self, event=None):
    """Open the stack viewer on the most recent traceback.

    Uses the remote viewer when a subprocess is attached; otherwise
    browses sys.last_traceback locally, or shows an error dialog when
    no traceback exists yet.
    """
    if self.interp.rpcclt:
        # User code runs in a subprocess: browse its stack over RPC.
        return self.interp.remote_stack_viewer()
    try:
        sys.last_traceback
    except AttributeError:
        # Was a bare `except:`, which would also swallow KeyboardInterrupt;
        # only AttributeError means "no traceback recorded yet".
        tkMessageBox.showerror("No stack trace",
                               "There is no stack trace yet.\n"
                               "(sys.last_traceback is not defined)",
                               parent=self.text)
        return
    from idlelib.stackviewer import StackBrowser
    StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
    # Scroll both the I/O mark and the restart separator into view.
    self.text.see("iomark")
    self.text.see("restart")
def restart_shell(self, event=None):
    "Callback for Run/Restart Shell Cntl-F6"
    self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
    """Write the prompt and position the caret for new input."""
    self.resetoutput()
    self.console.write(self.prompt)
    self.text.mark_set("insert", "end-1c")
    self.set_line_and_column()
    # The prompt itself should not be undoable.
    self.io.reset_undo()
def show_warning(self, msg):
    """Wrap *msg* to the console width and insert it before the prompt,
    tagged as stderr output."""
    console_width = self.interp.tkconsole.width
    wrapper = TextWrapper(width=console_width, tabsize=8, expand_tabs=True)
    text = '\n'.join(wrapper.wrap(msg))
    if not text.endswith('\n'):
        text = text + '\n'
    self.per.bottom.insert("iomark linestart", text, "stderr")
def resetoutput(self):
    """Store the pending input in history and move iomark to the end."""
    source = self.text.get("iomark", "end-1c")
    if self.history:
        self.history.store(source)
    if self.text.get("end-2c") != "\n":
        # Ensure output always starts on a fresh line.
        self.text.insert("end-1c", "\n")
    self.text.mark_set("iomark", "end-1c")
    self.set_line_and_column()
def write(self, s, tags=()):
    """Write *s* to the shell text widget (file-like stdout/stderr API)."""
    if isinstance(s, str) and len(s) and max(s) > '\uffff':
        # Tk doesn't support outputting non-BMP characters
        # Let's assume what printed string is not very long,
        # find first non-BMP character and construct informative
        # UnicodeEncodeError exception.
        for start, char in enumerate(s):
            if char > '\uffff':
                break
        raise UnicodeEncodeError("UCS-2", char, start, start + 1,
                                 'Non-BMP character not supported in Tk')
    try:
        # Right gravity makes iomark follow the inserted output.
        self.text.mark_gravity("iomark", "right")
        count = OutputWindow.write(self, s, tags, "iomark")
        self.text.mark_gravity("iomark", "left")
    except:
        raise  ###pass  # ### 11Aug07 KBK if we are expecting exceptions
        # let's find out what they are and be specific.
    if self.canceled:
        self.canceled = 0
        if not use_subprocess:
            raise KeyboardInterrupt
    return count
def rmenu_check_cut(self):
    """Disable Cut in the context menu when the selection touches history."""
    try:
        if self.text.compare('sel.first', '<', 'iomark'):
            # Selection starts in read-only output: cutting is not allowed.
            return 'disabled'
    except TclError:  # no selection, so the index 'sel.first' doesn't exist
        return 'disabled'
    return super().rmenu_check_cut()
def rmenu_check_paste(self):
    """Disable Paste in the context menu outside the editable input area."""
    if self.text.compare('insert', '<', 'iomark'):
        return 'disabled'
    return super().rmenu_check_paste()
def fix_x11_paste(root):
    "Make paste replace selection on x11.  See issue #5124."
    if root._windowingsystem == 'x11':
        for cls in 'Text', 'Entry', 'Spinbox':
            # Prepend a Tcl snippet that deletes the current selection before
            # the widget class's original <<Paste>> script runs.
            root.bind_class(
                cls,
                '<<Paste>>',
                'catch {%W delete sel.first sel.last}\n' +
                root.bind_class(cls, '<<Paste>>'))
# Command-line help text, printed for -h and on getopt errors (see main()).
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
    """IDLE entry point: parse options, set up Tk, open editor/shell windows."""
    import getopt
    from platform import system
    from idlelib import testing  # bool value
    from idlelib import macosx
    global flist, root, use_subprocess
    capture_warnings(True)
    use_subprocess = True
    enable_shell = False
    enable_edit = False
    debug = False
    cmd = None
    script = None
    startup = False
    try:
        opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
    except getopt.error as msg:
        print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
        sys.exit(2)
    # Apply command-line options (see usage_msg for their meaning).
    for o, a in opts:
        if o == '-c':
            cmd = a
            enable_shell = True
        if o == '-d':
            debug = True
            enable_shell = True
        if o == '-e':
            enable_edit = True
        if o == '-h':
            sys.stdout.write(usage_msg)
            sys.exit()
        if o == '-i':
            enable_shell = True
        if o == '-n':
            print(" Warning: running IDLE without a subprocess is deprecated.",
                  file=sys.stderr)
            use_subprocess = False
        if o == '-r':
            script = a
            if os.path.isfile(script):
                pass
            else:
                print("No script file: ", script)
                sys.exit()
            enable_shell = True
        if o == '-s':
            startup = True
            enable_shell = True
        if o == '-t':
            PyShell.shell_title = a
            enable_shell = True
    if args and args[0] == '-':
        # '-' means: read the program to run from stdin.
        cmd = sys.stdin.read()
        enable_shell = True
    # process sys.argv and sys.path:
    for i in range(len(sys.path)):
        sys.path[i] = os.path.abspath(sys.path[i])
    if args and args[0] == '-':
        sys.argv = [''] + args[1:]
    elif cmd:
        sys.argv = ['-c'] + args
    elif script:
        sys.argv = [script] + args
    elif args:
        enable_edit = True
        pathx = []
        for filename in args:
            pathx.append(os.path.dirname(filename))
        for dir in pathx:
            dir = os.path.abspath(dir)
            if not dir in sys.path:
                sys.path.insert(0, dir)
    else:
        dir = os.getcwd()
        if dir not in sys.path:
            sys.path.insert(0, dir)
    # check the IDLE settings configuration (but command line overrides)
    edit_start = idleConf.GetOption('main', 'General',
                                    'editor-on-startup', type='bool')
    enable_edit = enable_edit or edit_start
    enable_shell = enable_shell or not enable_edit
    # Setup root.  Don't break user code run in IDLE process.
    # Don't change environment when testing.
    if use_subprocess and not testing:
        NoDefaultRoot()
    root = Tk(className="Idle")
    root.withdraw()
    from idlelib.run import fix_scaling
    fix_scaling(root)
    # set application icon
    icondir = os.path.join(os.path.dirname(__file__), 'Icons')
    if system() == 'Windows':
        iconfile = os.path.join(icondir, 'idle.ico')
        root.wm_iconbitmap(default=iconfile)
    elif not macosx.isAquaTk():
        ext = '.png' if TkVersion >= 8.6 else '.gif'
        iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
                     for size in (16, 32, 48)]
        icons = [PhotoImage(master=root, file=iconfile)
                 for iconfile in iconfiles]
        root.wm_iconphoto(True, *icons)
    # start editor and/or shell windows:
    fixwordbreaks(root)
    fix_x11_paste(root)
    flist = PyShellFileList(root)
    macosx.setupApp(root, flist)
    if enable_edit:
        if not (cmd or script):
            for filename in args[:]:
                if flist.open(filename) is None:
                    # filename is a directory actually, disregard it
                    args.remove(filename)
            if not args:
                flist.new()
    if enable_shell:
        shell = flist.open_shell()
        if not shell:
            return  # couldn't open shell
        if macosx.isAquaTk() and flist.dict:
            # On OSX: when the user has double-clicked on a file that causes
            # IDLE to be launched the shell window will open just in front of
            # the file she wants to see. Lower the interpreter window when
            # there are open files.
            shell.top.lower()
    else:
        shell = flist.pyshell
    # Handle remaining options. If any of these are set, enable_shell
    # was set also, so shell must be true to reach here.
    if debug:
        shell.open_debugger()
    if startup:
        filename = os.environ.get("IDLESTARTUP") or \
                   os.environ.get("PYTHONSTARTUP")
        if filename and os.path.isfile(filename):
            shell.interp.execfile(filename)
    if cmd or script:
        # Install the user-visible sys.argv in the execution subprocess.
        shell.interp.runcommand("""if 1:
            import sys as _sys
            _sys.argv = %r
            del _sys
            \n""" % (sys.argv,))
        if cmd:
            shell.interp.execsource(cmd)
        elif script:
            shell.interp.prepend_syspath(script)
            shell.interp.execfile(script)
    elif shell:
        # If there is a shell window and no cmd or script in progress,
        # check for problematic issues and print warning message(s) in
        # the IDLE shell window; this is less intrusive than always
        # opening a separate window.
        # Warn if using a problematic OS X Tk version.
        tkversionwarning = macosx.tkVersionWarning(root)
        if tkversionwarning:
            shell.show_warning(tkversionwarning)
        # Warn if the "Prefer tabs when opening documents" system
        # preference is set to "Always".
        prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning()
        if prefer_tabs_preference_warning:
            shell.show_warning(prefer_tabs_preference_warning)
    while flist.inversedict:  # keep IDLE running while files are open.
        root.mainloop()
    root.destroy()
    capture_warnings(False)
# Script entry point.
if __name__ == "__main__":
    main()
capture_warnings(False)  # Make sure turned off; see issue 18081
|
app.py
|
#############################################################################
# Copyright (c) 2018, Voilà Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
from zmq.eventloop import ioloop
import gettext
import io
import json
import logging
import threading
import tempfile
import os
import shutil
import signal
import socket
import webbrowser
import errno
import random
try:
from urllib.parse import urljoin
from urllib.request import pathname2url
except ImportError:
from urllib import pathname2url
from urlparse import urljoin
import jinja2
import tornado.ioloop
import tornado.web
from traitlets.config.application import Application
from traitlets.config.loader import Config
from traitlets import Unicode, Integer, Bool, Dict, List, default
from jupyter_server.services.kernels.kernelmanager import AsyncMappingKernelManager
from jupyter_server.services.kernels.handlers import KernelHandler, ZMQChannelsHandler
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.base.handlers import FileFindHandler, path_regex
from jupyter_server.config_manager import recursive_update
from jupyter_server.utils import url_path_join, run_sync
from jupyter_server.services.config import ConfigManager
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_core.paths import jupyter_config_path, jupyter_path
from ipython_genutils.py3compat import getcwd
from .paths import ROOT, STATIC_ROOT, collect_template_paths, collect_static_paths
from .handler import VoilaHandler
from .treehandler import VoilaTreeHandler
from ._version import __version__
from .static_file_handler import MultiStaticFileHandler, WhiteListFileHandler
from .configuration import VoilaConfiguration
from .execute import VoilaExecutor
from .exporter import VoilaExporter
ioloop.install()
# Matches kernel UUIDs in URL routes (five dash-separated word groups).
_kernel_id_regex = r"(?P<kernel_id>\w+-\w+-\w+-\w+-\w+)"


def _(x):
    # Gettext-style marker: currently the identity function, kept so help
    # strings are easy to internationalize later.
    return x
class Voila(Application):
name = 'voila'
version = __version__
examples = 'voila example.ipynb --port 8888'
flags = {
'debug': ({'Voila': {'log_level': logging.DEBUG}}, _("Set the log level to logging.DEBUG")),
'no-browser': ({'Voila': {'open_browser': False}}, _('Don\'t open the notebook in a browser after startup.'))
}
description = Unicode(
"""voila [OPTIONS] NOTEBOOK_FILENAME
This launches a stand-alone server for read-only notebooks.
"""
)
option_description = Unicode(
"""
notebook_path:
File name of the Jupyter notebook to display.
"""
)
notebook_filename = Unicode()
port = Integer(
8866,
config=True,
help=_(
'Port of the Voilà server. Default 8866.'
)
)
autoreload = Bool(
False,
config=True,
help=_(
'Will autoreload to server and the page when a template, js file or Python code changes'
)
)
root_dir = Unicode(config=True, help=_('The directory to use for notebooks.'))
static_root = Unicode(
STATIC_ROOT,
config=True,
help=_(
'Directory holding static assets (HTML, JS and CSS files).'
)
)
aliases = {
'port': 'Voila.port',
'static': 'Voila.static_root',
'strip_sources': 'VoilaConfiguration.strip_sources',
'autoreload': 'Voila.autoreload',
'template': 'VoilaConfiguration.template',
'theme': 'VoilaConfiguration.theme',
'base_url': 'Voila.base_url',
'server_url': 'Voila.server_url',
'enable_nbextensions': 'VoilaConfiguration.enable_nbextensions'
}
classes = [
VoilaConfiguration,
VoilaExecutor,
VoilaExporter
]
# Root directory under which the per-run connection directory is created.
connection_dir_root = Unicode(
    config=True,
    help=_(
        # typo fix: "temporry" -> "temporary"
        'Location of temporary connection files. Defaults '
        'to system `tempfile.gettempdir()` value.'
    )
)
# Actual per-run connection directory (a mkdtemp() subdir of the root);
# populated in start() and removed in stop().
connection_dir = Unicode()
# Client-facing URL prefix; see server_url for the server-side prefix.
base_url = Unicode(
    '/',
    config=True,
    help=_(
        # typo fix: "will server the routes" -> "will serve the routes"
        'Path for Voilà API calls. If server_url is unset, this will be '
        'used for both the base route of the server and the client. '
        'If server_url is set, the server will serve the routes prefixed '
        'by server_url, while the client will prefix by base_url (this is '
        'useful in reverse proxies).'
    )
)
server_url = Unicode(
None,
config=True,
allow_none=True,
help=_(
'Path to prefix to Voilà API handlers. Leave unset to default to base_url'
)
)
notebook_path = Unicode(
None,
config=True,
allow_none=True,
help=_(
'path to notebook to serve with Voilà'
)
)
template_paths = List(
[],
config=True,
help=_(
'path to jinja2 templates'
)
)
static_paths = List(
[STATIC_ROOT],
config=True,
help=_(
'paths to static assets'
)
)
port_retries = Integer(50, config=True,
help=_("The number of additional ports to try if the specified port is not available.")
)
ip = Unicode('localhost', config=True,
help=_("The IP address the notebook server will listen on."))
open_browser = Bool(True, config=True,
help=_("""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
"""))
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webbrowser_open_new = Integer(2, config=True,
help=_("""Specify Where to open the notebook on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""))
custom_display_url = Unicode(u'', config=True,
help=_("""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter notebook server (proxified
or containerized setups for example)."""))
@property
def display_url(self):
    """URL shown to the user; may differ from the actual connection URL."""
    if self.custom_display_url:
        url = self.custom_display_url
        if not url.endswith('/'):
            url += '/'
    else:
        if self.ip in ('', '0.0.0.0'):
            # Bound to all interfaces: display the hostname instead.
            ip = "%s" % socket.gethostname()
        else:
            ip = self.ip
        url = self._url(ip)
    # TODO: do we want to have the token?
    # if self.token:
    #     # Don't log full token if it came from config
    #     token = self.token if self._token_generated else '...'
    #     url = (url_concat(url, {'token': token})
    #            + '\n or '
    #            + url_concat(self._url('127.0.0.1'), {'token': token}))
    return url
@property
def connection_url(self):
    """URL the server actually listens on (localhost when no ip is set)."""
    host = self.ip or 'localhost'
    return self._url(host)
def _url(self, ip):
    """Build the http URL for *ip* from the configured port and base_url."""
    # TODO: https / certfile
    # proto = 'https' if self.certfile else 'http'
    scheme = 'http'
    return '{}://{}:{}{}'.format(scheme, ip, self.port, self.base_url)
config_file_paths = List(
Unicode(),
config=True,
help=_(
'Paths to search for voila.(py|json)'
)
)
tornado_settings = Dict(
{},
config=True,
help=_(
'Extra settings to apply to tornado application, e.g. headers, ssl, etc'
)
)
@default('config_file_paths')
def _config_file_paths_default(self):
    # Search the current directory first, then the Jupyter config dirs.
    return [os.getcwd()] + jupyter_config_path()
@default('connection_dir_root')
def _default_connection_dir(self):
    """Default root for connection files: the system temp directory."""
    connection_dir = tempfile.gettempdir()
    # Lazy %-style args: let logging interpolate only if the record is emitted.
    self.log.info('Using %s to store connection files', connection_dir)
    return connection_dir
@default('log_level')
def _default_log_level(self):
    # Log at INFO level by default.
    return logging.INFO
# similar to NotebookApp, except no extra path
@property
def nbextensions_path(self):
    """The path to look for Javascript notebook extensions"""
    path = jupyter_path('nbextensions')
    # FIXME: remove IPython nbextensions path after a migration period
    try:
        from IPython.paths import get_ipython_dir
    except ImportError:
        # IPython not installed: skip its legacy nbextensions directory.
        pass
    else:
        path.append(os.path.join(get_ipython_dir(), 'nbextensions'))
    return path
@default('root_dir')
def _default_root_dir(self):
    """Default notebook root: the served notebook's directory, else the CWD."""
    if not self.notebook_path:
        return getcwd()
    return os.path.dirname(os.path.abspath(self.notebook_path))
def initialize(self, argv=None):
    """Parse CLI args, resolve the notebook path / root dir, load config."""
    self.log.debug("Searching path %s for config files", self.config_file_paths)
    # to make config_file_paths settable via cmd line, we first need to parse it
    super(Voila, self).initialize(argv)
    if len(self.extra_args) == 1:
        arg = self.extra_args[0]
        # I am not sure why we need to check if self.notebook_path is set, can we get rid of this?
        if not self.notebook_path:
            # A directory serves a tree; a file serves a single notebook.
            if os.path.isdir(arg):
                self.root_dir = arg
            elif os.path.isfile(arg):
                self.notebook_path = arg
            else:
                raise ValueError('argument is neither a file nor a directory: %r' % arg)
    elif len(self.extra_args) != 0:
        raise ValueError('provided more than 1 argument: %r' % self.extra_args)
    # then we load the config
    self.load_config_file('voila', path=self.config_file_paths)
    # common configuration options between the server extension and the application
    self.voila_configuration = VoilaConfiguration(parent=self)
    self.setup_template_dirs()
    signal.signal(signal.SIGTERM, self._handle_signal_stop)
def setup_template_dirs(self):
    """Resolve template/static search paths and merge template conf.json files."""
    if self.voila_configuration.template:
        template_name = self.voila_configuration.template
        self.template_paths = collect_template_paths(['voila', 'nbconvert'], template_name, prune=True)
        self.static_paths = collect_static_paths(['voila', 'nbconvert'], template_name)
        conf_paths = [os.path.join(d, 'conf.json') for d in self.template_paths]
        for p in conf_paths:
            # see if config file exists
            if os.path.exists(p):
                # load the template-related config
                with open(p) as json_file:
                    conf = json.load(json_file)
                # update the overall config with it, preserving CLI config priority
                if 'traitlet_configuration' in conf:
                    recursive_update(conf['traitlet_configuration'], self.voila_configuration.config.VoilaConfiguration)
                    # pass merged config to overall Voilà config
                    self.voila_configuration.config.VoilaConfiguration = Config(conf['traitlet_configuration'])
    self.log.debug('using template: %s', self.voila_configuration.template)
    self.log.debug('template paths:\n\t%s', '\n\t'.join(self.template_paths))
    self.log.debug('static paths:\n\t%s', '\n\t'.join(self.static_paths))
    if self.notebook_path and not os.path.exists(self.notebook_path):
        raise ValueError('Notebook not found: %s' % self.notebook_path)
def _handle_signal_stop(self, sig, frame):
    """SIGTERM handler: stop the tornado IO loop from signal context.

    add_callback_from_signal is the signal-safe way to schedule work
    on the loop.
    """
    # Lazy %-style args: let logging interpolate only if the record is emitted.
    self.log.info('Handle signal %s.', sig)
    self.ioloop.add_callback_from_signal(self.ioloop.stop)
def start(self):
    """Wire up kernel management, templates, handlers; then listen()."""
    self.connection_dir = tempfile.mkdtemp(
        prefix='voila_',
        dir=self.connection_dir_root
    )
    self.log.info('Storing connection files in %s.' % self.connection_dir)
    self.log.info('Serving static files from %s.' % self.static_root)
    self.kernel_spec_manager = KernelSpecManager(
        parent=self
    )
    self.kernel_manager = AsyncMappingKernelManager(
        parent=self,
        connection_dir=self.connection_dir,
        kernel_spec_manager=self.kernel_spec_manager,
        # Only these message types may pass through the websocket channels.
        allowed_message_types=[
            'comm_open',
            'comm_close',
            'comm_msg',
            'comm_info_request',
            'kernel_info_request',
            'shutdown_request'
        ]
    )
    jenv_opt = {"autoescape": True}  # we might want extra options via cmd line like notebook server
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(self.template_paths), extensions=['jinja2.ext.i18n'], **jenv_opt)
    nbui = gettext.translation('nbui', localedir=os.path.join(ROOT, 'i18n'), fallback=True)
    env.install_gettext_translations(nbui, newstyle=False)
    self.contents_manager = LargeFileManager(parent=self)
    # we create a config manager that load both the serverconfig and nbconfig (classical notebook)
    read_config_path = [os.path.join(p, 'serverconfig') for p in jupyter_config_path()]
    read_config_path += [os.path.join(p, 'nbconfig') for p in jupyter_config_path()]
    self.config_manager = ConfigManager(parent=self, read_config_path=read_config_path)
    # default server_url to base_url
    self.server_url = self.server_url or self.base_url
    self.app = tornado.web.Application(
        base_url=self.base_url,
        server_url=self.server_url or self.base_url,
        kernel_manager=self.kernel_manager,
        kernel_spec_manager=self.kernel_spec_manager,
        allow_remote_access=True,
        autoreload=self.autoreload,
        voila_jinja2_env=env,
        jinja2_env=env,
        static_path='/',
        server_root_dir='/',
        contents_manager=self.contents_manager,
        config_manager=self.config_manager
    )
    self.app.settings.update(self.tornado_settings)
    handlers = []
    # Kernel REST/websocket API plus Voilà's own static assets.
    handlers.extend([
        (url_path_join(self.server_url, r'/api/kernels/%s' % _kernel_id_regex), KernelHandler),
        (url_path_join(self.server_url, r'/api/kernels/%s/channels' % _kernel_id_regex), ZMQChannelsHandler),
        (
            url_path_join(self.server_url, r'/voila/static/(.*)'),
            MultiStaticFileHandler,
            {
                'paths': self.static_paths,
                'default_filename': 'index.html'
            }
        )
    ])
    # Serving notebook extensions
    if self.voila_configuration.enable_nbextensions:
        handlers.append(
            (
                url_path_join(self.server_url, r'/voila/nbextensions/(.*)'),
                FileFindHandler,
                {
                    'path': self.nbextensions_path,
                    'no_cache_paths': ['/'],  # don't cache anything in nbextensions
                },
            )
        )
    # Notebook-adjacent files, filtered by the configured white/blacklists.
    handlers.append(
        (
            url_path_join(self.server_url, r'/voila/files/(.*)'),
            WhiteListFileHandler,
            {
                'whitelist': self.voila_configuration.file_whitelist,
                'blacklist': self.voila_configuration.file_blacklist,
                'path': self.root_dir,
            },
        )
    )
    tree_handler_conf = {
        'voila_configuration': self.voila_configuration
    }
    if self.notebook_path:
        # Single-notebook mode: every path renders that one notebook.
        handlers.append((
            url_path_join(self.server_url, r'/(.*)'),
            VoilaHandler,
            {
                'notebook_path': os.path.relpath(self.notebook_path, self.root_dir),
                'template_paths': self.template_paths,
                'config': self.config,
                'voila_configuration': self.voila_configuration
            }
        ))
    else:
        # Directory mode: browse a tree and render notebooks on demand.
        self.log.debug('serving directory: %r', self.root_dir)
        handlers.extend([
            (self.server_url, VoilaTreeHandler, tree_handler_conf),
            (url_path_join(self.server_url, r'/voila/tree' + path_regex),
             VoilaTreeHandler, tree_handler_conf),
            (url_path_join(self.server_url, r'/voila/render/(.*)'),
             VoilaHandler,
             {
                 'template_paths': self.template_paths,
                 'config': self.config,
                 'voila_configuration': self.voila_configuration
             }),
        ])
    self.app.add_handlers('.*$', handlers)
    self.listen()
def stop(self):
    """Clean up on shutdown: remove connection files, stop all kernels."""
    shutil.rmtree(self.connection_dir)
    # NOTE(review): run_sync is handed an already-created coroutine object
    # here; confirm the installed jupyter_server's run_sync accepts that
    # (some versions expect a coroutine *function* instead).
    run_sync(self.kernel_manager.shutdown_all())
def random_ports(self, port, n):
    """Generate a list of n random ports near the given port.

    The first 5 ports will be sequential, and the remaining n-5 will be
    randomly selected in the range [port-2*n, port+2*n].
    """
    for offset in range(min(5, n)):
        yield port + offset
    for _ in range(n - 5):
        yield max(1, port + random.randint(-2 * n, 2 * n))
def listen(self):
    """Bind the app to a free port, optionally open a browser, run the loop.

    Tries self.port first, then up to port_retries fallback ports; exits
    the process when no port can be bound.
    """
    # BUG FIX: `success` was previously unbound when every candidate port
    # raised (all `continue`s), making `if not success` a NameError.
    success = False
    for port in self.random_ports(self.port, self.port_retries + 1):
        try:
            self.app.listen(port)
            self.port = port
            self.log.info('Voilà is running at:\n%s' % self.display_url)
        except socket.error as e:
            if e.errno == errno.EADDRINUSE:
                self.log.info(_('The port %i is already in use, trying another port.') % port)
                continue
            elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
                self.log.warning(_("Permission to listen on port %i denied") % port)
                continue
            else:
                raise
        else:
            self.port = port
            success = True
            break
    if not success:
        self.log.critical(_('ERROR: the Voilà server could not be started because '
                            'no available port could be found.'))
        self.exit(1)
    if self.open_browser:
        self.launch_browser()
    self.ioloop = tornado.ioloop.IOLoop.current()
    try:
        self.ioloop.start()
    except KeyboardInterrupt:
        self.log.info('Stopping...')
    finally:
        # Always release kernels and connection files, even on Ctrl-C.
        self.stop()
def launch_browser(self):
    """Open the user's browser on the app via a temporary redirect page."""
    try:
        browser = webbrowser.get(self.browser or None)
    except webbrowser.Error as e:
        self.log.warning(_('No web browser found: %s.') % e)
        browser = None
    if not browser:
        return
    uri = self.base_url
    fd, open_file = tempfile.mkstemp(suffix='.html')
    # Write a temporary file to open in the browser
    with io.open(fd, 'w', encoding='utf-8') as fh:
        # TODO: do we want to have the token?
        # if self.token:
        #     url = url_concat(url, {'token': self.token})
        url = url_path_join(self.connection_url, uri)
        jinja2_env = self.app.settings['jinja2_env']
        template = jinja2_env.get_template('browser-open.html')
        fh.write(template.render(open_url=url, base_url=url))

    def target():
        # Run in a thread so a slow browser launch doesn't block the IO loop.
        return browser.open(urljoin('file:', pathname2url(open_file)), new=self.webbrowser_open_new)
    threading.Thread(target=target).start()
main = Voila.launch_instance
|
multiprocessing_daemon_join_timeout.py
|
#
"""Daemon vs. non-daemon processes.
"""
# end_pymotw_header
import multiprocessing
import time
import sys
def daemon():
    """Worker for the daemon process: print, sleep 2s, print again.

    Because it is started with daemon=True, the main program may exit
    (terminating this process) before the final print runs.
    """
    name = multiprocessing.current_process().name
    print("Starting:", name)
    time.sleep(2)
    print("Exiting :", name)
def non_daemon():
    """Worker for the non-daemon process: print start/exit markers and return."""
    proc_name = multiprocessing.current_process().name
    print("Starting:", proc_name)
    print("Exiting :", proc_name)
if __name__ == "__main__":
    d = multiprocessing.Process(name="daemon", target=daemon)
    d.daemon = True  # terminated automatically when the main process exits
    n = multiprocessing.Process(name="non-daemon", target=non_daemon)
    n.daemon = False
    d.start()
    n.start()
    # Wait at most 1 second -- shorter than daemon()'s 2-second sleep, so
    # the daemon process is typically still alive at the next print.
    d.join(1)
    print("d.is_alive()", d.is_alive())
    n.join()
|
sharedctypes.py
|
"""
"Multiprocessing" section example showing how
to use sharedctypes submodule to share data
between multiple processes.
"""
from multiprocessing import Process, Value, Array
def f(n, a):
    """Child-process worker: store a pi-like constant in n.value and negate
    every element of the sequence *a* in place."""
    n.value = 3.1415927
    for idx, item in enumerate(a):
        a[idx] = -item
if __name__ == "__main__":
    # 'd' = C double, 'i' = C signed int: ctypes values in shared memory.
    num = Value("d", 0.0)
    arr = Array("i", range(10))
    p = Process(target=f, args=(num, arr))
    p.start()
    p.join()
    # The child's writes are visible here because Value/Array are shared.
    print(num.value)
    print(arr[:])
|
create_test_products.py
|
#!/usr/bin/env python
"""
Stand-alone data generation routine that uses the ecommerce taxonomy found on
Google Base to generate a significant amount of category and product data, as
well as using the Flickr API to retrieve images for the products. The
multiprocessing module is also used for parallelization.
The Django models and environment used here are specific to the Cartridge
project but the approach could easily be reused with any ecommerce database.
"""
import sys
import os
path = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(path, "..", ".."))
os.environ["DJANGO_SETTINGS_MODULE"] = "cartridge.project_template.settings"
from multiprocessing import Process, Queue
from os.path import exists, join
from shutil import move
from sys import exit
from urllib import urlopen, urlretrieve
from django.contrib.webdesign.lorem_ipsum import paragraph
from django.db import connection
from django.db.models import F
from mezzanine.conf import settings
from mezzanine.core.models import CONTENT_STATUS_PUBLISHED
from cartridge.shop.models import Category, Product, ProductOption
try:
import flickr
except ImportError:
print "flickr.py must be installed from http://code.google.com/p/flickrpy/"
exit()
WORKERS = 10
image_dir = join(settings.STATIC_ROOT, "product")
queue = Queue()
product_options = {"Size": ("Small", "Medium", "Large"),
"Colour": ("Red", "Orange", "Yellow", "Green", "Blue", "Indigo", "Violet")}
def create_products(queue):
    """
    Download an image from Flickr for the product on the queue and if
    successful now or previously, create the applicable product records.

    NOTE: legacy Python 2 code (``print`` statement, ``except Exception, e``).
    Runs as a worker process; loops until it pulls the ``None`` sentinel
    that the main block enqueues once per worker.
    """
    # Close the connection for this process to avoid the issue discussed here:
    # http://groups.google.com/group/django-users/
    # browse_thread/thread/2c7421cdb9b99e48
    connection.close()
    # Shadows the module-level product_options dict with the DB-backed fields.
    product_options = ProductOption.objects.as_fields()
    while True:
        # Get next set of data from queue.
        data = queue.get()
        if data is None:
            break
        # data is a taxonomy triple: main category > sub category > product.
        main_category, sub_category, product = data[0], data[1], data[-1]
        # Try and download a product image from Flickr.
        image = join(image_dir, "%s.jpg" % product)
        if exists(image):
            message = "Using already downloaded image for %s" % data
        else:
            try:
                images = flickr.photos_search(tags=[product], per_page=1)
                if not images:
                    raise Exception("No images found")
                url = images[0].getURL(size="Large", urlType="source")
                urlretrieve(url, image)
            except Exception, e:
                message = "Error [%s] for %s" % (e, data)
            else:
                message = "Successfully downloaded image for %s" % data
        remaining = "%s remaining" % queue.qsize()
        print remaining.ljust(20, "."), message
        # Create database records for the product.
        # Only if an image exists (from this run or a previous one).
        if exists(image):
            product = Category.objects.get(parent__title=main_category,
                title=sub_category).products.create(title=product,
                available=True, status=CONTENT_STATUS_PUBLISHED,
                content="<p>%s</p>" % paragraph())
            image = "product/%s.jpg" % product.title
            product.images.create(file=image)
            product.variations.create_from_options(product_options)
            product.variations.manage_empty()
            # Derive a pseudo-random unit price from the row id.
            product.variations.update(unit_price=F("id") + "10000")
            product.variations.update(unit_price=F("unit_price") / "1000.0")
            product.copy_default_variation()
if __name__ == "__main__":
# Load the Google Base data.
category_url = "http://www.google.com/basepages/producttype/taxonomy.txt"
try:
category_data = urlopen(category_url).read()
except Exception, e:
print "Failed to load category data: %s" % e
exit()
# Clear out the database, moving the product images to a temp location and
# restoring them so that they're not deleted.
print "Resetting product options"
ProductOption.objects.all().delete()
for type, name in settings.SHOP_OPTION_TYPE_CHOICES:
for name in product_options[unicode(name)]:
ProductOption.objects.create(type=type, name=name)
Category.objects.all().delete()
print "Deleting categories"
Category.objects.all().delete()
print "Backing up images"
move(image_dir, "tmp_products")
print "Deleting products"
Product.objects.all().delete()
print "Restoring images"
move("tmp_products", image_dir)
# Parse the category data into triples of main category, sub category and
# product, create the categories and put the triples onto the queue. The
# categories must be created here in a single process due to the non-atomic
# nature of Django's Model.objects.get_or_create()
print "Creating categories"
for line in category_data.split("\n"):
parts = line.split(" > ")
if len(parts) > 2:
if len(parts) == 3:
main_category, created = Category.objects.get_or_create(
title=parts[0], status=CONTENT_STATUS_PUBLISHED)
sub_category, created = Category.objects.get_or_create(
title=parts[1], status=CONTENT_STATUS_PUBLISHED,
parent=main_category)
queue.put(parts)
# Create worker processes and run the main function in them.
workers = []
for _ in range(WORKERS):
queue.put(None)
workers.append(Process(target=create_products, args=(queue,)))
for worker in workers:
worker.start()
for worker in workers:
worker.join()
|
PickleViewer.py
|
print("Loading Modules...")
import pickle
import os.path
import pprint
import ast
import sys
import threading
import uuid
import urllib.request
import configparser
import hashlib
import subprocess
import ctypes
import json
import time
import tkinter as tk
from tkinter import *
from tkinter import messagebox, filedialog, font, ttk
from TkSStausBar import StatusBar
from pygments.lexers.python import PythonLexer
from pygments.styles import get_style_by_name
# Checking if wait mode
# --wait keeps the console visible and pauses on errors; --update forces the
# update prompt in checkForUpdates() even when the version is current.
wait_mode = False
update_mode = False
if "--wait" in sys.argv:
    wait_mode = True
    print("Wait mode enabled")
if "--update" in sys.argv:
    update_mode = True
    print("Update mode enabled")
# Initial sys Frozen
# Detect whether we run from a PyInstaller bundle (sys.frozen is set by the
# bootloader) or from a normal Python interpreter.
frozen = 'not'
bol_frozen = False
if getattr(sys, 'frozen', False):
    # we are running in a bundle
    frozen = 'bundle'
    bol_frozen = True
    bundle_dir = None
    try:
        # PyInstaller extracts to a temp dir exposed as sys._MEIPASS.
        bundle_dir = sys._MEIPASS
        print('bundle dir is', bundle_dir)
    except BaseException as e:
        print("Error while getting bundle dir!", e)
        messagebox.showerror("Bundle Error", "Error while getting bundle dir! " + str(e))
        # print('sys.argv[0] is', sys.argv[0])
        # print('sys.argv[1] is', sys.argv[1])
        # Expression-statement idiom: pause only when --wait was given.
        input("Press enter to continue...") if wait_mode else False
else:
    # we are running in a normal Python environment
    bundle_dir = os.path.dirname(os.path.abspath(__file__))
print('PickleViewer v0.7.6 is', frozen, 'frozen')
# Win32 ShowWindow() command values.
SW_HIDE = 0
# Fix: SW_SHOW must be 5 (per the Win32 API); it was 0 (== SW_HIDE), so the
# console window was never re-shown after the main loop exits.
SW_SHOW = 5
# Hide Console on Windows so only the GUI is visible (unless --wait is set).
if sys.platform == 'win32':
    kernel32 = ctypes.WinDLL('kernel32')
    user32 = ctypes.WinDLL('user32')
    hWnd = kernel32.GetConsoleWindow()
    if hWnd:
        user32.ShowWindow(hWnd, SW_HIDE) if not wait_mode else False
# Loading settings
print("Loading settings")
# config.ini lives next to the script/executable.
conf_path = os.path.join(os.path.dirname(sys.argv[0]), "config.ini")
# Creating config file, if not exists
if not os.path.isfile(conf_path):
    print("Settings file does not exists. Creating...")
    # Fix: use a context manager so the handle is closed even if the
    # write raises (the original open()/write()/close() could leak it).
    with open(conf_path, "w") as tmp_conf_file:
        tmp_conf_file.write("[WINDOW]\ngeometry = 981x407+182+182\nstate = normal")
# Generating fallback defaults, used when the file cannot be parsed.
local_config = configparser.ConfigParser()
local_config["WINDOW"] = {"geometry": "981x407+182+182", "state": "normal"}
# loading config file
try:
    local_config.read(conf_path)
except configparser.Error as ex:
    print("Error while reading config-file:", ex)
# TODO: Software Info's
# NOTE(review): other strings in this file still say "v0.7.6" (update paths,
# frozen banner) while these constants say 0.7.7 — confirm intended version.
software_name = "PickleViewer v0.7.7"
software_version = "0.77"          # numeric form compared via float() in checkForUpdates()
software_version_string = "0.7.7-beta.5"
software_title = software_name + " (" + software_version_string + ")"
# Global Variables
data = {}                          # last unpickled payload (module scope)
pp = pprint.PrettyPrinter(indent=2)  # pretty-printer used to render pickles
editing = True                     # True while the text widget is editable
open_filename = ""                 # absolute path of the open file ("" = none)
open_filetitle = "*untitled*"      # basename shown in the window title
file_loaded = False                # a pickle file is currently loaded
file_changed = False               # unsaved modifications exist
rf_running = False                 # a refresh (recolorize/treeview) is in progress
rf_allDone = False                 # no further refresh needed until next edit
rf_threads = 0                     # count of queued refreshManager threads
tv_threads = 0                     # count of running treeview-loader threads
last_text = ""                     # buffer snapshot used for change detection
# Functions
def getSHA(filename):
    """Return the SHA-256 hex digest of *filename*, read in 4 KiB chunks."""
    digest = hashlib.sha256()
    with open(filename, "rb") as handle:
        while True:
            chunk = handle.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def checkForUpdates():
    """Query GitHub for the newest release and offer to download/run its installer.

    Runs in a background thread (started near the bottom of the script).
    Network errors are NOT caught here — only configparser errors are;
    a failed urlopen would kill this thread with a traceback.
    """
    print("Checking for updates...")
    req_data = urllib.request.urlopen(
        "https://raw.githubusercontent.com/Matix-Media/PickleViewer/master/versions/info.ini").read()
    body = req_data.decode("utf-8")
    config = configparser.ConfigParser()
    config.read_string(body)
    try:
        recent_version = config["RECENT"]["version"]
        recent_installer_path = config["RECENT"]["installer_path"]
        recent_version_info = urllib.request.urlopen(config["RECENT"]["version_info"]).read().decode("utf-8")
        recent_installer_sha = config["RECENT"]["sha256"]
        # --update forces the download prompt even when already current.
        if float(recent_version) > float(software_version) or update_mode:
            print("Version outdated! Ask for download")
            question_result = messagebox.askquestion("Update",
                                                     "Your " + software_name + \
                                                     " version is outdated. Would you like to download version " + \
                                                     config["RECENT"]["version_string"] + " of " + \
                                                     software_name + "?\n\nFeatures:\n" + \
                                                     recent_version_info)
            print("Download installer?", question_result)
            if question_result == "yes":
                # Windows-only path: %APPDATA% — presumably never reached on
                # other platforms; TODO confirm.
                appdata = os.getenv("APPDATA")
                save_path = os.path.join(appdata, "PickleViewer v0.7.6", "installer",
                                         os.path.basename(recent_installer_path))
                print("Downloading setup to \"", save_path, "\"...", sep="")
                SB.set("Downloading \"" + os.path.basename(recent_installer_path) + "\"...")
                if not os.path.isdir(os.path.dirname(save_path)):
                    print("Path \"", os.path.dirname(save_path), "\" does not exists, creating...", sep="")
                    os.makedirs(os.path.dirname(save_path))
                urllib.request.urlretrieve(recent_installer_path, save_path)
                SB.set("Downloaded \"" + os.path.basename(recent_installer_path) + "\"")
                print("Download done. Checking SHA256...")
                # Verify installer integrity before launching it.
                if getSHA(save_path) == recent_installer_sha:
                    print("SHA256 successfully verified! Starting installer...")
                    subprocess.Popen(r'explorer /run,"' + save_path + '"')
                else:
                    print("! Warning: SHA256 of installer could not be verified!")
                    messagebox.showwarning("Update", "Warning: SHA256 of installer could not be verified!")
                    if messagebox.askokcancel("Update", "Run installer of own risk without SHA256 verification?"):
                        print("Starting installer...")
                        subprocess.Popen(r'explorer /run,"' + save_path + '"')
        elif float(recent_version) == float(software_version):
            print("You are using the latest version of", software_name)
        elif float(recent_version) < float(software_version):
            print("Wow, you are using a version, that can't be even downloaded right now!")
            messagebox.showinfo("Update", "Wow, you are using a version of " + software_name + \
                                ", that can't be even downloaded right now!")
    except configparser.Error as ex:
        print("Can not read Online Version info's:", ex)
def askForOverwrite():
    """Confirm discarding unsaved changes; return True if the user agrees."""
    confirmed = messagebox.askokcancel("Open file - warning",
                                       "The file current open was not saved! Do you want to overwrite it?", )
    if confirmed:
        print("Overwriting!")
        return True
    return False
def load_file(filename):
    """Load a pickle file into the viewer.

    Unpickles *filename*, pretty-prints the data into the text widget,
    switches the editor into read-only mode and schedules a recolorize /
    treeview refresh.  On error a message box is shown and the previous
    state is left untouched.

    SECURITY NOTE: pickle.load executes arbitrary code embedded in the
    file — only open trusted pickles.
    """
    global open_filename
    global file_loaded
    global open_filetitle
    global editing
    global file_changed
    global rf_allDone
    global last_text
    print("Selected Path:", filename)
    if filename == "":
        print("Opening canceled!")
        SB.set("opening canceled!")
        return
    # Don't silently discard unsaved work.
    if file_changed:
        if not askForOverwrite():
            return
    if os.path.isfile(filename):
        print("Reading file \"", os.path.basename(filename), "\"...", sep="")
        try:
            # Fix: use a context manager so the file handle is always closed
            # (originally pickle.load(open(filename, "rb")) leaked it).
            with open(filename, "rb") as pickle_file:
                data = pickle.load(pickle_file)
            print("Data:")
            format_data = pp.pformat(data)
            print(format_data)
            print("End.")
            # Widget must be writable to replace its content; lock it after.
            T.config(state=NORMAL)
            T.delete("1.0", END)
            T.insert("1.0", format_data)
            last_text = format_data
            T.config(state=DISABLED)
            file_loaded = True
            open_filename = filename
            open_filetitle = os.path.basename(filename)
            editing = False
            root.title(software_title + " - " + open_filetitle)
            filemenu.entryconfig("Edit current Pickle file", state=NORMAL)
            print("re-colorizing...")
            rf_allDone = False
            refreshManager()
            SB.set("Loaded Pickle file \"" + open_filetitle + "\"")
        except BaseException as e:
            print("Error while reading Data:\n", e)
            error = "Error while reading Data:\n" + str(e)
            messagebox.showerror("Error", error)
            file_loaded = False
    else:
        print("Error: The Path \"", filename, "\" is not valid.", sep="")
        error = "Error: The Path \"" + filename + "\" is not valid."
        messagebox.showerror("Error", error)
        file_loaded = False
def save_to_file(event=None):
    """Save the editor buffer as a pickle file.

    Parses the text widget content with ast.literal_eval (so only Python
    literals can be saved), then dumps it to the already-open file or to a
    path chosen via a save dialog.  Bound to Ctrl+S and the File menu.
    """
    global open_filename
    global file_loaded
    global open_filetitle
    global editing
    global file_changed
    print("Starting saving.")
    print("Checking if file is from filesystem.")
    data_to_write = None
    opened_file = None
    try:
        # literal_eval only accepts Python literals — safe parse of the buffer.
        data_to_write = ast.literal_eval(T.get("1.0", END))
    except BaseException as e:
        error_msg = "Error while reading data: " + str(e)
        print(error_msg)
        messagebox.showerror("Error", error_msg)
        return
    try:
        if not file_loaded or not os.path.isfile(open_filename):
            # No backing file yet — ask for a destination.
            tmp_file = filedialog.asksaveasfile(mode="wb", defaultextension=".pkl")
            if tmp_file is None:
                print("Saving canceled!")
                SB.set("Saving canceled!")
                return
            else:
                opened_file = tmp_file
        else:
            opened_file = open(open_filename, "wb")
        pickle.dump(data_to_write, opened_file)
        # Fix: the handle was never closed, so data could stay unflushed and
        # the descriptor leaked. .name remains accessible after close().
        opened_file.close()
        open_filename = opened_file.name
        file_loaded = True
        open_filetitle = os.path.basename(open_filename)
        if editing:
            root.title(software_title + " - " + open_filetitle + " [Edit]")
        else:
            root.title(software_title + " - " + open_filetitle)
        file_changed = False
        SB.set("Saved Pickle file \"" + open_filetitle + "\"")
        print("Saving Done.")
    except BaseException as e:
        error_msg = "Error while saving file: " + str(e)
        print(error_msg)
        messagebox.showerror("Error", error_msg)
        return
def SaveSettings():
    """Persist the current window geometry and state to config.ini."""
    print("Saving settings.")
    window_state = str(root.state())
    # Only record geometry in the normal state; a zoomed/iconic geometry
    # would be restored incorrectly next launch.
    if window_state == "normal":
        local_config["WINDOW"]["geometry"] = str(root.winfo_geometry())
    local_config["WINDOW"]["state"] = window_state
    try:
        with open(conf_path, "w") as conf_file:
            local_config.write(conf_file)
    except BaseException as ex_save:
        print("Error while saving settings:", ex_save)
        messagebox.showerror("Error while saving settings", "Error while saving settings: " + str(ex_save))
def create_tags():
    """Create a Text-widget tag for every Pygments token type.

    Uses the Pygments 'default' style: each token type gets a tag whose
    font weight/slant and foreground colour mirror the style definition.
    recolorize() later applies these tags by token-type name.
    """
    base_font = T.cget("font")
    bold_font = font.Font(T, base_font)
    bold_font.configure(weight=font.BOLD)
    italic_font = font.Font(T, base_font)
    italic_font.configure(slant=font.ITALIC)
    bold_italic_font = font.Font(T, base_font)
    bold_italic_font.configure(weight=font.BOLD, slant=font.ITALIC)
    for token_type, style_def in get_style_by_name('default'):
        # Select the font variant matching the style's weight/slant flags.
        if style_def['bold'] and style_def['italic']:
            tag_font = bold_italic_font
        elif style_def['bold']:
            tag_font = bold_font
        elif style_def['italic']:
            tag_font = italic_font
        else:
            tag_font = None
        if style_def['color']:
            foreground = "#%s" % style_def['color']
        else:
            foreground = None
        T.tag_configure(str(token_type), foreground=foreground, font=tag_font)
def removecolors():
    """Remove every known highlight tag from the whole text buffer.

    NOTE(review): ``root.tagdefs`` is not a standard Tk attribute — this
    looks adapted from an IDLE/colorizer example; confirm it is set
    somewhere before this is called (it is never called in this file).
    """
    for tag_name in root.tagdefs:
        T.tag_remove(tag_name, "1.0", "end")
def recolorize():
    """Re-apply syntax highlighting to the whole buffer.

    Lexes the buffer with Pygments' PythonLexer and tags each token span
    with the tag named after its token type (set up by create_tags()).
    Tracks "line.column" indices manually because Pygments only yields
    token strings, not positions.
    """
    code = T.get("1.0", "end-1c")
    tokensource = PythonLexer().get_tokens(code)
    # Tk text indices are 1-based lines and 0-based columns.
    start_line = 1
    start_index = 0
    end_line = 1
    end_index = 0
    for ttype, value in tokensource:
        if "\n" in value:
            # Token spans line breaks: advance the line counter and restart
            # the column at the length of the text after the last newline.
            end_line += value.count("\n")
            end_index = len(value.rsplit("\n", 1)[1])
        else:
            end_index += len(value)
        # Skip pure whitespace tokens — nothing visible to tag.
        if value not in (" ", "\n"):
            index1 = "%s.%s" % (start_line, start_index)
            index2 = "%s.%s" % (end_line, end_index)
            # Clear any stale tags on this span before re-tagging.
            for tagname in T.tag_names(index1):
                T.tag_remove(tagname, index1, index2)
            # print(ttype, repr(value), index1, index2)
            T.tag_add(str(ttype), index1, index2)
        # Next token starts where this one ended.
        start_line = end_line
        start_index = end_index
def json_tree(tree, parent, dictionary):
    """Populate *tree* (a ttk.Treeview) under *parent* from *dictionary*.

    Container values get a "[...] Loading..." placeholder child and a tag
    tuple (uid, has_children, repr, placeholder_iid, expanded) that
    selectItem() uses for lazy expansion.

    NOTE(review): *dictionary* may actually be a list here (callers pass
    whatever ast.literal_eval produced); iterating a list yields its
    elements, so ``key`` can itself be a dict — that is what the first
    branch handles.  For other list elements ``dictionary[key]`` raises
    TypeError, which callers deliberately catch — confirm before
    restructuring.
    """
    num = 0
    for key in dictionary:
        # A fresh uuid serves as the Treeview item id.
        uid = uuid.uuid4()
        if isinstance(key, dict):
            uid2 = uuid.uuid4()
            tree.insert(parent, 'end', uid, text=num, value="[...]",
                        tag=(uid, True, str(key), uid2, False))
            tree.insert(uid, END, text="[...] Loading...", iid=uid2, tag=(uid2, False))
            # tree.insert(parent, 'end', uid, text=key)
            # json_tree(tree, uid, key)
        elif isinstance(dictionary[key], list):
            uid2 = uuid.uuid4()
            # NOTE(review): ``key + ' [...]'`` raises TypeError for non-str
            # keys (e.g. ints) — presumably silently swallowed by callers.
            tree.insert(parent, 'end', uid, text=key + ' [...]', value="[...]",
                        tag=(uid, True, str(dictionary[key]), uid2, False))
            tree.insert(uid, END, text="[...] Loading...", iid=uid2, tag=(uid2, False))
            # try:
            #     time.sleep(0.5)
            #     json_tree(tree,
            #               uid,
            #               dict([(i, x) for i, x in enumerate(dictionary[key])]))
            # except TypeError as ex:
            #     print("TreeView loading: Error while reading info's:", ex)
        else:
            # Scalar leaf: show the value directly.
            value = dictionary[key]
            if value is None:
                value = 'None'
            tree.insert(parent, 'end', uid, text=key, value=value, tag=(uid, False))
        num += 1
def loadTreeview():
    """Rebuild the Treeview from the current text buffer (runs in a thread).

    Parses the buffer with ast.literal_eval and feeds the result to
    json_tree().  Decrements the tv_threads counter on every exit path so
    refreshManager() can start a new loader later.
    """
    global tv_threads
    # Clear all existing rows before repopulating.
    TR.delete(*TR.get_children())
    inner_data = {}
    try:
        inner_data = ast.literal_eval(T.get("1.0", END))
    except BaseException as e:
        error_msg = "TreeView loading: Error while reading data: " + str(e)
        print(error_msg)
        TR.insert('', 0, text="Can't load DataTree")
        tv_threads -= 1
        print("Treeview threads =", tv_threads)
        return
    # Unused legacy helper kept from an earlier rendering approach.
    def myprint(d, parent):
        for k, v in d.items():
            if isinstance(v, dict):
                myprint(v, parent)
            else:
                to_collapse = TR.insert(parent, 'end', text=v)
                TR.item(to_collapse, open=True)
                # print("{0} : {1}".format(k, v))
    # tr_parent = TR.insert('', 'end', text='PickleFile')
    # TR.item(tr_parent, open=True)
    # myprint(inner_data, tr_parent)
    # TR.insert("", END, inner_data)
    try:
        json_tree(TR, '', inner_data)
    except TypeError as ex:
        # Raised e.g. when the parsed data is not dict-shaped; shown only on console.
        print("TreeView loading: Error while getting info's:", ex)
    tv_threads -= 1
def refreshManager():
    """Coordinate recolorize + treeview refresh via global flags.

    rf_running guards against concurrent refreshes, rf_allDone suppresses
    redundant ones, and tv_threads limits treeview loaders to one at a
    time.  NOTE(review): the flags are plain globals with no lock — looks
    tolerant of races by design; confirm before relying on exactness.
    """
    global rf_running
    global rf_allDone
    global rf_threads
    global tv_threads
    rf_threads += 1
    if not rf_running and not rf_allDone:
        # print("Running refresh")
        rf_running = True
        # Recolorize synchronously on this thread.
        recolorize()
        if tv_threads < 1:
            tv_threads += 1
            # print("Starting Treeview thread")
            tree_thread = threading.Thread(target=loadTreeview)
            tree_thread.start()
        else:
            # print("Treeview thread already running")
            pass
        rf_running = False
        rf_allDone = True
    else:
        if rf_running:
            # print("Refresh already running!")
            pass
        elif rf_allDone:
            # print("All refreshes already done.")
            pass
    rf_threads -= 1
# Textbox events
def event_key(event):
    """<Key> handler on the text widget: track changes and trigger refreshes.

    Marks the file dirty (title gets a '*') on the first real change and
    spawns a refreshManager thread, throttled to at most two queued
    threads via rf_threads.
    """
    global rf_allDone
    global file_loaded
    global file_changed
    global editing
    global rf_running
    global rf_threads
    global last_text
    current_text = T.get("1.0", END)
    # Only a genuine text change (vs. last snapshot) counts as dirty.
    if file_loaded and editing and last_text != current_text:
        file_changed = True
        root.title(software_title + " - *" + open_filetitle + " [Edit]")
        rf_allDone = False
    if editing and not rf_running and rf_threads < 2 and last_text != current_text:
        last_text = current_text
        th = threading.Thread(target=refreshManager)
        th.start()
    else:
        # print("RefreshManager Thread stack full.")
        pass
def event_tab(event=None):
    """<Tab> handler: insert two spaces instead of a literal tab."""
    T.insert(tk.INSERT, "  ")
    # 'break' stops Tk's default binding from also inserting a tab.
    return 'break'
# TreeView Events
def selectItem(event=None):
    """Treeview click handler: lazily expand container nodes.

    Tag layout per item (all stringified by Tk):
    (uid, has_children, repr_of_children, placeholder_iid, already_expanded).
    On first expansion the stored repr is re-parsed with ast.literal_eval
    and rendered via json_tree().
    """
    if not TR.focus():
        print("No item selected!")
        return
    curItem = TR.focus()
    # print(TR.item(curItem))
    itemInfo = TR.item(curItem)
    if itemInfo["tags"][1] == "True":
        print("Selected TreeView item:", itemInfo["tags"][0], ", Item has subItems:", True, ", Placeholder-Child-IID:",
              itemInfo["tags"][3])
        # Drop the "[...] Loading..." placeholder child, if still present.
        if TR.exists(itemInfo["tags"][3]):
            TR.delete(itemInfo["tags"][3])
        if itemInfo["tags"][4] == "True":
            print("Dict already generated.")
            return
        print("Getting item dict...")
        item_dict = ["Error while generating Dict"]
        try:
            # Safe re-parse: literal_eval accepts Python literals only.
            item_dict = ast.literal_eval(itemInfo["tags"][2])
        except BaseException as ex:
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print("Error while generating subItems:", ex, ", Line", exc_tb.tb_lineno)
            messagebox.showerror("Error while generating subItems", "Error while generating subItems: " + str(ex) + \
                                 ", Line " + str(exc_tb.tb_lineno))
            return
        json_tree(TR, curItem, item_dict)
        # Fix: removed a no-op mutation of the local item() copy and an
        # unused local; the widget's tags are updated for real below.
        TR.item(curItem,
                tag=(itemInfo["tags"][0], itemInfo["tags"][1], itemInfo["tags"][2], itemInfo["tags"][3], "True"))
    else:
        print("Selected TreeView item:", itemInfo["tags"][0], ", Item has subItems:", False)
# Window events
def menQuit(event=None):
    """Quit handler (WM_DELETE_WINDOW, Ctrl+Q, File>Exit).

    Asks for confirmation when there are unsaved changes, persists the
    window settings and stops the main loop.
    """
    print("Inventing quitting...")
    if file_changed:
        if messagebox.askokcancel("Quit",
                                  "The current file was not saved! If you quit, the changes will be lost! Do you really want to quit?"):
            print("Lost changes")
            # Fix: settings were only saved on the no-unsaved-changes path,
            # so geometry/state were lost when quitting with changes.
            SaveSettings()
            print("Quitting!")
            root.quit()
        else:
            print("Quitting canceled!")
    else:
        SaveSettings()
        print("Quitting!")
        root.quit()
# Defining main window
root = tk.Tk()
root.title(software_title + " - " + open_filetitle + " [Edit]")
# Icon loading: .ico on Windows, photo-image fallback on Linux; any other
# failure is reported but non-fatal.
try:
    root.iconbitmap(os.path.join(os.path.dirname(sys.argv[0]), "icon.ico")) #WINDOWS
except tk.TclError:
    try: #linux
        img=tk.Image('photo',file='icon.png')
        root.tk.call('wm','iconphoto',root._w,img)
    except: pass
except BaseException as e:
    print("Can not load PicklePreview Icon! " + str(e))
    messagebox.showwarning("Load Error", "Can not load PicklePreview Icon! " + str(e))
    input("Press enter to continue...") if wait_mode else False
# Route the window-close button through the confirm-and-save handler.
root.protocol("WM_DELETE_WINDOW", menQuit)
print("Set window geometry to:", local_config["WINDOW"]["geometry"])
root.geometry(local_config["WINDOW"]["geometry"])
print("Set window state:", local_config["WINDOW"]["state"])
root.state(local_config["WINDOW"]["state"])
# Status bar at the bottom of the window.
SB = StatusBar(root)
SB.set("no recent Actions")
SB.pack(side=BOTTOM, fill=X)
# PW = PanedWindow(orient=HORIZONTAL)
# PW.pack(fill=BOTH, expand=1)
# Main editor: text widget + vertical scrollbar on the left side.
S = tk.Scrollbar(root)
T = tk.Text(root)
# PW.add(T)
T.pack(side=tk.LEFT, fill=tk.BOTH, anchor=tk.N, expand=True)
# PW.add(S)
S.pack(side=tk.LEFT, fill=tk.Y)
S.config(command=T.yview)
T.config(yscrollcommand=S.set)
T.bind("<Key>", event_key)
T.bind("<Tab>", event_tab)
# Data tree: Treeview with a single "Value" column on the right side.
TR = ttk.Treeview(root, columns="Value")
TR.column("Value", width=100, anchor="center")
TR.heading("Value", text="Value")
TR_S = ttk.Scrollbar(root, orient="vertical", command=TR.yview)
TR_S.pack(side=RIGHT, fill=Y)
# PW.add(TR)
# PW.add(TR_S)
TR.pack(side=RIGHT, fill=Y)
TR.config(yscrollcommand=TR_S.set, selectmode=BROWSE)
TR.bind('<ButtonRelease-1>', selectItem)
# Menubar Actions
def menAbout(event=None):
    """Show the About dialog (version, author, license)."""
    messagebox.showinfo("About", software_name + """\n\nVersion: """ + software_version_string + """\nCreator: Max Heilmann\nCompany: Matix Media, Inc.
\nCopyright: [c] 2019\n\nThis is a open source Project under the Public MIT (X11-License) license.
\n\nFor more info's, visit \nhttps://www.matix-media.de""")
def menEdit(event=None):
    """Enable editing of the currently open pickle file."""
    global editing
    global last_text
    # Snapshot the buffer only on the read-only -> edit transition so the
    # change detector in event_key() has a baseline.
    if not editing:
        last_text = T.get("1.0", END)
    editing = True
    T.config(state=tk.NORMAL)
    root.title(software_title + " - " + open_filetitle + " [Edit]")
    filemenu.entryconfig("Edit current Pickle file", state=tk.DISABLED)
    SB.set("Enabled editing for current Pickle file")
def menOpen(event=None):
    """Prompt for a pickle file and load it into the viewer."""
    chosen = filedialog.askopenfilename(title="Select Pickle file",
                                        filetypes=(("all files", "*.*"),))
    load_file(chosen)
def menHelp(event=None):
    """Show the static help dialog describing the menu actions."""
    messagebox.showinfo("Help", """HELP
\n\nSave current Pickle file...: With the save function you can save your opened Pickle file
\nOpen Pickle file...: With the open function you can open Pickle files
\nEdit current Pickle file: With the edit function you can edit the open Pickle file
\nNew Pickle file: With the new file function you create a new Pickle file. When creating, the previous file is overwritten
\nExit: With the exit button you can end the program
\n\nI hope I could help!""")
def menNew(event=None):
    """Create a fresh, unsaved pickle buffer after user confirmation."""
    global editing
    global open_filetitle
    global open_filename
    global file_loaded
    global rf_allDone
    print("Entering new file Creation.")
    confirmed = messagebox.askyesno("New Pickle file",
                                    "Do you want to create a new file? It will overwrite the current open file.")
    if not confirmed:
        return
    print("Creating new file.")
    # Reset editor state to an empty, editable, untitled buffer.
    editing = True
    open_filetitle = "*untitled*"
    open_filename = ""
    file_loaded = False
    T.config(state=NORMAL)
    T.delete("1.0", END)
    filemenu.entryconfig("Edit current Pickle file", state=tk.DISABLED)
    root.title(software_title + " - " + open_filetitle + " [Edit]")
    SB.set("Created new Pickle file")
    rf_allDone = False
    refreshManager()
def donothing():
    """Placeholder menu callback: logs that no action is wired up."""
    print("No define action.")
# Menubar generating
menubar = tk.Menu(root)
filemenu = tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="New Pickle file", command=menNew, accelerator="Ctrl+N")
filemenu.add_command(label="Open Pickle file...", command=menOpen, accelerator="Ctrl+O")
filemenu.add_command(label="Save current Pickle file...", command=save_to_file, accelerator="Ctrl+S")
# Disabled until a file is loaded (re-enabled in load_file()).
filemenu.add_command(label="Edit current Pickle file", command=menEdit, state=DISABLED, accelerator="Ctrl+E")
filemenu.add_separator()
filemenu.add_command(label="Exit", command=menQuit, accelerator="Ctrl+Q")
menubar.add_cascade(label="File", menu=filemenu)
helpmenu = Menu(menubar, tearoff=0)
helpmenu.add_command(label="Help", command=menHelp)
helpmenu.add_command(label="About...", command=menAbout, accelerator="F1")
menubar.add_cascade(label="Help", menu=helpmenu)
root.config(menu=menubar)
# Adding Shortcuts
root.bind("<Control-KeyPress-s>", save_to_file)
# NOTE(review): Ctrl+Insert bound to menNew looks unintentional — confirm.
root.bind("<Control-KeyPress-Insert>", menNew)
root.bind("<Control-KeyPress-o>", menOpen)
root.bind("<Control-KeyPress-e>", menEdit)
root.bind("<Control-KeyPress-n>", menNew)
root.bind("<F1>", menHelp)
root.bind("<Control-KeyPress-q>", menQuit)
# Prepare highlight tags once, then do an initial colorize pass.
print("Creating color-tags...")
create_tags()
print("Testing coloring...")
recolorize()
# Main Checking process
# Accept an optional file path as the first CLI argument (unless it is a
# "--" flag) and open it on startup.
file_to_open_start = ""
try:
    print("Path to icon file:", os.path.join(os.path.dirname(os.path.realpath(__file__)), "icon.ico"))
    print("Script Path:", os.path.realpath(__file__))
    print("Startup Arguments:", sys.argv)
    if len(sys.argv) > 1: #
        if not sys.argv[1][:2] == "--":
            file_to_open_start = sys.argv[1]
        else:
            print("Parameter was special:", sys.argv[1][:2])
    else:
        print("No opened file by startup")
except BaseException as e:
    print("Error while starting! Check startup parameters. " + str(e))
    messagebox.showerror("Startup Error", "Error while starting! Check startup parameters. " + str(e))
    input("Press enter to continue...") if wait_mode else False
if not file_to_open_start == "":
    load_file(file_to_open_start)
# print("Window geometry:", root.geometry())
# Check for Updates
# Runs in the background so the GUI is not blocked by network access.
update_thread = threading.Thread(target=checkForUpdates)
update_thread.start()
# Main Loop
tk.mainloop()
# After the GUI exits, restore the console window on Windows
# (hWnd/user32 only exist on win32 — set in the hide block above).
if sys.platform=='win32':
    if hWnd:
        user32.ShowWindow(hWnd, SW_SHOW) if not wait_mode else False
|
camera.py
|
#!/usr/bin/env python3
import argparse
import threading
import json
import sys
import os
import calendar
from datetime import datetime, timedelta
import signal
import random
import time
import re
import requests
from requests.auth import HTTPDigestAuth
import errno
import paho.mqtt.client as mqtt
from json.decoder import JSONDecodeError
from sensecam_control import vapix_control,vapix_config
import utils
import logging
import coloredlogs
import logging.config # This gets rid of the annoying log messages from Vapix_Control
logging.config.dictConfig({
    'version': 1,
    'disable_existing_loggers': True,
})
# Silence the chatty loggers of the sensecam/vapix camera libraries.
logging.getLogger("vapix_control.py").setLevel(logging.WARNING)
logging.getLogger("vapix_control").setLevel(logging.WARNING)
logging.getLogger("sensecam_control").setLevel(logging.WARNING)
# Random instance id, e.g. for MQTT client naming.
ID = str(random.randint(1,100001))
args = None                  # parsed CLI arguments (set elsewhere)
camera = None                # camera control handle
cameraBearingCorrection = 0  # degrees added to computed bearings
cameraConfig = None
cameraZoom = None
cameraMoveSpeed = None
cameraDelay = None           # seconds to wait before each capture
# NOTE(review): update_config()/calculateCameraPosition() use ``camera_lead``
# (snake_case) while this global is ``cameraLead`` — confirm which is real.
cameraLead = 0
active = False               # True while a plane is being tracked
object_topic = None
flight_topic = None
config_topic = "skyscan/config/json"
bearing = 0 # this is an angle
elevation = 0 # this is an angle
cameraPan = 0 # This value is in angles
cameraTilt = 0 # This values is in angles
distance3d = 0 # this is in Meters
distance2d = 0 # in meters
angularVelocityHorizontal = 0 # NOTE(review): original said "in meters"; an angular rate is presumably degrees/sec — confirm
angularVelocityVertical = 0 # NOTE(review): original said "in meters"; presumably degrees/sec — confirm
planeTrack = 0 # This is the direction that the plane is moving in
currentPlane=None            # dict describing the tracked plane (icao24, type, track, ...)
def calculate_bearing_correction(b):
    """Apply the configured camera bearing offset and wrap into [0, 360)."""
    corrected = b + cameraBearingCorrection
    return corrected % 360
# Copied from VaPix/Sensecam to customize the folder structure for saving pictures
def get_jpeg_request():  # 5.2.4.1
    """
    The requests specified in the JPEG/MJPG section are supported by those video products
    that use JPEG and MJPG encoding.
    Args:
        resolution: Resolution of the returned image. Check the product’s Release notes.
        camera: Selects the source camera or the quad stream.
        square_pixel: Enable/disable square pixel correction. Applies only to video encoders.
        compression: Adjusts the compression level of the image.
        clock: Shows/hides the time stamp. (0 = hide, 1 = show)
        date: Shows/hides the date. (0 = hide, 1 = show)
        text: Shows/hides the text. (0 = hide, 1 = show)
        text_string: The text shown in the image, the string must be URL encoded.
        text_color: The color of the text shown in the image. (black, white)
        text_background_color: The color of the text background shown in the image.
        (black, white, transparent, semitransparent)
        rotation: Rotate the image clockwise.
        text_position: The position of the string shown in the image. (top, bottom)
        overlay_image: Enable/disable overlay image.(0 = disable, 1 = enable)
        overlay_position:The x and y coordinates defining the position of the overlay image.
        (<int>x<int>)
    Returns:
        Success ('image save' and save the image in the file folder) or Failure (Error and
        description).
    """
    payload = {
        'resolution': "1920x1080",
        'compression': 5,
        'camera': 1,
    }
    url = 'http://' + args.axis_ip + '/axis-cgi/jpg/image.cgi'
    start_time = datetime.now()
    try:
        # Short timeout: a slow camera must not stall the tracking loop.
        resp = requests.get(url, auth=HTTPDigestAuth(args.axis_username, args.axis_password), params=payload, timeout=0.5)
    except requests.exceptions.Timeout:
        logging.info("🚨 Images capture request timed out 🚨 ")
        return
    disk_time = datetime.now()
    if resp.status_code == 200:
        # Pictures are grouped per aircraft type.
        captureDir = "capture/{}".format(currentPlane["type"])
        try:
            os.makedirs(captureDir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise # This was not a "directory exist" error..
        # Filename encodes icao24, bearing, elevation, 3D distance and a timestamp.
        filename = "{}/{}_{}_{}_{}_{}.jpg".format(captureDir, currentPlane["icao24"], int(bearing), int(elevation), int(distance3d), datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
        # Original
        with open(filename, 'wb') as var:
            var.write(resp.content)
        #Non-Blocking
        #fd = os.open(filename, os.O_CREAT | os.O_WRONLY | os.O_NONBLOCK)
        #os.write(fd, resp.content)
        #os.close(fd)
        # Blocking
        #fd = os.open(filename, os.O_CREAT | os.O_WRONLY)
        #os.write(fd, resp.content)
        #os.close(fd)
    else:
        logging.error("Unable to fetch image: {}\tstatus: {}".format(url,resp.status_code))
    end_time = datetime.now()
    # Warn when disk writes are slow enough to disturb the capture cadence.
    net_time_diff = (disk_time - start_time)
    disk_time_diff = (end_time - disk_time)
    if disk_time_diff.total_seconds() > 0.1:
        logging.info("🚨 Image Capture Timeout 🚨 Net time: {} \tDisk time: {}".format(net_time_diff, disk_time_diff))
def get_bmp_request():  # 5.2.4.1
    """
    The requests specified in the JPEG/MJPG section are supported by those video products
    that use JPEG and MJPG encoding.
    Args:
        resolution: Resolution of the returned image. Check the product’s Release notes.
        camera: Selects the source camera or the quad stream.
        square_pixel: Enable/disable square pixel correction. Applies only to video encoders.
        compression: Adjusts the compression level of the image.
        clock: Shows/hides the time stamp. (0 = hide, 1 = show)
        date: Shows/hides the date. (0 = hide, 1 = show)
        text: Shows/hides the text. (0 = hide, 1 = show)
        text_string: The text shown in the image, the string must be URL encoded.
        text_color: The color of the text shown in the image. (black, white)
        text_background_color: The color of the text background shown in the image.
        (black, white, transparent, semitransparent)
        rotation: Rotate the image clockwise.
        text_position: The position of the string shown in the image. (top, bottom)
        overlay_image: Enable/disable overlay image.(0 = disable, 1 = enable)
        overlay_position:The x and y coordinates defining the position of the overlay image.
        (<int>x<int>)
    Returns:
        Success ('image save' and save the image in the file folder) or Failure (Error and
        description).
    """
    payload = {
        'resolution': "1920x1080",
        'camera': 1,
    }
    url = 'http://' + args.axis_ip + '/axis-cgi/bitmap/image.bmp'
    # NOTE(review): unlike get_jpeg_request() there is no timeout here —
    # a stalled camera blocks the caller; confirm intended.
    resp = requests.get(url, auth=HTTPDigestAuth(args.axis_username, args.axis_password),
                        params=payload)
    if resp.status_code == 200:
        # Pictures are grouped per aircraft type.
        captureDir = "capture/{}".format(currentPlane["type"])
        try:
            os.makedirs(captureDir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise # This was not a "directory exist" error..
        filename = "{}/{}_{}_{}_{}_{}.bmp".format(captureDir,currentPlane["icao24"],int(bearing),int(elevation),int(distance3d),datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
        with open(filename, 'wb') as var:
            var.write(resp.content)
        return str('Image saved')
    # Non-200: return the response object's repr plus body for diagnostics.
    text = str(resp)
    text += str(resp.text)
    return text
def calculateCameraPosition():
    """Recompute all camera-pointing globals for the currently tracked plane.

    Projects the plane forward by ``camera_lead`` seconds (dead reckoning via
    utils.calc_travel_3d), then derives distances, bearing, elevation,
    angular velocities, and finally the pan/tilt the camera should move to.
    Reads camera_latitude/camera_longitude/camera_altitude/camera_lead,
    which are module globals defined outside this excerpt.
    """
    global cameraPan
    global cameraTilt
    global distance2d
    global distance3d
    global bearing
    global angularVelocityHorizontal
    global angularVelocityVertical
    global elevation
    # Lead the target: predict where the plane will be, not where it is.
    (lat, lon, alt) = utils.calc_travel_3d(currentPlane, camera_lead)
    distance3d = utils.coordinate_distance_3d(camera_latitude, camera_longitude, camera_altitude, lat, lon, alt)
    #(latorig, lonorig) = utils.calc_travel(observation.getLat(), observation.getLon(), observation.getLatLonTime(), observation.getGroundSpeed(), observation.getTrack(), camera_lead)
    distance2d = utils.coordinate_distance(camera_latitude, camera_longitude, lat, lon)
    bearing = utils.bearingFromCoordinate( cameraPosition=[camera_latitude, camera_longitude], airplanePosition=[lat, lon], heading=currentPlane["track"])
    elevation = utils.elevation(distance2d, cameraAltitude=camera_altitude, airplaneAltitude=alt)
    (angularVelocityHorizontal, angularVelocityVertical) = utils.angular_velocity(currentPlane,camera_latitude, camera_longitude, camera_altitude)
    #logging.info("Angular Velocity - Horizontal: {} Vertical: {}".format(angularVelocityHorizontal, angularVelocityVertical))
    # Tilt equals elevation directly; pan gets the configured bearing offset.
    cameraTilt = elevation
    cameraPan = utils.cameraPanFromCoordinate(cameraPosition=[camera_latitude, camera_longitude], airplanePosition=[lat, lon])
    cameraPan = calculate_bearing_correction(cameraPan)
def moveCamera(ip, username, password):
    """Camera control loop; intended to run forever in a daemon thread.

    While a plane is being tracked (module-global ``active`` is True) this
    loop periodically recomputes the pointing solution and issues absolute
    move commands to the Axis camera, and captures an image once per
    capture period.

    Args:
        ip: IP address of the Axis camera.
        username: Username for the camera's HTTP digest auth.
        password: Password for the camera's HTTP digest auth.
    """
    movePeriod = 250  # milliseconds between move commands
    capturePeriod = 1000  # milliseconds between image captures
    moveTimeout = datetime.now()
    captureTimeout = datetime.now()
    camera = vapix_control.CameraControl(ip, username, password)
    while True:
        if active:
            if "icao24" not in currentPlane:
                logging.info(" 🚨 Active but Current Plane is not set")
                # FIX: sleep before retrying. The original `continue`d
                # immediately, busy-spinning a full CPU core and flooding
                # the log while waiting for a plane to be assigned.
                time.sleep(1)
                continue
            if moveTimeout <= datetime.now():
                calculateCameraPosition()
                camera.absolute_move(cameraPan, cameraTilt, cameraZoom, cameraMoveSpeed)
                #logging.info("Moving to Pan: {} Tilt: {}".format(cameraPan, cameraTilt))
                moveTimeout = moveTimeout + timedelta(milliseconds=movePeriod)
                # If the move itself took longer than a whole period,
                # resynchronize the deadline instead of trying to catch up.
                if moveTimeout <= datetime.now():
                    lag = datetime.now() - moveTimeout
                    logging.info(" 🚨 Move execution time was greater that Move Period - lag: {}".format(lag))
                    moveTimeout = datetime.now() + timedelta(milliseconds=movePeriod)
            if captureTimeout <= datetime.now():
                time.sleep(cameraDelay)  # let the camera settle before the shot
                get_jpeg_request()
                captureTimeout = captureTimeout + timedelta(milliseconds=capturePeriod)
                # Same resynchronization logic for the capture deadline.
                if captureTimeout <= datetime.now():
                    lag = datetime.now() - captureTimeout
                    logging.info(" 🚨 Capture execution time was greater that Capture Period - lag: {}".format(lag))
                    captureTimeout = datetime.now() + timedelta(milliseconds=capturePeriod)
            time.sleep(0.005)
        else:
            time.sleep(1)
def update_config(config):
    """Apply camera settings received on the MQTT config topic.

    Recognised keys (all optional; values may arrive as strings and are
    coerced): ``cameraZoom`` (int), ``cameraDelay`` (float),
    ``cameraMoveSpeed`` (int), ``cameraLead`` (float),
    ``cameraAltitude`` (float), ``cameraBearingCorrection`` (float).
    Each recognised key updates the corresponding module global and logs
    the new value; unknown keys are ignored.

    Args:
        config: dict-like message payload.
    """
    global cameraZoom
    global cameraMoveSpeed
    global cameraDelay
    global camera_lead
    global camera_altitude
    global cameraBearingCorrection
    # FIX: removed the dead `global cameraPan` declaration — this function
    # never assigns cameraPan, so the declaration was misleading clutter.
    if "cameraZoom" in config:
        cameraZoom = int(config["cameraZoom"])
        logging.info("Setting Camera Zoom to: {}".format(cameraZoom))
    if "cameraDelay" in config:
        cameraDelay = float(config["cameraDelay"])
        logging.info("Setting Camera Delay to: {}".format(cameraDelay))
    if "cameraMoveSpeed" in config:
        cameraMoveSpeed = int(config["cameraMoveSpeed"])
        logging.info("Setting Camera Move Speed to: {}".format(cameraMoveSpeed))
    if "cameraLead" in config:
        camera_lead = float(config["cameraLead"])
        logging.info("Setting Camera Lead to: {}".format(camera_lead))
    if "cameraAltitude" in config:
        camera_altitude = float(config["cameraAltitude"])
        logging.info("Setting Camera Altitude to: {}".format(camera_altitude))
    if "cameraBearingCorrection" in config:
        cameraBearingCorrection = float(config["cameraBearingCorrection"])
        logging.info("Setting Camera Bearing Correction to: {}".format(cameraBearingCorrection))
#############################################
## MQTT Callback Function ##
#############################################
def on_message(client, userdata, message):
    """Top-level MQTT callback.

    Delegates to on_message_impl and logs any exception instead of letting
    it propagate (an uncaught exception would otherwise kill the paho-mqtt
    network loop silently).
    """
    try:
        result = on_message_impl(client, userdata, message)
    except Exception as exc:
        logging.exception("Error in MQTT message callback: %s", exc)
    else:
        return result
def on_message_impl(client, userdata, message):
    """Handle one MQTT message, dispatching on topic.

    Topics handled: the object topic (tracker x/y updates), the flight
    topic (start/stop tracking a plane), the config topic (camera
    settings) and ``skyscan/egi`` (camera position updates).

    Args:
        client: paho-mqtt client (unused).
        userdata: paho-mqtt userdata (unused).
        message: incoming MQTT message; payload is expected to be UTF-8 JSON.
    """
    global currentPlane
    global object_timeout
    global camera_longitude
    global camera_latitude
    global camera_altitude
    global active
    command = str(message.payload.decode("utf-8"))
    try:
        update = json.loads(command)
    except (TypeError, ValueError) as e:
        # json.JSONDecodeError is a subclass of ValueError, so one clause
        # covers all of the original's overlapping handlers.
        # FIX: return here. The original fell through after logging and then
        # hit a NameError using the undefined `update` below; its final bare
        # `except:` also referenced `e`, which was unbound in that clause.
        logging.exception("Error decoding message as JSON: %s", e)
        return
    if message.topic == object_topic:
        logging.info("Got Object Topic")
        setXY(update["x"], update["y"])
        # Object fixes are considered fresh for 5 seconds.
        object_timeout = time.mktime(time.gmtime()) + 5
    elif message.topic == flight_topic:
        if "icao24" in update:
            if active is False:
                logging.info("{}\t[Starting Capture]".format(update["icao24"]))
            logging.info("{}\t[IMAGE]\tBearing: {} \tElv: {} \tDist: {}".format(update["icao24"],int(update["bearing"]),int(update["elevation"]),int(update["distance"])))
            currentPlane = update
            active = True
        else:
            if active is True:
                logging.info("{}\t[Stopping Capture]".format(currentPlane["icao24"]))
            active = False
            # Deliberately keep the stale currentPlane values: clearing the
            # dict here could race with the moveCamera thread reading it.
            #currentPlane = {}
    elif message.topic == config_topic:
        update_config(update)
        logging.info("Config Message: {}".format(update))
    elif message.topic == "skyscan/egi":
        #logging.info(update)
        camera_longitude = float(update["long"])
        camera_latitude = float(update["lat"])
        camera_altitude = float(update["alt"])
    else:
        logging.info("Message: {} Object: {} Flight: {}".format(message.topic, object_topic, flight_topic))
def main():
    """Parse arguments, start the camera thread, and run the MQTT loop forever."""
    global args
    global logging  # NOTE(review): declared global for historical reasons; never reassigned here
    global camera
    global cameraDelay
    global cameraMoveSpeed
    global cameraZoom
    global cameraPan
    global camera_altitude
    global camera_latitude
    global camera_longitude
    global camera_lead
    global cameraConfig
    global flight_topic
    global object_topic
    parser = argparse.ArgumentParser(description='An MQTT based camera controller')
    parser.add_argument('--lat', type=float, help="Latitude of camera")
    parser.add_argument('--lon', type=float, help="Longitude of camera")
    parser.add_argument('--alt', type=float, help="altitude of camera in METERS!", default=0)
    parser.add_argument('--camera-lead', type=float, help="how many seconds ahead of a plane's predicted location should the camera be positioned", default=0.1)
    parser.add_argument('-m', '--mqtt-host', help="MQTT broker hostname", default='127.0.0.1')
    parser.add_argument('-t', '--mqtt-flight-topic', help="MQTT topic to subscribe to", default="skyscan/flight/json")
    parser.add_argument( '--mqtt-object-topic', help="MQTT topic to subscribe to", default="skyscan/object/json")
    parser.add_argument('-u', '--axis-username', help="Username for the Axis camera", required=True)
    parser.add_argument('-p', '--axis-password', help="Password for the Axis camera", required=True)
    parser.add_argument('-a', '--axis-ip', help="IP address for the Axis camera", required=True)
    parser.add_argument('-s', '--camera-move-speed', type=int, help="The speed at which the Axis will move for Pan/Tilt (0-100)", default=50)
    parser.add_argument('-d', '--camera-delay', type=float, help="How many seconds after issuing a Pan/Tilt command should a picture be taken", default=0)
    parser.add_argument('-z', '--camera-zoom', type=int, help="The zoom setting for the camera (0-9999)", default=9999)
    parser.add_argument('-v', '--verbose', action="store_true", help="Verbose output")
    args = parser.parse_args()
    # FIX: derive the log level once, from the parsed args. The original
    # computed it twice — the second time by scanning sys.argv directly,
    # which missed argparse's abbreviated options (e.g. "--verb").
    level = logging.DEBUG if args.verbose else logging.INFO
    styles = {'critical': {'bold': True, 'color': 'red'}, 'debug': {'color': 'green'}, 'error': {'color': 'red'}, 'info': {'color': 'white'}, 'notice': {'color': 'magenta'}, 'spam': {'color': 'green', 'faint': True}, 'success': {'bold': True, 'color': 'green'}, 'verbose': {'color': 'blue'}, 'warning': {'color': 'yellow'}}
    # FIX: removed the dead `if 1: ... else: ...` — the else branch (a format
    # showing the process name) was unreachable; it is preserved in history.
    coloredlogs.install(level=level, fmt='%(asctime)s.%(msecs)03d \033[0;90m%(levelname)-8s '
                        ''
                        '\033[0;36m%(filename)-18s%(lineno)3d\033[00m '
                        '%(message)s',
                        level_styles = styles)
    logging.info("---[ Starting %s ]---------------------------------------------" % sys.argv[0])
    #camera = vapix_control.CameraControl(args.axis_ip, args.axis_username, args.axis_password)
    cameraDelay = args.camera_delay
    cameraMoveSpeed = args.camera_move_speed
    cameraZoom = args.camera_zoom
    camera_longitude = args.lon
    camera_latitude = args.lat
    camera_altitude = args.alt # Altitude is in METERS
    camera_lead = args.camera_lead
    #cameraConfig = vapix_config.CameraConfiguration(args.axis_ip, args.axis_username, args.axis_password)
    # Run the camera control loop in the background; daemon=True so it dies
    # with the main process.
    threading.Thread(target=moveCamera, args=[args.axis_ip, args.axis_username, args.axis_password],daemon=True).start()
    # Sleep for a bit so we're not hammering the HAT with updates
    time.sleep(0.005)
    flight_topic=args.mqtt_flight_topic
    object_topic = args.mqtt_object_topic
    print("connecting to MQTT broker at "+ args.mqtt_host+", channel '"+flight_topic+"'")
    client = mqtt.Client("skyscan-axis-ptz-camera-" + ID) #create new instance
    client.on_message=on_message #attach function to callback
    client.connect(args.mqtt_host) #connect to broker
    client.loop_start() #start the loop
    client.subscribe(flight_topic)
    client.subscribe(object_topic)
    client.subscribe(config_topic)
    client.subscribe("skyscan/egi")
    client.publish("skyscan/registration", "skyscan-axis-ptz-camera-"+ID+" Registration", 0, False)
    #############################################
    ##                Main Loop                ##
    #############################################
    # Publish a heartbeat every 10 seconds so monitoring can tell we're alive.
    timeHeartbeat = 0
    while True:
        if timeHeartbeat < time.mktime(time.gmtime()):
            timeHeartbeat = time.mktime(time.gmtime()) + 10
            client.publish("skyscan/heartbeat", "skyscan-axis-ptz-camera-"+ID+" Heartbeat", 0, False)
        time.sleep(0.1)
# Entry point: run main() and log any fatal exception (with traceback)
# before the process exits, so failures are visible in the service log.
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        logging.critical(e, exc_info=True)
|
lruqueue3.py
|
"""
Least-recently used (LRU) queue device
Demonstrates use of pyzmq IOLoop reactor
While this example runs in a single process, that is just to make
it easier to start and stop the example. Each thread has its own
context and conceptually acts as a separate process.
Author: Min RK <benjaminrk(at)gmail(dot)com>
Adapted from lruqueue.py by Guillaume Aubert (gaubert) <guillaume(dot)aubert(at)gmail(dot)com>
"""
import threading
import time
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
NBR_CLIENTS = 10
NBR_WORKERS = 3
def worker_thread(worker_url, i):
    """Worker using REQ socket to do LRU routing.

    NOTE: Python 2 code (print statement, ``except X, e`` syntax).
    Connects to the broker's backend, announces readiness, then answers
    each routed request with a fixed 'OK' reply until the context is torn
    down.
    """
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    identity = "Worker-%d" % (i)
    socket.setsockopt(zmq.IDENTITY, identity) #set worker identity
    socket.connect(worker_url)
    # Tell the server we are ready for work
    socket.send("READY")
    try:
        while True:
            # Frames from the broker: [client address][empty delimiter][request]
            address, empty, request = socket.recv_multipart()
            print "%s: %s\n" % (identity, request),
            socket.send_multipart([address, '', 'OK'])
    except zmq.ZMQError, zerr:
        # context terminated so quit silently
        if zerr.strerror == 'Context was terminated':
            return
        else:
            raise zerr
def client_thread(client_url, i):
    """Basic request-reply client using REQ socket.

    NOTE: Python 2 code. Sends a single 'HELLO' request to the broker's
    frontend, prints the reply, and exits.
    """
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    identity = "Client-%d" % (i)
    socket.setsockopt(zmq.IDENTITY, identity) #Set client identity. Makes tracing easier
    socket.connect(client_url)
    # Send request, get reply
    socket.send("HELLO")
    reply = socket.recv()
    print "%s: %s\n" % (identity, reply),
class LRUQueue(object):
    """LRUQueue class using ZMQStream/IOLoop for event dispatching.

    Brokers between a ROUTER frontend (clients) and a ROUTER backend
    (workers), handing each client request to the least-recently-used
    available worker.  Stops the IOLoop once NBR_CLIENTS replies have
    been delivered.
    """
    def __init__(self, backend_socket, frontend_socket):
        # Number of workers currently idle, and their addresses (LRU order:
        # most recently freed worker is at the end of the list).
        self.available_workers = 0
        self.workers = []
        self.client_nbr = NBR_CLIENTS
        self.backend = ZMQStream(backend_socket)
        self.frontend = ZMQStream(frontend_socket)
        self.backend.on_recv(self.handle_backend)
        self.loop = IOLoop.instance()
    def handle_backend(self, msg):
        """Process one message from a worker: requeue it, forward any reply."""
        # Queue worker address for LRU routing
        worker_addr, empty, client_addr = msg[:3]
        assert self.available_workers < NBR_WORKERS
        # add worker back to the list of workers
        self.available_workers += 1
        self.workers.append(worker_addr)
        # Second frame is empty
        assert empty == ""
        # Third frame is READY or else a client reply address
        # If client reply, send rest back to frontend
        if client_addr != "READY":
            empty, reply = msg[3:]
            # Following frame is empty
            assert empty == ""
            self.frontend.send_multipart([client_addr, '', reply])
            self.client_nbr -= 1
            if self.client_nbr == 0:
                # Exit after N messages
                self.loop.add_timeout(time.time()+1, self.loop.stop)
        if self.available_workers == 1:
            # on first recv, start accepting frontend messages
            self.frontend.on_recv(self.handle_frontend)
    def handle_frontend(self, msg):
        """Route one client request to the least-recently-used worker."""
        # Now get next client request, route to LRU worker
        # Client request is [address][empty][request]
        client_addr, empty, request = msg
        assert empty == ""
        # Dequeue and drop the next worker address
        self.available_workers -= 1
        worker_id = self.workers.pop()
        self.backend.send_multipart([worker_id, '', client_addr, '', request])
        if self.available_workers == 0:
            # stop receiving until workers become available again
            self.frontend.stop_on_recv()
def main():
    """main method

    Binds the frontend (clients) and backend (workers) ROUTER sockets over
    IPC, spawns the worker and client threads, then runs the IOLoop reactor
    until the LRUQueue stops it.
    """
    url_worker = "ipc://backend.ipc"
    url_client = "ipc://frontend.ipc"
    # Prepare our context and sockets
    context = zmq.Context()
    frontend = context.socket(zmq.ROUTER)
    frontend.bind(url_client)
    backend = context.socket(zmq.ROUTER)
    backend.bind(url_worker)
    # create workers and clients threads (daemonized so the process can exit
    # when the reactor stops)
    for i in range(NBR_WORKERS):
        thread = threading.Thread(target=worker_thread, args=(url_worker, i, ))
        thread.daemon = True
        thread.start()
    for i in range(NBR_CLIENTS):
        thread_c = threading.Thread(target=client_thread, args=(url_client, i, ))
        thread_c.daemon = True
        thread_c.start()
    # create queue with the sockets
    queue = LRUQueue(backend, frontend)
    # start reactor
    IOLoop.instance().start()
# Script entry point.
if __name__ == "__main__":
    main()
|
net09_web_PWB7.py
|
"""linux平台 epoll 实现web服务器 多线程"""
'''
@Time : 2018/1/24 下午4:12
@Author : scrappy_zhang
@File : net09_web_PWB7.py
'''
import socket
import select
import re
import threading
SERVER_ADDR = (HOST, PORT) = '', 8888 # 服务器地址
VERSION = 7.0 # web服务器版本号
STATIC_PATH = './static/'
class HTTPServer():
    """Minimal multi-threaded static web server built on Linux epoll.

    The listening socket and every client socket are registered with one
    epoll instance in edge-triggered mode; each readable client event is
    handled on its own thread.  (Comments translated from Chinese.)
    """
    def __init__(self, server_address):
        """Create, bind and register the listening TCP socket."""
        self.tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.tcp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.tcp_socket.bind(server_address)
        self.tcp_socket.listen(128)
        self.tcp_socket.setblocking(False)  # non-blocking accept socket
        self.epoll = select.epoll()  # one epoll instance for everything
        # Register the listening socket for edge-triggered read events.
        self.epoll.register(self.tcp_socket.fileno(), select.EPOLLIN | select.EPOLLET)
        self.connections = {}  # fd -> client socket
        self.addresses = {}    # fd -> client address
    def serve_forever(self):
        """Accept/dispatch loop: run forever, spawning a thread per request."""
        while True:
            epoll_list = self.epoll.poll()
            for fd, events in epoll_list:
                if fd == self.tcp_socket.fileno():
                    # Event on the listening socket: accept the new client
                    # and register it for edge-triggered reads.
                    new_client_socket, new_client_address = self.tcp_socket.accept()
                    print(new_client_address, '向服务器发起了请求')
                    self.connections[new_client_socket.fileno()] = new_client_socket
                    self.addresses[new_client_socket.fileno()] = new_client_address
                    self.epoll.register(new_client_socket.fileno(), select.EPOLLIN | select.EPOLLET)
                elif events & select.EPOLLIN:
                    # FIX: test the EPOLLIN bit instead of comparing with ==.
                    # epoll may deliver a combined mask (e.g. EPOLLIN|EPOLLHUP),
                    # which the original equality test silently dropped.
                    td = threading.Thread(target=self.handlerequest, args=(fd,))
                    td.start()
    def handlerequest(self, fd):
        """Read one HTTP request on *fd*, send the response, close the connection."""
        # Read the browser's request (empty read means the peer closed).
        request_data = self.connections[fd].recv(2048).decode('utf-8')
        if not request_data:
            # Client closed the connection: deregister and drop our state.
            self.epoll.unregister(fd)
            self.connections[fd].close()  # server side closes fd
            print("%s---offline---" % str(self.addresses[fd]))
            del self.connections[fd]
            del self.addresses[fd]
        else:
            request_header_lines = request_data.splitlines()
            # Parse the request line ("GET /path HTTP/1.1") to get the path.
            pattern = r'[^/]+(/[^ ]*)'
            request_html_name = re.match(pattern, request_header_lines[0]).group(1)
            # Map the URL path onto the static document root.
            if request_html_name == '/':
                request_html_name = STATIC_PATH + 'baidu.html'
            else:
                request_html_name = STATIC_PATH + request_html_name
            try:
                html_file = open(request_html_name, 'rb')
            except FileNotFoundError:
                # File does not exist: reply 404.
                resp_headers = 'HTTP/1.1 404 not found\r\n'
                resp_headers += "Server: PWB" + str(VERSION) + '\r\n'
                resp_headers += '\r\n'
                resp_body = '==== 404 file not found===='.encode('utf-8')
            else:
                # File exists: reply 200 with its contents.
                resp_headers = "HTTP/1.1 200 OK\r\n"
                resp_headers += "Server: PWB" + str(VERSION) + '\r\n'
                resp_headers += '\r\n'
                resp_body = html_file.read()
                html_file.close()
            # FIX: assemble and send the response here instead of in a
            # `finally` block — the original's finally referenced
            # resp_headers/resp_body, which are unbound when open() raises
            # anything other than FileNotFoundError (e.g. PermissionError),
            # turning that error into a masking NameError.
            resp_data = resp_headers.encode('utf-8') + resp_body
            self.connections[fd].send(resp_data)
            # HTTP short connection: close and deregister after responding.
            self.epoll.unregister(fd)
            self.connections[fd].close()  # server side closes fd
            print("%s--web请求响应完毕-offline---" % str(self.addresses[fd]))
            del self.connections[fd]
            del self.addresses[fd]
def run():
    """Construct the PWB web server and serve requests forever."""
    server = HTTPServer(SERVER_ADDR)
    print('web server:PWB %s on port %d...\n' % (VERSION, PORT))
    server.serve_forever()
# Script entry point.
if __name__ == '__main__':
    run()
|
plugin.py
|
from binascii import hexlify, unhexlify
from electrum_lcc.util import bfh, bh2u
from electrum_lcc.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT,
is_segwit_address)
from electrum_lcc import constants
from electrum_lcc.i18n import _
from electrum_lcc.plugins import BasePlugin
from electrum_lcc.transaction import deserialize, Transaction
from electrum_lcc.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_lcc.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
# TIM_NEW: generate a fresh seed on-device; TIM_RECOVER: restore from a
# written-down seed; TIM_MNEMONIC: upload a BIP39 mnemonic;
# TIM_PRIVKEY: upload a master private key.
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKeyCompatibleKeyStore(Hardware_KeyStore):
    """Keystore backed by a KeepKey-compatible hardware device.

    All private-key operations are delegated to the device via the
    plugin's client object.
    """
    def get_derivation(self):
        """Return the BIP32 derivation path prefix for this keystore."""
        return self.derivation
    def is_segwit(self):
        """True if this keystore derives p2wpkh-in-p2sh addresses (BIP49 path)."""
        return self.derivation.startswith("m/49'/")
    def get_client(self, force_pair=True):
        """Return a (possibly newly paired) device client via the plugin."""
        return self.plugin.get_client(self, force_pair)
    def decrypt_message(self, sequence, message, password):
        # Hardware devices of this family do not support message decryption.
        raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
    def sign_message(self, sequence, message, password):
        """Sign *message* on-device with the key at derivation/<change>/<index>."""
        client = self.get_client()
        address_path = self.get_derivation() + "/%d/%d"%sequence
        address_n = client.expand_path(address_path)
        msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
        return msg_sig.signature
    def sign_transaction(self, tx, password):
        """Sign *tx* on-device; collects previous txs and xpub paths first.

        Raises if a legacy (non-segwit) input lacks its previous transaction,
        since the device needs it to verify the input amount.
        """
        if tx.is_complete():
            return
        # previous transactions used as inputs
        prev_tx = {}
        # path of the xpubs that are involved
        xpub_path = {}
        for txin in tx.inputs():
            pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
            tx_hash = txin['prevout_hash']
            if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
                raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
            prev_tx[tx_hash] = txin['prev_tx']
            for x_pubkey in x_pubkeys:
                if not is_xpubkey(x_pubkey):
                    continue
                xpub, s = parse_xpubkey(x_pubkey)
                if xpub == self.get_master_public_key():
                    xpub_path[xpub] = self.get_derivation()
        self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyCompatiblePlugin(HW_PluginBase):
    """Base plugin for KeepKey-compatible (TREZOR-protocol) hardware wallets.

    Handles device discovery/connection, initialization wizards, and
    translation between Electrum transactions and the device's protobuf
    transaction types.
    """
    # Derived classes provide:
    #
    # class-static variables: client_class, firmware_URL, handler_class,
    #     libraries_available, libraries_URL, minimum_firmware,
    #     wallet_class, ckd_public, types, HidTransport
    MAX_LABEL_LEN = 32
    def __init__(self, parent, config, name):
        HW_PluginBase.__init__(self, parent, config, name)
        if self.libraries_available:
            self.device_manager().register_devices(self.DEVICE_IDS)
    def _try_hid(self, device):
        """Attempt a raw USB HID connection; return a transport or None."""
        self.print_error("Trying to connect over USB...")
        if device.interface_number == 1:
            pair = [None, device.path]
        else:
            pair = [device.path, None]
        try:
            return self.hid_transport(pair)
        except BaseException as e:
            # see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
            # raise
            self.print_error("cannot connect at", device.path, str(e))
            return None
    def _try_bridge(self, device):
        """Attempt a Trezor Bridge connection; return a transport or None."""
        self.print_error("Trying to connect over Trezor Bridge...")
        try:
            return self.bridge_transport({'path': hexlify(device.path)})
        except BaseException as e:
            self.print_error("cannot connect to bridge", str(e))
            return None
    def create_client(self, device, handler):
        """Connect to *device*, sanity-check it, and return a client or None."""
        # disable bridge because it seems to never returns if KeepKey is plugged
        #transport = self._try_bridge(device) or self._try_hid(device)
        transport = self._try_hid(device)
        if not transport:
            self.print_error("cannot connect to device")
            return
        self.print_error("connected to device at", device.path)
        client = self.client_class(transport, handler, self)
        # Try a ping for device sanity
        try:
            client.ping('t')
        except BaseException as e:
            self.print_error("ping failed", str(e))
            return None
        # Refuse firmware older than the minimum this plugin supports.
        if not client.atleast_version(*self.minimum_firmware):
            msg = (_('Outdated {} firmware for device labelled {}. Please '
                     'download the updated firmware from {}')
                   .format(self.device, client.label(), self.firmware_URL))
            self.print_error(msg)
            handler.show_error(msg)
            return None
        return client
    def get_client(self, keystore, force_pair=True):
        """Return the (marked-used) client paired with *keystore*, if any."""
        devmgr = self.device_manager()
        handler = keystore.handler
        with devmgr.hid_lock:
            client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
        # returns the client for a given keystore. can use xpub
        if client:
            client.used()
        return client
    def get_coin_name(self):
        """Coin name string as understood by the device firmware."""
        return "Testnet" if constants.net.TESTNET else "Litecoin Cash"
    def initialize_device(self, device_id, wizard, handler):
        """Ask the user how to initialize the device, then run it in a thread."""
        # Initialization method
        msg = _("Choose how you want to initialize your {}.\n\n"
                "The first two methods are secure as no secret information "
                "is entered into your computer.\n\n"
                "For the last two methods you input secrets on your keyboard "
                "and upload them to your {}, and so you should "
                "only do those on a computer you know to be trustworthy "
                "and free of malware."
                ).format(self.device, self.device)
        choices = [
            # Must be short as QT doesn't word-wrap radio button text
            (TIM_NEW, _("Let the device generate a completely new seed randomly")),
            (TIM_RECOVER, _("Recover from a seed you have previously written down")),
            (TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
            (TIM_PRIVKEY, _("Upload a master private key"))
        ]
        def f(method):
            import threading
            settings = self.request_trezor_init_settings(wizard, method, self.device)
            # Run device initialization off the GUI thread; the wizard's
            # event loop is spun until _initialize_device exits it.
            t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
            t.setDaemon(True)
            t.start()
            wizard.loop.exec_()
        wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
    def _initialize_device(self, settings, method, device_id, wizard, handler):
        """Perform the chosen initialization (new/recover/mnemonic/xprv) on-device."""
        item, label, pin_protection, passphrase_protection = settings
        language = 'english'
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        if method == TIM_NEW:
            strength = 64 * (item + 2)  # 128, 192 or 256
            client.reset_device(True, strength, passphrase_protection,
                                pin_protection, label, language)
        elif method == TIM_RECOVER:
            word_count = 6 * (item + 2)  # 12, 18 or 24
            client.step = 0
            client.recovery_device(word_count, passphrase_protection,
                                   pin_protection, label, language)
        elif method == TIM_MNEMONIC:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_mnemonic(str(item), pin,
                                           passphrase_protection,
                                           label, language)
        else:
            pin = pin_protection  # It's the pin, not a boolean
            client.load_device_by_xprv(item, pin, passphrase_protection,
                                       label, language)
        # Release the wizard's event loop started in initialize_device().
        wizard.loop.exit(0)
    def setup_device(self, device_info, wizard, purpose):
        """Prepare a device for use with a wizard; initialize it if needed."""
        devmgr = self.device_manager()
        device_id = device_info.device.id_
        client = devmgr.client_by_id(device_id)
        # fixme: we should use: client.handler = wizard
        client.handler = self.create_handler(wizard)
        if not device_info.initialized:
            self.initialize_device(device_id, wizard, client.handler)
        client.get_xpub('m', 'standard')
        client.used()
    def get_xpub(self, device_id, derivation, xtype, wizard):
        """Fetch the xpub at *derivation* from the device (standard scripts only)."""
        if xtype not in ('standard',):
            raise ScriptTypeNotSupported(_('This type of script is not supported with KeepKey.'))
        devmgr = self.device_manager()
        client = devmgr.client_by_id(device_id)
        client.handler = wizard
        xpub = client.get_xpub(derivation, xtype)
        client.used()
        return xpub
    def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
        """Have the device sign *tx* and merge the signatures back into it."""
        self.prev_tx = prev_tx
        self.xpub_path = xpub_path
        client = self.get_client(keystore)
        inputs = self.tx_inputs(tx, True, keystore.is_segwit())
        outputs = self.tx_outputs(keystore.get_derivation(), tx, keystore.is_segwit())
        signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
        raw = bh2u(signed_tx)
        tx.update_signatures(raw)
    def show_address(self, wallet, address):
        """Display *address* on the device screen for user verification."""
        client = self.get_client(wallet.keystore)
        if not client.atleast_version(1, 3):
            wallet.keystore.handler.show_error(_("Your device firmware is too old"))
            return
        change, index = wallet.get_address_index(address)
        derivation = wallet.keystore.derivation
        address_path = "%s/%d/%d"%(derivation, change, index)
        address_n = client.expand_path(address_path)
        segwit = wallet.keystore.is_segwit()
        script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
        client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
    def tx_inputs(self, tx, for_sig=False, segwit=False):
        """Convert Electrum tx inputs into the device's TxInputType protos.

        When *for_sig* is True, derivation info (address_n / multisig) is
        attached so the device can sign; otherwise only outpoint data is set.
        """
        inputs = []
        for txin in tx.inputs():
            txinputtype = self.types.TxInputType()
            if txin['type'] == 'coinbase':
                prev_hash = "\0"*32
                prev_index = 0xffffffff  # signed int -1
            else:
                if for_sig:
                    x_pubkeys = txin['x_pubkeys']
                    if len(x_pubkeys) == 1:
                        # Single-sig input: derive address_n from our xpub path.
                        x_pubkey = x_pubkeys[0]
                        xpub, s = parse_xpubkey(x_pubkey)
                        xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                        txinputtype.address_n.extend(xpub_n + s)
                        txinputtype.script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDADDRESS
                    else:
                        # Multisig input: build the redeem script description.
                        def f(x_pubkey):
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                            else:
                                xpub = xpub_from_pubkey(0, bfh(x_pubkey))
                                s = []
                            node = self.ckd_public.deserialize(xpub)
                            return self.types.HDNodePathType(node=node, address_n=s)
                        pubkeys = map(f, x_pubkeys)
                        multisig = self.types.MultisigRedeemScriptType(
                            pubkeys=pubkeys,
                            signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
                            m=txin.get('num_sig'),
                        )
                        script_type = self.types.SPENDP2SHWITNESS if segwit else self.types.SPENDMULTISIG
                        txinputtype = self.types.TxInputType(
                            script_type=script_type,
                            multisig=multisig
                        )
                        # find which key is mine
                        for x_pubkey in x_pubkeys:
                            if is_xpubkey(x_pubkey):
                                xpub, s = parse_xpubkey(x_pubkey)
                                if xpub in self.xpub_path:
                                    xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
                                    txinputtype.address_n.extend(xpub_n + s)
                                    break
                prev_hash = unhexlify(txin['prevout_hash'])
                prev_index = txin['prevout_n']
            if 'value' in txin:
                txinputtype.amount = txin['value']
            txinputtype.prev_hash = prev_hash
            txinputtype.prev_index = prev_index
            if 'scriptSig' in txin:
                script_sig = bfh(txin['scriptSig'])
                txinputtype.script_sig = script_sig
            txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
            inputs.append(txinputtype)
        return inputs
    def tx_outputs(self, derivation, tx, segwit=False):
        """Convert Electrum tx outputs into the device's TxOutputType protos.

        Outputs belonging to our own wallet (present in tx.output_info) may
        be described by derivation path so the device treats them as change
        and does not display them; at most one such output is hidden.
        """
        def create_output_by_derivation(info):
            # Build an output from derivation info (our own/change output).
            index, xpubs, m = info
            if len(xpubs) == 1:
                script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOADDRESS
                address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
                txoutputtype = self.types.TxOutputType(
                    amount=amount,
                    script_type=script_type,
                    address_n=address_n,
                )
            else:
                script_type = self.types.PAYTOP2SHWITNESS if segwit else self.types.PAYTOMULTISIG
                address_n = self.client_class.expand_path("/%d/%d" % index)
                nodes = map(self.ckd_public.deserialize, xpubs)
                pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
                multisig = self.types.MultisigRedeemScriptType(
                    pubkeys=pubkeys,
                    signatures=[b''] * len(pubkeys),
                    m=m)
                txoutputtype = self.types.TxOutputType(
                    multisig=multisig,
                    amount=amount,
                    address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
                    script_type=script_type)
            return txoutputtype
        def create_output_by_address():
            # Build an output from its literal address / script.
            # NOTE: reads _type, address, amount from the enclosing loop.
            txoutputtype = self.types.TxOutputType()
            txoutputtype.amount = amount
            if _type == TYPE_SCRIPT:
                txoutputtype.script_type = self.types.PAYTOOPRETURN
                txoutputtype.op_return_data = address[2:]
            elif _type == TYPE_ADDRESS:
                if is_segwit_address(address):
                    txoutputtype.script_type = self.types.PAYTOWITNESS
                else:
                    addrtype, hash_160 = b58_address_to_hash160(address)
                    if addrtype == constants.net.ADDRTYPE_P2PKH:
                        txoutputtype.script_type = self.types.PAYTOADDRESS
                    elif addrtype == constants.net.ADDRTYPE_P2SH:
                        txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
                    else:
                        raise Exception('addrtype: ' + str(addrtype))
                txoutputtype.address = address
            return txoutputtype
        def is_any_output_on_change_branch():
            # True if any of our outputs is on the change branch (index[0] == 1).
            for _type, address, amount in tx.outputs():
                info = tx.output_info.get(address)
                if info is not None:
                    index, xpubs, m = info
                    if index[0] == 1:
                        return True
            return False
        outputs = []
        has_change = False
        any_output_on_change_branch = is_any_output_on_change_branch()
        for _type, address, amount in tx.outputs():
            use_create_by_derivation = False
            info = tx.output_info.get(address)
            if info is not None and not has_change:
                index, xpubs, m = info
                on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from user
                # because no more than one change address allowed
                if on_change_branch == any_output_on_change_branch:
                    use_create_by_derivation = True
                    has_change = True
            if use_create_by_derivation:
                txoutputtype = create_output_by_derivation(info)
            else:
                txoutputtype = create_output_by_address()
            outputs.append(txoutputtype)
        return outputs
    def electrum_tx_to_txtype(self, tx):
        """Convert a previous Electrum transaction to the device's TransactionType."""
        t = self.types.TransactionType()
        d = deserialize(tx.raw)
        t.version = d['version']
        t.lock_time = d['lockTime']
        inputs = self.tx_inputs(tx)
        t.inputs.extend(inputs)
        for vout in d['outputs']:
            o = t.bin_outputs.add()
            o.amount = vout['value']
            o.script_pubkey = bfh(vout['scriptPubKey'])
        return t
    # This function is called from the TREZOR libraries (via tx_api)
    def get_tx(self, tx_hash):
        """Return the previous transaction *tx_hash* in device format."""
        tx = self.prev_tx[tx_hash]
        return self.electrum_tx_to_txtype(tx)
|
server.py
|
import BaseHTTPServer
import errno
import logging
import os
import re
import socket
from SocketServer import ThreadingMixIn
import ssl
import sys
import threading
import time
import traceback
import types
import urlparse
import routes as default_routes
from request import Server, Request
from response import Response
from router import Router
from utils import HTTPException
logger_access = logging.getLogger("access")
logger_error = logging.getLogger("error")
logger = logging.getLogger("wptserve")
logger.setLevel(logging.DEBUG)
"""HTTP server designed for testing purposes.
The server is designed to provide flexibility in the way that
requests are handled, and to provide control both of exactly
what bytes are put on the wire for the response, and in the
timing of sending those bytes.
The server is based on the stdlib HTTPServer, but with some
notable differences in the way that requests are processed.
Overall processing is handled by a WebTestRequestHandler,
which is a subclass of BaseHTTPRequestHandler. This is responsible
for parsing the incoming request. A RequestRewriter is then
applied and may change the request data if it matches a
supplied rule.
Once the request data has been finalised, Request and Response
objects are constructed. These are used by the other parts of the
system to read information about the request and manipulate the
response.
Each request is handled by a particular handler function. The
mapping between Request and the appropriate handler is determined
by a Router. By default handlers are installed to interpret files
under the document root with .py extensions as executable python
files (see handlers.py for the api for such files), .asis files as
bytestreams to be sent literally and all other files to be served
statically.
The handler functions are responsible for either populating the
fields of the response object, which will then be written when the
handler returns, or for directly writing to the output stream.
"""
class RequestRewriter(object):
    """Rewrites request paths that match registered (method, path) rules.

    NOTE: Python 2 code (``urlparse`` module, ``types.StringTypes``).
    """
    def __init__(self, rules):
        """Object for rewriting the request path.

        :param rules: Initial rules to add; a list of three item tuples
                      (method, input_path, output_path), defined as for
                      register()
        """
        # Maps input_path -> (methods, output_path).  Rules are registered in
        # reverse order so that earlier entries in *rules* take precedence
        # (later register() calls overwrite the dict entry).
        self.rules = {}
        for rule in reversed(rules):
            self.register(*rule)
    def register(self, methods, input_path, output_path):
        """Register a rewrite rule.

        :param methods: Set of methods this should match. "*" is a
                        special value indicating that all methods should
                        be matched.
        :param input_path: Path to match for the initial request.
        :param output_path: Path to replace the input path with in
                            the request.
        """
        # Accept a single method string as shorthand for a one-element list.
        if type(methods) in types.StringTypes:
            methods = [methods]
        self.rules[input_path] = (methods, output_path)
    def rewrite(self, request_handler):
        """Rewrite the path in a BaseHTTPRequestHandler instance, if
        it matches a rule.

        :param request_handler: BaseHTTPRequestHandler for which to
                                rewrite the request.
        """
        split_url = urlparse.urlsplit(request_handler.path)
        if split_url.path in self.rules:
            methods, destination = self.rules[split_url.path]
            if "*" in methods or request_handler.command in methods:
                logger.debug("Rewriting request path %s to %s" %
                             (request_handler.path, destination))
                # Replace only the path component, preserving query/fragment.
                new_url = list(split_url)
                new_url[2] = destination
                new_url = urlparse.urlunsplit(new_url)
                request_handler.path = new_url
class WebTestServer(ThreadingMixIn, BaseHTTPServer.HTTPServer):
    """Threaded HTTP(S) server with request rewriting and optional latency."""
    allow_reuse_address = True
    # Socket errors treated as a normal remote hang-up rather than logged.
    acceptable_errors = (errno.EPIPE, errno.ECONNABORTED)
    request_queue_size = 2000

    # Ensure that we don't hang on shutdown waiting for requests
    daemon_threads = True

    def __init__(self, server_address, RequestHandlerClass, router, rewriter, bind_hostname,
                 config=None, use_ssl=False, key_file=None, certificate=None,
                 encrypt_after_connect=False, latency=None, **kwargs):
        """Server for HTTP(s) Requests

        :param server_address: tuple of (server_name, port)

        :param RequestHandlerClass: BaseHTTPRequestHandler-like class to use for
                                    handling requests.

        :param router: Router instance to use for matching requests to handler
                       functions

        :param rewriter: RequestRewriter-like instance to use for preprocessing
                         requests before they are routed

        :param config: Dictionary holding environment configuration settings for
                       handlers to read, or None to use the default values.

        :param use_ssl: Boolean indicating whether the server should use SSL

        :param key_file: Path to key file to use if SSL is enabled.

        :param certificate: Path to certificate to use if SSL is enabled.

        :param encrypt_after_connect: For each connection, don't start encryption
                                      until a CONNECT message has been received.
                                      This enables the server to act as a
                                      self-proxy.

        :param bind_hostname: True to bind the server to both the hostname and
                              port specified in the server_address parameter.
                              False to bind the server only to the port in the
                              server_address parameter, but not to the hostname.

        :param latency: Delay in ms to wait before serving each response, or
                        callable that returns a delay in ms
        """
        self.router = router
        self.rewriter = rewriter

        self.scheme = "https" if use_ssl else "http"

        self.latency = latency

        if bind_hostname:
            hostname_port = server_address
        else:
            hostname_port = ("", server_address[1])

        #super doesn't work here because BaseHTTPServer.HTTPServer is old-style
        BaseHTTPServer.HTTPServer.__init__(self, hostname_port, RequestHandlerClass, **kwargs)

        if config is not None:
            Server.config = config
        else:
            logger.debug("Using default configuration")
            Server.config = {"host": server_address[0],
                             "domains": {"": server_address[0]},
                             "ports": {"http": [self.server_address[1]]}}

        self.key_file = key_file
        self.certificate = certificate
        self.encrypt_after_connect = use_ssl and encrypt_after_connect

        if use_ssl and not encrypt_after_connect:
            # NOTE(review): ssl_version=3 is a raw protocol constant
            # (presumably PROTOCOL_TLSv1 in the py2 ssl module) — confirm
            # before changing.
            self.socket = ssl.wrap_socket(self.socket,
                                          keyfile=self.key_file,
                                          certfile=self.certificate,
                                          ssl_version=3,
                                          ciphers="ALL:!COMPLEMENTOFDEFAULT:!eNULL:!aNULL:!LOW:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS:!DH:!RC4;",
                                          server_side=True)

    def handle_error(self, request, client_address):
        """Log unexpected per-request errors; silently ignore remote hang-ups."""
        # BUG FIX: was `sys.exc_value`, a deprecated *global* that is not
        # thread-safe.  With ThreadingMixIn this could observe another
        # thread's exception; sys.exc_info() is per-thread.
        error = sys.exc_info()[1]

        if ((isinstance(error, socket.error) and
             isinstance(error.args, tuple) and
             error.args[0] in self.acceptable_errors)
            or
            (isinstance(error, IOError) and
             error.errno in self.acceptable_errors)):
            pass  # remote hang up before the result is sent
        else:
            datatime = time.strftime("%d/%b/%Y:%H:%M:%S %z")
            logger_error.error("[%s] [error] [client %s] %s" % (datatime, client_address, traceback.format_exc()))
class WebTestRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    """RequestHandler for WebTestHttpd"""
    protocol_version = "HTTP/1.1"

    def handle_one_request(self):
        # Full lifecycle of a single request: read/validate the request
        # line and headers, apply rewrites, route to a handler, write the
        # response and emit access/error log entries.
        response = None
        try:
            self.close_connection = False
            request_line_is_valid = self.get_request_line()

            if self.close_connection:
                return

            request_is_valid = self.parse_request()
            if not request_is_valid:
                #parse_request() actually sends its own error responses
                return

            self.server.rewriter.rewrite(self)

            request = Request(self)
            response = Response(self, request)

            if request.method == "CONNECT":
                self.handle_connect(response)
                return

            if not request_line_is_valid:
                # Request line exceeded the 64k limit enforced by
                # get_request_line: reply 414 (Request-URI Too Long).
                response.set_error(414)
                response.write()
                return

            handler = self.server.router.get_handler(request)

            # If the handler we used for the request had a non-default base path
            # set update the doc_root of the request to reflect this
            if hasattr(handler, "base_path") and handler.base_path:
                request.doc_root = handler.base_path
            if hasattr(handler, "url_base") and handler.url_base != "/":
                request.url_base = handler.url_base

            if self.server.latency is not None:
                if callable(self.server.latency):
                    latency = self.server.latency()
                else:
                    latency = self.server.latency
                logger.warning("Latency enabled. Sleeping %i ms" % latency)
                time.sleep(latency / 1000.)

            if handler is None:
                response.set_error(404)
            else:
                try:
                    handler(request, response)
                except HTTPException as e:
                    response.set_error(e.code, e.message)
                except Exception as e:
                    # Include the handler's message (if any) plus the full
                    # traceback in the 500 body to aid test debugging.
                    if e.message:
                        err = [e.message]
                    else:
                        err = []
                    err.append(traceback.format_exc())
                    response.set_error(500, "\n".join(err))
            datatime = time.strftime("%d/%b/%Y:%H:%M:%S %z")
            # Common-log-format style access log entry.
            logger_access.debug("%s - - [%s] \"%s %s %s\" %i %i %s" % (
                request.client_address,
                datatime,
                request.method,
                request.request_path,
                request.protocol_version,
                response.status[0],
                request.raw_input.length,
                request.user_agent))
            if not response.writer.content_written:
                response.write()

            # If we want to remove this in the future, a solution is needed for
            # scripts that produce a non-string iterable of content, since these
            # can't set a Content-Length header. A notable example of this kind of
            # problem is with the trickle pipe i.e. foo.js?pipe=trickle(d1)
            if response.close_connection:
                self.close_connection = True

            if not self.close_connection:
                # Ensure that the whole request has been read from the socket
                request.raw_input.read()
        except socket.timeout, e:
            self.log_error("Request timed out: %r", e)
            self.close_connection = True
            return
        except Exception as e:
            # Last-resort error path: anything that escaped the per-handler
            # try above (e.g. errors while constructing Request/Response).
            err = traceback.format_exc()
            if response:
                response.set_error(500, err)
                response.write()
            datatime = time.strftime("%a %b %d %H:%M:%S %Y")
            logger_error.error("[%s] [error] [client -] %s" % (datatime, err))

    def get_request_line(self):
        """Read the raw request line.

        Returns False when the line exceeds 64k (caller replies 414);
        True otherwise.  Sets close_connection on socket error or EOF.
        """
        try:
            self.raw_requestline = self.rfile.readline(65537)
        except socket.error:
            self.close_connection = True
            return False
        if len(self.raw_requestline) > 65536:
            self.requestline = ''
            self.request_version = ''
            self.command = ''
            return False
        if not self.raw_requestline:
            self.close_connection = True
        return True

    def handle_connect(self, response):
        # Acknowledge a proxy-style CONNECT and, when configured, upgrade
        # the already-open socket to SSL before continuing to serve it.
        logger.debug("Got CONNECT")
        response.status = 200
        response.write()
        if self.server.encrypt_after_connect:
            logger.debug("Enabling SSL for connection")
            self.request = ssl.wrap_socket(self.connection,
                                           keyfile=self.server.key_file,
                                           certfile=self.server.certificate,
                                           server_side=True)
            # Re-run setup() so rfile/wfile wrap the new SSL socket.
            self.setup()
        return
class WebTestHttpd(object):
    """
    :param host: Host from which to serve (default: 127.0.0.1)
    :param port: Port from which to serve (default: 8000)
    :param server_cls: Class to use for the server (default depends on ssl vs non-ssl)
    :param handler_cls: Class to use for the RequestHandler
    :param use_ssl: Use a SSL server if no explicit server_cls is supplied
    :param key_file: Path to key file to use if ssl is enabled
    :param certificate: Path to certificate file to use if ssl is enabled
    :param encrypt_after_connect: For each connection, don't start encryption
                                  until a CONNECT message has been received.
                                  This enables the server to act as a
                                  self-proxy.
    :param router_cls: Router class to use when matching URLs to handlers
    :param doc_root: Document root for serving files
    :param routes: List of routes with which to initialize the router
    :param rewriter_cls: Class to use for request rewriter
    :param rewrites: List of rewrites with which to initialize the rewriter_cls
    :param config: Dictionary holding environment configuration settings for
                   handlers to read, or None to use the default values.
    :param bind_hostname: Boolean indicating whether to bind server to hostname.
    :param latency: Delay in ms to wait before serving each response, or
                    callable that returns a delay in ms

    HTTP server designed for testing scenarios.

    Takes a router class which provides one method get_handler which takes a Request
    and returns a handler function.

    .. attribute:: host

      The host name or ip address of the server

    .. attribute:: port

      The port on which the server is running

    .. attribute:: router

      The Router object used to associate requests with resources for this server

    .. attribute:: rewriter

      The Rewriter object used for URL rewriting

    .. attribute:: use_ssl

      Boolean indicating whether the server is using ssl

    .. attribute:: started

      Boolean indicating whether the server is running
    """
    def __init__(self, host="127.0.0.1", port=8000,
                 server_cls=None, handler_cls=WebTestRequestHandler,
                 use_ssl=False, key_file=None, certificate=None, encrypt_after_connect=False,
                 router_cls=Router, doc_root=os.curdir, routes=None,
                 rewriter_cls=RequestRewriter, bind_hostname=True, rewrites=None,
                 latency=None, config=None):
        if routes is None:
            routes = default_routes.routes

        self.host = host

        self.router = router_cls(doc_root, routes)
        self.rewriter = rewriter_cls(rewrites if rewrites is not None else [])

        self.use_ssl = use_ssl

        if server_cls is None:
            server_cls = WebTestServer

        if use_ssl:
            # A certificate is mandatory for SSL; the key file is optional
            # (it may be bundled inside the certificate file).
            if key_file is not None:
                assert os.path.exists(key_file)
            assert certificate is not None and os.path.exists(certificate)

        try:
            self.httpd = server_cls((host, port),
                                    handler_cls,
                                    self.router,
                                    self.rewriter,
                                    config=config,
                                    bind_hostname=bind_hostname,
                                    use_ssl=use_ssl,
                                    key_file=key_file,
                                    certificate=certificate,
                                    encrypt_after_connect=encrypt_after_connect,
                                    latency=latency)
            self.started = False

            # Read the port back from the socket so port=0 (ephemeral)
            # reports the actual bound port.
            _host, self.port = self.httpd.socket.getsockname()
        except Exception:
            logger.error('Init failed! You may need to modify your hosts file. Refer to README.md.');
            raise

    def start(self, block=False):
        """Start the server.

        :param block: True to run the server on the current thread, blocking,
                      False to run on a separate thread."""
        logger.info("Starting http server on %s:%s" % (self.host, self.port))
        self.started = True
        if block:
            self.httpd.serve_forever()
        else:
            self.server_thread = threading.Thread(target=self.httpd.serve_forever)
            self.server_thread.setDaemon(True)  # don't hang on exit
            self.server_thread.start()

    def stop(self):
        """
        Stops the server.

        If the server is not running, this method has no effect.
        """
        if self.started:
            try:
                self.httpd.shutdown()
                self.httpd.server_close()
                self.server_thread.join()
                self.server_thread = None
                logger.info("Stopped http server on %s:%s" % (self.host, self.port))
            except AttributeError:
                # Server was partially constructed or already torn down.
                pass
            self.started = False
        self.httpd = None

    def get_url(self, path="/", query=None, fragment=None):
        # Build an absolute URL for this server, or None if not started.
        if not self.started:
            return None

        return urlparse.urlunsplit(("http" if not self.use_ssl else "https",
                                    "%s:%s" % (self.host, self.port),
                                    path, query, fragment))
|
pythread_per_process_scheduler.py
|
#ckwg +28
# Copyright 2012 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from vital.config import config
from sprokit.pipeline import datum
from sprokit.pipeline import edge
from sprokit.pipeline import pipeline
from sprokit.pipeline import process
from sprokit.pipeline import scheduler
from sprokit.pipeline import utils
import threading
class UnsupportedProcess(Exception):
    """Raised when a pipeline process cannot run inside a Python thread."""

    def __init__(self, name):
        # Name of the offending process, used in the error message.
        self.name = name

    def __str__(self):
        return "The process '%s' does not support running in a Python thread" % self.name
class PyThreadPerProcessScheduler(scheduler.PythonScheduler):
    """ Runs each process in a pipeline in its own thread.
    """
    def __init__(self, pipe, conf):
        """
        :param pipe: Pipeline whose processes will be run.
        :param conf: Scheduler configuration.
        :raises UnsupportedProcess: If any process declares that it cannot
            run in a Python thread (``property_no_threads``).
        """
        scheduler.PythonScheduler.__init__(self, pipe, conf)

        p = self.pipeline()
        names = p.process_names()

        no_threads = process.PythonProcess.property_no_threads

        for name in names:
            proc = p.process_by_name(name)
            properties = proc.properties()

            if no_threads in properties:
                raise UnsupportedProcess(name)

        self._threads = []
        self._pause_event = threading.Event()  # set while the pipeline is paused
        self._event = threading.Event()        # set when the pipeline is stopping

        self._make_monitor_edge_config()

    def _start(self):
        # Create one worker thread per process, then start them all.
        p = self.pipeline()
        names = p.process_names()

        for name in names:
            proc = p.process_by_name(name)
            thread = threading.Thread(target=self._run_process, name=name, args=(proc,))
            self._threads.append(thread)

        for thread in self._threads:
            thread.start()

    def _wait(self):
        for thread in self._threads:
            thread.join()

    def _pause(self):
        self._pause_event.set()

    def _resume(self):
        self._pause_event.clear()

    def _stop(self):
        self._event.set()
        self.shutdown()

    def _run_process(self, proc):
        """Step ``proc`` until it reports completion or the scheduler stops."""
        utils.name_thread(proc.name())

        monitor = edge.Edge(self._edge_conf)

        proc.connect_output_port(process.PythonProcess.port_heartbeat, monitor)

        complete = False

        while not complete and not self._event.is_set():
            # BUG FIX: the original loop called self._pause_event.wait(),
            # which returns *immediately* while the event is set, so a paused
            # pipeline busy-spun at 100% CPU and could never observe _stop().
            # Instead, sleep on the stop event with a short timeout: this
            # blocks while paused, wakes immediately on _stop(), and notices
            # _resume() within 0.1s.
            while self._pause_event.is_set() and not self._event.is_set():
                self._event.wait(0.1)
            if self._event.is_set():
                break

            proc.step()

            # Drain heartbeat data; a 'complete' datum ends this worker.
            while monitor.has_data():
                edat = monitor.get_datum()
                dat = edat.datum

                if dat.type() == datum.DatumType.complete:
                    complete = True

    def _make_monitor_edge_config(self):
        # Monitor edges use an empty (default) edge configuration.
        self._edge_conf = config.empty_config()
def __sprokit_register__():
    """Register this scheduler type with the sprokit scheduler factory."""
    from sprokit.pipeline import scheduler_factory

    name = 'python:schedulers'
    if scheduler_factory.is_scheduler_module_loaded(name):
        # Already registered; nothing to do.
        return

    scheduler_factory.add_scheduler(
        'pythread_per_process',
        'Run each process in its own Python thread',
        PyThreadPerProcessScheduler)
    scheduler_factory.mark_scheduler_module_as_loaded(name)
|
polling.py
|
import time
from threading import Event, Thread
from featureflags.api.client import AuthenticatedClient
from .api.default.get_all_segments import sync as retrieve_segments
from .api.default.get_feature_config import sync as retrieve_flags
from .config import Config
from .util import log
class PollingProcessor(Thread):
    """Daemon thread that periodically pulls feature flags and target
    segments from the server and stores them in the configured cache."""

    def __init__(self, client: AuthenticatedClient, config: Config,
                 environment_id: str, ready: Event,
                 stream_ready: Event) -> None:
        """
        :param client: Authenticated API client used for retrieval.
        :param config: SDK configuration (pull interval, cache, stream flag).
        :param environment_id: Environment UUID to poll.
        :param ready: Event set once an initial poll has completed.
        :param stream_ready: Event set while the streaming connection is up.
        """
        Thread.__init__(self)
        self.daemon = True
        self.__environment_id = environment_id
        self.__client = client
        self.__config = config
        self.__running = False
        self.__ready = ready
        self.__stream_ready = stream_ready

    def run(self):
        if not self.__running:
            log.info("Starting PollingProcessor with request interval: " +
                     str(self.__config.pull_interval))
            self.__running = True
            while self.__running:
                start_time = time.time()
                try:
                    # Fetch segments and flags concurrently.
                    t1 = Thread(target=self.__retrieve_segments)
                    t2 = Thread(target=self.__retrieve_flags)
                    t1.start()
                    t2.start()
                    t1.join()
                    t2.join()
                    # BUG FIX: was `if not self.__ready.is_set() is True:`,
                    # which parses as `not (is_set() is True)` — the chained
                    # `is True` comparison obscured the intent.
                    if not self.__ready.is_set():
                        log.info("PollingProcessor initialized ok")
                    if self.__config.enable_stream and \
                            not self.__stream_ready.is_set():
                        # Streaming is configured but down: pause until the
                        # stream processor signals readiness.
                        log.debug('Poller is in pause mode...')
                        self.__ready.wait()
                    else:
                        self.__ready.set()
                except Exception as e:
                    log.exception(
                        'Error: Exception encountered when polling flags. %s',
                        e
                    )
                # Sleep only for the remainder of the pull interval.
                elapsed = time.time() - start_time
                if elapsed < self.__config.pull_interval:
                    time.sleep(self.__config.pull_interval - elapsed)

    def stop(self):
        """Signal the polling loop to exit after the current iteration."""
        log.info("Stopping PollingProcessor")
        self.__running = False

    def __retrieve_flags(self):
        log.debug("Loading feature flags")
        flags = retrieve_flags(
            client=self.__client, environment_uuid=self.__environment_id
        )
        log.debug("Feature flags loaded")
        # ROBUSTNESS: the generated API client can return None on a failed
        # or empty response; guard so a transient error doesn't raise a
        # TypeError in this worker thread.
        for flag in flags or []:
            log.debug("Setting the cache value %s", flag.feature)
            self.__config.cache.set(f"flags/{flag.feature}", flag)

    def __retrieve_segments(self):
        log.debug("Loading target segments")
        segments = retrieve_segments(
            client=self.__client, environment_uuid=self.__environment_id
        )
        log.debug("Target segments loaded")
        # Same None-guard as __retrieve_flags.
        for segment in segments or []:
            log.debug("Setting the cache segment value %s", segment.identifier)
            self.__config.cache.set(f"segments/{segment.identifier}", segment)
|
test_rwlock.py
|
from collections import OrderedDict
import threading
import time
import unittest
import pytest
from smqtk.utils.read_write_lock import \
ContextualReadWriteLock
def wait_for_value(f, timeout):
    """
    Wait a specified timeout period of time (seconds) for the given
    function to execute successfully.

    `f` usually wraps an assertion function.

    :param f: Assertion function (raises on failure, returns on success).
    :type f: () -> None

    :param timeout: Time out in seconds to wait for convergence.
    :type timeout: float

    :raises Exception: Whatever `f` last raised, if it never succeeds
        within `timeout` seconds.
    """
    s = time.time()
    while True:
        try:
            f()
            # function passed.
            return
        except Exception:
            # BUG FIX: was `except (Exception, AssertionError)` —
            # AssertionError already subclasses Exception, so the tuple
            # was redundant.
            # If assertion fails past timeout, actually raise it.
            if time.time() - s > timeout:
                raise
@pytest.mark.skip(reason="These tests are unstable and fail "
                         "non-deterministically.")
# BUG FIX: pytest.mark.skip takes `reason=`, not `msg=`; with `msg=` the
# skip reason was lost (and modern pytest rejects the unknown keyword).
class TestContextualReadWriteLock (unittest.TestCase):
    """State-machine tests for ContextualReadWriteLock, coordinating worker
    threads through a shared ordered ``state`` dict of milestone flags."""

    def setUp(self):
        # Shared record of milestones reached by worker threads.
        self.state = OrderedDict()

    def wait_for_state(self, k):
        """ Wait forever until a state attribute is True. """
        while k not in self.state or not self.state[k]:
            pass

    # Added asserts

    def assertInState(self, k):
        """ Assert key in state """
        self.assertIn(k, self.state)

    def assertLockFree(self, lock):
        # Semaphore value 1 means the lock is not held.
        self.assertEqual(lock._semlock._get_value(), 1)

    def assertLockAcquired(self, lock):
        # Semaphore value 0 means the lock is held.
        self.assertEqual(lock._semlock._get_value(), 0)

    # Unit Tests

    def test_initial_state(self):
        # Test expected lock and value states before use.
        crwl = ContextualReadWriteLock()
        self.assertLockFree(crwl._service_lock)
        self.assertLockFree(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 0)

    def test_read_context_state(self):
        # Test expected state conditions when transitioning into and out of a
        # read-lock context.
        crwl = ContextualReadWriteLock()

        def t1(c):
            with c.read_context():
                self.state['t1_read_acquired'] = True
                self.wait_for_state('t1_release')
            self.state['t1_read_released'] = True

        t1 = threading.Thread(target=t1, args=(crwl,))
        t1.daemon = True
        t1.start()

        # Thread should immediately attempt to acquire read lock. We should see
        # that it does successfully.
        wait_for_value(lambda: self.assertInState('t1_read_acquired'),
                       1.0)
        self.assertLockFree(crwl._service_lock)
        self.assertLockAcquired(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 1)

        # Trigger thread to release context and check state.
        self.state['t1_release'] = True
        wait_for_value(lambda: self.assertInState('t1_read_released'),
                       1.0)
        self.assertLockFree(crwl._service_lock)
        self.assertLockFree(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 0)

    def test_write_context_state(self):
        # Test expected state conditions when transitioning into and out of a
        # write-lock context.
        crwl = ContextualReadWriteLock()

        def t1_func(c):
            with c.write_context():
                self.state['t1_write_acquired'] = True
                self.wait_for_state('t1_release')
            self.state['t1_write_released'] = True

        t1 = threading.Thread(target=t1_func, args=(crwl,))
        t1.daemon = True
        t1.start()

        # Thread should immediately attempt to acquire write lock. We should
        # see that it does successfully.
        wait_for_value(lambda: self.assertInState('t1_write_acquired'),
                       1.0)
        self.assertLockFree(crwl._service_lock)
        self.assertLockAcquired(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 0)

        # Trigger thread to release context and check state.
        self.state['t1_release'] = True
        wait_for_value(lambda: self.assertInState('t1_write_released'),
                       1.0)
        self.assertLockFree(crwl._service_lock)
        self.assertLockFree(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 0)

    def test_concurrent_read_then_write(self):
        # Test that a thread with a read lock blocks a write lock from entering.
        crwl = ContextualReadWriteLock()

        # Thread 1 function - Read lock
        def t1_func(c):
            with c.read_context():
                self.state['t1_read_acquired'] = True
                self.wait_for_state('t1_release')
            self.state['t1_read_released'] = True

        # Thread 2 function - Write lock
        def t2_func(c):
            self.wait_for_state('t2_acquire')
            with c.write_context():
                self.state['t2_write_acquired'] = True
                self.wait_for_state('t2_release')
            self.state['t2_write_released'] = True

        t1 = threading.Thread(target=t1_func, args=(crwl,))
        t2 = threading.Thread(target=t2_func, args=(crwl,))
        t1.daemon = t2.daemon = True
        t1.start()
        t2.start()

        # Upon starting threads, t1 should get read lock and t2 should not have
        # done anything yet.
        wait_for_value(lambda: self.assertInState('t1_read_acquired'), 1.0)
        self.assertNotIn('t2_write_acquired', self.state)
        self.assertLockFree(crwl._service_lock)
        self.assertLockAcquired(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 1)

        # t2 should attempt to acquire write context but be blocked. We should
        # see that the service lock is acquired and that 't2_write_acquired' is
        # not set.
        self.state['t2_acquire'] = True
        wait_for_value(lambda: self.assertLockAcquired(crwl._service_lock), 1.0)
        self.assertNotIn('t2_write_acquired', self.state)
        self.assertLockAcquired(crwl._service_lock)
        self.assertLockAcquired(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 1)

        # Releasing t1's read lock should cause t2 to acquire write lock.
        self.state['t1_release'] = True
        wait_for_value(lambda: self.assertInState('t1_read_released'), 1.0)
        wait_for_value(lambda: self.assertInState('t2_write_acquired'), 1.0)
        self.assertLockFree(crwl._service_lock)
        self.assertLockAcquired(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 0)

        # t2 should now be able to release the write lock like normal
        self.state['t2_release'] = True
        wait_for_value(lambda: self.assertInState('t2_write_released'), 1.0)
        self.assertLockFree(crwl._service_lock)
        self.assertLockFree(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 0)

    def test_concurrent_write_then_read(self):
        # Test that a thread with a write lock blocks a read lock from entering.
        crwl = ContextualReadWriteLock()

        # Thread 1 function - Write lock
        def t1_func(c):
            with c.write_context():
                self.state['t1_write_acquired'] = True
                self.wait_for_state('t1_release')
            self.state['t1_write_released'] = True

        # Thread 2 function - Read lock
        def t2_func(c):
            self.wait_for_state('t2_acquire')
            self.state['t2_read_attempt'] = True
            with c.read_context():
                self.state['t2_read_acquired'] = True
                self.wait_for_state('t2_release')
            self.state['t2_read_released'] = True

        t1 = threading.Thread(target=t1_func, args=(crwl,))
        t2 = threading.Thread(target=t2_func, args=(crwl,))
        t1.daemon = t2.daemon = True
        t1.start()
        t2.start()

        # Upon starting threads, t1 should get write lock and t2 should not have
        # done anything yet.
        wait_for_value(lambda: self.assertInState('t1_write_acquired'), 1.0)
        self.assertNotIn('t2_read_acquired', self.state)
        self.assertLockFree(crwl._service_lock)
        self.assertLockAcquired(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 0)

        # t2 should attempt to acquire read context but be blocked. We should
        # see that the service lock is acquired and that 't2_read_acquired' is
        # not set.
        self.state['t2_acquire'] = True
        wait_for_value(lambda: self.assertLockAcquired(crwl._service_lock), 1.0)
        self.assertNotIn('t2_write_acquired', self.state)
        self.assertLockAcquired(crwl._service_lock)
        self.assertLockAcquired(crwl._resource_lock)
        self.assertLockAcquired(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 0)

        # Releasing t1's write lock should cause t2 to acquire read lock.
        self.state['t1_release'] = True
        wait_for_value(lambda: self.assertInState('t1_write_released'), 1.0)
        wait_for_value(lambda: self.assertInState('t2_read_acquired'), 1.0)
        self.assertLockFree(crwl._service_lock)
        self.assertLockAcquired(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 1)

        # t2 should now be able to release the read lock like normal
        self.state['t2_release'] = True
        wait_for_value(lambda: self.assertInState('t2_read_released'), 1.0)
        self.assertLockFree(crwl._service_lock)
        self.assertLockFree(crwl._resource_lock)
        self.assertLockFree(crwl._reader_count_lock)
        self.assertEqual(crwl._reader_count, 0)
|
__init__.py
|
"""Rhasspy command-line interface"""
import argparse
import asyncio
import io
import json
import logging
# Configure logging
import logging.config
import os
import sys
import threading
import time
import wave
from typing import Any
from rhasspy.audio_recorder import AudioData
from rhasspy.core import RhasspyCore
from rhasspy.profiles import Profile
from rhasspy.utils import buffer_to_wav
from rhasspy.wake import WakeWordDetected
logger = logging.getLogger("rhasspy")

# -----------------------------------------------------------------------------
# Globals
# -----------------------------------------------------------------------------

# Background thread feeding stdin audio into the core; created by main()
# when `mic2intent --stdin` is used, None otherwise.
mic_stdin_thread = None
# True while the stdin-reading thread should keep running; main() clears
# it to stop the thread before shutdown.
mic_stdin_running = False
# -----------------------------------------------------------------------------
async def main() -> None:
    """Main method: parse the command line, configure the Rhasspy core,
    and dispatch to the selected subcommand."""
    global mic_stdin_running, mic_stdin_thread

    # Parse command-line arguments
    parser = argparse.ArgumentParser(description="Rhasspy")
    parser.add_argument(
        "--profile", "-p", required=True, type=str, help="Name of profile to use"
    )
    parser.add_argument(
        "--system-profiles",
        type=str,
        help="Directory with base profile files (read only)",
        default=os.path.join(os.getcwd(), "profiles"),
    )
    parser.add_argument(
        "--user-profiles",
        type=str,
        help="Directory with user profile files (read/write)",
        default=os.path.expanduser("~/.config/rhasspy/profiles"),
    )
    parser.add_argument(
        "--set",
        "-s",
        nargs=2,
        action="append",
        help="Set a profile setting value",
        default=[],
    )
    parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG log to console"
    )
    parser.add_argument(
        "--no-check",
        action="store_true",
        help="Don't check profile for necessary files",
    )

    sub_parsers = parser.add_subparsers(dest="command")
    sub_parsers.required = True

    # info
    info_parser = sub_parsers.add_parser("info", help="Profile information")
    info_parser.add_argument(
        "--defaults", action="store_true", help="Only print default settings"
    )

    # wav2text
    wav2text_parser = sub_parsers.add_parser(
        "wav2text", help="WAV file to text transcription"
    )
    wav2text_parser.add_argument("wav_files", nargs="*", help="Paths to WAV files")

    # text2intent
    text2intent_parser = sub_parsers.add_parser(
        "text2intent", help="Text parsed to intent"
    )
    text2intent_parser.add_argument("sentences", nargs="*", help="Sentences to parse")
    text2intent_parser.add_argument(
        "--handle", action="store_true", help="Pass result to intent handler"
    )

    # wav2intent
    wav2intent_parser = sub_parsers.add_parser(
        "wav2intent", help="WAV file to parsed intent"
    )
    wav2intent_parser.add_argument("wav_files", nargs="*", help="Paths to WAV files")
    wav2intent_parser.add_argument(
        "--handle", action="store_true", help="Pass result to intent handler"
    )

    # train
    train_parser = sub_parsers.add_parser("train", help="Re-train profile")
    train_parser.add_argument(
        "--no-cache", action="store_true", help="Clear training cache"
    )

    # mic2wav
    mic2wav_parser = sub_parsers.add_parser("mic2wav", help="Voice command to WAV data")
    mic2wav_parser.add_argument(
        "--timeout",
        type=float,
        default=None,
        help="Maximum number of seconds to record (default=profile)",
    )

    # mic2text
    mic2text_parser = sub_parsers.add_parser(
        "mic2text", help="Voice command to text transcription"
    )
    mic2text_parser.add_argument(
        "--timeout",
        type=float,
        default=None,
        help="Maximum number of seconds to record (default=profile)",
    )

    # mic2intent
    mic2intent_parser = sub_parsers.add_parser(
        "mic2intent", help="Voice command to parsed intent"
    )
    mic2intent_parser.add_argument(
        "--stdin", action="store_true", help="Read audio data from stdin"
    )
    mic2intent_parser.add_argument(
        "--handle", action="store_true", help="Pass result to intent handler"
    )
    mic2intent_parser.add_argument(
        "--timeout",
        type=float,
        default=None,
        help="Maximum number of seconds to record (default=profile)",
    )

    # word2phonemes
    word2phonemes_parser = sub_parsers.add_parser(
        "word2phonemes", help="Get pronunciation(s) for word(s)"
    )
    word2phonemes_parser.add_argument("words", nargs="*", help="Word(s) to pronounce")
    word2phonemes_parser.add_argument(
        "-n", type=int, default=1, help="Maximum number of pronunciations"
    )

    # word2wav
    word2wav_parser = sub_parsers.add_parser("word2wav", help="Pronounce word")
    word2wav_parser.add_argument("word", help="Word to pronounce")

    # wav2mqtt
    wav2mqtt_parser = sub_parsers.add_parser(
        "wav2mqtt", help="Push WAV file(s) to MQTT"
    )
    wav2mqtt_parser.add_argument("wav_files", nargs="*", help="Paths to WAV files")
    wav2mqtt_parser.add_argument(
        "--frames",
        type=int,
        default=480,
        help="WAV frames per MQTT message (default=0 for all)",
    )
    wav2mqtt_parser.add_argument(
        "--site-id", type=str, default="default", help="Hermes siteId (default=default)"
    )
    wav2mqtt_parser.add_argument(
        "--silence-before",
        type=float,
        default=0,
        help="Seconds of silence to add before each WAV",
    )
    wav2mqtt_parser.add_argument(
        "--silence-after",
        type=float,
        default=0,
        help="Seconds of silence to add after each WAV",
    )
    wav2mqtt_parser.add_argument(
        "--pause",
        type=float,
        default=0.01,
        help="Seconds to wait before sending next chunk (default=0.01)",
    )

    # text2wav
    text2wav_parser = sub_parsers.add_parser(
        "text2wav", help="Output WAV file using text to speech system"
    )
    text2wav_parser.add_argument("sentence", help="Sentence to speak")

    # text2speech
    text2speech_parser = sub_parsers.add_parser(
        "text2speech", help="Speak sentences using text to speech system"
    )
    text2speech_parser.add_argument("sentences", nargs="*", help="Sentences to speak")

    # sleep
    sub_parsers.add_parser("sleep", help="Wait for wake word")

    # download
    download_parser = sub_parsers.add_parser("download", help="Download profile files")
    download_parser.add_argument(
        "--delete", action="store_true", help="Clear download cache before downloading"
    )

    # check
    sub_parsers.add_parser("check", help="Check downloaded profile files")

    # -------------------------------------------------------------------------

    args = parser.parse_args()

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    profiles_dirs = [args.system_profiles, args.user_profiles]
    logger.debug(profiles_dirs)

    # Create rhasspy core
    core = RhasspyCore(args.profile, args.system_profiles, args.user_profiles)

    # Add profile settings from the command line
    extra_settings = {}
    for key, value in args.set:
        try:
            # Interpret the value as JSON when possible; fall back to the
            # raw string otherwise.
            value = json.loads(value)
        except Exception:
            pass

        logger.debug("Profile: %s=%s", key, value)
        extra_settings[key] = value
        core.profile.set(key, value)

    # Handle command
    if args.command == "info":
        if args.defaults:
            # Print default settings
            json.dump(core.defaults, sys.stdout, indent=4)
        else:
            # Print profile settings
            json.dump(core.profile.json, sys.stdout, indent=4)
    elif args.command == "sentences":
        # NOTE(review): no "sentences" subparser is registered above and
        # sub_parsers.required is True, so this branch appears unreachable —
        # confirm whether the subcommand was removed intentionally.
        sentences_path = core.profile.read_path(
            core.profile.get("speech_to_text.sentences_ini", "sentences.ini")
        )
        with open(sentences_path, "r") as sentences_file:
            sys.stdout.write(sentences_file.read())
    else:
        # Patch profile: CLI runs never listen on start or preload.
        profile = core.profile
        profile.set("rhasspy.listen_on_start", False)
        profile.set("rhasspy.preload_profile", False)

        if args.command == "wav2mqtt":
            profile.set("mqtt.enabled", True)
        elif args.command in ["mic2intent"] and args.stdin:
            # Replace the microphone with a stdin reader (started below).
            profile.set("microphone.system", "stdin")
            profile.set("microphone.stdin.auto_start", False)
            mic_stdin_running = True
        elif args.command == "text2wav":
            profile.set("sounds.system", "dummy")

        # Set environment variables
        os.environ["RHASSPY_BASE_DIR"] = os.getcwd()
        os.environ["RHASSPY_PROFILE"] = core.profile.name
        os.environ["RHASSPY_PROFILE_DIR"] = core.profile.write_dir()

        # Execute command: map subcommand name -> async handler coroutine.
        command_funcs = {
            "wav2text": wav2text,
            "text2intent": text2intent,
            "wav2intent": wav2intent,
            "train": train_profile,
            "mic2text": mic2text,
            "mic2intent": mic2intent,
            "mic2wav": mic2wav,
            "word2phonemes": word2phonemes,
            "word2wav": word2wav,
            "wav2mqtt": wav2mqtt,
            "text2wav": text2wav,
            "text2speech": text2speech,
            "sleep": sleep,
            "download": download,
            "check": check,
        }

        # Automatically start core
        await core.start()

        if not args.no_check and (args.command not in ["check", "download"]):
            # Verify that profile has necessary files
            missing_files = core.check_profile()
            if len(missing_files) > 0:
                logger.fatal(
                    "Missing required files for %s: %s. Please run download command and try again.",
                    profile.name,
                    missing_files.keys(),
                )
                sys.exit(1)

        if mic_stdin_running:
            logger.debug("Reading audio data from stdin")
            mic_stdin_thread = threading.Thread(
                target=read_audio_stdin, args=(core,), daemon=True
            )
            mic_stdin_thread.start()

        # Run command
        try:
            await command_funcs[args.command](core, profile, args)

            if mic_stdin_thread is not None:
                # Stop the stdin reader thread before shutting down.
                mic_stdin_running = False
                mic_stdin_thread.join()
        finally:
            # Always shut the core down, even if the command failed.
            await core.shutdown()
# -----------------------------------------------------------------------------
# wav2text: transcribe WAV file(s) to text
# -----------------------------------------------------------------------------
async def wav2text(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Transcribe WAV file(s)"""
    if not args.wav_files:
        # No paths given: transcribe WAV data piped through stdin
        text = (await core.transcribe_wav(sys.stdin.buffer.read())).text
        print(text)
        return
    # Transcribe each WAV file named on the command line
    transcriptions = {}
    for wav_path in args.wav_files:
        with open(wav_path, "rb") as wav_file:
            wav_bytes = wav_file.read()
        transcriptions[wav_path] = (await core.transcribe_wav(wav_bytes)).text
    # Output JSON
    json.dump(transcriptions, sys.stdout, indent=4)
# -----------------------------------------------------------------------------
# text2intent: parse text into intent(s)
# -----------------------------------------------------------------------------
async def text2intent(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Parse sentences from command line or stdin"""
    # Fall back to stdin when no sentences were given on the command line
    source = sys.stdin if len(args.sentences) == 0 else args.sentences
    results = {}
    for raw_line in source:
        stripped = raw_line.strip()
        recognized = (await core.recognize_intent(stripped)).intent
        if args.handle:
            recognized = (await core.handle_intent(recognized)).intent
        results[stripped] = recognized
    # Output JSON
    json.dump(results, sys.stdout, indent=4)
# -----------------------------------------------------------------------------
# wav2intent: transcribe WAV file(s) to text and parse into intent(s)
# -----------------------------------------------------------------------------
async def wav2intent(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Recognize intent from WAV file(s)"""
    async def _recognize(sentence):
        # Parse a transcription into an intent, optionally handling it too
        parsed = (await core.recognize_intent(sentence)).intent
        if args.handle:
            parsed = (await core.handle_intent(parsed)).intent
        return parsed

    if len(args.wav_files) > 0:
        # Read WAV paths from argument list and parse each transcription
        intents = {}
        for wav_path in args.wav_files:
            with open(wav_path, "rb") as wav_file:
                sentence = (await core.transcribe_wav(wav_file.read())).text
            intents[wav_path] = await _recognize(sentence)
        # Output JSON
        json.dump(intents, sys.stdout, indent=4)
    else:
        # Read WAV data from stdin
        sentence = (await core.transcribe_wav(sys.stdin.buffer.read())).text
        # Output JSON
        json.dump(await _recognize(sentence), sys.stdout, indent=4)
# -----------------------------------------------------------------------------
# train: re-train profile speech/intent recognizers
# -----------------------------------------------------------------------------
async def train_profile(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Train Rhasspy profile"""
    # Train without reloading actors; print whatever the core reports
    print(await core.train(reload_actors=False, no_cache=args.no_cache))
# -----------------------------------------------------------------------------
# mic2wav: record voice command and output WAV data
# -----------------------------------------------------------------------------
async def mic2wav(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Record voice command from microphone"""
    # Record until silence is detected, then wrap the raw audio in a WAV container
    recording = await core.record_command(args.timeout)
    # Output WAV data
    sys.stdout.buffer.write(buffer_to_wav(recording.data))
# -----------------------------------------------------------------------------
# mic2text: record voice command, then transcribe
# -----------------------------------------------------------------------------
async def mic2text(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Record voice command and transcribe"""
    # Record until silence, wrap as WAV, then transcribe
    recording = await core.record_command(args.timeout)
    transcription = await core.transcribe_wav(buffer_to_wav(recording.data))
    # Output text
    print(transcription.text)
# -----------------------------------------------------------------------------
# mic2intent: record voice command, then transcribe/parse
# -----------------------------------------------------------------------------
def read_audio_stdin(core: RhasspyCore, chunk_size: int = 960):
    """Stream raw audio chunks from stdin into the core.

    Runs until the mic_stdin_running flag is cleared or stdin reaches
    end-of-file.
    """
    global mic_stdin_running
    while mic_stdin_running:
        audio_data = sys.stdin.buffer.read(chunk_size)
        if not audio_data:
            # EOF: without this check the loop would spin forever feeding
            # empty chunks once stdin closes
            break
        core.send_audio_data(AudioData(audio_data))
async def mic2intent(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Record voice command, transcribe, and recognize intent"""
    # Record until silence and transcribe
    recording = await core.record_command(args.timeout)
    sentence = (await core.transcribe_wav(buffer_to_wav(recording.data))).text
    # Recognize (and optionally handle) the intent
    intent = (await core.recognize_intent(sentence)).intent
    if args.handle:
        intent = (await core.handle_intent(intent)).intent
    # Output JSON
    json.dump(intent, sys.stdout, indent=4)
# -----------------------------------------------------------------------------
# word2phonemes: get pronunciation(s) for a word
# -----------------------------------------------------------------------------
async def word2phonemes(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Get pronunciation(s) for word(s)"""
    # Words come from the command line, or stdin if none were given
    word_source = args.words if len(args.words) > 0 else sys.stdin
    result = await core.get_word_pronunciations(word_source, n=args.n)
    # Output JSON
    json.dump(result.pronunciations, sys.stdout, indent=4)
# -----------------------------------------------------------------------------
# word2wav: pronounce word as WAV data
# -----------------------------------------------------------------------------
async def word2wav(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Speak a word's pronunciation"""
    # Look up the first CMU pronunciation for the word
    lookup = (await core.get_word_pronunciations([args.word], n=1)).pronunciations
    first_pronunciation = lookup[args.word]["pronunciations"][0]
    # Convert from CMU phonemes to eSpeak phonemes
    espeak_phonemes = (await core.get_word_phonemes(first_pronunciation)).phonemes
    # Synthesize and write the WAV data to stdout
    speak_result = await core.speak_word(espeak_phonemes)
    sys.stdout.buffer.write(speak_result.wav_data)
# -----------------------------------------------------------------------------
# wav2mqtt: output WAV data to MQTT via Hermes protocol
# -----------------------------------------------------------------------------
def _send_frame(
core: RhasspyCore,
topic: str,
audio_data: bytes,
rate: int,
width: int,
channels: int,
) -> None:
"""Send a single audio frame via MQTT"""
with io.BytesIO() as mqtt_buffer:
mqtt_file: wave.Wave_write = wave.open(mqtt_buffer, mode="wb")
with mqtt_file:
mqtt_file.setframerate(rate)
mqtt_file.setsampwidth(width)
mqtt_file.setnchannels(channels)
mqtt_file.writeframes(audio_data)
# Send audio frame WAV
mqtt_payload = mqtt_buffer.getvalue()
core.mqtt_publish(topic, mqtt_payload)
async def wav2mqtt(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Publish WAV to MQTT as audio frames.

    When args.frames > 0 each file is chunked into fixed-size frames,
    optionally padded with leading/trailing silence; otherwise the whole
    file is sent as a single frame. args.pause seconds elapse between frames.
    """
    # hermes/audioServer/<SITE_ID>/audioFrame
    topic = "hermes/audioServer/%s/audioFrame" % args.site_id
    if len(args.wav_files) > 0:
        # Read WAV paths from argument list
        for wav_path in args.wav_files:
            with wave.open(wav_path, "rb") as wav_file:
                rate = wav_file.getframerate()
                width = wav_file.getsampwidth()
                channels = wav_file.getnchannels()
                if args.frames > 0:
                    # Split into chunks
                    chunk_size = args.frames * width * channels
                    if args.silence_before > 0:
                        # Silence (bytes(chunk_size) is all zeros)
                        num_chunks = int(
                            (args.silence_before * rate * width * channels) / chunk_size
                        )
                        for _ in range(num_chunks):
                            _send_frame(
                                core, topic, bytes(chunk_size), rate, width, channels
                            )
                            time.sleep(args.pause)
                    # Read actual audio data
                    audio_data = wav_file.readframes(args.frames)
                    while len(audio_data) > 0:
                        _send_frame(core, topic, audio_data, rate, width, channels)
                        time.sleep(args.pause)
                        # Read next chunk
                        audio_data = wav_file.readframes(args.frames)
                    if args.silence_after > 0:
                        # Silence
                        num_chunks = int(
                            (args.silence_after * rate * width * channels) / chunk_size
                        )
                        for _ in range(num_chunks):
                            _send_frame(
                                core, topic, bytes(chunk_size), rate, width, channels
                            )
                            time.sleep(args.pause)
                else:
                    # Send all at once
                    audio_data = wav_file.readframes(wav_file.getnframes())
                    _send_frame(core, topic, audio_data, rate, width, channels)
            # Echo each processed path so callers can track progress
            print(wav_path)
# -----------------------------------------------------------------------------
# text2wav: speak sentence and output WAV
# -----------------------------------------------------------------------------
async def text2wav(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Speak a sentence and output WAV data"""
    # NOTE(review): the whole argparse namespace is passed to speak_sentence
    # here, whereas text2speech passes individual sentence strings — confirm
    # speak_sentence accepts a namespace, or whether a field such as
    # args.sentence was intended.
    result = await core.speak_sentence(args)
    sys.stdout.buffer.write(result.wav_data)
# -----------------------------------------------------------------------------
# text2speech: speak sentences
# -----------------------------------------------------------------------------
async def text2speech(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Speak sentences"""
    # Sentences come from the command line, or stdin if none were given
    sentence_source = args.sentences if len(args.sentences) > 0 else sys.stdin
    for raw_sentence in sentence_source:
        await core.speak_sentence(raw_sentence.strip())
# -----------------------------------------------------------------------------
# sleep: wait for wake word
# -----------------------------------------------------------------------------
async def sleep(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Wait for wake word to be spoken"""
    result = await core.wakeup_and_wait()
    # Print the wake word name, or an empty line if nothing was detected
    print(result.name if isinstance(result, WakeWordDetected) else "")
# -----------------------------------------------------------------------------
# download: download profile files
# -----------------------------------------------------------------------------
async def download(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Download necessary profile files"""
    # Fetch whatever the profile is missing, then confirm on stdout
    await core.download_profile(delete=args.delete)
    print("OK")
# -----------------------------------------------------------------------------
# check: check profile files
# -----------------------------------------------------------------------------
async def check(core: RhasspyCore, profile: Profile, args: Any) -> None:
    """Verify that profile files are downloaded"""
    # Report any files the profile still needs as JSON
    json.dump(core.check_profile(), sys.stdout, indent=4)
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # Drive the async main() on a dedicated event loop, always closing the
    # loop on exit (even when main() raises or the command calls sys.exit)
    loop = asyncio.get_event_loop()
    try:
        loop.run_until_complete(main())
    finally:
        loop.close()
|
predict.py
|
#
# Copyright (c) 2018, Salesforce, Inc.
# The Board of Trustees of the Leland Stanford Junior University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import json
import logging
import os
from pprint import pformat
from collections import defaultdict
import copy
import shutil
# multiprocessing with CUDA
from torch.multiprocessing import Process, set_start_method
from .data_utils.bootleg import Bootleg, init_bootleg_annotator, extract_features_with_annotator
from .run_bootleg import bootleg_process_splits
try:
    # CUDA tensors can only be shared across processes with the 'spawn'
    # start method; set_start_method raises RuntimeError if the start
    # method was already fixed elsewhere, in which case we keep it.
    set_start_method('spawn')
except RuntimeError:
    pass
import torch
from . import models
from .tasks.registry import get_tasks
from .util import set_seed, load_config_json, make_data_loader, log_model_size, get_devices, \
combine_folders_on_disk, split_folder_on_disk, get_part_path
from .validate import generate_with_model, calculate_and_reduce_metrics
from .calibrate import ConfidenceEstimator
from .arguments import check_and_update_generation_args
logger = logging.getLogger(__name__)
def prepare_data(args, device):
    """Load the prediction split of every task, optionally running
    bootleg/NED feature extraction, and return a list (one entry per task)
    of lists of datasets."""
    # initialize bootleg
    bootleg = None
    if args.do_ned and args.ned_retrieve_method == 'bootleg':
        bootleg = Bootleg(args)
    datasets = []
    # NOTE(review): 'paths' is accumulated but never returned or read —
    # possibly leftover; confirm before removing.
    paths = []
    # Broadcast a single source language across all tasks
    if len(args.pred_src_languages) == 1 and len(args.tasks) > 1:
        args.pred_src_languages *= len(args.tasks)
    for i, task in enumerate(args.tasks):
        task_languages = args.pred_src_languages[i]
        logger.info(f'Loading {task}')
        kwargs = {'train': None, 'validation': None, 'test': None}
        if args.evaluate == 'train':
            del kwargs['train']  # deleting keys means use the default file name
        elif args.evaluate == 'valid':
            kwargs['validation'] = args.pred_set_name
        elif args.evaluate == 'test':
            del kwargs['test']
        else:
            raise ValueError('Split used for prediction should be either train, valid or test')
        kwargs.update({'skip_cache': args.skip_cache,
                       'subsample': args.subsample,
                       'cached_path': os.path.join(args.cache, task.name),
                       'all_dirs': task_languages,
                       'almond_lang_as_question': args.almond_lang_as_question,
                       'num_workers': args.num_workers,
                       'separate_eval': args.separate_eval,
                       'translate_no_answer': args.translate_no_answer
                       })
        task_splits, task_paths = task.get_splits(root=args.data, lower=args.lower, **kwargs)
        # Normalize to lists so single- and multi-split tasks share one code path
        if not isinstance(task_splits, list):
            task_splits = [task_splits]
            task_paths = [task_paths]
        task_data_processed = []
        task_path_processed = []
        for split, path in zip(task_splits, task_paths):
            # Exactly one of train/eval/test is expected to be populated
            assert (split.eval or split.test or split.train) and not split.aux
            if split.train:
                data = split.train
                path = path.train
            elif split.eval:
                data = split.eval
                path = path.eval
            else:
                data = split.test
                path = path.test
            if bootleg:
                if split.train or split.eval:
                    # Prepped bootleg features exist for train/eval splits
                    bootleg_process_splits(args, data.examples, path, task, bootleg)
                else:
                    # no prepped bootleg features are available
                    # extract features on-the-fly using bootleg annotator
                    bootleg_annotator = init_bootleg_annotator(args, device)
                    extract_features_with_annotator(data.examples, bootleg_annotator, args, task)
            task_data_processed.append(data)
            task_path_processed.append(path)
        datasets.append(task_data_processed)
        paths.append(task_path_processed)
    return datasets
def prepare_data_iterators(args, val_sets, numericalizer, device):
    """Build one (task, language, loader, original_order) tuple per evaluation set."""
    logger.info(f'Preparing data iterators')
    # Broadcast a single batch size across all tasks
    if len(args.val_batch_size) == 1 and len(val_sets) > 1:
        args.val_batch_size *= len(val_sets)
    iters = []
    for task_index, (task, batch_size, val_set) in enumerate(zip(args.tasks, args.val_batch_size, val_sets)):
        task_languages = args.pred_src_languages[task_index]
        if task_languages is not None and args.separate_eval:
            # One iterator per language: datasets must line up with languages
            languages = task_languages.split('+')
            assert len(languages) == len(val_set)
            for set_index, eval_set in enumerate(val_set):
                loader, original_order = make_data_loader(eval_set, numericalizer, batch_size, device,
                                                          train=False, return_original_order=True)
                iters.append((task, languages[set_index], loader, original_order))
        # single language task or no separate eval
        else:
            loader, original_order = make_data_loader(val_set[0], numericalizer, batch_size, device,
                                                      train=False, return_original_order=True)
            iters.append((task, task_languages, loader, original_order))
    return iters
def run(args, device):
    """Load the model on *device*, generate predictions for every task's
    evaluation set, write .tsv predictions and .results.json metrics into
    eval_dir, and log a length-weighted deca score across tasks."""
    # TODO handle multiple languages
    src_lang = args.pred_src_languages[0]
    tgt_lang = args.pred_tgt_languages[0]
    Model = getattr(models, args.model)
    model, _ = Model.load(args.path,
                          model_checkpoint_file=args.checkpoint_name,
                          args=args,
                          device=device,
                          tasks=args.tasks,
                          src_lang=src_lang,
                          tgt_lang=tgt_lang
                          )
    val_sets = prepare_data(args, device)
    model.add_new_vocab_from_data(args.tasks)
    iters = prepare_data_iterators(args, val_sets, model.numericalizer, device)
    log_model_size(logger, model, args.model)
    model.to(device)
    decaScore = []
    # task -> list of (num_examples, main_metric) pairs, one per language
    task_scores = defaultdict(list)
    model.eval()
    eval_dir = os.path.join(args.eval_dir, args.evaluate)
    os.makedirs(eval_dir, exist_ok=True)
    with torch.no_grad():
        for task, language, it, original_order in iters:
            logger.info(task.name)
            # single language task
            if language is None or 'multilingual' not in task.name:
                prediction_file_name = os.path.join(eval_dir, task.name + '.tsv')
                results_file_name = os.path.join(eval_dir, task.name + '.results.json')
            # multi language task
            else:
                prediction_file_name = os.path.join(eval_dir, task.name + '_{}.tsv'.format(language))
                results_file_name = os.path.join(eval_dir, task.name + '_{}.results.json'.format(language))
            # Refuse to clobber earlier runs unless --overwrite was given
            if os.path.exists(prediction_file_name):
                if args.overwrite:
                    logger.warning(f'{prediction_file_name} already exists -- overwriting **')
                else:
                    raise OSError(f'{prediction_file_name} already exists')
            if os.path.exists(results_file_name):
                if args.overwrite:
                    logger.warning(f'{results_file_name} already exists -- overwriting **')
                else:
                    raise OSError(f'{results_file_name} already exists')
            if args.calibrator_paths is not None:
                confidence_estimators = []
                for path in args.calibrator_paths:
                    estimator = ConfidenceEstimator.load(path)
                    confidence_estimators.append(estimator)
                    logger.info('Loading confidence estimator "%s" from %s', estimator.name, path)
            else:
                confidence_estimators = None
            with torch.cuda.amp.autocast(enabled=args.mixed_precision):
                generation_output = generate_with_model(model, it, model.numericalizer, task, args,
                                                        original_order=original_order,
                                                        output_confidence_features=args.save_confidence_features,
                                                        confidence_estimators=confidence_estimators,
                                                        disable_progbar=False)
            if args.save_confidence_features:
                torch.save(generation_output.confidence_features, args.confidence_feature_path)
            # write into file
            # TODO change to jsonl format
            with open(prediction_file_name, 'w' + ('' if args.overwrite else 'x')) as prediction_file:
                for i in range(len(generation_output.example_ids)):
                    line = generation_output.example_ids[i] + '\t' + '\t'.join(generation_output.predictions[i])  # all outputs separated by '\t'
                    if args.calibrator_paths is not None:
                        for score in generation_output.confidence_scores:
                            line += '\t' + str(score[i])
                    prediction_file.write(line + '\n')
            if len(generation_output.answers) > 0:
                metrics_to_compute = task.metrics
                if args.main_metric_only:
                    metrics_to_compute = [metrics_to_compute[0]]
                metrics = calculate_and_reduce_metrics(generation_output.predictions, generation_output.answers, metrics_to_compute, args)
                # NOTE(review): mode is 'w+' here but exclusive 'x' above, so
                # results files are silently overwritten even without
                # --overwrite — confirm this asymmetry is intended.
                with open(results_file_name, 'w' + ('' if args.overwrite else '+')) as results_file:
                    results_file.write(json.dumps(metrics) + '\n')
                if not args.silent:
                    for i, (c, p, a) in enumerate(zip(generation_output.contexts, generation_output.predictions, generation_output.answers)):
                        log_string = f'\nContext {i+1}: {c}\nPrediction {i + 1} ({len(p)} outputs): {p}\nAnswer {i + 1}: {a}\n'
                        if args.calibrator_paths is not None:
                            log_string += f'Confidence {i+1} : '
                            for score in generation_output.confidence_scores:
                                log_string += f'{score[i]:.3f}, '
                            log_string += '\n'
                        logger.info(log_string)
                logger.info(metrics)
                task_scores[task].append((len(generation_output.answers), metrics[task.metrics[0]]))
    # Deca score per task = example-count-weighted average of the main metric
    for task in task_scores.keys():
        decaScore.append(sum([length * score for length, score in task_scores[task]]) / sum([length for length, score in task_scores[task]]))
    logger.info(f'Evaluated Tasks:\n')
    for i, task in enumerate(args.tasks):
        logger.info(f'{task.name}: {decaScore[i]}')
    logger.info(f'-------------------')
    logger.info(f'DecaScore: {sum(decaScore)}\n')
    logger.info(f'\nSummary: | {sum(decaScore)} | {" | ".join([str(x) for x in decaScore])} |\n')
def parse_argv(parser):
    """Register all prediction-time command-line arguments on *parser*."""
    parser.add_argument('--path', type=str, required=True, help='Folder to load the model from')
    parser.add_argument('--evaluate', type=str, required=True, choices=['train', 'valid', 'test'],
                        help='Which dataset to do predictions for (train, dev or test)')
    parser.add_argument('--pred_set_name', default='eval', type=str, help='Name of dataset to run prediction for; will be ignored if --evaluate is test')
    parser.add_argument('--tasks', dest='task_names', nargs='+', help='task names for prediction')
    parser.add_argument('--devices', default=None, nargs='+', type=int,
                        help='a list of devices that can be used for prediction. By default, all devices will be used.')
    parser.add_argument('--seed', default=123, type=int, help='Random seed.')
    parser.add_argument('--data', default='.data/', type=str, help='where to load data from.')
    parser.add_argument('--embeddings', default='.embeddings/', type=str, help='where to save embeddings.')
    parser.add_argument('--checkpoint_name', default='best.pth',
                        help='Checkpoint file to use (relative to --path, defaults to best.pth)')
    parser.add_argument('--overwrite', action='store_true', help='whether to overwrite previously written predictions')
    parser.add_argument('--silent', action='store_true', help='whether to print predictions to stdout')
    parser.add_argument('--skip_cache', action='store_true',
                        help='whether use exisiting cached splits or generate new ones')
    parser.add_argument('--eval_dir', type=str, required=True, help='use this directory to store eval results')
    parser.add_argument('--cache', default='.cache', type=str, help='where to save cached files')
    parser.add_argument('--subsample', default=20000000, type=int, help='subsample the eval/test datasets')
    parser.add_argument('--pred_languages', type=str, nargs='+', dest='pred_src_languages', default=['en'],
                        help='Specify dataset source languages used during prediction for multilingual tasks'
                             'multiple languages for each task should be concatenated with +')
    parser.add_argument('--pred_tgt_languages', type=str, nargs='+', default=['en'],
                        help='Specify dataset target languages used during prediction for multilingual tasks'
                             'multiple languages for each task should be concatenated with +')
    parser.add_argument('--separate_eval', action='store_true',
                        help='evaluate on each language eval set separately')
    parser.add_argument('--main_metric_only', action='store_true', help='If True, we only calculate the deca score metric for each task.')
    # If not None, these values will override the values saved in the trained model's config file
    parser.add_argument('--val_batch_size', nargs='+', default=None, type=int,
                        help='Batch size for validation corresponding to tasks in val tasks')
    parser.add_argument("--reduce_metrics", type=str, default='max', choices=['max'], help='How to calculate the metric when there are multiple outputs per input.')
    # These are generation hyperparameters. Each one can be a list of values in which case, we generate `num_outputs` outputs for each set of hyperparameters.
    parser.add_argument("--num_outputs", type=int, nargs='+', default=[1], help='number of sequences to output per input')
    parser.add_argument("--temperature", type=float, nargs='+', default=[0.0],
                        help="temperature of 0 implies greedy sampling")
    parser.add_argument("--repetition_penalty", type=float, nargs='+', default=[1.0],
                        help="primarily useful for CTRL model; in that case, use 1.2")
    parser.add_argument("--top_k", type=int, nargs='+', default=[0], help='0 disables top-k filtering')
    parser.add_argument("--top_p", type=float, nargs='+', default=[1.0], help='1.0 disables top-p filtering')
    parser.add_argument("--num_beams", type=int, nargs='+', default=[1], help='1 disables beam seach')
    parser.add_argument("--num_beam_groups", type=int, nargs='+', default=[1], help='1 disables diverse beam seach')
    parser.add_argument("--diversity_penalty", type=float, nargs='+', default=[0.0], help='0 disables diverse beam seach')
    parser.add_argument("--no_repeat_ngram_size", type=int, nargs='+', default=[0], help='ngrams of this size cannot be repeated in the output. 0 disables it.')
    parser.add_argument('--max_output_length', default=150, type=int, help='maximum output length for generation')
    # These are used for confidence calibration
    parser.add_argument('--calibrator_paths', type=str, nargs='+', default=None,
                        help='Can be a list. If provided, each calibrator will be used to output confidence scores for each prediction.')
    parser.add_argument('--save_confidence_features', action='store_true', help='If provided, will be used to output confidence scores for each prediction.')
    parser.add_argument("--confidence_feature_path", type=str, default=None, help='A .pkl file to save confidence features in.')
    parser.add_argument("--mc_dropout_num", type=int, default=0, help='Number of samples to use for Monte Carlo (MC) dropout. 0 disables MC dropout.')
    parser.add_argument("--override_confidence_labels", type=str, default=None,
                        help='If provided, examples with this gold answer are marked as 1, and others as 0. Useful for out-of-domain detection.')
    parser.add_argument('--database_dir', type=str, help='Path to folder containing all files (e.g. alias2qids, pretrained models for bootleg)')
    parser.add_argument("--mixed_precision", action='store_true', help='If True, will use mixed precision for prediction.'
                        'This reduces memory consumption and is especially faster on GPUs like NVIDIA V100 and T4. May slightly change the generated output.')
    # TODO Update other tasks to use this argument too; so we can use predict for pure text generation (i.e. without reporting accuracy metrics)
    parser.add_argument('--translate_no_answer', action='store_true', help='if true the provided dataset would not contain the answer (translated sentence)')
    parser.add_argument('--plot_heatmaps', action='store_true', help='whether to plot cross-attention heatmaps')
def set_default_values(args):
    """
    sets default values that depend on other input arguments
    """
    # Store confidence features alongside the model unless a path was given
    default_path = os.path.join(args.path, 'confidence_features.pkl')
    if args.confidence_feature_path is None:
        args.confidence_feature_path = default_path
def check_args(args):
    """Sanity-check prediction arguments against each other and the saved config."""
    num_tasks = len(args.task_names)
    num_languages = len(args.pred_src_languages)
    if num_tasks != num_languages:
        raise ValueError('You have to define prediction languages for each task'
                         'Use None for single language tasks. Also provide languages in the same order you provided the tasks.')
    # Bootleg relies on features prepped ahead of time for a limited number of examples
    if getattr(args, 'ned_retrieve_method', None) == 'bootleg':
        config_path = os.path.join(args.path, 'config.json')
        with open(config_path) as config_file:
            saved_config = json.load(config_file)
        if args.subsample > saved_config['subsample']:
            raise ValueError('To use bootleg, you have to use a subsample value less than the number of prepped examples.')
def main(args):
    """Entry point: load the saved config, validate arguments, then run
    prediction — one process per device when several devices are available."""
    load_config_json(args)
    check_and_update_generation_args(args)
    check_args(args)
    set_default_values(args)
    set_seed(args)
    args.tasks = list(get_tasks(args.task_names, args).values())
    logger.info(f'Arguments:\n{pformat(vars(args))}')
    logger.info(f'Loading from {args.best_checkpoint}')
    devices = get_devices(args.devices)
    if len(devices) > 1:
        logger.info(f'Independent multi-GPU generation on following devices: {devices}')
        # Shard the data folder, run one process per device, then merge the
        # per-device eval outputs back together
        all_processes = []
        all_data_folders = split_folder_on_disk(args.data, len(devices))
        for device_id in range(len(devices)):
            copy_args = copy.copy(args)
            copy_args.data = all_data_folders[device_id]
            copy_args.eval_dir = get_part_path(args.eval_dir, device_id)
            p = Process(target=run, args=(copy_args, devices[device_id]))
            all_processes.append(p)
            p.start()
        for p in all_processes:
            p.join()
        # Clean up the temporary shards and combine the results
        for folder in all_data_folders:
            shutil.rmtree(folder)
        combine_folders_on_disk(args.eval_dir, len(devices), line_group_size=1, delete=True)
    else:
        logger.info(f'Single device generation on: {devices[0]}')
        run(args, devices[0])
|
resource_monitor.py
|
import logging
import os
import warnings
from time import time
from threading import Thread, Event
import psutil
from pathlib2 import Path
from typing import Text
from ..binding.frameworks.tensorflow_bind import IsTensorboardInit
try:
from .gpu import gpustat
except ImportError:
gpustat = None
class ResourceMonitor(object):
    """Background sampler that periodically reports machine and GPU
    utilization scalars to a task's logger."""
    # Scalar report titles for machine-level and GPU-level series
    _title_machine = ':monitor:machine'
    _title_gpu = ':monitor:gpu'
    def __init__(self, task, sample_frequency_per_sec=2., report_frequency_sec=30.,
                 first_report_sec=None, wait_for_first_iteration_to_start_sec=180.0,
                 max_wait_for_first_iteration_to_start_sec=1800., report_mem_used_per_process=True):
        """Create a monitor that samples and reports resource usage for *task*.

        :param task: task whose logger receives the resource scalars
        :param sample_frequency_per_sec: how many samples to take per second
        :param report_frequency_sec: how often to flush averaged samples
        :param first_report_sec: delay before the first report (defaults to
            report_frequency_sec)
        :param wait_for_first_iteration_to_start_sec: how long to wait for
            iteration reporting before falling back to seconds-from-start
        :param max_wait_for_first_iteration_to_start_sec: how long to keep
            checking whether iteration reporting has appeared
        :param report_mem_used_per_process: report this process's memory
            instead of machine-wide usage
        """
        self._task = task
        self._sample_frequency = sample_frequency_per_sec
        self._report_frequency = report_frequency_sec
        self._first_report_sec = first_report_sec or report_frequency_sec
        self._wait_for_first_iteration = wait_for_first_iteration_to_start_sec
        self._max_check_first_iteration = max_wait_for_first_iteration_to_start_sec
        # Accumulated samples since the last clear (sums, not averages)
        self._num_readouts = 0
        self._readouts = {}
        self._previous_readouts = {}
        self._previous_readouts_ts = time()
        self._thread = None
        self._exit_event = Event()
        self._gpustat_fail = 0
        self._gpustat = gpustat
        self._active_gpus = None
        # When set, memory is reported for this process instead of machine-wide
        self._process_info = psutil.Process() if report_mem_used_per_process else None
        self._last_process_pool = {}
        self._last_process_id_list = []
        if not self._gpustat:
            self._task.get_logger().report_text('TRAINS Monitor: GPU monitoring is not available')
        else:  # if running_remotely():
            # Respect GPU visibility restrictions from the environment
            try:
                active_gpus = os.environ.get('NVIDIA_VISIBLE_DEVICES', '') or \
                              os.environ.get('CUDA_VISIBLE_DEVICES', '')
                if active_gpus:
                    self._active_gpus = [int(g.strip()) for g in active_gpus.split(',')]
            except Exception:
                pass
def start(self):
self._exit_event.clear()
self._thread = Thread(target=self._run)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._exit_event.set()
# self._thread.join()
def _run(self):
try:
self._daemon()
except:
pass
    def _daemon(self):
        """Main sampling/reporting loop; returns when the exit event is set.

        Scalars are reported against the task's iteration counter when one
        is advancing, otherwise against seconds since the monitor started.
        """
        seconds_since_started = 0
        reported = 0
        last_iteration = 0
        # None = undecided, True = use seconds, False = use task iterations
        fallback_to_sec_as_iterations = None
        # get max GPU ID, and make sure our active list is within range
        if self._active_gpus:
            try:
                gpu_stat = self._gpustat.new_query()
                if max(self._active_gpus) > len(gpu_stat.gpus) - 1:
                    self._active_gpus = None
            except Exception:
                pass
        # last_iteration_interval = None
        # last_iteration_ts = 0
        # repeated_iterations = 0
        while True:
            last_report = time()
            current_report_frequency = self._report_frequency if reported != 0 else self._first_report_sec
            while (time() - last_report) < current_report_frequency:
                # wait for self._sample_frequency seconds, if event set quit
                if self._exit_event.wait(1.0 / self._sample_frequency):
                    return
                # noinspection PyBroadException
                try:
                    self._update_readouts()
                except Exception:
                    pass
            seconds_since_started += int(round(time() - last_report))
            # check if we do not report any metric (so it means the last iteration will not be changed)
            if fallback_to_sec_as_iterations is None:
                if IsTensorboardInit.tensorboard_used():
                    fallback_to_sec_as_iterations = False
                elif seconds_since_started >= self._wait_for_first_iteration:
                    self._task.get_logger().report_text('TRAINS Monitor: Could not detect iteration reporting, '
                                                        'falling back to iterations as seconds-from-start')
                    fallback_to_sec_as_iterations = True
            elif fallback_to_sec_as_iterations is True and seconds_since_started <= self._max_check_first_iteration:
                if self._check_logger_reported():
                    fallback_to_sec_as_iterations = False
                    self._task.get_logger().report_text('TRAINS Monitor: Reporting detected, '
                                                        'reverting back to iteration based reporting')
            clear_readouts = True
            # if we do not have last_iteration, we just use seconds as iteration
            if fallback_to_sec_as_iterations:
                iteration = seconds_since_started
            else:
                iteration = self._task.get_last_iteration()
                if iteration < last_iteration:
                    # we started a new session?!
                    # wait out
                    clear_readouts = False
                    iteration = last_iteration
                elif iteration == last_iteration:
                    # repeated_iterations += 1
                    # if last_iteration_interval:
                    #     # to be on the safe side, we don't want to pass the actual next iteration
                    #     iteration += int(0.95*last_iteration_interval[0] * (seconds_since_started - last_iteration_ts)
                    #                      / last_iteration_interval[1])
                    # else:
                    #     iteration += 1
                    clear_readouts = False
                    iteration = last_iteration
                else:
                    # last_iteration_interval = (iteration - last_iteration, seconds_since_started - last_iteration_ts)
                    # repeated_iterations = 0
                    # last_iteration_ts = seconds_since_started
                    last_iteration = iteration
                    fallback_to_sec_as_iterations = False
                    clear_readouts = True
            # start reporting only when we figured out, if this is seconds based, or iterations based
            average_readouts = self._get_average_readouts()
            if fallback_to_sec_as_iterations is not None:
                for k, v in average_readouts.items():
                    # noinspection PyBroadException
                    try:
                        title = self._title_gpu if k.startswith('gpu_') else self._title_machine
                        # 3 points after the dot
                        value = round(v*1000) / 1000.
                        self._task.get_logger().report_scalar(title=title, series=k, iteration=iteration, value=value)
                    except Exception:
                        pass
                # clear readouts if this is update is not averaged
                if clear_readouts:
                    self._clear_readouts()
                # count reported iterations
                reported += 1
def _update_readouts(self):
readouts = self._machine_stats()
elapsed = time() - self._previous_readouts_ts
self._previous_readouts_ts = time()
for k, v in readouts.items():
# cumulative measurements
if k.endswith('_mbs'):
v = (v - self._previous_readouts.get(k, v)) / elapsed
self._readouts[k] = self._readouts.get(k, 0.0) + v
self._num_readouts += 1
self._previous_readouts = readouts
    def _get_num_readouts(self):
        # Number of samples accumulated since the last _clear_readouts() call.
        return self._num_readouts
def _get_average_readouts(self):
average_readouts = dict((k, v/float(self._num_readouts)) for k, v in self._readouts.items())
return average_readouts
    def _clear_readouts(self):
        # Reset the accumulated metric sums and the sample counter.
        self._readouts = {}
        self._num_readouts = 0
    def _machine_stats(self):
        """
        Sample current machine utilization via psutil (plus GPU via gpustat).

        :return: machine stats dictionary, all values expressed in megabytes
        """
        # Average CPU utilization across all cores (percent).
        cpu_usage = [float(v) for v in psutil.cpu_percent(percpu=True)]
        stats = {
            "cpu_usage": sum(cpu_usage) / float(len(cpu_usage)),
        }
        bytes_per_megabyte = 1024 ** 2
        def bytes_to_megabytes(x):
            return x / bytes_per_megabyte
        virtual_memory = psutil.virtual_memory()
        # stats["memory_used_gb"] = bytes_to_megabytes(virtual_memory.used) / 1024
        # Prefer our own process tree's memory when the monitored process is
        # known; otherwise fall back to the machine-wide used figure.
        stats["memory_used_gb"] = bytes_to_megabytes(
            self._get_process_used_memory() if self._process_info else virtual_memory.used) / 1024
        stats["memory_free_gb"] = bytes_to_megabytes(virtual_memory.available) / 1024
        disk_use_percentage = psutil.disk_usage(Text(Path.home())).percent
        stats["disk_free_percent"] = 100.0-disk_use_percentage
        with warnings.catch_warnings():
            if logging.root.level > logging.DEBUG:  # If the logging level is bigger than debug, ignore
                # psutil.sensors_temperatures warnings
                warnings.simplefilter("ignore", category=RuntimeWarning)
            # sensors_temperatures is not available on all platforms.
            sensor_stat = (psutil.sensors_temperatures() if hasattr(psutil, "sensors_temperatures") else {})
            if "coretemp" in sensor_stat and len(sensor_stat["coretemp"]):
                stats["cpu_temperature"] = max([float(t.current) for t in sensor_stat["coretemp"]])
        # update cached measurements
        # Network/disk counters are cumulative since boot; _update_readouts
        # turns these '_mbs' keys into rates by diffing consecutive samples.
        net_stats = psutil.net_io_counters()
        stats["network_tx_mbs"] = bytes_to_megabytes(net_stats.bytes_sent)
        stats["network_rx_mbs"] = bytes_to_megabytes(net_stats.bytes_recv)
        io_stats = psutil.disk_io_counters()
        stats["io_read_mbs"] = bytes_to_megabytes(io_stats.read_bytes)
        stats["io_write_mbs"] = bytes_to_megabytes(io_stats.write_bytes)
        # check if we can access the gpu statistics
        if self._gpustat:
            try:
                stats.update(self._get_gpu_stats())
            except Exception:
                # something happened and we can't use gpu stats,
                # after 3 consecutive failures give up and disable GPU monitoring
                self._gpustat_fail += 1
                if self._gpustat_fail >= 3:
                    self._task.get_logger().report_text('TRAINS Monitor: GPU monitoring failed getting GPU reading, '
                                                       'switching off GPU monitoring')
                    self._gpustat = None
        return stats
def _check_logger_reported(self):
titles = self.get_logger_reported_titles(self._task)
return len(titles) > 0
@classmethod
def get_logger_reported_titles(cls, task):
titles = list(task.get_logger()._get_used_title_series().keys())
try:
titles.remove(cls._title_machine)
except ValueError:
pass
try:
titles.remove(cls._title_gpu)
except ValueError:
pass
return titles
    def _get_process_used_memory(self):
        # Total RSS (bytes) of the monitored process and its descendants,
        # cached for one reporting period because the query is slow.
        def mem_usage_children(a_mem_size, pr, parent_mem=None):
            # Walk the process tree, summing positive RSS deltas; also records
            # every visited pid for later GPU per-process attribution.
            self._last_process_id_list.append(pr.pid)
            # add out memory usage
            our_mem = pr.memory_info()
            mem_diff = our_mem.rss - parent_mem.rss if parent_mem else our_mem.rss
            a_mem_size += mem_diff if mem_diff > 0 else 0
            # now we are the parent
            for child in pr.children():
                # get the current memory
                # NOTE(review): this re-queries the *parent* (pr), not the child;
                # the child itself is measured in the recursive call via
                # parent_mem -- confirm this delta accounting is intentional.
                m = pr.memory_info()
                mem_diff = m.rss - our_mem.rss
                a_mem_size += mem_diff if mem_diff > 0 else 0
                a_mem_size = mem_usage_children(a_mem_size, child, parent_mem=m)
            return a_mem_size
        # only run the memory usage query once per reporting period
        # because this memory query is relatively slow, and changes very little.
        if self._last_process_pool.get('cpu') and \
                (time() - self._last_process_pool['cpu'][0]) < self._report_frequency:
            return self._last_process_pool['cpu'][1]
        # if we have no parent process, return 0 (it's an error)
        if not self._process_info:
            return 0
        self._last_process_id_list = []
        mem_size = mem_usage_children(0, self._process_info)
        self._last_process_pool['cpu'] = time(), mem_size
        return mem_size
    def _get_gpu_stats(self):
        # Build the per-GPU stats dict; returns {} when GPU monitoring is off.
        if not self._gpustat:
            return {}
        # per process memory query id slow, so we only call it once per reporting period,
        # On the rest of the samples we return the previous memory measurement
        # update mem used by our process and sub processes
        if self._process_info and (not self._last_process_pool.get('gpu') or
                                   (time() - self._last_process_pool['gpu'][0]) >= self._report_frequency):
            gpu_stat = self._gpustat.new_query(per_process_stats=True)
            gpu_mem = {}
            for i, g in enumerate(gpu_stat.gpus):
                gpu_mem[i] = 0
                for p in g.processes:
                    # only count processes from our own tree (pids collected
                    # by _get_process_used_memory)
                    if p['pid'] in self._last_process_id_list:
                        gpu_mem[i] += p.get('gpu_memory_usage', 0)
            self._last_process_pool['gpu'] = time(), gpu_mem
        else:
            # if we do no need to update the memory usage, run global query
            # if we have no parent process (backward compatibility), return global stats
            gpu_stat = self._gpustat.new_query()
            gpu_mem = self._last_process_pool['gpu'][1] if self._last_process_pool.get('gpu') else None
        # generate the statistics dict for actual report
        stats = {}
        for i, g in enumerate(gpu_stat.gpus):
            # only monitor the active gpu's, if none were selected, monitor everything
            if self._active_gpus and i not in self._active_gpus:
                continue
            stats["gpu_%d_temperature" % i] = float(g["temperature.gpu"])
            stats["gpu_%d_utilization" % i] = float(g["utilization.gpu"])
            stats["gpu_%d_mem_usage" % i] = 100. * float(g["memory.used"]) / float(g["memory.total"])
            # already in MBs
            stats["gpu_%d_mem_free_gb" % i] = float(g["memory.total"] - g["memory.used"]) / 1024
            # use previously sampled process gpu memory, or global if it does not exist
            stats["gpu_%d_mem_used_gb" % i] = float(gpu_mem[i] if gpu_mem else g["memory.used"]) / 1024
        return stats
|
duo.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2018 Nathan V
# https://github.com/nathan-v/aws_okta_keyman
"""All the Duo things."""
import sys
import time
from multiprocessing import Process
import requests
if sys.version_info[0] < 3: # pragma: no cover
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
else: # pragma: no cover
from http.server import HTTPServer, BaseHTTPRequestHandler
class PasscodeRequired(BaseException):
    """Raised when the user must supply a 2FA passcode to continue."""

    def __init__(self, factor, state_token):
        super(PasscodeRequired, self).__init__()
        # Remember which factor was requested and the Okta state token so the
        # caller can retry the login with a passcode.
        self.factor = factor
        self.state_token = state_token
class FactorRequired(BaseException):
    """Raised when the user must choose a 2FA factor to continue."""

    def __init__(self, factor, state_token):
        super(FactorRequired, self).__init__()
        # Stash the factor and Okta state token for the retry path.
        self.factor = factor
        self.state_token = state_token
class QuietHandler(BaseHTTPRequestHandler, object):
    """We have to do this HTTP sever silliness because the Duo widget has to be
    presented over HTTP or HTTPS or the callback won't work.
    """
    def __init__(self, html, *args):
        # html: the pre-rendered Duo iframe page served for every GET.
        self.html = html
        super(QuietHandler, self).__init__(*args)
    def log_message(self, _format, *args):
        """Mute the server log."""
    def do_GET(self):
        """Handle the GET and displays the Duo iframe."""
        self.send_response(200)
        self.send_header('Content-type', 'text/html')
        self.end_headers()
        # Serve the same page regardless of the requested path.
        self.wfile.write(self.html.encode('utf-8'))
class Duo:
    """Does all the background work needed to serve the Duo iframe, and can
    also drive Duo's frame API directly (push / phone call / passcode).
    """

    def __init__(self, details, state_token, socket, factor=None):
        """
        Args:
            details: Dict of Duo factor details from Okta ('host', 'signature'
                and the '_links' script/complete URLs).
            state_token: String Okta state token for this auth transaction.
            socket: (host, port) tuple the local HTTP server binds to.
            factor: Optional string Duo factor ('push', 'call' or 'passcode').
        """
        self.socket = socket
        self.details = details
        self.token = state_token
        self.factor = factor
        self.html = None
        self.session = requests.Session()

    def trigger_web_duo(self):
        """Start the webserver with the data needed to display the Duo
        iframe for the user to see.
        """
        host = self.details['host']
        sig = self.details['signature']
        script = self.details['_links']['script']['href']
        callback = self.details['_links']['complete']['href']

        self.html = '''<p style="text-align:center">You may close this
        after the next page loads successfully</p>
        <iframe id="duo_iframe" style="margin: 0 auto;display:block;"
        width="620" height="330" frameborder="0"></iframe>
        <form method="POST" id="duo_form" action="{cb}">
        <input type="hidden" name="stateToken" value="{tkn}" /></form>
        <script src="{scr}"></script><script>Duo.init(
        {{'host': '{hst}','sig_request': '{sig}','post_action': '{cb}'}}
        );</script>'''.format(tkn=self.token, scr=script,
                              hst=host, sig=sig,
                              cb=callback)

        # Serve the page for 10 seconds -- long enough for the browser to load
        # the widget and for Duo to POST back to the 'complete' callback.
        proc = Process(target=self.duo_webserver)
        proc.start()
        time.sleep(10)
        proc.terminate()

    def duo_webserver(self):
        """HTTP webserver."""
        httpd = HTTPServer(self.socket, self.handler_with_html)
        httpd.serve_forever()

    def handler_with_html(self, *args):
        """Call the handler and include the HTML.

        HTTPServer instantiates its handler class per request; this factory
        binds the rendered page into each QuietHandler instance.
        """
        QuietHandler(self.html, *args)

    def trigger_duo(self, passcode=""):
        """Try to get a Duo Push without needing an iframe

        Args:
            passcode: String passcode to pass along to the OTP factor

        Returns:
            String authorization cookie from Duo for the Okta callback.

        Raises:
            Exception: when no passcode was supplied for the passcode factor,
                or the configured factor is unsupported.
        """
        sid = self.do_auth(None, None)
        if self.factor == "call":
            transaction_id = self.get_txid(sid, "Phone+Call")
        elif self.factor == "passcode":
            if passcode:
                transaction_id = self.get_txid(sid, "Passcode", passcode)
            else:
                raise Exception("Cannot use passcode without one provided")
        elif self.factor == "push":
            transaction_id = self.get_txid(sid, "Duo+Push")
        else:
            raise Exception("Requested Duo factor not supported")
        auth = self.get_status(transaction_id, sid)
        return auth

    def do_auth(self, sid, certs_url):
        """Handle initial auth with Duo

        Args:
            sid: String Duo session ID if we have it
            certs_url: String certificates URL if we have it

        Returns:
            String Duo session ID
        """
        txid = self.details['signature'].split(":")[0]
        fake_path = 'http://0.0.0.0:3000/duo&v=2.1'
        url = "https://{}/frame/web/v1/auth?tx={}&parent={}".format(
            self.details['host'], txid, fake_path)
        if sid and certs_url:
            # Bug fix: the query-parameter keys must be the literal names;
            # the old code used the *values* as keys ({sid: sid, ...}).
            self.session.params = {'sid': sid, 'certs_url': certs_url}
        self.session.headers = {
            'Origin': "https://{}".format(self.details['host']),
            'Content-Type': "application/x-www-form-urlencoded"
        }
        ret = self.session.post(url, allow_redirects=False)
        if ret.status_code == 302:
            # Duo redirects us to the prompt; the sid rides in the Location.
            try:
                location = ret.headers['Location']
                sid = location.split("=")[1]
            except KeyError:
                raise Exception("Location missing from auth response header.")
        elif ret.status_code == 200 and sid is None:
            # First pass: pick up sid/certs_url from the JSON body, then
            # re-auth once with them to obtain the redirect.
            sid = ret.json()['response']['sid']
            certs_url = ret.json()['response']['certs_url']
            sid = self.do_auth(sid, certs_url)
        else:
            raise Exception("Duo request failed.")
        return sid

    def get_txid(self, sid, factor, passcode=None):
        """Get Duo transaction ID

        Args:
            sid: String Duo session ID
            factor: String to tell Duo which factor to use
            passcode: OTP passcode string

        Returns:
            String Duo transaction ID
        """
        url = "https://{}/frame/prompt".format(self.details['host'])
        self.session.headers = {
            'Origin': "https://{}".format(self.details['host']),
            'Content-Type': "application/x-www-form-urlencoded",
            'X-Requested-With': 'XMLHttpRequest'
        }
        params = (
            "sid={}&device=phone1&"
            "factor={}&out_of_date=False").format(sid, factor)
        if passcode:
            params = "{}&passcode={}".format(params, passcode)
        url = "{}?{}".format(url, params)
        ret = self.session.post(url)
        return ret.json()['response']['txid']

    def get_status(self, transaction_id, sid):
        """Get Duo auth status

        Args:
            transaction_id: String Duo transaction ID
            sid: String Duo session ID

        Returns:
            String authorization from Duo to use in the Okta callback

        Raises:
            Exception: on a non-200 poll response, or when no auth was
                obtained within ~30 seconds.
        """
        url = "https://{}/frame/status".format(self.details['host'])
        self.session.headers = {
            'Origin': "https://{}".format(self.details['host']),
            'Content-Type': "application/x-www-form-urlencoded",
            'X-Requested-With': 'XMLHttpRequest'
        }
        params = "sid={}&txid={}".format(sid, transaction_id)
        url = "{}?{}".format(url, params)
        # Poll once per second for up to 30 tries while the user approves.
        tries = 0
        auth = None
        while auth is None and tries < 30:
            tries += 1
            ret = self.session.post(url)
            if ret.status_code != 200:
                raise Exception("Push request failed with status {}".format(
                    ret.status_code))
            result = ret.json()
            if result['stat'] == "OK":
                if 'cookie' in result['response']:
                    auth = result['response']['cookie']
                elif 'result_url' in result['response']:
                    auth = self.do_redirect(
                        result['response']['result_url'], sid)
            time.sleep(1)
        if auth is None:
            raise Exception('Did not get callback information from Duo')
        return auth

    def do_redirect(self, url, sid):
        """Deal with redirected response from Duo

        Args:
            url: String URL we need to follow to try and get the auth
            sid: String duo session ID

        Returns:
            String Duo authorization to use in the Okta callback, or None
            when the response carried no cookie (caller keeps polling).
        """
        url = "https://{}{}?sid={}".format(self.details['host'], url, sid)
        self.session.headers = {
            'Origin': "https://{}".format(self.details['host']),
            'Content-Type': "application/x-www-form-urlencoded",
            'X-Requested-With': 'XMLHttpRequest'
        }
        ret = self.session.post(url)
        if ret.status_code != 200:
            raise Exception("Bad status from Duo after redirect {}".format(
                ret.status_code))
        result = ret.json()
        if 'cookie' in result['response']:
            return result['response']['cookie']
        # Explicitly signal "no cookie yet"; get_status() will retry.
        return None
|
autologin.py
|
#!/usr/local/bin/python3
'''
Script to automatically log in to Telstra Air network. It opens Safari web
browser, fills the login form, clicks 'log in' and closes the browser when
all is done.
Arguments
- username
- password
'''
from argparse import ArgumentParser
from subprocess import getoutput
import re
from time import sleep
from threading import Thread
from urllib.request import urlopen
from urllib.error import URLError
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
'''
We need to kill
'/System/Library/CoreServices/Captive Network Assistant.app/Contents/MacOS/Captive Network Assistant'
(this pop-up window that appears on top of other windows every time when
you're connected to the network that you need to log in)as it blocks all
the network traffic (and is annoying).
Arguments:
- how_often - period in seconds between every kill execution
'''
def captive_portal_killer(how_often=1):
    """Repeatedly kill macOS's Captive Network Assistant pop-up.

    The assistant window blocks network traffic (and is annoying), so it gets
    pkill'ed every *how_often* seconds, forever. Run in a daemon thread.
    """
    while True:
        result = getoutput('pkill "Captive Network Assistant"')
        print("Killing captive portal.", result)
        sleep(how_often)
'''
Returns True if network connection is established.
'''
def connected_to_network():
    """Return True if real Internet access exists (not just the captive portal).

    Fetches a known page and checks its <title>: when the Telstra captive
    portal intercepts the request, the returned page has a different (or no)
    title, which means there is no real Internet connection yet.
    """
    try:
        page = urlopen(
            'https://www.freenom.world/en/index.html?lang=en').read().decode('utf-8')
        match = re.search('<title>(.*)</title>', page)
        # Bug fix: guard against a missing <title> -- re.search returns None
        # there, and the old code crashed with AttributeError instead of
        # reporting "not connected".
        if match is None or match.group(1) != 'Freenom World':
            return False
    # UrlOpen throws this error when there's no Internet connection at all.
    except URLError:
        return False
    return True
# If there is connection to outside world - there's no point to execute script.
if connected_to_network():
    print("Network connection established. No need to execute script")
    exit(0)
# Required positional arguments: Telstra Air credentials.
argument_parser = ArgumentParser()
argument_parser.add_argument('username',
                             action='store',
                             help='Telstra air user name.')
argument_parser.add_argument('password',
                             action='store',
                             help='Telstra air password.')
args = argument_parser.parse_args()
# Get Wi-fi name and run it only if it is Telstra Air.
if re.search(
        'Current Wi-Fi Network: (.*)',
        getoutput('/usr/sbin/networksetup -getairportnetwork en0')).group(1) == 'Telstra Air':
    # Kill the Captive Portal. It blocks all the network traffic.
    captive_killer_thread = Thread(target=captive_portal_killer)
    captive_killer_thread.daemon = True
    captive_killer_thread.start()
    # Open browser.
    # NOTE(review): the module docstring says Safari but this launches Chrome.
    driver = webdriver.Chrome()
    driver.get('https://www.telstra.com.au/airconnect#fon')
    # https://telstra.portal.fon.com/jcp/telstra?res=welcome&nasid=1C-C6-3C-4E-CB-13&uamip=192.168.182.1&uamport=3990&mac=AC-BC-32-9A-01-7D&challenge=e84088d73ad3937edd56caba310f12c2&ip=192.168.182.119&userurl=http%3A%2F%2Fgoogle.com%2F&cap=&lang=en_GB&LANGUAGE=en_GB
    # https://www.telstra.com.au/airconnect#/main
    # Wait till page is fully loaded ('Log in' button must be visible).
    WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.XPATH, '//*[@id="loginForm"]/div/p[2]/button')))
    # Fill the form and log in.
    # NOTE(review): find_element_by_* was removed in Selenium 4; this script
    # presumably targets Selenium 3 -- confirm the pinned selenium version.
    driver.find_element_by_id('username').send_keys(args.username)
    driver.find_element_by_id('password').send_keys(args.password)
    driver.find_element_by_xpath('//*[@id="loginForm"]/div/p[2]/button').click()
    # Wait for connect button to be visible
    # WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.XPATH, '//*[@id="connectBtn"')))
    sleep(5)
    driver.find_element_by_xpath('//*[@id="connectBtn"]').click()
    # Wait till next page is loaded ('Return to home' button must be visible).
    WebDriverWait(driver, 60).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[2]/div[11]/div/a/form/input')))
    # Quit the browser.
    driver.quit()
|
test_pool.py
|
import heapq
import psycopg2 # Trigger import error if not installed.
import threading
import time
from peewee import *
from peewee import savepoint
from peewee import transaction
from playhouse.pool import *
from playhouse.tests.base import database_initializer
from playhouse.tests.base import PeeweeTestCase
class FakeTransaction(transaction):
    """Transaction double that records open/commit/exit events on the db.

    History entries look like 'O1', 'C1', 'X1': event letter (Open/Commit/
    eXit) followed by the integer 'connection' id from FakeDatabase.
    """
    def _add_history(self, message):
        self.db.transaction_history.append(
            '%s%s' % (message, self._conn))
    def __enter__(self):
        self._conn = self.db.get_conn()
        self._add_history('O')
    def commit(self, begin=True):
        self._add_history('C')
    def __exit__(self, exc_type, exc_val, exc_tb):
        self._add_history('X')
class FakeDatabase(SqliteDatabase):
    """SqliteDatabase double whose 'connections' are just increasing integers.

    Lets the pool tests assert exactly which connection was handed out and
    how many were opened/closed, without touching a real database.
    """
    def __init__(self, *args, **kwargs):
        self.counter = 0          # number of connections ever "opened"
        self.closed_counter = 0   # number of connections ever "closed"
        self.transaction_history = []
        super(FakeDatabase, self).__init__(*args, **kwargs)
    def _connect(self, *args, **kwargs):
        """
        Return increasing integers instead of actual database connections.
        """
        self.counter += 1
        return self.counter
    def _close(self, conn):
        self.closed_counter += 1
    def transaction(self):
        # Use the history-recording transaction double.
        return FakeTransaction(self)
class TestDB(PooledDatabase, FakeDatabase):
    """Pooled variant of FakeDatabase used throughout the tests below."""
    def __init__(self, *args, **kwargs):
        super(TestDB, self).__init__(*args, **kwargs)
        # Integers are their own pool keys.
        self.conn_key = lambda conn: conn
# Two handles to the same Postgres database: one pooled (under test) and one
# plain connection used only for table setup/teardown.
pooled_db = database_initializer.get_database(
    'postgres',
    db_class=PooledPostgresqlDatabase)
normal_db = database_initializer.get_database('postgres')
class Number(Model):
    """Minimal model used to run real queries through the pooled database."""
    value = IntegerField()
    class Meta:
        database = pooled_db
class TestPooledDatabase(PeeweeTestCase):
    """Pool behavior tests driven entirely through TestDB's fake int conns."""
    def setUp(self):
        super(TestPooledDatabase, self).setUp()
        self.db = TestDB('testing')
    def test_connection_pool(self):
        # Ensure that a connection is created and accessible.
        self.assertEqual(self.db.get_conn(), 1)
        self.assertEqual(self.db.get_conn(), 1)
        # Ensure that closing and reopening will return the same connection.
        self.db.close()
        self.db.connect()
        self.assertEqual(self.db.get_conn(), 1)
    def test_concurrent_connections(self):
        db = TestDB('testing')
        signal = threading.Event()
        def open_conn():
            db.connect()
            signal.wait()
            db.close()
        # Simulate 5 concurrent connections.
        threads = [threading.Thread(target=open_conn) for i in range(5)]
        for thread in threads:
            thread.start()
        # Wait for all connections to be opened.
        while db.counter < 5:
            time.sleep(.01)
        # Signal threads to close connections and join threads.
        signal.set()
        [t.join() for t in threads]
        # Each thread got its own connection; all were returned to the pool.
        self.assertEqual(db.counter, 5)
        self.assertEqual(db._in_use, {})
    def test_max_conns(self):
        for i in range(self.db.max_connections):
            self.db.connect()
            self.assertEqual(self.db.get_conn(), i + 1)
        # One more than max_connections must be refused.
        self.assertRaises(ValueError, self.db.connect)
    def test_stale_timeout(self):
        # Create a test database with a very short stale timeout.
        db = TestDB('testing', stale_timeout=.01)
        self.assertEqual(db.get_conn(), 1)
        self.assertTrue(1 in db._in_use)
        # Sleep long enough for the connection to be considered stale.
        time.sleep(.01)
        # When we close, since the conn is stale it won't be returned to
        # the pool.
        db.close()
        self.assertEqual(db._in_use, {})
        self.assertEqual(db._connections, [])
        # A new connection will be returned.
        self.assertEqual(db.get_conn(), 2)
    def test_manual_close(self):
        conn = self.db.get_conn()
        self.assertEqual(conn, 1)
        # manual_close() discards the conn, so the next get_conn opens a new one.
        self.db.manual_close()
        conn = self.db.get_conn()
        self.assertEqual(conn, 2)
        self.db.close()
        conn = self.db.get_conn()
        self.assertEqual(conn, 2)
    def test_stale_timeout_cascade(self):
        now = time.time()
        db = TestDB('testing', stale_timeout=10)
        # Seed the pool directly: conns 1 and 2 are already stale.
        conns = [
            (now - 20, 1),
            (now - 15, 2),
            (now - 5, 3),
            (now, 4),
        ]
        for ts_conn in conns:
            heapq.heappush(db._connections, ts_conn)
        # Stale conns are skipped; the first fresh conn (3) is handed out.
        self.assertEqual(db.get_conn(), 3)
        self.assertEqual(db._in_use, {3: now - 5})
        self.assertEqual(db._connections, [(now, 4)])
    def test_connect_cascade(self):
        now = time.time()
        db = TestDB('testing', stale_timeout=10)
        conns = [
            (now - 15, 1),  # Skipped due to being stale.
            (now - 5, 2),  # In the 'closed' set.
            (now - 3, 3),
            (now, 4),  # In the 'closed' set.
        ]
        db._closed.add(2)
        db._closed.add(4)
        db.counter = 4  # The next connection we create will have id=5.
        for ts_conn in conns:
            heapq.heappush(db._connections, ts_conn)
        # Conn 3 is not stale or closed, so we will get it.
        self.assertEqual(db.get_conn(), 3)
        self.assertEqual(db._in_use, {3: now - 3})
        self.assertEqual(db._connections, [(now, 4)])
        # Since conn 4 is closed, we will open a new conn.
        db.connect()
        self.assertEqual(db.get_conn(), 5)
        self.assertEqual(sorted(db._in_use.keys()), [3, 5])
        self.assertEqual(db._connections, [])
    def test_execution_context(self):
        self.assertEqual(self.db.get_conn(), 1)
        # An execution context gets its own connection and wraps it in a
        # transaction (Open ... Commit/eXit recorded by FakeTransaction).
        with self.db.execution_context():
            self.assertEqual(self.db.get_conn(), 2)
            self.assertEqual(self.db.transaction_history, ['O2'])
        self.assertEqual(self.db.get_conn(), 1)
        self.assertEqual(self.db.transaction_history, ['O2', 'C2', 'X2'])
        # with_transaction=False still uses a separate conn, but no txn events.
        with self.db.execution_context(with_transaction=False):
            self.assertEqual(self.db.get_conn(), 2)
            self.assertEqual(self.db.transaction_history, ['O2', 'C2', 'X2'])
        self.assertEqual(self.db.get_conn(), 1)
        self.assertEqual(self.db.transaction_history, ['O2', 'C2', 'X2'])
        self.assertEqual(len(self.db._connections), 1)
        self.assertEqual(len(self.db._in_use), 1)
    def test_execution_context_nested(self):
        # Helpers asserting pool state: n conns checked out / n conns free.
        def assertInUse(n):
            self.assertEqual(len(self.db._in_use), n)
        def assertFree(n):
            self.assertEqual(len(self.db._connections), n)
        def assertHistory(history):
            self.assertEqual(self.db.transaction_history, history)
        @self.db.execution_context()
        def subroutine():
            pass
        self.assertEqual(self.db.get_conn(), 1)
        assertFree(0)
        assertInUse(1)
        with self.db.execution_context(False):
            self.assertEqual(self.db.get_conn(), 2)
            assertFree(0)
            assertInUse(2)
            assertHistory([])
            with self.db.execution_context():
                self.assertEqual(self.db.get_conn(), 3)
                assertFree(0)
                assertInUse(3)
                assertHistory(['O3'])
                subroutine()
                assertFree(1)
                assertInUse(3)
                assertHistory(['O3', 'O4', 'C4', 'X4'])
            assertFree(2)
            assertInUse(2)
            assertHistory(['O3', 'O4', 'C4', 'X4', 'C3', 'X3'])
            # Since conn 3 has been returned to the pool, the subroutine
            # will use conn3 this time.
            subroutine()
            assertFree(2)
            assertInUse(2)
            assertHistory(
                ['O3', 'O4', 'C4', 'X4', 'C3', 'X3', 'O3', 'C3', 'X3'])
        self.assertEqual(self.db.get_conn(), 1)
        assertFree(3)
        assertInUse(1)
        assertHistory(['O3', 'O4', 'C4', 'X4', 'C3', 'X3', 'O3', 'C3', 'X3'])
    def test_execution_context_threads(self):
        signal = threading.Event()
        def create_context():
            with self.db.execution_context():
                signal.wait()
        # Simulate 5 concurrent connections.
        threads = [threading.Thread(target=create_context) for i in range(5)]
        for thread in threads:
            thread.start()
        # Wait for all connections to be opened.
        while len(self.db.transaction_history) < 5:
            time.sleep(.01)
        # Signal threads to close connections and join threads.
        signal.set()
        [t.join() for t in threads]
        self.assertEqual(self.db.counter, 5)
        self.assertEqual(len(self.db._connections), 5)
        self.assertEqual(len(self.db._in_use), 0)
        # Opens happen in order; commit/exit order depends on scheduling,
        # so only the sorted tail is asserted.
        self.assertEqual(
            self.db.transaction_history[:5],
            ['O1', 'O2', 'O3', 'O4', 'O5'])
        rest = sorted(self.db.transaction_history[5:])
        self.assertEqual(
            rest,
            ['C1', 'C2', 'C3', 'C4', 'C5', 'X1', 'X2', 'X3', 'X4', 'X5'])
    def test_execution_context_mixed_thread(self):
        # Events used to step the worker thread deterministically.
        sig_sub = threading.Event()
        sig_ctx = threading.Event()
        sig_in_sub = threading.Event()
        sig_in_ctx = threading.Event()
        self.assertEqual(self.db.get_conn(), 1)
        @self.db.execution_context()
        def subroutine():
            sig_in_sub.set()
            sig_sub.wait()
        def target():
            with self.db.execution_context():
                subroutine()
                sig_in_ctx.set()
                sig_ctx.wait()
        t = threading.Thread(target=target)
        t.start()
        sig_in_sub.wait()
        # Main conn (1) + thread's context conn (2) + subroutine conn (3).
        self.assertEqual(len(self.db._in_use), 3)
        self.assertEqual(len(self.db._connections), 0)
        self.assertEqual(self.db.transaction_history, ['O2', 'O3'])
        sig_sub.set()
        sig_in_ctx.wait()
        self.assertEqual(len(self.db._in_use), 2)
        self.assertEqual(len(self.db._connections), 1)
        self.assertEqual(
            self.db.transaction_history,
            ['O2', 'O3', 'C3', 'X3'])
        sig_ctx.set()
        t.join()
        self.assertEqual(len(self.db._in_use), 1)
        self.assertEqual(len(self.db._connections), 2)
        self.assertEqual(
            self.db.transaction_history,
            ['O2', 'O3', 'C3', 'X3', 'C2', 'X2'])
class TestConnectionPool(PeeweeTestCase):
    """Integration tests running real queries through the pooled Postgres db."""
    def setUp(self):
        super(TestConnectionPool, self).setUp()
        # Use an un-pooled database to drop/create the table.
        if Number._meta.db_table in normal_db.get_tables():
            normal_db.drop_table(Number)
        normal_db.create_table(Number)
    def test_reuse_connection(self):
        for i in range(5):
            Number.create(value=i)
        conn_id = id(pooled_db.get_conn())
        self.db.close() if False else pooled_db.close()
        for i in range(5, 10):
            Number.create(value=i)
        # The pooled database must hand back the very same connection object.
        self.assertEqual(id(pooled_db.get_conn()), conn_id)
        self.assertEqual(
            [x.value for x in Number.select().order_by(Number.id)],
            list(range(10)))
    def test_execution_context(self):
        with pooled_db.execution_context():
            Number.create(value=1)
            # Inside a wrapping transaction, atomic() yields savepoints.
            with pooled_db.atomic() as sp:
                self.assertTrue(isinstance(sp, savepoint))
                Number.create(value=2)
                sp.rollback()
            with pooled_db.atomic() as sp:
                self.assertTrue(isinstance(sp, savepoint))
                Number.create(value=3)
        with pooled_db.execution_context(with_transaction=False):
            # Without a wrapping transaction, atomic() yields transactions.
            with pooled_db.atomic() as txn:
                self.assertTrue(isinstance(txn, transaction))
                Number.create(value=4)
            # Executed in autocommit mode.
            Number.create(value=5)
        with pooled_db.execution_context():
            # value=2 was rolled back at its savepoint, all others persisted.
            numbers = [
                number.value
                for number in Number.select().order_by(Number.value)]
        self.assertEqual(numbers, [1, 3, 4, 5])
|
tools.py
|
import sys
import cmd
import csv
import threading
import code
import pprint
try:
from Queue import Queue, Empty
except ImportError:
from queue import Queue, Empty
import click
import pkg_resources
from glypy.structure.glycan_composition import HashableGlycanComposition
from glycopeptidepy.io import fasta, uniprot
from glycopeptidepy.structure.residue import UnknownAminoAcidException
from .base import cli
from .validators import RelativeMassErrorParam, get_by_name_or_id
from glycan_profiling.serialize import (
DatabaseBoundOperation, GlycanHypothesis, GlycopeptideHypothesis,
SampleRun, Analysis, Protein, Glycopeptide, func, FileBlob)
from glycan_profiling.database import (
GlycopeptideDiskBackedStructureDatabase,
GlycanCompositionDiskBackedStructureDatabase)
from glycan_profiling.database.builder.glycopeptide.proteomics import mzid_proteome
from glycan_profiling.database.builder.glycopeptide.proteomics.uniprot import UniprotProteinDownloader
@cli.group("tools", short_help="Odds and ends to help inspect data and diagnose issues")
def tools():
    # Empty click group body; subcommands register via @tools.command(...).
    pass
def database_connection(fn):
    # Decorator adding the positional "database-connection" argument.
    # NOTE(review): ``doc_help`` is not a stock click.argument parameter --
    # presumably a project extension; confirm against the CLI framework setup.
    arg = click.argument("database-connection", doc_help=(
        "A connection URI for a database, or a path on the file system"),
        type=DatabaseBoundOperation)
    return arg(fn)
def hypothesis_identifier(hypothesis_type):
    # Decorator factory: adds a "hypothesis-identifier" argument whose help
    # text names the given hypothesis type (e.g. "glycan", "glycopeptide").
    def wrapper(fn):
        arg = click.argument("hypothesis-identifier", doc_help=(
            "The ID number or name of the %s hypothesis to use" % (hypothesis_type,)))
        return arg(fn)
    return wrapper
def analysis_identifier(fn):
    # Decorator adding the positional "analysis-identifier" argument.
    arg = click.argument("analysis-identifier", doc_help=(
        "The ID number or name of the analysis to use"))
    return arg(fn)
@tools.command("list", short_help='List names and ids of collections in the database')
@click.pass_context
@database_connection
def list_contents(context, database_connection):
    # Dump one tab-separated line per stored entity, grouped by entity type.
    click.echo("Glycan Hypothesis")
    for hypothesis in database_connection.query(GlycanHypothesis):
        click.echo("\t%d\t%s\t%s" % (hypothesis.id, hypothesis.name, hypothesis.uuid))
    click.echo("\nGlycopeptide Hypothesis")
    for hypothesis in database_connection.query(GlycopeptideHypothesis):
        # Includes the id of the glycan hypothesis it was built from.
        click.echo("\t%d\t%s\t%s\t%d" % (hypothesis.id, hypothesis.name, hypothesis.uuid,
                                         hypothesis.glycan_hypothesis.id))
    click.echo("\nSample Run")
    for run in database_connection.query(SampleRun):
        click.echo("\t%d\t%s\t%s" % (run.id, run.name, run.uuid))
    click.echo("\nAnalysis")
    for analysis in database_connection.query(Analysis):
        click.echo("\t%d\t%s\t%s" % (analysis.id, analysis.name, analysis.sample_run.name))
    click.echo("\nFile Blobs")
    for blob in database_connection.query(FileBlob):
        click.echo("\t%d\t%s" % (blob.id, blob.name))
@tools.command('mzid-proteins', short_help='Extract proteins from mzIdentML files')
@click.argument("mzid-file")
def list_protein_names(mzid_file):
    # Print one protein name per line, as parsed from the mzIdentML file.
    for name in mzid_proteome.protein_names(mzid_file):
        click.echo(name)
class SQLShellInterpreter(cmd.Cmd):
    """A minimal interactive SQL shell over a SQLAlchemy-style session.

    Commands: ``select``/``execsql``/``explain`` to run queries,
    ``export <filename>; <query>`` to dump a SELECT to CSV, and ``quit``.
    """
    prompt = '[sql-shell] '

    def __init__(self, session, *args, **kwargs):
        # session: object exposing execute()/rollback() (a SQLAlchemy session).
        self.session = session
        cmd.Cmd.__init__(self, *args, **kwargs)

    def postcmd(self, stop, line):
        """Stop the command loop after a ``quit`` command."""
        return line == "quit"

    def precmd(self, line):
        """Lower-case the command word so SELECT/Select/select all dispatch."""
        tokens = line.split(" ")
        tokens[0] = tokens[0].lower()
        return ' '.join(tokens)

    def _print_table(self, result):
        """Print a result set as ' | '-separated rows with aligned columns."""
        rows = list(result)
        if len(rows) == 0:
            return
        titles = list(rows[0].keys())
        # Track the widest repr() per column (headers included).
        sizes = [len(t) for t in titles]
        str_rows = []
        for row in rows:
            str_row = []
            for i, col in enumerate(row):
                col = repr(col)
                if sizes[i] < len(col):
                    sizes[i] = len(col)
                str_row.append(col)
            str_rows.append(str_row)
        # Bug fix: the column widths were computed but never applied.
        print(" | ".join(t.ljust(w) for t, w in zip(titles, sizes)))
        for str_row in str_rows:
            print(' | '.join(c.ljust(w) for c, w in zip(str_row, sizes)))

    def _to_csv(self, rows, fh):
        """Write *rows* (each supporting keys()/iteration) to *fh* as CSV."""
        titles = rows[0].keys()
        writer = csv.writer(fh)
        writer.writerow(titles)
        writer.writerows(rows)

    def do_export(self, line):
        """Usage: export <filename>; <query-without-select>"""
        try:
            fname, line = line.rsplit(";", 1)
            if len(fname.strip()) == 0 or len(line.strip()) == 0:
                raise ValueError()
            try:
                result = self.session.execute("select " + line)
            except Exception as e:
                print(str(e))
                self.session.rollback()
                return
            try:
                rows = list(result)
                print("%d rows selected; Writing to %r" % (len(rows), fname))
                # Bug fix: on Python 3 the csv module requires a text-mode
                # file opened with newline='' -- writing csv rows to a 'wb'
                # handle raises TypeError. Keep 'wb' for Python 2.
                if sys.version_info[0] < 3:
                    handle = open(fname, 'wb')
                else:
                    handle = open(fname, 'w', newline='')
                try:
                    self._to_csv(rows, handle)
                finally:
                    handle.close()
            except Exception as e:
                print(str(e))
        except ValueError:
            print("Could not determine the file name to export to.")
            print("Usage: export <filename>; <query>")

    def _run_statement(self, statement):
        # Shared driver: execute, print, and roll back on any failure.
        try:
            result = self.session.execute(statement)
            self._print_table(result)
        except Exception as e:
            print(str(e))
            self.session.rollback()
        return False

    def do_execsql(self, line):
        """Execute a complete SQL statement verbatim."""
        return self._run_statement(line)

    def do_explain(self, line):
        """Run ``explain <line>``."""
        return self._run_statement("explain " + line)

    def do_select(self, line):
        """Run ``select <line>``."""
        return self._run_statement("select " + line)

    def do_quit(self, line):
        """Exit the shell."""
        return True
@tools.command("sql-shell",
               short_help=("A minimal SQL command shell for running "
                           "diagnostics and exporting arbitrary data."))
@click.argument("database-connection")
@click.option("-s", "--script", default=None)
def sql_shell(database_connection, script=None):
    # With no --script, drop into the interactive loop; with one, run it
    # and dump the result set to stdout as CSV.
    db = DatabaseBoundOperation(database_connection)
    session = db.session()  # pylint: disable=not-callable
    interpreter = SQLShellInterpreter(session)
    if script is None:
        interpreter.cmdloop()
    else:
        result = session.execute(script)
        interpreter._to_csv(list(result), sys.stdout)
@tools.command("validate-fasta", short_help="Validates a FASTA file, checking a few errors.")
@click.argument("path")
def validate_fasta(path):
    # Pass 1: count raw defline markers.
    with open(path, "r") as handle:
        n_deflines = 0
        for line in handle:
            if line.startswith(">"):
                n_deflines += 1
    # Pass 2: count entries the parser actually accepts; a mismatch hints at
    # malformed records.
    with open(path, 'r') as handle:
        n_entries = 0
        for entry in fasta.FastaFileParser(handle):
            n_entries += 1
    if n_entries != n_deflines:
        click.secho("%d\">\" prefixed lines found, but %d entries parsed" % (n_deflines, n_entries))
    else:
        click.echo("%d Protein Sequences" % (n_entries, ))
    # Pass 3: parse each sequence and tally glycosylation sequons.
    n_glycoprots = 0
    o_glycoprots = 0
    with open(path, 'r') as handle:
        invalid_sequences = []
        for entry in fasta.FastaFileParser(handle):
            try:
                seq = fasta.ProteinSequence(entry['name'], entry['sequence'])
                n_glycoprots += bool(seq.n_glycan_sequon_sites)
                o_glycoprots += bool(seq.o_glycan_sequon_sites)
            except UnknownAminoAcidException as e:
                invalid_sequences.append((entry['name'], e))
    # NOTE(review): o_glycoprots is tallied but never reported -- confirm
    # whether an O-glycosite summary line was intended here.
    click.echo("Proteins with N-Glycosites: %d" % n_glycoprots)
    for name, error in invalid_sequences:
        click.secho("%s had %s" % (name, error), fg='yellow')
@tools.command("validate-glycan-text", short_help="Validates a text file of glycan compositions")
@click.argument("path")
def validate_glycan_text(path):
    """Parse each glycan composition line, reporting residues with unknown mass."""
    from glycan_profiling.database.builder.glycan.glycan_source import TextFileGlycanCompositionLoader
    with open(path, 'r') as handle:
        loader = TextFileGlycanCompositionLoader(handle)
        n = 0
        glycan_classes = set()
        residues = set()
        unresolved = set()
        for line in loader:
            n += 1
            glycan_classes.update(line[1])
            glycan_composition = HashableGlycanComposition.parse(line[0])
            for residue in glycan_composition.keys():
                # A zero mass means the residue symbol could not be resolved.
                if residue.mass() == 0:
                    unresolved.add(residue)
                residues.add(residue)
    click.secho("%d glycan compositions" % (n,))
    click.secho("Residues:")
    for residue in residues:
        click.secho("\t%s - %f" % (str(residue), residue.mass()))
    if unresolved:
        click.secho("Unresolved Residues:", fg='yellow')
        click.secho("\n".join(str(r) for r in unresolved), fg='yellow')
def has_known_glycosylation(accession):
    """Return True when UniProt lists *accession* with the "Glycoprotein" keyword.

    Any lookup failure is treated as "not known to be glycosylated".
    """
    try:
        record = uniprot.get(accession)
        # A per-feature GlycosylationSite scan used to live here; the keyword
        # check alone is what is currently in effect.
        return "Glycoprotein" in record.keywords
    except Exception:
        return False
@tools.command("known-glycoproteins", short_help=(
    'Checks UniProt to see if a list of proteins contains any known glycoproteins'))
@click.option("-i", "--file-path", help="Read input from a file instead of STDIN")
@click.option("-f", "--fasta-format", is_flag=True, help="Indicate input is in FASTA format")
@click.option("-o", "--output-path", help="Write output to a file instead of STDOUT")
def known_uniprot_glycoprotein(file_path=None, output_path=None, fasta_format=False):
    """Filter the input protein list down to UniProt-annotated glycoproteins.

    A pool of checker threads queries UniProt concurrently; a single consumer
    thread writes matching records to the output.
    """
    if file_path is not None:
        handle = open(file_path)
    else:
        handle = sys.stdin
    if fasta_format:
        reader = fasta.ProteinFastaFileParser(handle)

        def name_getter(x):
            # FASTA records carry a structured, pre-parsed header.
            return x.name.accession
    else:
        reader = handle

        def name_getter(x):
            # Plain-text lines: try a structured header, else first token.
            header = fasta.default_parser(x)
            try:
                return header.accession
            except Exception:
                return header[0]
    if output_path is None:
        outhandle = sys.stdout
    else:
        outhandle = open(output_path, 'w')

    def checker_task(inqueue, outqueue, no_more_event):
        # Pull accessions until the producer signals done and the queue drains.
        has_work = True
        while has_work:
            try:
                protein = inqueue.get(True, 1)
            except Empty:
                if no_more_event.is_set():
                    has_work = False
                continue
            try:
                if has_known_glycosylation(name_getter(protein)):
                    outqueue.put(protein)
            except Exception as e:
                click.secho("%r occurred for %s" % (e, protein), err=True, fg='yellow')

    def consumer_task(outqueue, no_more_event):
        # Single writer thread; closes the output handle when finished.
        has_work = True
        if fasta_format:
            writer = fasta.ProteinFastaFileWriter(outhandle)
            write_fn = writer.write
        else:
            def write_fn(payload):
                outhandle.write(str(payload).strip() + '\n')
        while has_work:
            try:
                protein = outqueue.get(True, 1)
            except Empty:
                if no_more_event.is_set():
                    has_work = False
                continue
            write_fn(protein)
        outhandle.close()

    producer_done = threading.Event()
    checker_done = threading.Event()
    inqueue = Queue()
    outqueue = Queue()
    n_threads = 10
    checkers = [threading.Thread(
        target=checker_task, args=(inqueue, outqueue, producer_done)) for i in range(n_threads)]
    for check in checkers:
        check.start()
    consumer = threading.Thread(target=consumer_task, args=(outqueue, checker_done))
    consumer.start()
    # Feed all input, then signal the checkers and finally the consumer.
    for protein in reader:
        inqueue.put(protein)
    producer_done.set()
    for checker in checkers:
        checker.join()
    checker_done.set()
    consumer.join()
@tools.command("download-uniprot", short_help=(
    "Downloads a list of proteins from UniProt"))
@click.option("-i", "--name-file-path", help="Read input from a file instead of STDIN")
@click.option("-o", "--output-path", help="Write output to a file instead of STDOUT")
def download_uniprot(name_file_path=None, output_path=None):
    """Fetch UniProt records for the listed accessions and emit them as FASTA."""
    if name_file_path is not None:
        handle = open(name_file_path)
    else:
        handle = sys.stdin

    def name_getter(x):
        # Parse a structured header if possible, else take the first token.
        header = fasta.default_parser(x)
        try:
            return header.accession
        except Exception:
            return header[0]

    accession_list = []
    for line in handle:
        accession_list.append(name_getter(line))
    if output_path is None:
        outhandle = sys.stdout
    else:
        outhandle = open(output_path, 'w')
    writer = fasta.FastaFileWriter(outhandle)
    downloader = UniprotProteinDownloader(accession_list, 10)
    downloader.start()
    has_work = True

    def make_protein(accession, uniprot_protein):
        # Build a SwissProt-style defline from the downloaded record.
        sequence = uniprot_protein.sequence
        name = "sp|{accession}|{gene_name} {description}".format(
            accession=accession, gene_name=uniprot_protein.gene_name,
            description=uniprot_protein.recommended_name)
        return name, sequence

    while has_work:
        try:
            accession, value = downloader.get(True, 3)
            # Failed downloads are delivered as Exception values.
            if isinstance(value, Exception):
                click.echo("Could not retrieve %s - %r" % (accession, value), err=True)
            else:
                writer.write(*make_protein(accession, value))
        except Empty:
            # If we haven't completed the download process, block
            # now and wait for the threads to join, then continue
            # trying to fetch results
            if not downloader.done_event.is_set():
                downloader.join()
                continue
            # Otherwise we've already waited for all the results to
            # arrive and we can stop iterating
            else:
                has_work = False
@tools.command("mass-search")
@click.option("-p", "--glycopeptide", is_flag=True)
@click.option("-m", "--error-tolerance", type=RelativeMassErrorParam(), default=1e-5)
@click.argument("database-connection")
@click.argument("hypothesis-identifier")
@click.argument("target-mass", type=float)
def mass_search(database_connection, hypothesis_identifier, target_mass, glycopeptide=False, error_tolerance=1e-5):
    """Search a hypothesis database for structures within a ppm window of *target-mass*."""
    if glycopeptide:
        handle = GlycopeptideDiskBackedStructureDatabase(database_connection, int(hypothesis_identifier))
    else:
        handle = GlycanCompositionDiskBackedStructureDatabase(database_connection, int(hypothesis_identifier))
    # Half-width of the search window in Daltons.
    width = (target_mass * error_tolerance)
    click.secho("Mass Window: %f-%f" % (target_mass - width, target_mass + width), fg='yellow')
    hits = list(handle.search_mass_ppm(target_mass, error_tolerance))
    if not hits:
        click.secho("No Matches", fg='red')
    for hit in hits:
        click.echo("\t".join(map(str, hit)))
@tools.command("version-check")
def version_check():
    """Report installed versions of this tool's key dependencies."""
    packages = [
        "glycan_profiling",
        "glycresoft_app",
        "glypy",
        "glycopeptidepy",
        "ms_peak_picker",
        "brain-isotopic-distribution",
        "ms_deisotope",
        "pyteomics",
        "lxml",
        "numpy",
        "scipy",
        "matplotlib"
    ]
    click.secho("Library Versions", fg='yellow')
    for dep in packages:
        try:
            # Preferred: ask the package metadata directly.
            rev = pkg_resources.require(dep)
            click.echo(str(rev[0]))
        except Exception:
            # Fallback: import the module and probe common version attributes.
            try:
                module = __import__(dep)
            except ImportError:
                continue
            version = getattr(module, "__version__", None)
            if version is None:
                version = getattr(module, "version", None)
            if version is None:
                # Last resort: a dedicated <pkg>.version submodule.
                try:
                    module = __import__("%s.version" % dep).version
                    version = module.version
                except ImportError:
                    continue
            if version:
                click.echo("%s %s" % (dep, version))
@tools.command("interactive-shell")
@click.option("-s", "--script", default=None)
def interactive_shell(script):
    """Drop into a Python REPL, optionally exec-ing *script* first."""
    if script:
        with open(script, 'rt') as fh:
            script = fh.read()
        # NOTE(review): executes arbitrary code from the user-supplied path.
        exec(script)
    code.interact()
@tools.command("update-analysis-parameters")
@database_connection
@analysis_identifier
@click.option("-p", "--parameter", nargs=2, multiple=True, required=False)
def update_analysis_parameters(database_connection, analysis_identifier, parameter):
    """Show an analysis's parameters and overwrite the given (name, value) pairs."""
    session = database_connection.session
    analysis = get_by_name_or_id(session, Analysis, analysis_identifier)
    click.echo("Current Parameters:")
    pprint.pprint(analysis.parameters)
    # *parameter* is a tuple of (name, value) pairs from repeated -p options.
    for name, value in parameter:
        analysis.parameters[name] = value
    session.add(analysis)
    session.commit()
@tools.command("summarize-glycopeptide-hypothesis")
@database_connection
@hypothesis_identifier(GlycopeptideHypothesis)
def summarize_glycopeptide_hypothesis(database_connection, hypothesis_identifier):
    """Print per-protein glycopeptide counts for a hypothesis, largest first."""
    session = database_connection.session
    hypothesis = get_by_name_or_id(session, GlycopeptideHypothesis, hypothesis_identifier)
    counts = session.query(Protein, func.count(Glycopeptide.id)).join(
        Glycopeptide).group_by(Protein.id).filter(Protein.hypothesis_id == hypothesis.id).all()
    # Sort in place, descending by count; use a proper bool instead of the
    # previous reverse=1.
    counts.sort(key=lambda pair: pair[1], reverse=True)
    total = 0
    for protein, count in counts:
        click.echo("%s: %d" % (protein.name, count))
        total += count
    click.echo("Total: %d" % (total,))
@tools.command("extract-blob")
@database_connection
@click.argument("blob-identifier")
@click.option("-o", "--output-path", type=click.File(mode='w'))
def extract_file_blob(database_connection, blob_identifier, output_path=None):
    """Stream a stored FileBlob out to *output-path* (default: stdout)."""
    if output_path is None:
        output_path = click.get_binary_stream('stdout')
    session = database_connection.session
    blob = get_by_name_or_id(session, FileBlob, blob_identifier)
    with blob.open() as fh:
        # Copy in 64 KiB chunks to bound memory use.
        chunk_size = 2 ** 16
        chunk = fh.read(chunk_size)
        while chunk:
            output_path.write(chunk)
            chunk = fh.read(chunk_size)
@tools.command("csv-concat")
@click.argument("csv-paths", type=click.File(mode='rt'), nargs=-1)
@click.option("-o", "--output-path", type=click.Path(writable=True), help="Path to write output to")
def csv_concat(csv_paths, output_path=None):
    """Concatenate CSV files that share a header into one output file.

    The header is taken from the first file; a file whose header differs,
    or which is empty, aborts with a clear error instead of a traceback.
    """
    import csv
    if output_path is None:
        # click.open_file treats '-' as stdout.
        output_path = '-'
    headers = None
    with click.open_file(output_path, mode='wt') as outfh:
        writer = csv.writer(outfh)
        for infh in csv_paths:
            reader = csv.reader(infh)
            # next(reader, None) avoids an uncaught StopIteration when a
            # file is empty.
            _header_line = next(reader, None)
            if _header_line is None:
                raise click.ClickException("File %s is empty" % (infh.name, ))
            if headers is None:
                headers = _header_line
                writer.writerow(headers)
            elif _header_line != headers:
                raise click.ClickException("File %s did not have matching headers (%s != %s)" % (
                    infh.name, _header_line, headers))
            for row in reader:
                writer.writerow(row)
            infh.close()
        outfh.flush()
|
cli.py
|
# -*- coding: utf-8 -*-
import configparser
import random
import sys
import time
from pathlib import Path
from threading import Thread
import click
import requests
from . import __version__
from .controllers import Cache, CastState, StateFileError, StateMode, get_chromecast, get_chromecasts, setup_cast
from .error import CastError, CattUserError, CliError, SubsEncodingError
from .http_server import serve_file
from .util import convert_srt_to_webvtt, echo_json, human_time, hunt_subtitle, is_ipaddress, read_srt_subs, warning
CONFIG_DIR = Path(click.get_app_dir("catt"))
CONFIG_PATH = Path(CONFIG_DIR, "catt.cfg")
STATE_PATH = Path(CONFIG_DIR, "state.json")
class CattTimeParamType(click.ParamType):
    """Click parameter parsing "SS", "MM:SS" or "HH:MM:SS" into total seconds."""

    def convert(self, value, param, ctx):
        try:
            fields = [int(part) for part in value.split(":")]
            num_fields = len(fields)
            # At most three fields; minutes/seconds fields must be <= 59.
            if num_fields > 3 or (num_fields > 1 and any(f > 59 for f in fields)):
                raise ValueError
        except ValueError:
            self.fail("%s is not a valid time description." % value, param, ctx)
        fields.reverse()
        return sum(field * 60 ** pos for pos, field in enumerate(fields))


CATT_TIME = CattTimeParamType()
class YtdlOptParamType(click.ParamType):
    """Click parameter parsing "key=value" into a (key, value) tuple.

    The strings "true"/"false" (any case) are coerced to real booleans.
    """

    def convert(self, value, param, ctx):
        if "=" not in value:
            self.fail("%s is not a valid key/value pair." % value, param, ctx)
        key, _, raw = value.partition("=")
        val = {"true": True, "false": False}.get(raw.lower().strip(), raw)
        return (key, val)


YTDL_OPT = YtdlOptParamType()
def process_url(ctx, param, value):
    """Click callback validating a URL/file argument; "-" reads from stdin."""
    if value == "-":
        stdin = click.get_text_stream("stdin")
        if stdin.isatty():
            raise CliError("No input received from stdin")
        value = stdin.read().strip()
    if "://" not in value:
        # No scheme: this must be a local file, and only `cast` accepts one.
        if ctx.info_name != "cast":
            raise CliError("Local file not allowed as argument to this command")
        if not Path(value).is_file():
            raise CliError("The chosen file does not exist")
    return value
def process_path(ctx, param, value):
    """Click callback turning a string into a Path, rejecting unusable ones."""
    if not value:
        return None
    path = Path(value)
    # Directories and paths under non-existent parents cannot be written to.
    if path.is_dir() or not path.parent.exists():
        raise CliError("The specified path is invalid")
    return path
def process_device(ctx, param, value):
    """Resolve a device alias to its configured real name.

    IP addresses are passed through unchanged; any other value is looked up
    in the "aliases" section of the default map, falling back to itself.
    """
    if is_ipaddress(value):
        return value
    return ctx.default_map["aliases"].get(value, value)
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])


@click.group(context_settings=CONTEXT_SETTINGS)
@click.option("--delete-cache", is_flag=True, help="Empty the Chromecast discovery cache.")
@click.option("-d", "--device", metavar="NAME_OR_IP", callback=process_device, help="Select Chromecast device.")
@click.version_option(version=__version__, prog_name="catt", message="%(prog)s v%(version)s, Winter Waterfall.")
@click.pass_context
def cli(ctx, delete_cache, device):
    """Root command group: stores the chosen device on the context object."""
    if delete_cache:
        Cache().clear()
    ctx.obj["device"] = device
@cli.command("write_config", short_help="Write the name of default Chromecast device to config file.")
@click.pass_obj
def write_config(settings):
    """Persist the currently selected device as the default in the config file."""
    if settings.get("device"):
        # This is so we fail if the specified Chromecast cannot be found.
        get_chromecast(settings["device"])
        writeconfig(settings)
    else:
        raise CliError("No device specified")
def load_subtitle_if_exists(subtitle, video, local_ip, port):
    """Locate and prepare a subtitle for casting.

    Falls back to hunting for a subtitle next to *video* when *subtitle* is
    None. Remote ``.srt`` files are fetched and converted to WebVTT; local
    subtitles are served over HTTP from a daemon thread. Returns the URL the
    Chromecast should load, or None when no subtitle is available.
    """
    subtitle = subtitle if subtitle else hunt_subtitle(video)
    if subtitle is None:
        return None
    click.echo("Using subtitle {}".format(subtitle))
    if "://" in subtitle:
        if subtitle.lower().endswith(".srt"):
            content = requests.get(subtitle).text
            subtitle = convert_srt_to_webvtt(content)
        else:
            # Remote non-srt subtitles are passed through untouched.
            return subtitle
    if subtitle.lower().endswith(".srt"):
        try:
            content = read_srt_subs(subtitle)
        except SubsEncodingError:
            raise CliError("Could not find the proper encoding of {}. Please convert it to utf-8.".format(subtitle))
        subtitle = convert_srt_to_webvtt(content)
    thr = Thread(target=serve_file, args=(subtitle, local_ip, port, "text/vtt;charset=utf-8"))
    # daemon=True so the server thread never blocks interpreter exit
    # (Thread.setDaemon is deprecated since Python 3.10).
    thr.daemon = True
    thr.start()
    subtitle_url = "http://{}:{}/{}".format(local_ip, port, subtitle)
    return subtitle_url
def process_subtitle(ctx, param, value):
    """Click callback: accept None, any URL, or an existing local file."""
    if value is None:
        return None
    if "://" in value:
        return value
    if Path(value).is_file():
        return value
    raise CliError("Subtitle file [{}] does not exist".format(value))
@cli.command(short_help="Send a video to a Chromecast for playing.")
@click.argument("video_url", callback=process_url)
@click.option("-s", "--subtitle", callback=process_subtitle, metavar="SUB", help="Specify a subtitle.")
@click.option(
    "-f",
    "--force-default",
    is_flag=True,
    help="Force use of the default Chromecast app (use if a custom app doesn't work).",
)
@click.option("-r", "--random-play", is_flag=True, help="Play random item from playlist, if applicable.")
@click.option(
    "--no-subs", is_flag=True, default=False, help="Don't try to load subtitles automatically from the local folder."
)
@click.option("-n", "--no-playlist", is_flag=True, help="Play only video, if url contains both video and playlist ids.")
@click.option(
    "-y",
    "--ytdl-option",
    type=YTDL_OPT,
    multiple=True,
    metavar="OPT",
    help="YouTube-DL option. "
    "Should be passed as `-y option=value`, and can be specified multiple times (implies --force-default).",
)
@click.pass_obj
def cast(settings, video_url, subtitle, force_default, random_play, no_subs, no_playlist, ytdl_option):
    """Cast a local file, remote file, or playlist to the selected device."""
    # Custom ytdl options only work with the default receiver app.
    controller = "default" if force_default or ytdl_option else None
    subtitle_url = None
    playlist_playback = False
    cst, stream = setup_cast(
        settings["device"], video_url=video_url, prep="app", controller=controller, ytdl_options=ytdl_option
    )
    if stream.is_local_file:
        # Local file: serve it (and an optional subtitle on port+1) over HTTP.
        if subtitle or not no_subs:
            subtitle_url = load_subtitle_if_exists(subtitle, video_url, stream.local_ip, stream.port + 1)
        thr = Thread(target=serve_file, args=(video_url, stream.local_ip, stream.port, stream.guessed_content_type))
        thr.setDaemon(True)
        thr.start()
    elif stream.is_playlist and not (no_playlist and stream.video_id):
        if stream.playlist_length == 0:
            cst.kill(idle_only=True)
            raise CliError("Playlist is empty")
        if not random_play and cst.playlist_capability and stream.playlist_all_ids:
            playlist_playback = True
        else:
            # Fall back to playing a single entry from the playlist.
            if random_play:
                entry = random.randrange(0, stream.playlist_length)
            else:
                warning("Playlist playback not possible, playing first video")
                entry = 0
            stream.set_playlist_entry(entry)
    if playlist_playback:
        click.echo("Casting remote playlist %s..." % video_url)
        video_id = stream.video_id or stream.playlist_all_ids[0]
        cst.play_playlist(stream.playlist_id, video_id=video_id)
    else:
        click.echo("Casting %s file %s..." % ("local" if stream.is_local_file else "remote", video_url))
        click.echo('Playing "%s" on "%s"...' % (stream.video_title, cst.cc_name))
        if cst.info_type == "url":
            cst.play_media_url(
                stream.video_url,
                title=stream.video_title,
                content_type=stream.guessed_content_type,
                subtitles=subtitle_url,
                thumb=stream.video_thumbnail,
            )
        elif cst.info_type == "id":
            cst.play_media_id(stream.video_id)
        else:
            raise ValueError("Invalid or undefined info type")
    if stream.is_local_file:
        # Keep the process alive while the local HTTP server thread runs.
        click.echo("Serving local file, press Ctrl+C when done.")
        while thr.is_alive():
            time.sleep(1)
@cli.command("cast_site", short_help="Cast any website to a Chromecast.")
@click.argument("url", callback=process_url)
@click.pass_obj
def cast_site(settings, url):
    """Load an arbitrary web page on the device via the DashCast app."""
    cst = setup_cast(settings["device"], controller="dashcast", action="load_url", prep="app")
    click.echo('Casting %s on "%s"...' % (url, cst.cc_name))
    cst.load_url(url)
@cli.command(short_help="Add a video to the queue (YouTube only).")
@click.argument("video_url", callback=process_url)
@click.option("-n", "--play-next", is_flag=True, help="Add video immediately after currently playing video.")
@click.pass_obj
def add(settings, video_url, play_next):
    """Queue a video on the active controller (must match the url's extractor)."""
    cst, stream = setup_cast(settings["device"], video_url=video_url, action="add", prep="control")
    if cst.name != stream.extractor or not (stream.is_remote_file or stream.is_playlist_with_active_entry):
        raise CliError("This url cannot be added to the queue")
    click.echo('Adding video id "%s" to the queue.' % stream.video_id)
    if play_next:
        cst.add_next(stream.video_id)
    else:
        cst.add(stream.video_id)
@cli.command(short_help="Remove a video from the queue (YouTube only).")
@click.argument("video_url", callback=process_url)
@click.pass_obj
def remove(settings, video_url):
    """Remove a queued video (must match the active controller's extractor)."""
    cst, stream = setup_cast(settings["device"], video_url=video_url, action="remove", prep="control")
    if cst.name != stream.extractor or not stream.is_remote_file:
        raise CliError("This url cannot be removed from the queue")
    click.echo('Removing video id "%s" from the queue.' % stream.video_id)
    cst.remove(stream.video_id)
@cli.command(short_help="Clear the queue (YouTube only).")
@click.pass_obj
def clear(settings):
    """Empty the player queue."""
    cst = setup_cast(settings["device"], action="clear", prep="control")
    cst.clear()


@cli.command(short_help="Pause a video.")
@click.pass_obj
def pause(settings):
    """Pause playback."""
    cst = setup_cast(settings["device"], action="pause", prep="control")
    cst.pause()


@cli.command(short_help="Resume a video after it has been paused.")
@click.pass_obj
def play(settings):
    """Resume playback."""
    cst = setup_cast(settings["device"], action="play", prep="control")
    cst.play()


@cli.command(short_help="Stop playing.")
@click.pass_obj
def stop(settings):
    """Stop playback and shut down the receiver app."""
    cst = setup_cast(settings["device"])
    cst.kill()


@cli.command(short_help="Rewind a video by TIME duration.")
@click.argument("timedesc", type=CATT_TIME, required=False, default="30", metavar="TIME")
@click.pass_obj
def rewind(settings, timedesc):
    """Seek backwards by *timedesc* seconds (default 30)."""
    cst = setup_cast(settings["device"], action="rewind", prep="control")
    cst.rewind(timedesc)


@cli.command(short_help="Fastforward a video by TIME duration.")
@click.argument("timedesc", type=CATT_TIME, required=False, default="30", metavar="TIME")
@click.pass_obj
def ffwd(settings, timedesc):
    """Seek forwards by *timedesc* seconds (default 30)."""
    cst = setup_cast(settings["device"], action="ffwd", prep="control")
    cst.ffwd(timedesc)


@cli.command(short_help="Seek the video to TIME position.")
@click.argument("timedesc", type=CATT_TIME, metavar="TIME")
@click.pass_obj
def seek(settings, timedesc):
    """Jump to an absolute position given in seconds."""
    cst = setup_cast(settings["device"], action="seek", prep="control")
    cst.seek(timedesc)


@cli.command(short_help="Skip to end of content.")
@click.pass_obj
def skip(settings):
    """Skip the current item."""
    cst = setup_cast(settings["device"], action="skip", prep="control")
    cst.skip()


@cli.command(short_help="Set the volume to LVL [0-100].")
@click.argument("level", type=click.IntRange(0, 100), metavar="LVL")
@click.pass_obj
def volume(settings, level):
    """Set absolute volume; the device API uses a 0.0-1.0 scale."""
    cst = setup_cast(settings["device"])
    cst.volume(level / 100.0)


@cli.command(short_help="Turn up volume by a DELTA increment.")
@click.argument("delta", type=click.IntRange(1, 100), required=False, default=10, metavar="DELTA")
@click.pass_obj
def volumeup(settings, delta):
    """Raise volume by *delta* percent (default 10)."""
    cst = setup_cast(settings["device"])
    cst.volumeup(delta / 100.0)


@cli.command(short_help="Turn down volume by a DELTA increment.")
@click.argument("delta", type=click.IntRange(1, 100), required=False, default=10, metavar="DELTA")
@click.pass_obj
def volumedown(settings, delta):
    """Lower volume by *delta* percent (default 10)."""
    cst = setup_cast(settings["device"])
    cst.volumedown(delta / 100.0)


@cli.command(short_help="Show some information about the currently-playing video.")
@click.pass_obj
def status(settings):
    """Print a short status summary for the current media."""
    cst = setup_cast(settings["device"], prep="info")
    print_status(cst.cast_info)
@cli.command(short_help="Show complete information about the currently-playing video.")
@click.option("-j", "--json-output", is_flag=True, help="Output info as json.")
@click.pass_obj
def info(settings, json_output):
    """Dump all available cast info, either as text lines or as JSON."""
    try:
        cst = setup_cast(settings["device"], prep="info")
    except CastError:
        # With --json-output we still emit valid (empty) JSON on failure.
        if json_output:
            info = {}
        else:
            raise
    else:
        info = cst.info
    if json_output:
        echo_json(info)
    else:
        for (key, value) in info.items():
            click.echo("%s: %s" % (key, value))
@cli.command(short_help="Scan the local network and show all Chromecasts and their IPs.")
@click.option("-j", "--json-output", is_flag=True, help="Output scan result as json.")
def scan(json_output):
    """Discover devices on the LAN and print them as text or JSON."""
    if not json_output:
        click.echo("Scanning Chromecasts...")
    devices_dict = {
        d.name: {
            "host": d.host,
            "port": d.port,
            "manufacturer": d.device.manufacturer,
            "model_name": d.model_name,
            "uuid": d.uuid,
            "cast_type": d.cast_type,
        }
        for d in get_chromecasts()
    }
    if json_output:
        echo_json(devices_dict)
    else:
        if not devices_dict:
            raise CastError("No devices found")
        for device in devices_dict.keys():
            click.echo("{host} - {device} - {manufacturer} {model_name}".format(device=device, **devices_dict[device]))
@cli.command(short_help="Save the current state of the Chromecast for later use.")
@click.argument("path", type=click.Path(writable=True), callback=process_path, required=False)
@click.pass_obj
def save(settings, path):
    """Snapshot current playback state to *path*, or to the config dir."""
    cst = setup_cast(settings["device"], prep="control")
    if not cst.save_capability or cst.is_streaming_local_file:
        raise CliError("Saving state of this kind of content is not supported")
    elif cst.save_capability == "partial":
        warning("Please be advised that playlist data will not be saved")
    print_status(cst.media_info)
    if path and path.is_file():
        click.confirm("File already exists. Overwrite?", abort=True)
    click.echo("Saving...")
    if path:
        # User-supplied file: state is stored under a wildcard device name.
        state = CastState(path, StateMode.ARBI)
        cc_name = "*"
    else:
        state = CastState(STATE_PATH, StateMode.CONF)
        cc_name = cst.cc_name
    state.set_data(cc_name, {"controller": cst.name, "data": cst.media_info})
@cli.command(short_help="Return Chromecast to saved state.")
@click.argument("path", type=click.Path(exists=True), callback=process_path, required=False)
@click.pass_obj
def restore(settings, path):
    """Resume playback from a state file written by `save`."""
    if not path and not STATE_PATH.is_file():
        raise CliError("Save file in config dir has not been created")
    cst = setup_cast(settings["device"])
    state = CastState(path or STATE_PATH, StateMode.READ)
    try:
        # Arbitrary save files use a wildcard key, hence None for the name.
        data = state.get_data(cst.cc_name if not path else None)
    except StateFileError:
        raise CliError("The chosen file is not a valid save file")
    if not data:
        raise CliError("No save data found for this device")
    print_status(data["data"])
    click.echo("Restoring...")
    # Re-setup with the controller that produced the saved state.
    cst = setup_cast(settings["device"], prep="app", controller=data["controller"])
    cst.restore(data["data"])
def print_status(status):
    """Pretty-print a media status dict (title, time/progress, state, volume)."""
    if status.get("title"):
        click.echo("Title: %s" % status["title"])
    if status.get("current_time"):
        current = human_time(status["current_time"])
        if status.get("duration"):
            click.echo("Time: %s / %s (%s%%)" % (
                current, human_time(status["duration"]), status["progress"]))
            click.echo("Remaining time: %s" % human_time(status["remaining"]))
        else:
            click.echo("Time: %s" % current)
    if status.get("player_state"):
        click.echo("State: %s" % status["player_state"])
    if status.get("volume_level"):
        click.echo("Volume: %s" % status["volume_level"])
def writeconfig(settings):
    """Persist *settings* (plus any existing aliases) to the config file.

    Standalone options go under an "options" section; the "aliases" section
    from the existing config is carried over unchanged.
    """
    # exist_ok replaces the old try/except FileExistsError dance.
    CONFIG_DIR.mkdir(exist_ok=True)
    # Put all the standalone options from the settings into an "options" key.
    old_conf = readconfig()
    conf = {"options": settings, "aliases": old_conf["aliases"]}
    # Convert the conf dict into a ConfigParser instance.
    config = configparser.ConfigParser()
    for section, options in conf.items():
        config.add_section(section)
        for option, value in options.items():
            config.set(section, option, value)
    with CONFIG_PATH.open("w") as configfile:
        config.write(configfile)
def readconfig():
    """Read the configuration from the config file.

    Returns a dictionary of the form:
        {"option": "value",
         "aliases": {"device1": "device_name"}}
    """
    config = configparser.ConfigParser()
    # ConfigParser.read does not take path-like objects <3.6.
    config.read(str(CONFIG_PATH))
    sections = {name: dict(config.items(name)) for name in config.sections()}
    conf = sections.get("options", {})
    conf["aliases"] = sections.get("aliases", {})
    return conf
def main():
    """CLI entry point: run the click group with config-file defaults."""
    try:
        return cli(obj={}, default_map=readconfig())
    except CattUserError as err:
        # User-facing errors are printed without a traceback.
        sys.exit("Error: {}.".format(str(err)))


if __name__ == "__main__":
    main()
|
Dkbot7.py
|
#Eww -*- coding: utf-8 -*-
import LINETCR
import requests
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
#import time,random,sys,json,codecs,threading,glob,urllib,urllib2
import time,random,sys,json,codecs,threading,glob,urllib,urllib2,urllib3,re,subprocess
cl = LINETCR.LINE()
#cl.login(token="EyW8mMdmmgnYBHQdlmp0.9q2iIcfJltjghJCvoSD1Wa.uWZ+XbAkBO6TPSHaAgpkfWnsGt7sE54yCUc74sXW5Ow=")
cl.login(token="EzoiNPFxJqCj4vqVcvLe.+r+jAIdxZWQoBO38ex8YVG.03Kn17fyOCnB9lFoNyu+RMnkAvjTWYHrssUtfkD2Hp4=")
cl.loginResult()
ki = LINETCR.LINE()
#ki.login(token="Eyfg6QGFHBFUFC8ZtkZ5.BAbZE0umy2dUOSbpoBpzPq.lXD2kRtVC24dED2mukQS8SWJq69jiPdbdSwYttfrEQg=")
ki.login(token="EzR8HpeZGz9lp8mHvIQf.1nwXHL4o/F+CWggPIrMI7W.AM/dQog+61y4QUGn4xOfyaptJmHO/5aPZ9VqlZt21eI=")
ki.loginResult()
kk = LINETCR.LINE()
#kk.login(token="Ey870DEVeRttxWG0kULc.YCFgbMZPhmkVN+eVj380ta.iJVSxeD+bc4PA1LUDs7vsCeCDLPRUCzX4+9TVC4HQfE=")
kk.login(token="EzIWZklQUjt5fv1z6gl7.UM3NxLlxVADgLmxsusBUDW.vYzoZxvagrRKNQC6pHY0+jUJ8c04tn86ENNXbrl/dR0=")
kk.loginResult()
kc = LINETCR.LINE()
#kc.login(token="EyAO2wLu8PnLKEIxWZQf.jcKRHQZO/ZUdXspu91JKJW.qIV6DwN1HjPrUW6pq0JghQDflEV0c/mlCsPIn3SG/zM=")
kc.login(token="EzTOEEpCcPTZTDadcP85.8OY1iJbE2cs3otqwBUM9rq.bdX5srzo2Gm1K0VxGA42Oz5pBBLYG8SQeaVR3P7f/ss=")
kc.loginResult()
kb = LINETCR.LINE()
#kb.login(token="EyCivT3oBbQGe4azowY2.a3wwcrKoF7GSYZK8iLIneG.P2ZAwWDZRfny1TV8lUIguyLE+N19zD7Fb6KPLEv/H3w=")
kb.login(token="EzaGWroRNObLVVLFGHdb.x6JXUNwTo2ABeWsGFXEkYW.ZFya5XmezciEuejH2KDY5X/t/vD46ErVwIsvknhrwcc=")
kb.loginResult()
kd = LINETCR.LINE()
kd.login(token="EzMt06ZdlknGjwR2jRnf.iXDW3NqeZe2QwsaB0p/2tW.qcz1McuRnvz9pxEOP9g9veBgiVOcHfWPe4CLNmoFloo=")
kd.loginResult()
sw = LINETCR.LINE()
sw.login(token="EzYtGhVbfA0VaETi5E27.ysELs5+0aaJPhSRVRuIgzW.VtJLf2gOwv8HKweiIXO5g2VvsVq3jXhUKWjTBV4ZUgE=")
sw.loginResult()
print u"login success"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage ="""
~🔰👻DRAGON BOTS👻🔰~~~
👻🔰DRAGON BOTS TEAM🔰👻
╔══════════════
╠ 🔰👻✮Ginfo
╠ 🔰👻✮Botcancel
╠ 🔰👻✮Say「Text」
╠ 🔰👻✮ Gn:「Name GC
╠ 🔰👻✮Mymid「mid me」
╠ 🔰👻✮Welcome on
╠ 🔰👻✮Welcome off
╠ 🔰👻✮ Lurkers
| 🔰👻Mybot
╠ 🔰👻✮View
╠ 🔰👻✮Creator
╚═════════════
🔰👻DK BOTS👻🔰
╔═════════════
╠ 🔰👻Ghost absen
╠ 🔰👻Say 「Text」
╠ 🔰👻@.
╠ 🔰👻@,
╠ 🔰👻Sablenk copy 「@」
╠ 🔰👻Sablenk kembali 「@」
╠ 🔰👻Sablenk kick「Mid」
╠ 🔰👻Sablenk invite「Mid」
╠ 🔰👻Ghost setting 「View」
╠ 🔰👻Sablenk bot 「Cek bots」
╠ 🔰👻S cancel「Cancel pending」
╠ 🔰👻S link 「on / off」
╠ 🔰👻S bir 「Cleanse this group」
╠ 🔰👻Clearall 「Cleanse group」
╠ 🔰👻Sb 「BL all member」
╠ 🔰👻Ghost del 「Unban all member」
╠ 🔰👻Ginfo 「View group info」
╠ 🔰👻Gcreator 「Melihat pembuat」
╠ 🔰👻All mid 「Melihat mid bot」
╠ 🔰👻Mymid 「mid sndiri」
╠ 🔰👻Gift 「Gift1,Gift2,Gift3」
╠ 🔰👻Spam「on / off」1\ Text
╠ 🔰👻Creator 「Cek pembuat bot」
╠ 🔰👻Gurl 「View group link」
╠ 🔰👻Mentions 「Tag all member」
╠ 🔰👻All: 「Rename all bot」
╠ 🔰👻Allbio: 「Change all bio bot」
╠ 🔰👻Mid 「@」
╠ 🔰👻Bc: 「Text」
╠ 🔰👻Admin on/off 「@」
╠ 🔰👻 List admin
╠ 🔰👻Spam to 「@」
╠ 🔰👻 Speed
╠ 🔰👻Respon
╠ 🔰👻Lurkers
╠ 🔰👻View
╠ 🔰👻Fuck「@」
╠ 🔰👻Sayang「@」
╠ 🔰👻Mk「@」
╠ 🔰👻Nk 「@」
╠ 🔰👻Ban 「@」
╠ 🔰👻Unban「@」
╠ 🔰👻Kick 「@」
╠ 🔰👻Ban:on「Send contact」
╠ 🔰👻Unban:on「Send Contact」
╠ 🔰👻Banlist
╠ 🔰👻Kick ban
╠ 🔰👻╬═Mimic on/off
╠ 🔰👻╬═Mimic:add 「@」
╠ 🔰👻╬═Mimic:del 「@」
╠ 🔰👻╬═Reject「Cancel undangn]
╠ 🔰👻╬═InviteMeTo:[group id]
╠ 🔰👻╬═Invite [invite mmber]
╠ 🔰👻╬═TD leaveAllGc
╠ 🔰👻╬═Music[jaran goyang]
╠ 🔰👻╬═TD:Bc [Bc taks all]
╚════════════
❧🔰👻DRAGON BOTS👻🔰 ❧
╚════════════
🔰👻DRAGON👻🔰
penting...!!!: SALAM SANTUN
DRAGON BOTS
╚════════════"""
helpMessage1 =""" *** Set Group ***
╔════════════
╠🔐 Auto cancel「on / off」
╠🔐 Contact 「on / off」
╠🔐 Allprotect 「on / off」
╠🔐 Auto like 「on / off」
╠🔐 Auto leave 「on / off」
╠🔐 Backup 「on / off」
╠🔐 Welcome 「on / off」
╚════════════
*** Set Group ***"""
KAC=[cl,ki,kk,kc,kb,kd,sw]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = kb.getProfile().mid
Emid = kd.getProfile().mid
Lmid = sw.getProfile().mid
Bots = [mid,Amid,Bmid,Cmid,Dmid,Emid,Lmid]
admin = ["ufd1fc96a20d7cf0a8e6e8dfc117f32be"]
staff = ["ufd1fc96a20d7cf0a8e6e8dfc117f32be"]
owner = ["ufd1fc96a20d7cf0a8e6e8dfc117f32be"]
adminMID = 'ufd1fc96a20d7cf0a8e6e8dfc117f32be'
wait ={
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True,"members":1},
'leaveRoom':False,
'timeline':False,
'autoAdd':False,
'message':"Thanks for add Created by .Dzulkifli",
"lang":"JP",
"comment":" TEAM DK BOTS•\n\nKilers:\n[☆] Dzulkifli [☆]\n[✯] DK-BOT\nhttps://line.me.ti/p~reza.p.i.p",
"likeOn":True,
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cName":" ",
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
"Protectgroupname":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
"protectJoin":False,
"Backup":True,
"welcome":False,
"goodbye":False,
"TDinvite":False,
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{},
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{},
}
setTime = {}
setTime = wait2['setTime']
blacklistFile='blacklist.txt'
pendinglistFile='pendinglist.txt'
setTime = {}
setTime = wait2["setTime"]
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # Build a LINE Message addressed to *to* and bump a per-recipient
    # sequence counter.
    # NOTE(review): neither `profile` nor `messageReq` is defined in this
    # file's visible scope, and the constructed message is never passed to a
    # send call -- confirm this helper is complete. The mutable default for
    # contentMetadata is also shared across calls.
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def mention(to,name):
    # Build and send a "[MENTION]" message tagging every mid in `name`,
    # constructing the MENTIONEES metadata JSON by string concatenation.
    aa = ""
    bb = ""
    strt = int(12)
    akh = int(12)
    nm = name
    print nm
    for mm in nm:
        # Each entry records the start/end offsets of the "@x" placeholder.
        akh = akh + 2
        aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
        strt = strt + 6
        akh = akh + 4
        bb += "\xe2\x98\xbb @x \n"
    # Drop the trailing comma from the JSON fragment.
    aa = (aa[:int(len(aa)-1)])
    msg = Message()
    msg.to = to
    # NOTE(review): `cl.getProfile.mid` is missing call parentheses --
    # presumably `cl.getProfile().mid` was intended; confirm.
    msg.from_ = cl.getProfile.mid
    msg.text = "[MENTION]\n"+bb
    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
    print msg
    try:
        cl.sendMessage(msg)
    except Exception as error:
        print error
def NOTIFIED_KICKOUT_FROM_GROUP(op):
    """React to a kick-out event by posting a warning naming the contact
    in ``op.param3`` into group ``op.param1``; errors are logged, not raised."""
    try:
        offender = cl.getContact(op.param3).displayName
        cl.sendText(op.param1, offender + " Jangan main kick ")
    except Exception as e:
        print(e)
        print("\n\nNOTIFIED_KICKOUT_FROM_GROUP\n\n")
    return
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#------------------------Auto Join-------------------------------
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kd.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
#cl.updateGroup(G)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if op.type == 13:
if Bmid in op.param3:
G = kk.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kk.cancelGroupInvitation(op.param1, matched_list)
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = Amid.getGroup(op.param1)
G.preventJoinByTicket = False
Amid.updateGroup(G)
Ticket = Amid.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
Amid.updateGroup(G)
Ticket = Amid.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in mid:
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Fmid:
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Emid:
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Imid:
X = ki.getGroup(op.param1)
X.preventJoinByTicket = False
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in Dmid:
X = kb.getGroup(op.param1)
X.preventJoinByTicket = False
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in mid:
X = sw.getGroup(op.param1)
X.preventJoinByTicket = False
sw.updateGroup(X)
Ti = sw.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
X = ki.getGroup(op.param1)
X.preventJoinByTicket = False
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
if op.param3 in Fmid:
if op.param2 in Gmid:
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
Ti = kg.reissueGroupTicket(op.param1)
if op.type == 13:
print op.param1
print op.param2
print op.param3
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
#_____________
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
cl.cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
#_____________________________
#-------------------------------------------------------------------
if op.type == 11:
if not op.param2 in Bots:
try:
gs = ki.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
#--------------------------------------------------------------------------------------------
if op.type == 19:
if not op.param2 in Bots:
try:
gs = cl.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots:
if wait["Backup"] == True:
try:
klist=[ki,kk,kc,kb,kd,cl]
kicker = random.choice(klist)
kicker.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if not op.param2 in Bots:
if op.param2 not in Bots:
if wait["protect"] == True:
try:
klist=[cl,ki,kk,kc,kb,kd,cl]
kicker=random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
kt.acceptGroupInvitationByTicket(op.param1,Ticket)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
kb.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kb.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception, e:
print e
if not op.param2 in Bots:
if op.param2 not in Bots:
if wait["protect"] == True:
try:
klist=[cl,ki,kk,kc,kb,kd,cl]
kicker=random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
kz.acceptGroupInvitationByTicket(op.param1,Ticket)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
kd.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kd.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception, e:
print e
if not op.param2 in Bots:
if op.param2 not in Bots:
if wait["protect"] == True:
try:
klist=[cl,ki,kk,kc,kb,kd,cl]
kicker=random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
sw.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
sw.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception, e:
print e
#--------------------------------------------------------------------------------------------
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = kc.getGroup(op.param1)
G.preventJoinByTicket = False
kc.updateGroup(G)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
X = kc.getGroup(op.param1)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
kb.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kb.getGroup(op.param1)
X.preventJoinByTicket = False
kb.updateGroup(X)
Ti = kb.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
G = kb.getGroup(op.param1)
G.preventJoinByTicket = True
kb.updateGroup(G)
Ticket = kb.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
kd.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
G = kd.getGroup(op.param1)
G.preventJoinByTicket = True
kd.updateGroup(G)
Ticket = kd.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
ke.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Because there is no kick regulation or group,\n["+op.param1+"]\nof\n["+op.param2+"]\nI could not kick.\nAdd it to the black list.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).KickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki.getGroup(op.param1)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ti = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#____________~_~~~~_~___~~~~~~~~~~~~___~~~~~~
if op.type == 19:
if op.param2 not in Bots:
if op.param3 in admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,admin)
else:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 not in admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
print "kicker kicked"
else:
pass
if op.type == 19:
if op.param2 not in Bots:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group¡¢\n["+op.param1+"]\n¤Î\n["+op.param2+"]\n¤òõí¤ëʤ¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿¡£\n¥Ö¥é¥Ã¥¯¥ê¥¹¥È¤Ë×·¼Ó¤·¤Þ¤¹¡£")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client¤¬õí¤êÒŽÖÆor¥°¥ë©`¥×¤Ë´æÔÚ¤·¤Ê¤¤žé¡¢\n["+op.param1+"]\n¤Î\n["+op.param2+"]\n¤òõí¤ëʤ¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿¡£\n¥Ö¥é¥Ã¥¯¥ê¥¹¥È¤Ë×·¼Ó¤·¤Þ¤¹¡£")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
G = ki.getGroup(op.param1)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client¤¬õí¤êÒŽÖÆor¥°¥ë©`¥×¤Ë´æÔÚ¤·¤Ê¤¤žé¡¢\n["+op.param1+"]\n¤Î\n["+op.param2+"]\n¤òõí¤ëʤ¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿¡£\n¥Ö¥é¥Ã¥¯¥ê¥¹¥È¤Ë×·¼Ó¤·¤Þ¤¹¡£")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
G = kk.getGroup(op.param1)
G.preventJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client¤¬õí¤êÒŽÖÆor¥°¥ë©`¥×¤Ë´æÔÚ¤·¤Ê¤¤žé¡¢\n["+op.param1+"]\n¤Î\n["+op.param2+"]\n¤òõí¤ëʤ¬¤Ç¤¤Þ¤»¤ó¤Ç¤·¤¿¡£\n¥Ö¥é¥Ã¥¯¥ê¥¹¥È¤Ë×·¼Ó¤·¤Þ¤¹¡£")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = cl.getGroup(op.param1)
X.preventJoinByTicket = False
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kb.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
G = kc.getGroup(op.param1)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#~~~~~~~__~____~~~__________~~___
#----------------Welcome---------------------------------------------
if op.type == 15:
if wait["goodbye"] == True:
cl.sendText(op.param1, "Nah kq leave,,,Baper yee?")
print op.param3 + "has left the group"
if op.type == 11:
if op.param2 not in Bots:
return
cl.sendText(op.param1,cl.getContact(op.param2).displayName + "Please don't play qr")
print "Update group"
if op.type == 17:
if wait["welcome"] == True:
group = cl.getGroup(op.param1)
cb = Message()
cb.to = op.param1
cb.text = cl.getContact(op.param2).displayName + " Welcome to [ " + group.name + " ]\nCreator grup => [ " + group.creator.displayName + " ]"
cl.sendMessage(cb)
#-----------------------------------------------------------------
if op.type == 13:
if mid in op.param3:
klist=[cl,ki,kk,kc,kb,kd]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kicker.rejectGroupInvitation(op.param1)
else:
kicker.acceptGroupInvitation(op.param1)
else:
kicker.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kicker.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kicker.cancelGroupInvitation(op.param1, matched_list)
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == admin:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
if op.type == 32:
if op.param2 not in Bots:
if op.param2 not in admin:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["Sablenkinvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invite
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"[×]" + _name + " Sudah di grup ini😉")
break
elif invite in wait["blacklist"]:
cl.sendText(msg.to,"Sorry 😉 " + _name + " Ada di daftar Blacklist")
cl.sendText(msg.to,"Call my gerhana to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Succes : \n➡" + _name)
wait["Sablenkinvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["Sablenkinvite"] = False
except:
cl.sendText(msg.to,"Sorry i can't invite this contact")
wait["Sablenkinvite"] = False
break
if wait["Sablenknvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
ki.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my Gerhana to use command !, \n➡ Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
ki.findAndAddContactsByMid(target)
ki.inviteIntoGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"✍⎯ٴ☬⟿Admin kece☬ Invited: \n➡ " + _name)
wait["akaInvite"] = False
break
except:
cl.sendText(msg.to,"Negative, Err0r Detected")
wait["akaInvite"] = False
break
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"Telah ditambahkan ke daftar hitam")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"Telah ditambahkan di daftar hitam")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"Succes")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"deleted")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
cl.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"🔹Name 》\n" + msg.contentMetadata["displayName"] + "\n🔹Mid 》\n" + msg.contentMetadata["mid"] + "\n🔹Status 》\n" + contact.statusMessage + "\n🔹Picture status 》\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n🔹CoverURL:\n" + str(cu))
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"🔹[NAME]:\n" + msg.contentMetadata["displayName"] + "\n🔹[MID]:\n" + msg.contentMetadata["mid"] + "\n🔹[STATUS]:\n" + contact.statusMessage + "\n🔹[PICTURE STATUS]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n🔹[CoverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Help","Sw help","Menu","Key","menu"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage)
else:
cl.sendText(msg.to,helpt)
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Bot only admin & staff 😆\nKetik 「Creator」 for contact admin")
print "COMMENT DENIED"
elif msg.text in ["Set view"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,helpMessage1)
else:
cl.sendText(msg.to,helpt)
elif ("Gn: " in msg.text):
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Gn: ","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
elif "Dk.kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Sablenk kick ","")
cl.kickoutFromGroup(msg.to,[midd])
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
print "COMMENT DENIED"
elif "Kick " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick ","")
ki.kickoutFromGroup(msg.to,[midd])
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
print "COMMENT DENIED"
elif msg.text in ["Invite on"]:
if msg.from_ in admin:
wait["Sablenk invite"] = True
cl.sendText(msg.to,"send contact to invite")
elif "Sablenk invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Sablenk invite ","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif "Sablenk invite " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Sablenk invite ","")
ki.findAndAddContactsByMid(midd)
ki.inviteIntoGroup(msg.to,[midd])
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
print "COMMENT DENIED"
elif msg.text in ["Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif msg.text.lower() == 'invite':
if msg.from_ in admin:
if msg.toType == 2:
wait["akaInvite"] = True
ki.sendText(msg.to,"send contact 👻")
elif "Invite:" in msg.text:
midd = msg.text.replace("Invite:","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif msg.text in ["Kibar"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Lmid}
cl.sendMessage(msg)
cl.sendText(msg.to,
"█░░╦─╦╔╗╦─╔╗╔╗╔╦╗╔╗░░█\n█░░║║║╠─║─║─║║║║║╠─░░█\n█░░╚╩╝╚╝╚╝╚╝╚╝╩─╩╚╝░░█\n"
"ASSALAMUALAIKUM\n"
" ╭━Ⓓ✒Ⓡ✒ⒼⓄ✒Ⓝ✒\n"
" ╰╮┏━┳┳┓┏┳┳┓┏┳┳┳┓\n"
" ┏┻╋━┻┻┫┣┻┻┫┣┻┻┻┫\n"
" ┃HLO▪┃KMI DTANG LGI┃\n"
" ┗ⓞⓞ┻┻ⓞ━━ⓞ┻┻ⓞ━╯\n"
"UNTUK MENGGUSUR\nROOM KALIAN\n"
".. (҂`_´)\n"
" <,︻╦̵̵̿╤━ ҉ ~ •"
"█۞███████]▄▄▄▄▄▄▃●●\n"
"▂▄▅█████████▅▄▃▂…"
"[██████████████████]\n"
"◥⊙⊙▲⊙▲⊙▲⊙▲⊙▲⊙\n"
"╭━╮╭━╮\n"
"┃┃╰╯┃┃\n"
"┃╭╮╭╮┣┳━╮╭━━┳━━┳┳━╮\n"
"┃┃┃┃┃┣┫╭╮┫╭╮┃╭╮┣┫╭╯\n"
"┃┃┃┃┃┃┃┃┃┃╰╯┃╰╯┃┃┃\n"
"╰╯╰╯╰┻┻╯╰┻━╮┣━╮┣┻╯\n"
"╱╱╱╱╱╱╱╱╱╭━╯┣━╯┃\n"
"╱╱╱╱╱╱╱╱╱╰━━┻━━╯\n"
"👿━━━━━━━━━━━━━👿"
"Ⓣⓜⓟⓐ Ⓑⓐⓢⓐ_Ⓑⓐⓢⓘ\n"
"Ⓡⓐⓣⓐ ⓖⓐ ⓡⓐⓣⓐ\n"
"Ⓨⓖ ⓟⓝⓣⓘⓝⓖ ⓚⓘⓑⓐⓡ\n"
"Ⓣⓐⓝⓖⓚⓘⓢ Ⓖⓞⓑⓛⓞⓚ\n"
"👿━━━━━━━━━━━━━👿\n"
"╔══╗╔═╗╔══╗╔═╦═╗\n"
"╚╗╔╝║╦╝║╔╗║║║║║║\n"
"━║║━║╩╗║╠╣║║║║║║\n"
"━╚╝━╚═╝╚╝╚╝╚╩═╩╝\n"
"👿━━━━━━━━━━━━━👿\n"
"╔══╗ ╔╦╗\n"
"╚╗╗║ ║╔╝\n"
"╔╩╝║ ║╚╗\n"
"╚══╝ ╚╩╝\n"
"👿━━━━━━━━━━━━━👿\n"
"Ⓓⓡⓐⓖⓞⓝ_Ⓚⓘⓛⓛⓔⓡ\n"
"Ⓟⓤⓝⓨⓐ👿━━👿Ⓡⓐⓣⓐ Ⓝⓘ\n"
"Ⓜⓐⓗ━👿━\n"
"╔═╗╔══╗╔══╗╔══╗\n"
"║╬║║╔╗║╚╗╔╝║╔╗║\n"
"║╗╣║╠╣║━║║━║╠╣║\n"
"╚╩╝╚╝╚╝━╚╝━╚╝╚╝\n"
"━━━━━━━━━━━━━━━\n"
"╔═╗╔══╗╔══╗╔══╗\n"
"║╬║║╔╗║╚╗╔╝║╔╗║\n"
"║╗╣║╠╣║━║║━║╠╣║\n"
"╚╩╝╚╝╚╝━╚╝━╚╝╚╝\n"
"━━━━━━━━━━━━━━━\n"
"╔═╗╔══╗╔══╗╔══╗\n"
"║╬║║╔╗║╚╗╔╝║╔╗║\n"
"║╗╣║╠╣║━║║━║╠╣║\n"
"╚╩╝╚╝╚╝━╚╝━╚╝╚╝\n"
"━━━━━━━━━━━━━━━\n"
">>>Ⓑⓨⓔ_Ⓑⓨⓔ ⒼⒸ Ⓛⓐⓚⓝⓐⓣ>><\nⒹⓝⓓⓐⓜ Ⓒⓐⓡⓘ Ⓚⓜⓘ\n<<<<<<<<<>>\nhttp://line.me/ti/p/~reza.p.i.p\nhttp://line.me/ti/p/~reza.p.i.p")
elif msg.text.lower() == 'promo':
cl.sendText(msg.to, "──────┅❇͜͡❇͜͡☆͜͡❇͜͡❇┅──────\nᴼᴾᴱᴺ ᴼᴿᴰᴱᴿ\n────────┅┅───────\n➣ꜱᴇʟꜰʙᴏᴛ ᴏɴʟʏ\n➣ꜱᴇʟꜰʙᴏᴛ + ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 2 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 3 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 4 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 5 ᴀꜱɪꜱᴛ\n➣ʙᴏᴛᴘʀᴏᴛᴇᴄᴛ 3-11 ʙᴏᴛ ᴀꜱɪꜱᴛ\n➣ɴᴇᴡ ꜱᴄʀɪᴘᴛ\n➣ʜʀɢᴀ ʙɪꜱᴀ ɴᴇɢᴏ\n─────────┅┅─────────\n ✯❇͜͡❇͜͡C͜͡r͜͡e͜͡a͜͡t͜͡o͜͡r✯͜͡$͜͡ë͜͡I͜͡F͜͡-͜͡฿͜͜͡͡o͜͡t͜͡ ͜͡❇͜͡❇✯\nline.me/ti/p/~reza.p.i.p\nline.me/ti/p/~ryansakra\n➣ѕєʟғвот κɪcκєʀ_+_ᴘʀᴏᴛᴇᴄᴛ\n────────┅❇͜͡❇͜͡☆͜͡❇͜͡❇┅────────")
elif msg.text.lower() == 'harga':
cl.sendText(msg.to, "╭══════════\n║⚫─[ DAFTAR HARGA ]─⚫ \n║SELFBOT ONLY = 75K /BLN\n║2 ASSIST = 100K /BLN\n║5 ASSIST = 200K /BLN\n║10 ASSIST = 300K /BLN\n║\n║PROTECT ANTIJS\n║\n║2 BOT + ANTIJS = 150K /BLN\n║5 BOT + ANTIJS = 300K /BLN\n║10 BOT + ANTIJS = 500K /BLN\n║\n║═ই\═ANDA BERMINAT\n║ SILAHKAN ADD CONTACT \n║ DIBAWAH INI \n║\n║http://line.me/ti/p/~reza.p.i.p\n║ TERIMA KASIH \n║\n╰════════════")
elif msg.text.lower() == 'dkbot':
cl.sendText(msg.to, "[🔰 Ⓓⓚ~ⒷⓄⓣ☯t]")
ki.sendText(msg.to, "╚☆Ⓢⓘⓐⓟ☆╗\n╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
kk.sendText(msg.to, "╚☆Ⓢⓘⓐⓟ☆╗\n╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
kc.sendText(msg.to, "╚☆Ⓢⓘⓐⓟ☆╗\n╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
kb.sendText(msg.to, "╚☆Ⓢⓘⓐⓟ☆╗\n╚Ⓚⓞⓜⓐⓝⓓⓝ╮╗")
kd.sendText(msg.to, "[🔰ⒹⓄⓝⓔ]")
cl.sendText(msg.to, "╚☆Ⓐⓜⓐⓝ-☆╗\n╚ⒷⓄⓈ☆╮╗")
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '2'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift1"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift2"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
cl.sendMessage(msg)
elif msg.text in ["æ„›ã®ãƒ—レゼント","Gift3"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
cl.sendMessage(msg)
cl.sendMessage(msg)
elif msg.text in ["Cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
cl.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
print "COMMENT DENIED"
elif msg.text in ["Canc"]:
if msg.toType == 2:
G = kk.getGroup(msg.to)
if G.invitee is not None:
gInviMids = [contact.mid for contact in G.invitee]
kk.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"No one is inviting")
else:
kk.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Qr on","Ghost link on"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👻")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["R1 ourl","R1 link on"]:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
ki.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done👻 ")
else:
cl.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["R2 ourl","R2 link on"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = False
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done ")
else:
kk.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Qr off","Sw link off"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
cl.updateGroup(X)
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["R1 curl","R1 link off"]:
if msg.toType == 2:
X = ki.getGroup(msg.to)
X.preventJoinByTicket = True
ki.updateGroup(X)
if wait["lang"] == "JP":
ki.sendText(msg.to,"Done ")
else:
ki.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
ki.sendText(msg.to,"Can not be used outside the group")
else:
ki.sendText(msg.to,"Not for use less than group")
elif msg.text in ["R2 curl","R2 link off"]:
if msg.toType == 2:
X = kk.getGroup(msg.to)
X.preventJoinByTicket = True
kk.updateGroup(X)
if wait["lang"] == "JP":
kk.sendText(msg.to,"Done ")
else:
kk.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
kk.sendText(msg.to,"Can not be used outside the group")
else:
kk.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Ginfo"]:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"➰ NAME GROUP ➰\n" + str(ginfo.name) + "\n\n🔹 Group Id \n" + msg.to + "\n\n🔹Creator \n" + gCreator + "\n\n🔹Status profile \nhttp://dl.profile.line.naver.jp/" + ginfo.pictureStatus + "\n\n~ Anggota :: " + str(len(ginfo.members)) + " Members\n~ Pending :: " + sinvitee + " People\n~ URL :: ")
cl.sendMessage(msg)
# elif "Music" in msg.text.lower():
# songname = msg.text.lower().replace("Music","")
# params = {"songname":" songname"}
# r = requests.get('https://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
# data = r.text
# data = json.loads(data)
# for song in data:
#cl.sendMessage(msg.to,song[4])
# elif "jointicket " in msg.text.lower():
# rplace=msg.text.lower().replace("jointicket ")
# if rplace == "on":
# wait["atjointicket"]=True
# elif rplace == "off":
# wait["atjointicket"]=False
# cl.sendText(msg.to,"Auto Join Group by Ticket is %s" % str(wait["atjointicket"]))
# elif '/ti/g/' in msg.text.lower():
# link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
# links = link_re.findall(msg.text)
# n_links=[]
# for l in links:
# if l not in n_links:
# n_links.append(l)
#for ticket_id in n_links:
#if wait["atjointicket"] == True:
# group=cl.findGroupByTicket(ticket_id)
# cl.acceptGroupInvitationByTicket(group.mid,ticket_id)
# cl.sendText(msg.to,"Sukses join ke grup %s" % str(group.name))
elif "Gc" == msg.text:
if msg.from_ in admin:
try:
group = cl.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
cl.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
cl.sendMessage(M)
cl.sendText(msg.to,"old user")
elif "Name: " in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Name: ","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kb.getProfile()
profile.displayName = string
kb.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kd.getProfile()
profile.displayName = string
kd.updateProfile(profile)
cl.sendText(msg.to,"Update Name all bot succes")
elif "Mybio: " in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Mybio: ","")
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
cl.sendText(msg.to,"Update Bio👉 " + string + " 👈Succes")
elif "Allbio: " in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Allbio: ","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kb.getProfile()
profile.statusMessage = string
kb.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kd.getProfile()
profile.statusMessage = string
kd.updateProfile(profile)
elif "Rename: " in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Rename: ","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"[●] Update Name👉 " + string + "👈")
elif "Mymid" == msg.text:
cl.sendText(msg.to, msg.from_)
elif "TL: " in msg.text:
if msg.from_ in admin:
tl_text = msg.text.replace("TL: ","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif ("Cn: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
profile = cl.getProfile()
X = msg.text.replace("Cn: ","")
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to,"Name ~ " + X + " Done")
else:
cl.sendText(msg.to,"Failed")
elif ("2cn: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
profile = ki.getProfile()
X = msg.text.replace("2cn: ","")
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to,"name " + X + " done")
else:
ki.sendText(msg.to,"Failed")
elif ("3cn: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
profile = kk.getProfile()
X = msg.text.replace("3cn: ","")
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to,"name " + X + " done")
else:
kk.sendText(msg.to,"Failed")
elif ("4cn: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
profile = kc.getProfile()
X = msg.text.replace("4cn: ","")
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to,"name " + X + " done")
else:
kc.sendText(msg.to,"Failed")
elif ("5cn: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
profile = kb.getProfile()
X = msg.text.replace("5cn: ","")
profile.displayName = X
kb.updateProfile(profile)
kb.sendText(msg.to,"name " + X + " done")
else:
kb.sendText(msg.to,"Failed")
elif ("6cn: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
profile = kd.getProfile()
X = msg.text.replace("6cn: ","")
profile.displayName = X
kd.updateProfile(profile)
kd.sendText(msg.to,"name " + X + " done")
else:
kd.sendText(msg.to,"Failed")
elif ("7cn: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
profile = sw.getProfile()
X = msg.text.replace("10cn: ","")
profile.displayName = X
sw.updateProfile(profile)
sw.sendText(msg.to,"name " + X + " done")
else:
sw.sendText(msg.to,"Failed")
elif ("Last: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
profile = kd.getProfile()
X = msg.text.replace("Last: ","")
profile.displayName = X
kd.updateProfile(profile)
kd.sendText(msg.to,"name " + X + " done")
else:
kd.sendText(msg.to,"Failed")
elif ("11cn: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
profile = sw.getProfile()
X = msg.text.replace("11cn: ","")
profile.displayName = X
sw.updateProfile(profile)
sw.sendText(msg.to,"Changed ==[ " + X + " ]== Succes")
else:
sw.sendText(msg.to,"Failed")
elif ("Mid " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendText(msg.to,key1)
elif ("Mid: " in msg.text):
if msg.from_ in admin:
mmid = msg.replace("Mid: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#---------------------------------------------------------------------------------------------
elif msg.text in ["Protect on"]:
if msg.from_ in admin:
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Already on")
else:
cl.sendText(msg.to,"Already on")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already on")
elif msg.text in ["Protect qr on"]:
if msg.from_ in admin:
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"QR protect already on")
else:
cl.sendText(msg.to,"Already on")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On")
elif msg.text in ["Protect invite on"]:
if msg.from_ in admin:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect invite already on")
else:
cl.sendText(msg.to,"Already on")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On")
elif msg.text in ["Cancelprotect on"]:
if msg.from_ in admin:
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect cancel already on")
else:
cl.sendText(msg.to,"Already on")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already ON")
else:
cl.sendText(msg.to,"It is already On")
elif msg.text in ["Gnamelock on"]:
if msg.from_ in admin:
if wait["Protectgroupname"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect group name on")
else:
cl.sendText(msg.to,"Protect group name on")
else:
wait["Protectgroupname"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Gnamelock already on")
else:
cl.sendText(msg.to,"Gnamelock already on")
elif msg.text in ["Gnamelock off"]:
if msg.from_ in admin:
if wait["Protectgroupname"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect group name off")
else:
cl.sendText(msg.to,"Protect group name off")
else:
wait["Protectgroupname"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Gnamelock already off")
else:
cl.sendText(msg.to,"Gnamelock already off")
#---------------------------------------------------------------------------------------------
elif msg.text in ["Allprotect on","Sw on","S on"]:
if msg.from_ in admin:
if wait["protectJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect join already on")
else:
cl.sendText(msg.to,"Protect join already ON")
else:
wait["protectJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Kick Join already On")
else:
cl.sendText(msg.to,"done")
if wait["protect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Already on")
else:
cl.sendText(msg.to,"Protection already on")
else:
wait["protect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection already ON")
else:
cl.sendText(msg.to,"Protection already On")
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ProtectQR Already on")
else:
cl.sendText(msg.to,"ProtectQR already on")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"ProtectQR Already On")
else:
cl.sendText(msg.to,"ProtectQR already On")
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ProtectInvite Already On")
else:
cl.sendText(msg.to,"ProtectInvite Already ON")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"ProtectInvite already ON")
else:
cl.sendText(msg.to,"ProtectInvite already On")
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"CancelProtect Already On")
else:
cl.sendText(msg.to,"CancelProtect already On")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"CancelProtect already ON")
else:
cl.sendText(msg.to,"CancelProtect already On")
#-----------------------------------------------------------------------------------------
elif msg.text in ["Allprotect off","Sw off","S off"]:
if msg.from_ in admin:
if wait["protectJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protect join already Off")
else:
cl.sendText(msg.to,"Protect join already Off")
else:
wait["protectJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"kick Join already Off")
else:
cl.sendText(msg.to,"done")
if wait["protect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection Already off")
else:
cl.sendText(msg.to,"Protection Already off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection already Off")
else:
cl.sendText(msg.to,"Protection already off")
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ProtectQR Already off")
else:
cl.sendText(msg.to,"ProtectQR Already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ProtectQR already off")
else:
cl.sendText(msg.to,"ProtectQR already Off")
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"ProtectInvite Already off")
else:
cl.sendText(msg.to,"ProtectInvite Already off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"ProtectInvite already off")
else:
cl.sendText(msg.to,"ProtectInvite already off")
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"CancelProtect Already off")
else:
cl.sendText(msg.to,"CancelProtect Already off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"CancelProtect already off")
else:
cl.sendText(msg.to,"CancelProtect already off")
#----------------------------------------------------------------------------------------------
elif msg.text in ["連絡先:オン","K on","Contact on","顯示:開"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["連絡先:オフ","K off","Contact off","顯示:關"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif msg.text in ["Join on","Auto join:on"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Join off","Auto join:off"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
elif ("Auto cancel:" in msg.text):
if msg.from_ in admin:
try:
strnum = msg.text.replace("Auto cancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["強制自動退出:オン","Leave on","Auto leave:on","強制自動退出:開"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["強制自動退出:オフ","Leave off","Auto leave:off","強制自動退出:關"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
elif msg.text in ["共有:オン","Share:on","Share on"]:
if msg.from_ in admin:
if wait["timeline"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["共有:オフ","Share:off","Share off"]:
if msg.from_ in admin:
if wait["timeline"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Dkbot setting","Set","Set view","Setting"]:
if msg.from_ in admin:
md = " ✮🔰「 DKBOT SETING 」🔰✮\n\n╔══════════════\n"
if wait["contact"] == True: md+="🔹 Contact → on\n"
else: md+="🔹 Contact → off\n"
if wait["autoJoin"] == True: md+="🔹 Auto join → on\n"
else: md +="🔹 Auto join → off\n"
if wait["autoCancel"]["on"] == True: md+="🔹 Auto cancel → "+ str(wait["autoCancel"]["members"]) + "\n"
else: md+= "🔹 Auto cancel → off\n"
if wait["likeOn"] == True: md+="🔹 Auto Like → on\n"
else: md+="🔹 Auto Like → off\n"
if wait["leaveRoom"] == True: md+="🔹 Auto leave → on\n"
else: md+="🔹 Auto leave → off\n"
if wait["Backup"] == True: md+="🔹 Auto backup → on\n"
else:md+="🔹 Auto backup → off\n"
if wait["timeline"] == True: md+="🔹 Share → on\n"
else: md+="🔹 Share → off\n"
if wait["autoAdd"] == True: md+="🔹 Auto add → on\n"
else: md+="🔹 Auto add → off\n"
if wait["commentOn"] == True: md+="🔹 Comment → on\n"
else: md+="🔹 Comment → off\n"
if wait["protect"] == True: md+="🔐 Protect → on\n"
else:md+="🔐 Protect → off\n"
if wait["linkprotect"] == True: md+="🔐 QRProtect → on\n"
else:md+="🔐 QRprotect → off\n"
if wait["inviteprotect"] == True: md+="🔐 Protect invite → on\n"
else:md+="🔐 Protect invite → off \n"
if wait["Protectgroupname"] == True: md+="🔐 Gnamelock → on\n"
else:md+="🔐 Gnamelock → off \n"
if wait["cancelprotect"] == True: md+="🔐 Protect cancel → on\n"
else:md+="🔐 Protect cancel → off\n"
if wait["protectJoin"] == True: md+="🔐 Protectjoin → on\n"
else:md+="🔐 Protect join → off\n"
cl.sendText(msg.to,md + "╚═════════════\n\n 🔐 🔰DK BOTS🔰「👻」")
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
print "COMMENT DENIED"
elif msg.text in ["Group id","List group"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
h = ""
for i in gid:
h += "[🔹] %s \n" % (cl.getGroup(i).name + " :::: " + str(len (cl.getGroup(i).members)))
cl.sendText(msg.to, "==== [GROUPS] ====\n\n"+ h +"\n[●] TOTAL GROUPS : " +str(len(gid)))
elif msg.text in ["Reject"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All invitations have been refused")
else:
cl.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif msg.text in ["Cancelall1"]:
if msg.from_ in admin:
gid = ki.getGroupIdsInvited()
for i in gid:
ki.rejectGroupInvitation(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"All invitations have been refused")
else:
ki.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif msg.text in ["Cancelall2"]:
if msg.from_ in admin:
gid = kk.getGroupIdsInvited()
for i in gid:
kk.rejectGroupInvitation(i)
if wait["lang"] == "JP":
kk.sendText(msg.to,"All invitations have been refused")
else:
kk.sendText(msg.to,"æ‹’ç»äº†å…¨éƒ¨çš„邀请。")
elif msg.text in ["Backup on","Auto backup on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already On")
else:
cl.sendText(msg.to,"Backup already On")
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup already On")
else:
cl.sendText(msg.to,"already on")
elif msg.text in ["Backup off","Auto backup off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
else:
cl.sendText(msg.to,"Backup already Off")
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Backup already Off")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Auto like on"]:
if msg.from_ in admin:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
elif msg.text in ["Auto like off"]:
if msg.from_ in admin:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オン","Add on","Auto add:on","è‡ªå‹•è¿½åŠ ï¼šé–‹"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["è‡ªå‹•è¿½åŠ :オフ","Add off","Auto add:off","è‡ªå‹•è¿½åŠ ï¼šé—œ"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif "Cek msg" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,"Your message ⤵\n\n" + str(wait["message"]))
elif "Message set: " in msg.text:
if msg.from_ in admin:
m = msg.text.replace("Message set: ","")
if m in [""," ","\n",None]:
cl.sendText(msg.to,"Error")
else:
wait["message"] = m
cl.sendText(msg.to,"Changed ⤵\n\n" + m)
elif "Comment set: " in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Comment set: ","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"Error")
else:
wait["comment"] = c
cl.sendText(msg.to,"Changed ⤵\n\n" + c)
elif msg.text in ["Comment on","Comment:on","自動首é 留言:開"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å¼€ã€‚")
elif msg.text in ["コメント:オフ","Comment:off","Comment off","自動首é 留言:關"]:
if msg.from_ in admin:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"è¦äº†å…³æ–。")
elif msg.text in ["Welcome on"]:
if wait["welcome"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Welcome already off")
else:
wait["welcome"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Welcome already on")
if wait["goodbye"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Message goodbye already on")
else:
wait["goodbye"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Message goodbye already on")
elif msg.text in ["Welcome off"]:
if wait["welcome"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Welcome already off")
else:
wait["welcome"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Welcome already off")
if wait["goodbye"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Message goodbye off")
else:
wait["goodbye"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Message goodbye off")
elif msg.text in ["Cek comment"]:
if msg.from_ in admin:
cl.sendText(msg.to,"Your comment ⤵\n\n" + str(wait["comment"]))
elif msg.text in ["Bot creator","Creator"]:
msg.contentType = 13
msg.contentMetadata = {'mid': 'uf50d888821632d32461e37153ac775c0'}
cl.sendMessage(msg)
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["1gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
ki.updateGroup(x)
gurl = ki.reissueGroupTicket(msg.to)
ki.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["2gurl"]:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
kk.updateGroup(x)
gurl = kk.reissueGroupTicket(msg.to)
kk.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can't be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
elif msg.text in ["Comment bl "]:
if msg.from_ in admin:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "[]" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Check"]:
if msg.from_ in admin:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"Nothing a blacklist")
else:
cl.sendText(msg.to,"Blacklist user")
kontak = cl.getContact(commentBlack)
num=1
msgs="Blacklist user\n"
for ids in kontak:
msgs+="\n%si. %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n\n[●] Total %i blacklist user(s)" % len(kontak)
cl.sendText(msg.to, msgs)
elif msg.text in ["Jam on"]:
if msg.from_ in admin:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Jam off"]:
if msg.from_ in admin:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif msg.text in ["Change clock "]:
if msg.from_ in admin:
n = msg.text.replace("Change clock ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"changed to\n\n" + n)
elif msg.text in ["Up"]:
if msg.from_ in admin:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,".")
else:
cl.sendText(msg.to,"Please turn on the name clock.")
      #-----------------------------------------------------------------------
      # "youtube <query>": scrape YouTube search results and post the first hit.
      elif 'youtube ' in msg.text:
        try:
          # NOTE(review): the trigger matches lowercase 'youtube ' but only the
          # capitalized 'Youtube ' prefix is stripped, so a lowercase command
          # leaves 'youtube ' inside the search query — confirm intended.
          textToSearch = (msg.text).replace('Youtube ', "").strip()
          query = urllib.quote(textToSearch)
          url = "https://www.youtube.com/results?search_query=" + query
          response = urllib2.urlopen(url)
          html = response.read()
          soup = BeautifulSoup(html, "html.parser")
          results = soup.find(attrs={'class':'yt-uix-tile-link'})
          cl.sendText(msg.to,'https://www.youtube.com' + results['href'])
        except:
          # any scrape/parse failure falls through to a generic reply
          cl.sendText(msg.to,"Could not find it")
      # Disabled "Remove chat" handler kept by the author for reference.
      # elif "Remove chat" in msg.text:
        #if msg.from_ in admin:
            #try:
               # cl.removeAllMessages(op.param2)
               # ki.removeAllMessages(op.param2)
                #kc.removeAllMessages(op.param2)
               # kb.removeAllMessages(op.param2)
                #kd.removeAllMessages(op.param2)
                #ke.removeAllMessages(op.param2)
                #kg.removeAllMessages(op.param2)
                #h.removeAllMessages(op.param2)
                #print "Success Remove Chat"
           # except:
              #  try:
                  #  cl.sendText(msg.to,"Chat telah dihapus")
                   # pass
      #-----------------------------------------------------------------------
      # "Lurkers": set a read-point for this chat. Later reads are collected in
      # wait2 keyed by chat id so "View" can report who saw messages.
      elif msg.text in ["Lurkers"]:
        cl.sendText(msg.to, "Waiting in lurkers Har Har")
        try:
          # drop any previous read-point for this chat before re-arming
          del wait2['readPoint'][msg.to]
          del wait2['readMember'][msg.to]
        except:
          pass
        now2 = datetime.now()
        wait2['readPoint'][msg.to] = msg.id
        wait2['readMember'][msg.to] = ""
        wait2['setTime'][msg.to] = datetime.now().strftime("%Y-%m-%d %H:%M")
        wait2['ROM'][msg.to] = {}
        print wait2
      # "View": report who has read messages since the last "Lurkers" mark.
      elif msg.text in ["View"]:
        if msg.to in wait2['readPoint']:
          if wait2["ROM"][msg.to].items() == []:
            chiya = ""
          else:
            chiya = ""
            for rom in wait2["ROM"][msg.to].items():
                print rom
                chiya += rom[1] + "\n"
          # NOTE(review): `setTime` is an unbound name here — this raises
          # NameError; wait2['setTime'][msg.to] (set by "Lurkers") was surely
          # intended. Confirm and fix.
          cl.sendText(msg.to, "╔════════════\n%s\n\n╠═══════════\n\n%s\n╠═════════════\n║Reading point creation:\n║ [%s]\n╚══════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
        else:
          cl.sendText(msg.to, "Ketik「Lurkers」dulu pekok ahhh Har Har")
      #-------------------------------------------------
      # "Spam @<name>" (admin only): flood the mentioned member's private chat
      # from every bot account.
      elif "Spam @" in msg.text:
        if msg.from_ in admin:
          # NOTE(review): the trigger is "Spam @" but the code strips
          # "Spam to @", which never occurs — _nametarget therefore keeps the
          # "Spam @" prefix and can never equal a displayName, so the loop
          # below silently matches nothing. Confirm the intended prefix.
          _name = msg.text.replace("Spam to @","")
          _nametarget = _name.rstrip(' ')
          gs = cl.getGroup(msg.to)
          for g in gs.members:
            if _nametarget == g.displayName:
              cl.sendText(msg.to,"Wating in progres...")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              ke.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              ke.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              ki.sendText(g.mid,"Your Account Has Been Spammed !")
              kk.sendText(g.mid,"Your Account Has Been Spammed !")
              kc.sendText(g.mid,"Your Account Has Been Spammed !")
              kb.sendText(g.mid,"Your Account Has Been Spammed !")
              kd.sendText(g.mid,"Your Account Has Been Spammed !")
              cl.sendText(msg.to, "Succes")
              print " Spammed !"
      #--------------------------------------------------------------------------
      # "Admin @<name>" (owner only): add the mentioned member to the runtime
      # admin list (not persisted — lost on restart).
      elif "Admin @" in msg.text:
        if msg.from_ in owner:
          print "[Command]Staff add executing"
          # NOTE(review): trigger is "Admin @" but "Admin on @" is stripped, so
          # _nametarget keeps the "Admin @" prefix and never matches a
          # displayName. Confirm intended prefix.
          _name = msg.text.replace("Admin on @","")
          _nametarget = _name.rstrip(' ')
          # only the last assignment wins — the first three getGroup calls are
          # discarded immediately
          gs = cl.getGroup(msg.to)
          gs = ki.getGroup(msg.to)
          gs = kk.getGroup(msg.to)
          gs = kc.getGroup(msg.to)
          targets = []
          for g in gs.members:
            if _nametarget == g.displayName:
              targets.append(g.mid)
          if targets == []:
            ki.sendText(msg.to,"Contact not found")
          else:
            for target in targets:
              try:
                admin.append(target)
                cl.sendText(msg.to,"Admin Ditambahkan di perangkat Bot")
              except:
                pass
            print "[Command]Staff add executed"
        else:
          cl.sendText(msg.to,"Command tidak bisa")
          cl.sendText(msg.to,"Bot ready in admin only")
      # NOTE(review): this branch repeats the condition above and is therefore
      # unreachable dead code — the remove path ("Admin off @") can never run.
      elif "Admin @" in msg.text:
        if msg.from_ in owner:
          print "[Command]Staff remove executing"
          _name = msg.text.replace("Admin off @","")
          _nametarget = _name.rstrip(' ')
          gs = cl.getGroup(msg.to)
          gs = ki.getGroup(msg.to)
          gs = kk.getGroup(msg.to)
          gs = kc.getGroup(msg.to)
          targets = []
          for g in gs.members:
            if _nametarget == g.displayName:
              targets.append(g.mid)
          if targets == []:
            ki.sendText(msg.to,"Contact not found")
          else:
            for target in targets:
              try:
                admin.remove(target)
                cl.sendText(msg.to,"Admin Dihapus dari perangkat Bot")
              except:
                pass
            print "[Command]Staff remove executed"
        else:
          cl.sendText(msg.to,"Command tidak bisa")
          cl.sendText(msg.to,"Bot ready in admin only")
      # "Admin list"/"List admin": print the display name of every admin mid.
      elif msg.text in ["Admin list","List admin"]:
        if admin == []:
          cl.sendText(msg.to,"The admin is empty")
        else:
          cl.sendText(msg.to,"This is admin bot")
          mc = ""
          for mi_d in admin:
            mc += "[●] " + cl.getContact(mi_d).displayName + "\n"
          cl.sendText(msg.to,mc)
          print "[Command]Stafflist executed"
      #~~~~~~___________________________________________________________
      #------------------------------------------------------------------------------
      # Mimic mode: when enabled and the sender is a registered target, echo
      # their message back into the chat (text, or sticker/contact metadata).
      elif msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
        text = msg.text
        if text is not None:
          cl.sendText(msg.to,text)
        else:
          if msg.contentType == 7:
            # sticker message — resend with a fixed default sticker id
            msg.contentType = 7
            msg.text = None
            msg.contentMetadata = {
            "STKID": "6",
            "STKPKGID": "1",
            "STKVER": "100" }
            cl.sendMessage(msg)
          elif msg.contentType == 13:
            # contact card — resend the same mid
            msg.contentType = 13
            msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
            cl.sendMessage(msg)
      # "Mimic:<cmd>" (admin only): on/off, add/del targets, list targets.
      elif "Mimic:" in msg.text:
        if msg.from_ in admin:
          cmd = msg.text.replace("Mimic:","")
          if cmd == "on":
            if mimic["status"] == False:
              mimic["status"] = True
              cl.sendText(msg.to,"Mimic on")
            else:
              cl.sendText(msg.to,"Mimic already on")
          elif cmd == "off":
            if mimic["status"] == True:
              mimic["status"] = False
              cl.sendText(msg.to,"Mimic off")
            else:
              cl.sendText(msg.to,"Mimic already off")
          elif "add " in cmd:
            target0 = msg.text.replace("Mimic:add ","")
            target1 = target0.lstrip()
            target2 = target1.replace("@","")
            target3 = target2.rstrip()
            _name = target3
            gInfo = cl.getGroup(msg.to)
            targets = []
            for a in gInfo.members:
              if _name == a.displayName:
                targets.append(a.mid)
            if targets == []:
              cl.sendText(msg.to,"No targets")
            else:
              for target in targets:
                try:
                  mimic["target"][target] = True
                  cl.sendText(msg.to,"Success added target")
                  # NOTE(review): `mid` in bots like this is usually a mid
                  # string, not a client object — this call presumably raises
                  # and lands in the except below; confirm.
                  mid.sendMessageWithMention(msg.to,target)
                  break
                except:
                  mid.sendText(msg.to,)
                  break
          elif "del " in cmd:
            target0 = msg.text.replace("Mimic:del ","")
            target1 = target0.lstrip()
            target2 = target1.replace("@","")
            target3 = target2.rstrip()
            _name = target3
            gInfo = cl.getGroup(msg.to)
            targets = []
            for a in gInfo.members:
              if _name == a.displayName:
                targets.append(a.mid)
            if targets == []:
              cl.sendText(msg.to,"No targets")
            else:
              for target in targets:
                try:
                  del mimic["target"][target]
                  cl.sendText(msg.to,"Success deleted target")
                  mid.sendMessageWithMention(msg.to,target)
                  break
                except:
                  mid.sendText(msg.to,)
                  break
          elif "target" in cmd:
            if mimic["target"] == {}:
              ki.sendText(msg.to,"No target")
            else:
              lst = "List Target"
              total = len(mimic["target"])
              for a in mimic["target"]:
                if mimic["target"][a] == True:
                  stat = "On"
                else:
                  stat = "Off"
                # NOTE(review): `start` is unbound (the flag computed above is
                # `stat`) and `me` is not defined in this scope — both raise
                # NameError when reached. Confirm and fix.
                lst += "\n->" + me.getContact(a).displayName + " | " + start
              # NOTE(review): `total` is an int; "..." + total raises TypeError —
              # str(total) was presumably intended.
              ki.sendText(msg.to,lst + "\nTotal:" + total)
      #----------------------------------------------------------------------------
      # "Staff @<name>" (admin only): add the mentioned member to the runtime
      # staff list.
      elif "Staff @" in msg.text:
        if msg.from_ in admin:
          print "[Command]Staff add executing"
          # NOTE(review): trigger is "Staff @" but "Staff on @" is stripped —
          # _nametarget keeps the "Staff @" prefix and never matches a
          # displayName. Confirm intended prefix.
          _name = msg.text.replace("Staff on @","")
          _nametarget = _name.rstrip(' ')
          # only the last getGroup result is kept; the first three are discarded
          gs = cl.getGroup(msg.to)
          gs = ki.getGroup(msg.to)
          gs = kk.getGroup(msg.to)
          gs = kc.getGroup(msg.to)
          targets = []
          for g in gs.members:
            if _nametarget == g.displayName:
              targets.append(g.mid)
          if targets == []:
            ki.sendText(msg.to,"Contact not found")
          else:
            for target in targets:
              try:
                staff.append(target)
                cl.sendText(msg.to,"Staff Ditambahkan diperangkat bot")
              except:
                pass
            print "[Command]Staff add executed"
        else:
          cl.sendText(msg.to,"Command denied.")
          cl.sendText(msg.to,"Admin & staff permission required.")
      # "Staff off @<name>" (admin only): remove the member from the staff list.
      elif "Staff off @" in msg.text:
        if msg.from_ in admin:
          print "[Command]Staff remove executing"
          _name = msg.text.replace("Staff off @","")
          _nametarget = _name.rstrip(' ')
          gs = cl.getGroup(msg.to)
          gs = ki.getGroup(msg.to)
          gs = kk.getGroup(msg.to)
          gs = kc.getGroup(msg.to)
          targets = []
          for g in gs.members:
            if _nametarget == g.displayName:
              targets.append(g.mid)
          if targets == []:
            ki.sendText(msg.to,"Contact not found")
          else:
            for target in targets:
              try:
                staff.remove(target)
                cl.sendText(msg.to,"Staff Dihapus dari perangkat bot")
              except:
                pass
            print "[Command]Staff remove executed"
        else:
          cl.sendText(msg.to,"Command denied.")
          cl.sendText(msg.to,"Admin & staff permission required.")
      # "Staff list": print the display name of every staff mid.
      elif msg.text in ["Staff list"]:
        if staff == []:
          cl.sendText(msg.to,"The stafflist is empty")
        else:
          cl.sendText(msg.to,"Staff in bot")
          mc = ""
          for mi_d in staff:
            mc += "[●]" + cl.getContact(mi_d).displayName + "\n"
          cl.sendText(msg.to,mc)
          print "[Command]Stafflist executed"
      #-------------------------------------------------------------------------------
      # "Dor @<name>" (admin only): have the `sw` (ghost) account join via a
      # fresh group ticket, kick the named member, then leave again.
      elif "Dor " in msg.text:
        if msg.from_ in admin:
          nk0 = msg.text.replace("Dor ","")
          nk1 = nk0.lstrip()
          nk2 = nk1.replace("@","")
          nk3 = nk2.rstrip()
          _name = nk3
          gs = cl.getGroup(msg.to)
          targets = []
          for s in gs.members:
            # substring match — may hit several members
            if _name in s.displayName:
              targets.append(s.mid)
          if targets == []:
            # NOTE(review): bare `sendMessage` is not defined in view —
            # presumably cl.sendText was intended; confirm.
            sendMessage(msg.to,"user does not exist")
            pass
          else:
            for target in targets:
              try:
                G = cl.getGroup(msg.to)
                ginfo = cl.getGroup(msg.to)
                G.preventJoinByTicket = False
                cl.updateGroup(G)
                invsend = 0
                Ticket = cl.reissueGroupTicket(msg.to)
                sw.acceptGroupInvitationByTicket(msg.to,Ticket)
                time.sleep(0.2)
                G = cl.getGroup(msg.to)
                G.preventJoinByTicket = True
                sw.kickoutFromGroup(msg.to,[target])
                sw.leaveGroup(msg.to)
                cl.updateGroup(G)
                # NOTE(review): `g` here leaks from an earlier loop; `s`/`target`
                # was probably meant.
                print (msg.to,[g.mid])
              except:
                # NOTE(review): failures are reported as success — misleading.
                ki.sendText(msg.to,"Succes ")
                kk.sendText(msg.to,"Bye")
      # "Nk @<name>" (admin only): same join-kick-leave dance using `ki`.
      elif "Nk " in msg.text:
        if msg.from_ in admin:
          nk0 = msg.text.replace("Nk ","")
          nk1 = nk0.lstrip()
          nk2 = nk1.replace("@","")
          nk3 = nk2.rstrip()
          _name = nk3
          gs = cl.getGroup(msg.to)
          targets = []
          for s in gs.members:
            if _name in s.displayName:
              targets.append(s.mid)
          if targets == []:
            sendMessage(msg.to,"user does not exist")
            pass
          else:
            for target in targets:
              try:
                G = cl.getGroup(msg.to)
                ginfo = cl.getGroup(msg.to)
                G.preventJoinByTicket = False
                cl.updateGroup(G)
                invsend = 0
                Ticket = cl.reissueGroupTicket(msg.to)
                ki.acceptGroupInvitationByTicket(msg.to,Ticket)
                time.sleep(0.2)
                G = cl.getGroup(msg.to)
                G.preventJoinByTicket = True
                ki.kickoutFromGroup(msg.to,[target])
                ki.leaveGroup(msg.to)
                cl.updateGroup(G)
                print (msg.to,[g.mid])
              except:
                ki.sendText(msg.to,"Succes ")
                kk.sendText(msg.to,"Bye")
      #-----------------------------------------------
      # "." (admin only): pull the five kicker bots (ki..kd) into this group via
      # a temporary ticket, then re-lock ticket joins.
      elif msg.text in ["."]:
        if msg.from_ in admin:
          G = cl.getGroup(msg.to)
          ginfo = cl.getGroup(msg.to)
          G.preventJoinByTicket = False
          cl.updateGroup(G)
          invsend = 0
          Ticket = cl.reissueGroupTicket(msg.to)
          ki.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kk.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kc.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kb.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kd.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          G = cl.getGroup(msg.to)
          G.preventJoinByTicket = True
          cl.updateGroup(G)
          print "kicker ok"
      # "Join" (admin only): same as "." but re-locks through ki and reissues
      # the ticket (invalidating the one just used).
      elif msg.text in ["Join"]:
        if msg.from_ in admin:
          X = cl.getGroup(msg.to)
          X.preventJoinByTicket = False
          cl.updateGroup(X)
          invsend = 0
          Ticket = cl.reissueGroupTicket(msg.to)
          ki.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kk.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kc.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kb.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kd.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          G = ki.getGroup(msg.to)
          G.preventJoinByTicket = True
          ki.updateGroup(G)
          Ticket = ki.reissueGroupTicket(msg.to)
      # "Sw 2" (admin only): pull the second squad (kj, kf, kl..kq) in the same way.
      elif msg.text in ["Sw 2"]:
        if msg.from_ in admin:
          X = cl.getGroup(msg.to)
          X.preventJoinByTicket = False
          cl.updateGroup(X)
          invsend = 0
          Ticket = cl.reissueGroupTicket(msg.to)
          kj.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kf.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kl.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          km.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kn.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          ko.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kp.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          kq.acceptGroupInvitationByTicket(msg.to,Ticket)
          time.sleep(0.2)
          G = ko.getGroup(msg.to)
          G.preventJoinByTicket = True
          ko.updateGroup(G)
          Ticket = ko.reissueGroupTicket(msg.to)
      # "Kuy3".."Kuy8": pull one specific bot (kc/kb/kd/ke/kg/kh) into the group
      # via a temporary ticket. NOTE(review): unlike the surrounding commands,
      # these branches have no admin check — anyone in the chat can invoke them.
      elif msg.text in ["Kuy3"]:
        X = cl.getGroup(msg.to)
        X.preventJoinByTicket = False
        cl.updateGroup(X)
        invsend = 0
        Ti = cl.reissueGroupTicket(msg.to)
        kc.acceptGroupInvitationByTicket(msg.to,Ti)
        G = kc.getGroup(msg.to)
        G.preventJoinByTicket = True
        kc.updateGroup(G)
        Ticket = kc.reissueGroupTicket(msg.to)
      elif msg.text in ["Kuy4"]:
        X = cl.getGroup(msg.to)
        X.preventJoinByTicket = False
        cl.updateGroup(X)
        invsend = 0
        Ti = cl.reissueGroupTicket(msg.to)
        kb.acceptGroupInvitationByTicket(msg.to,Ti)
        G = kb.getGroup(msg.to)
        G.preventJoinByTicket = True
        kb.updateGroup(G)
        Ticket = kb.reissueGroupTicket(msg.to)
      elif msg.text in ["Kuy5"]:
        X = cl.getGroup(msg.to)
        X.preventJoinByTicket = False
        cl.updateGroup(X)
        invsend = 0
        Ti = cl.reissueGroupTicket(msg.to)
        kd.acceptGroupInvitationByTicket(msg.to,Ti)
        G = kd.getGroup(msg.to)
        G.preventJoinByTicket = True
        kd.updateGroup(G)
        Ticket = kd.reissueGroupTicket(msg.to)
      elif msg.text in ["Kuy6"]:
        X = cl.getGroup(msg.to)
        X.preventJoinByTicket = False
        cl.updateGroup(X)
        invsend = 0
        Ti = cl.reissueGroupTicket(msg.to)
        ke.acceptGroupInvitationByTicket(msg.to,Ti)
        G = ke.getGroup(msg.to)
        G.preventJoinByTicket = True
        ke.updateGroup(G)
        Ticket = ke.reissueGroupTicket(msg.to)
      elif msg.text in ["Kuy7"]:
        X = cl.getGroup(msg.to)
        X.preventJoinByTicket = False
        cl.updateGroup(X)
        invsend = 0
        Ti = cl.reissueGroupTicket(msg.to)
        kg.acceptGroupInvitationByTicket(msg.to,Ti)
        G = kg.getGroup(msg.to)
        G.preventJoinByTicket = True
        kg.updateGroup(G)
        Ticket = kg.reissueGroupTicket(msg.to)
      elif msg.text in ["Kuy8"]:
        X = cl.getGroup(msg.to)
        X.preventJoinByTicket = False
        cl.updateGroup(X)
        invsend = 0
        Ti = cl.reissueGroupTicket(msg.to)
        kh.acceptGroupInvitationByTicket(msg.to,Ti)
        G = kh.getGroup(msg.to)
        G.preventJoinByTicket = True
        kh.updateGroup(G)
        Ticket = kh.reissueGroupTicket(msg.to)
      # "Ghost" (admin only): pull the ghost account `sw` in; non-admins get a
      # denial plus their own contact card echoed back.
      elif msg.text in ["Ghost"]:
        if msg.from_ in admin:
          X = cl.getGroup(msg.to)
          X.preventJoinByTicket = False
          cl.updateGroup(X)
          invsend = 0
          Ti = cl.reissueGroupTicket(msg.to)
          sw.acceptGroupInvitationByTicket(msg.to,Ti)
          G = sw.getGroup(msg.to)
          G.preventJoinByTicket = True
          sw.updateGroup(G)
          Ticket = sw.reissueGroupTicket(msg.to)
        else:
          msg.contentType = 13
          msg.contentMetadata = {"mid": msg.from_}
          cl.sendMessage(msg)
          cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
          print "COMMENT DENIED"
      #-----------------------------------------------
      # "Mentions"/"Tag"/"Tagall" (admin only): mention every group member in
      # batches of 20 using the MENTION contentMetadata payload.
      elif msg.text in ["Mentions","Tag","Tagall"]:
        if msg.from_ in admin:
          group = cl.getGroup(msg.to)
          k = len(group.members)//20
          for j in xrange(k+1):
            # NOTE(review): rebinds the loop-carried `msg` each iteration;
            # works because msg.to is copied first, but fragile.
            msg = Message(to=msg.to)
            txt = u''
            s=0
            d=[]
            for i in group.members[j*20 : (j+1)*20]:
              # each "@Krampus" placeholder spans 9 chars (offsets S..E)
              d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
              s += 9
              txt += u'@Krampus\n'
            msg.text = txt
            msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
            cl.sendMessage(msg)
        else:
          cl.sendText(msg.to,noticeMessage)
          msg.contentType = 13
          msg.contentMetadata = {"mid": msg.from_}
          cl.sendMessage(msg)
          cl.sendText(msg.to, "Acces denied for you 😊\nKetik 「Creator」for contact admin")
      #-----------------------------------------------
      # "Minggat": make the helper bots leave every group they are in.
      # NOTE(review): no admin check here, and the repeated `gid =` assignments
      # discard all but the LAST result (kl's group list), yet the loop then
      # calls leaveGroup on ki..kd with those ids — confirm intended.
      elif msg.text in ["Minggat"]:
        gid = cl.getGroupIdsJoined()
        gid = ki.getGroupIdsJoined()
        gid = kk.getGroupIdsJoined()
        gid = kc.getGroupIdsJoined()
        gid = kb.getGroupIdsJoined()
        gid = kd.getGroupIdsJoined()
        gid = kg.getGroupIdsJoined()
        gid = kh.getGroupIdsJoined()
        gid = kl.getGroupIdsJoined()
        for i in gid:
          ki.leaveGroup(i)
          kk.leaveGroup(i)
          kc.leaveGroup(i)
          kb.leaveGroup(i)
          kd.leaveGroup(i)
        if wait["lang"] == "JP":
          cl.sendText(msg.to,"Bot sudah keluar dari semua grup")
        else:
          cl.sendText(msg.to,"He declined all invitations")
      # "dk.pulang" (admin only): helper bots leave THIS group, with a goodbye.
      elif msg.text in ["dk.pulang"]:
        if msg.from_ in admin:
          if msg.toType == 2:
            ginfo = cl.getGroup(msg.to)
            try:
              cl.sendText(msg.to,"Bye bye " + str(ginfo.name) + "")
              ki.leaveGroup(msg.to)
              kk.leaveGroup(msg.to)
              kc.leaveGroup(msg.to)
              kb.leaveGroup(msg.to)
              kd.leaveGroup(msg.to)
              #kl.leaveGroup(msg.to)
            except:
              pass
      # "dk.out" (admin only): same as dk.pulang but silent (goodbyes disabled).
      elif msg.text in ["dk.out"]:
        if msg.from_ in admin:
          if msg.toType == 2:
            ginfo = cl.getGroup(msg.to)
            try:
              #ki.sendText(msg.to,"Bye Bye " + str(ginfo.name) + "")
              ki.leaveGroup(msg.to)
              #kk.sendText(msg.to,"Bye Bye " + str(ginfo.name) + "")
              kk.leaveGroup(msg.to)
              #kc.sendText(msg.to,"Bye Bye😘 " + str(ginfo.name) + "")
              kc.leaveGroup(msg.to)
              #kb.sendText(msg.to,"Bye Bye😘 " + str(ginfo.name) + "")
              kb.leaveGroup(msg.to)
              #kd.sendText(msg.to,"Bye Bye😘 " + str(ginfo.name) + "")
              kd.leaveGroup(msg.to)
              #ke.sendText(msg.to,"Bye Bye😘 " + str(ginfo.name) +
            except:
              pass
      # "Bye" (admin only): the main account leaves this group.
      elif msg.text in ["Bye"]:
        if msg.from_ in admin:
          if msg.toType == 2:
            ginfo = cl.getGroup(msg.to)
            try:
              cl.leaveGroup(msg.to)
            except:
              pass
      # NOTE(review): duplicate "Bye" condition — this branch is unreachable
      # dead code (the sw.leaveGroup variant can never run).
      elif msg.text in ["Bye"]:
        if msg.toType == 2:
          ginfo = cl.getGroup(msg.to)
          try:
            sw.leaveGroup(msg.to)
          except:
            pass
      #------------------------[Copy]-------------------------
      # "S kembali" (admin only): restore the main account's saved profile
      # (picture + name) from the `backup` snapshot taken elsewhere.
      elif msg.text in ["S kembali"]:
        if msg.from_ in admin:
          try:
            cl.updateDisplayPicture(backup.pictureStatus)
            cl.updateProfile(backup)
            cl.sendText(msg.to,"Backup done")
          except Exception as e:
            cl.sendText(msg.to, str (e))
      # "kedapkedip <text>": send a flashing-text message (client extension).
      # NOTE(review): matched case-insensitively but only the lowercase prefix
      # is stripped, so "Kedapkedip x" keeps its prefix in the payload.
      elif "kedapkedip " in msg.text.lower():
        txt = msg.text.replace("kedapkedip ", "")
        cl.kedapkedip(msg.to,txt)
        print "[Command] Kedapkedip"
      # "Sablenk:Bc <text>": broadcast <text> to every contact of the main
      # account. NOTE(review): no admin check on this first branch.
      elif "Sablenk:Bc " in msg.text:
        bctxt = msg.text.replace("Sablenk:Bc ", "")
        a = cl.getAllContactIds()
        for manusia in a:
          cl.sendText(manusia, (bctxt))
      # NOTE(review): duplicate condition — this admin-gated multi-bot
      # broadcast is unreachable dead code; the branch above always wins.
      elif "Sablenk:Bc " in msg.text:
        if msg.from_ in admin:
          bctxt = msg.text.replace("Sablenk:Bc ", "")
          b = ki.getAllContactIds()
          for manusia in b:
            ki.sendText(manusia, (bctxt))
          c = kk.getAllContactIds()
          for manusia in c:
            kk.sendText(manusia, (bctxt))
          d = kc.getAllContactIds()
          for manusia in d:
            kc.sendText(manusia, (bctxt))
          e = kb.getAllContactIds()
          for manusia in e:
            kb.sendText(manusia, (bctxt))
          f = kd.getAllContactIds()
          for manusia in f:
            kd.sendText(manusia, (bctxt))
          g = ke.getAllContactIds()
          for manusia in g:
            ke.sendText(manusia, (bctxt))
          h = kg.getAllContactIds()
          for manusia in h:
            kg.sendText(manusia, (bctxt))
          i = kh.getAllContactIds()
          for manusia in i:
            kh.sendText(manusia, (bctxt))
          j = sw.getAllContactIds()
          for manusia in j:
            sw.sendText(manusia, (bctxt))
      #_______________
      # "InviteMeTo: <gid>" (owner only): invite the sender into group <gid>.
      elif "InviteMeTo: " in msg.text:
        if msg.from_ in owner:
          gid = msg.text.replace("InviteMeTo: ","")
          if gid == "":
            cl.sendText(msg.to,"Invalid group id")
          else:
            try:
              cl.findAndAddContactsByMid(msg.from_)
              cl.inviteIntoGroup(gid,[msg.from_])
            except:
              cl.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
      # "Minggatall gc" (admin only): all bots leave every group.
      # NOTE(review): as in "Minggat", the chained `gid =` assignments keep only
      # the LAST list (sw's groups) — confirm intended.
      elif msg.text in ["Minggatall gc"]:
        if msg.from_ in admin:
          gid = cl.getGroupIdsJoined()
          gid = ki.getGroupIdsJoined()
          gid = kk.getGroupIdsJoined()
          gid = kc.getGroupIdsJoined()
          gid = kb.getGroupIdsJoined()
          gid = kd.getGroupIdsJoined()
          gid = sw.getGroupIdsJoined()
          for i in gid:
            ki.leaveGroup(i)
            kk.leaveGroup(i)
            kc.leaveGroup(i)
            kb.leaveGroup(i)
            kd.leaveGroup(i)
            sw.leaveGroup(i)
          if wait["lang"] == "JP":
            cl.sendText(msg.to,"Bot Sudah Keluar Di semua grup")
          else:
            cl.sendText(msg.to,"He declined all invitations")
      # "Sw1 kembali" (admin only): restore kc's profile from the `backup`
      # snapshot (same mechanism as "S kembali").
      elif msg.text in ["Sw1 kembali"]:
        if msg.from_ in admin:
          try:
            kc.updateDisplayPicture(backup.pictureStatus)
            kc.updateProfile(backup)
            kc.sendText(msg.to,"Backup done")
          except Exception as e:
            kc.sendText(msg.to, str (e))
      # "S copy @<name>" (admin only): clone the named member's profile onto the
      # main account.
      elif "S copy @" in msg.text:
        if msg.from_ in admin:
          if msg.toType == 2:
            print "[Copy]"
            _name = msg.text.replace("S copy @","")
            _nametarget = _name.rstrip(' ')
            gs = cl.getGroup(msg.to)
            targets = []
            for g in gs.members:
              if _nametarget == g.displayName:
                targets.append(g.mid)
            if targets == []:
              cl.sendText(msg.to, "Not Found")
            else:
              for target in targets:
                try:
                  cl.CloneContactProfile(target)
                  cl.sendText(msg.to, "Succes copy")
                except Exception as e:
                  print e
      # "S clone @<name>" (admin only): same clone, applied to the kc account.
      elif "S clone @" in msg.text:
        if msg.from_ in admin:
          if msg.toType == 2:
            print "[Copy]"
            _name = msg.text.replace("S clone @","")
            _nametarget = _name.rstrip(' ')
            gs = kc.getGroup(msg.to)
            targets = []
            for g in gs.members:
              if _nametarget == g.displayName:
                targets.append(g.mid)
            if targets == []:
              kc.sendText(msg.to, "Not Found")
            else:
              for target in targets:
                try:
                  kc.CloneContactProfile(target)
                  kc.sendText(msg.to, "Succes clone")
                except Exception as e:
                  print e
      #=====TRANSLATE===========
      # "/translate-en <text>": translate to English via goslate.
      elif "/translate-en " in msg.text:
        txt = msg.text.replace("/translate-en ","")
        try:
          gs = goslate.Goslate()
          trs = gs.translate(txt,'en')
          cl.sendText(msg.to,trs)
          print '[Command] Translate EN'
        except:
          cl.sendText(msg.to,'Error.')
      # "/translate-id <text>": translate to Indonesian via goslate.
      elif "/translate-id " in msg.text:
        txt = msg.text.replace("/translate-id ","")
        try:
          gs = goslate.Goslate()
          trs = gs.translate(txt,'id')
          cl.sendText(msg.to,trs)
          print '[Command] Translate ID'
        except:
          cl.sendText(msg.to,'Error.')
      #-----------------------------------------------
      # "Glist" (admin only): list every group the main account is in, with
      # member counts. NOTE(review): substring trigger, so any message
      # containing "Glist" matches.
      elif "Glist" in msg.text:
        if msg.from_ in admin:
          gid = cl.getGroupIdsJoined()
          h = ""
          for i in gid:
            # getGroup is called twice per group — slow for many groups
            h += "=> %s \n" % (cl.getGroup(i).name + " | Members : [ " + str(len (cl.getGroup(i).members))+" ]")
          cl.sendText(msg.to, "#[List Grup]# \n"+ h +"Total Group : " +"[ "+str(len(gid))+" ]")
      # "group id": list joined group names with their raw ids.
      elif msg.text.lower() == 'group id':
        gid = cl.getGroupIdsJoined()
        h = ""
        for i in gid:
          h += "[%s]:%s\n" % (cl.getGroup(i).name,i)
        cl.sendText(msg.to,h)
      # "Fuck" (admin only): kick every blacklisted member present in this
      # group, using a randomly chosen bot from KAC per kick.
      elif msg.text in ["Fuck"]:
        if msg.from_ in admin:
          if msg.toType == 2:
            group = cl.getGroup(msg.to)
            gMembMids = [contact.mid for contact in group.members]
            matched_list = []
            for tag in wait["blacklist"]:
              # shadows the builtin `str` inside the lambda — works, but ugly
              matched_list+=filter(lambda str: str == tag, gMembMids)
            if matched_list == []:
              ki.sendText(msg.to,"Bye")
              return
            for jj in matched_list:
              try:
                random.choice(KAC).kickoutFromGroup(msg.to,[jj])
                print (msg.to,[jj])
              except:
                pass
      # "Kickall" (admin only): kick members whose display name contains _name.
      # NOTE(review): the text never contains "Clearall", so that replace is a
      # no-op; replace("all","") then turns "Kickall" into "Kick", so _name
      # starts with "Kick" plus any trailing text — the matching below is
      # almost certainly not what was intended. Confirm.
      elif "Kickall" in msg.text:
        if msg.from_ in admin:
          nk0 = msg.text.replace("Clearall","")
          nk1 = nk0.lstrip()
          nk2 = nk1.replace("all","")
          nk3 = nk2.rstrip()
          _name = nk3
          gs = cl.getGroup(msg.to)
          targets = []
          for g in gs.members:
            if _name in g.displayName:
              targets.append(g.mid)
          if targets == []:
            cl.sendText(msg.to,"Tidak Ada Member")
            pass
          else:
            for target in targets:
              # NOTE(review): parses as (not target in Bots) and bool(admin) —
              # the admin list is non-empty, so this only protects Bots;
              # `target not in Bots and target not in admin` was likely meant.
              if not target in Bots and admin:
                try:
                  cl.kickoutFromGroup(msg.to,[target])
                  print (msg.to,[g.mid])
                except:
                  cl.sendText(msg.to,"Salam kenal hehehe...")
        else:
          msg.contentType = 13
          msg.contentMetadata = {"mid": msg.from_}
          cl.sendMessage(msg)
          cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
          print "COMMENT DENIED"
      # "Gass" (admin only): mass-kick by name using a random kicker bot.
      # NOTE(review): strips "S bir", which a "Gass ..." message never contains,
      # so _name is the whole message text — the substring match below then
      # hits nothing (or odd names). Confirm intended prefix.
      elif "Gass" in msg.text:
        if msg.from_ in admin:
          if msg.toType == 2:
            print "ok"
            _name = msg.text.replace("S bir","")
            # only the last getGroup result survives these chained assignments
            gs = ki.getGroup(msg.to)
            gs = kk.getGroup(msg.to)
            gs = kc.getGroup(msg.to)
            gs = kb.getGroup(msg.to)
            gs = kd.getGroup(msg.to)
            targets = []
            for g in gs.members:
              if _name in g.displayName:
                targets.append(g.mid)
            if targets == []:
              ki.sendText(msg.to,"Not found.")
              kk.sendText(msg.to,"Not found.")
            else:
              for target in targets:
                if not target in Bots and admin:
                  try:
                    klist=[cl,ki,kk,kc,kb,kd]
                    kicker=random.choice(klist)
                    kicker.kickoutFromGroup(msg.to,[target])
                    print (msg.to,[g.mid])
                  except:
                    # NOTE(review): `msg,to` (comma) is a typo for `msg.to` —
                    # this except handler itself raises NameError/TypeError.
                    cl.sendText(msg,to,"Group cleanse")
                    cl.sendText(msg,to,"Group cleanse")
        else:
          msg.contentType = 13
          msg.contentMetadata = {"mid": msg.from_}
          cl.sendMessage(msg)
          cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
          print "COMMENT DENIED"
      #-------------------------------------------------------------------
      # Disabled "kibar" raid handler kept by the author for reference.
      # elif "kibar" in msg.text:
        # if msg._from in admin:
          # if msg.toType == 2:
            # print "ok"
            # _name = msg.text.replace("Kibar","")
            # gs = ki.getGroup(msg.to)
            # gs = kk.getGroup(msg.to)
            #gs = kc.getGroup(msg.to)
            # gs = kb.getGroup(msg.to)
            # gs = kd.getGroup(msg.to)
            # gs = ke.getGroup(msg.to)
            # gs = kg.getGroup(msg.to)
            # gs = kh.getGroup(msg.to)
            # targets = []
            # for g in gs.members:
              # if _name in g.displayName:
                # targets.append(g.mid)
            # if targets == []:
              # kk.sendText(msg.to,"Not found.")
              # cl.sendContact(to, mid)
              # cl.sendContact(to, Amid)
              # cl.sendContact(to, Bmid)
              # cl.sendContact(to, Cmid)
              # cl.sendContact(to, Dmid)
              #cl.sendContact(to, Emid)
              # cl.sendMessage(msg.to, "ASSALAMUALAIKUM \nROOM KALIAN \nDAFTAR ..PENGGUSURAN \nDALAM TARGET KAMI \n\nNO COMEND \nNO BAPER \nNO BACOT \nNO DESAH \n\n\nWAR!!! WER!!! WOR!!!\nKENAPE LU PADA DIEM\nTANGKIS >NYET TANGKIS\n\n\nDASAR ROOM PEA KAGAK BERGUNA\nHAHAHAHHAHAHAHAHAHAHA\nGC LU MAU GUA SITA...!!!\n\n\n[SK]SOAK KILLER\n\nHADIR DI ROOM ANDA\n\nRATA GAK RATA YANG PENTING KIBAR \n\n\n>>>>>>BYE BYE <<<<<<\n\n\nDENDAM CARI KAMI\n\n<<<<<<<<<<>>>>>>>>>>\n\nhttp://line.me/ti/p/afr1z4l\nhttp://line.me/ti/p/~noto_ati2122")
            # else:
              # for target in targets:
                # if not target in Bots and admin:
                  # try:
                    # klist=[cl,ki,kk,kc,kb,kd,ke,kg,kh]
                    # kicker=random.choice(klist)
                    # kicker.kickoutFromGroup(msg.to,[target])
                    # print (msg.to,[g.mid])
                  # except:
                    # cl.sendText(msg,to,"Group cleanse")
      #.........................#..............##
elif "Mk " in msg.text:
if msg.from_ in admin:
nk0 = msg.text.replace("Mk ","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("@","")
nk3 = nk2.rstrip()
_name = nk3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
if not target in Bots and admin:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
ki.sendText(msg.to,"Succes ")
kk.sendText(msg.to,"Bye")
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
print "COMMENT DENIED"
#-----------------------------------------------------------------------
elif "BL @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("BL @","")
_kicktarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _kicktarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found")
else:
for target in targets:
if not target in Bots and admin:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes ")
except:
cl.sendText(msg.to,"error")
elif "Sd" in msg.text:
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"Blacklist user succes dibersihkan")
elif "Sb" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ban0 = msg.text.replace("Sb","")
ban1 = ban0.lstrip()
ban2 = ban1.replace("@","")
ban3 = ban2.rstrip()
_name = ban3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"user does not exist")
pass
else:
for target in targets:
if not target in Bots and admin:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
cl.sendText(msg.to,"ヽ( ^ω^)ノ Success")
elif "Spam" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Spam ","")
_nametarget = _name.rstrip(' ')
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Melebihi Batas!!! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Melebihi Batas!!! ")
elif "Anju" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "Fuck"
_name = msg.text.replace("Anjuu","")
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
if not target in Bots and admin:
try:
cl.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
pass
elif "Ban @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
ban0 = msg.text.replace("Ban @","")
ban1 = ban0.lstrip()
ban2 = ban1.replace("@","")
ban3 = ban2.rstrip()
_name = ban3
gs = cl.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
cl.sendText(msg.to,"This contact can't is a blacklist")
pass
else:
for target in targets:
if not target in Bots and admin:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Done blacklist")
except:
cl.sendText(msg.to,"Done blacklist")
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😊\nKetik 「Creator」for contact admin")
#---------------------------------------------------------------------------------
elif "Gass" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Js","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"「 Sory ���\nSory is STARTING♪\n abort to abort♪")
cl.sendText(msg.to,"「 Js 」\nAll victims shall yell hul·la·ba·loo♪\nhələbəˈlo͞o hələbəˌlo͞o")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not Found")
else:
for target in targets:
if target not in Bots and admin:
try:
klist=[cl,ki,kk,kc,kb,kd]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to," done")
#----------------------------------------------------------------------------------
elif "Kick " in msg.text:
if msg.from_ in admin:
if msg.contentMetadata is not None:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.kickoutFromGroup(msg.to,[target])
else:
pass
elif "Sayang" in msg.text:
if msg.from_ in admin:
if msg.contentMetadata is not None:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
ki.kickoutFromGroup(msg.to,[target])
except:
ki.kickoutFromGroup(msg.to,[target])
else:
pass
#-----------------------------------------------
elif "Say " in msg.text:
if msg.from_ in admin:
string = msg.text.replace("Say ","")
if len(string.decode('utf-8')) <= 50:
ki.sendText(msg.to," " + string + " ")
kk.sendText(msg.to," " + string + " ")
kc.sendText(msg.to," " + string + " ")
kb.sendText(msg.to," " + string + " ")
kd.sendText(msg.to," " + string + " ")
ke.sendText(msg.to," " + string + " ")
kg.sendText(msg.to," " + string + " ")
kh.sendText(msg.to," " + string + " ")
kl.sendText(msg.to," " + string + " ")
elif "Bc: " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Bc: ","")
A = cl.getProfile()
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia, (bctxt) + "\n\nBroadcast by : " + (A.displayName))
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😆\nKetik 「Creator」 for contact admin")
print "COMMENT DENIED"
elif "Absen" in msg.text:
if msg.from_ in admin:
s = cl.getProfile()
s1 = ki.getProfile()
s2 = kk.getProfile()
s3 = kc.getProfile()
s4 = kb.getProfile()
s5 = kd.getProfile()
cl.sendText(msg.to, s.displayName + " ready Har Har")
ki.sendText(msg.to, s1.displayName + " ready Har Har")
kk.sendText(msg.to, s2.displayName + " ready Har Har")
kc.sendText(msg.to, s3.displayName + " ready Har Har")
kb.sendText(msg.to, s4.displayName + " ready Har Har")
kd.sendText(msg.to, s5.displayName + " ready Har Har")
#-----------------------------------------------
elif "Pict @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Pict @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#-----------------------------------------------
elif msg.text in ["Respon"]:
if msg.from_ in admin:
cl.sendText(msg.to,"Hadirr")
ki.sendText(msg.to,"Hadiirrr")
kk.sendText(msg.to,"Hadirr")
kc.sendText(msg.to,"Hadirr")
kb.sendText(msg.to,"Hadiirrr")
kd.sendText(msg.to,"Hadirr")
ke.sendText(msg.to,"Hadirr")
cl.sendText(msg.to,"Hadirr Semua boooss..siap nikung Ampe minta ampun si bojo,,")
#-----------------------------------------------
elif "Sp" in msg.text:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Progress ...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds double thumbs up" % (elapsed_time))
ki.sendText(msg.to, "%sseconds double thumbs up�" % (elapsed_time))
kk.sendText(msg.to, "%sseconds double thumbs up�" % (elapsed_time))
kc.sendText(msg.to, "%sseconds double thumbs up�" % (elapsed_time))
kb.sendText(msg.to, "%sseconds double thumbs up�" % (elapsed_time))
kd.sendText(msg.to, "%sseconds double thumbs up�" % (elapsed_time))
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😊\nKetik 「Creator」for contact admin")
print "COMMEND DENIED"
elif "speed" in msg.text:
if msg.from_ in admin:
start = time.time()
cl.sendText(msg.to, "Progress ...")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds double thumbs up" % (elapsed_time))
ki.sendText(msg.to, "%sseconds double thumbs up" % (elapsed_time))
kk.sendText(msg.to, "%sseconds double thumbs up" % (elapsed_time))
kc.sendText(msg.to, "%sseconds double thumbs up" % (elapsed_time))
kb.sendText(msg.to, "%sseconds double thumbs up" % (elapsed_time))
else:
msg.contentType = 13
msg.contentMetadata = {"mid": msg.from_}
cl.sendMessage(msg)
cl.sendText(msg.to, "Acces denied for you 😊\nKetik 「Creator」for contact admin")
print "COMMEND DENIED"
elif "Cbc: " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Cbc: ", "")
contact = cl.getAllContactIds()
for cbc in contact:
cl.sendText(cbc,(bctxt))
#------------------------------------------------------------------
elif msg.text in ["Ban:on"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to blacklist")
elif msg.text in ["Daftarbl"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to unblacklist")
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"Nothing a blacklist user")
else:
cl.sendText(msg.to,"Blacklist user")
mc = "[●]「Blacklist User」[●]\n\n"
for mi_d in wait["blacklist"]:
mc += "~ " +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Blc"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = ""
for mm in matched_list:
cocoa += mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif msg.text in ["Kick ban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
group = ki.getGroup(msg.to)
group = kk.getGroup(msg.to)
group = kc.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
cl.sendText(msg.to,"There was no blacklist user")
ki.sendText(msg.to,"There was no blacklist user")
kk.sendText(msg.to,"There was no blacklist user")
kc.sendText(msg.to,"There was no blacklist user")
return
for jj in matched_list:
cl.kickoutFromGroup(msg.to,[jj])
ki.kickoutFromGroup(msg.to,[jj])
kk.kickoutFromGroup(msg.to,[jj])
kc.kickoutFromGroup(msg.to,[jj])
cl.sendText(msg.to,"Blacklist user")
ki.sendText(msg.to,"Blacklist user")
kk.sendText(msg.to,"Blacklist user")
elif msg.text in [".."]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
cl.cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"")
#-----------------------------------------------------------------
if op.param3 == "1":
if op.param1 in Protectgroupmame:
group = cl.getGroup(op.param1)
try:
group.name = wait["Protectgrouname"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
if op.type == 17:
if wait["protectJoin"] == True:
if op.param2 not in Bots:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
cl.updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).updateGroup(G)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
elif op.param2 not in admin + Bots:
random.choice(KAC).sendText(op.param1,"Welcome. Don't Play Bots. I can kick you!")
else:
pass
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['Gnamelock']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kb.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
pass
G.name = wait['Gnamelock'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
ke.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
kd.updateGroup(G)
except:
pass
if op.param2 in Bots:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
except:
try:
#kb.kickoutFromGroup(op.param1,[op.param2])
#except:
#try:
kd.kickoutFromGroup(op.param1,[op.param2])
except:
pass
cl.sendText(op.param1,"Gnamelock")
ki.sendText(op.param1,"Haddeuh dikunci Pe'a")
kk.sendText(op.param1,"Wekawekaweka Har Har")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 17:
if op.param2 not in Bots:
joinblacklist = op.param2.replace(".",',')
joinblacklistX = joinblacklist.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, joinblacklistX)
if matched_list == []:
pass
else:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in wait["blacklist"]:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#--------------------------------------------------------------------------------
if op.type == 19:
if op.param3 in admin and op.param2 in Bots:
random.choice(KAC).inviteIntoGroup(op.param3)
random.choice(KAC).findAndAddContactsByMid(op.param3)
if op.type == 19:
if op.param3 in admin:
cl.inviteIntoGroup(op.param1,admin)
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
else:
cl.sendText(op.param1,"Please contact admin for invite member")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = True
Ticket = cl.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
cl.updateGroup(G)
sw.leaveGroup(op.param1)
else:
cl.sendText(op.param1,"")
else:
cl.sendText(op.param1,"")
#-----------------------------------------------------------------------------
if op.type == 46:
if op.param2 in Bots:
cl.removeAllMessages()
ki.removeAllMessages()
kk.removeAllMessages()
kc.removeAllMessages()
kb.removeAllMessages()
kd.removeAllMessages()
# kj.removeAllMessages()
#------------------------------------------------------------------------------
if op.type == 55:
print "NOTIFIED_READ_MESSAGE"
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n• " + Name
wait2['ROM'][op.param1][op.param2] = "• " + Name
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
cl.sendText
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def autoSta():
    # Background worker: polls the LINE timeline and auto-likes /
    # auto-comments on posts the account has not liked yet.
    # Relies on the module-level clients (cl, ki, kk, kc, kb, kd) and the
    # shared `wait` settings dict.
    count = 1
    while True:
        try:
            for posts in cl.activity(1)["result"]["posts"]:
                if posts["postInfo"]["liked"] is False:
                    if wait["likeOn"] == True:
                        # Like with every bot account (1001 = like type).
                        cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
                        ki.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
                        kk.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
                        kc.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
                        kb.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
                        kd.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
                    if wait["commentOn"] == True:
                        # Skip authors on the comment blacklist.
                        if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
                            pass
                        else:
                            # NOTE(review): the banner text is used as the *key* into
                            # `wait`, so the posted comment is whatever value was
                            # stored under that key — confirm this is intended.
                            cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["──────┅❇͜͡❇͜͡☆͜͡❇͜͡❇┅──────\nᴼᴾᴱᴺ ᴼᴿᴰᴱᴿ\n────────┅┅───────\n➣ꜱᴇʟꜰʙᴏᴛ ᴏɴʟʏ\n➣ꜱᴇʟꜰʙᴏᴛ + ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 2 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 3 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 4 ᴀꜱɪꜱᴛ\n➣1 ᴀᴋᴜɴ ᴜᴛᴀᴍᴀ + 5 ᴀꜱɪꜱᴛ\n➣ʙᴏᴛᴘʀᴏᴛᴇᴄᴛ 3-11 ʙᴏᴛ ᴀꜱɪꜱᴛ\n➣ɴᴇᴡ ꜱᴄʀɪᴘᴛ\n➣ʜʀɢᴀ ʙɪꜱᴀ ɴᴇɢᴏ\n─────────┅┅─────────\n ✯❇͜͡❇͜͡C͜͡r͜͡e͜͡a͜͡t͜͡o͜͡r✯͜͡$͜͡ë͜͡I͜͡F͜͡-͜͡฿͜͜͡͡o͜͡t͜͡ ͜͡❇͜͡❇✯\nline.me/ti/p/~reza.p.i.p\nline.me/ti/p/~ryansakra\n➣ѕєʟғвот κɪcκєʀ_+_ᴘʀᴏᴛᴇᴄᴛ\n────────┅❇͜͡❇͜͡☆͜͡❇͜͡❇┅────────"])
        except:
            # Count consecutive failures; give up after 50 of them.
            count += 1
            if(count == 50):
                sys.exit(0)
            else:
                pass
# Run the auto-like/auto-comment poller in the background; daemon=True so it
# does not keep the process alive on exit.
thread1 = threading.Thread(target=autoSta)
thread1.daemon = True
thread1.start()
def a2():
    """Return False when the current minute is a 10-minute mark, else True.

    Intended as a pacing helper: a caller spins while a2() is True and
    proceeds at :00/:10/:20/:30/:40/:50 (see the commented-out
    ``while a2()`` in nameUpdate).
    """
    now2 = datetime.now()
    nowT = datetime.strftime(now2,"%M")
    # BUG FIX: the original tested nowT[14:], but strftime("%M") returns a
    # two-character string, so the slice was always "" and the function
    # unconditionally returned True. Compare the full minute string instead.
    if nowT in ["10","20","30","40","50","00"]:
        return False
    else:
        return True
def nameUpdate():
    # Background worker: while the "clock" feature is enabled, keep the
    # bot's display name suffixed with the current (HH:MM) time.
    # Reads wait["clock"] / wait["cName"] and updates via the `cl` client.
    while True:
        try:
            if wait["clock"] == True:
                stamp = datetime.strftime(datetime.now(), "(%H:%M)")
                me = cl.getProfile()
                me.displayName = wait["cName"] + stamp
                cl.updateProfile(me)
            # Sleep between refreshes (500 seconds, as in the original).
            time.sleep(500)
        except:
            # Best-effort loop: swallow transient API errors and retry.
            pass
# Run the clock-in-display-name updater in the background; daemon=True so it
# does not keep the process alive on exit.
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
# Main polling loop: fetch pending operations from the long-poll endpoint
# and dispatch each one to the bot() handler.
while True:
    try:
        Ops = cl.fetchOps(cl.Poll.rev, 5)
    except EOFError:
        # Stream ended unexpectedly; the stored revision is likely stale.
        raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the revision cursor so operations are not re-fetched.
            cl.Poll.rev = max(cl.Poll.rev, Op.revision)
            bot(Op)
|
namespaces.py
|
import contextlib
import ctypes
import errno
import os
import pyroute2
import pytest
import signal
import multiprocessing
# All allowed namespace types; values are the CLONE_NEW* flags passed to
# clone(2) (mnt=CLONE_NEWNS, uts=CLONE_NEWUTS, ipc=CLONE_NEWIPC, ...).
NAMESPACE_FLAGS = dict(mnt=0x00020000,
                       uts=0x04000000,
                       ipc=0x08000000,
                       user=0x10000000,
                       pid=0x20000000,
                       net=0x40000000)
# Stack size (1 MiB) for the clone(2)'d child process.
STACKSIZE = 1024*1024
# Raw libc handle used for mount(2), clone(2) and setns(2).
libc = ctypes.CDLL('libc.so.6', use_errno=True)
@contextlib.contextmanager
def keep_directory():
    """Context manager that restores the original working directory on exit."""
    saved = os.getcwd()
    try:
        yield
    finally:
        os.chdir(saved)
def mount_sys(target="/sys"):
    """Mount a sysfs on *target* and adjust its mount propagation.

    Issues three mount(2) calls in sequence, exactly as the original:
    NOSUID|NODEV|NOEXEC, then MS_PRIVATE, then MS_SLAVE.
    Raises OSError if any call fails.
    """
    MS_NOSUID, MS_NODEV, MS_NOEXEC = 2, 4, 8
    MS_PRIVATE, MS_SLAVE = 1 << 18, 1 << 19
    for flag in (MS_NOSUID | MS_NODEV | MS_NOEXEC, MS_PRIVATE, MS_SLAVE):
        if libc.mount(b"none", target.encode('ascii'), b"sysfs", flag, None) == -1:
            err = ctypes.get_errno()
            raise OSError(err, os.strerror(err))
def mount_tmpfs(target, private=False):
    """Mount a tmpfs on *target*; optionally mark it MS_PRIVATE first.

    Mount calls happen in order: plain mount, MS_PRIVATE (only when
    *private* is true), then MS_SLAVE. Raises OSError on failure.
    """
    MS_PRIVATE, MS_SLAVE = 1 << 18, 1 << 19
    sequence = [0]
    if private:
        sequence.append(MS_PRIVATE)
    sequence.append(MS_SLAVE)
    for flag in sequence:
        if libc.mount(b"none", target.encode('ascii'), b"tmpfs", flag, None) == -1:
            err = ctypes.get_errno()
            raise OSError(err, os.strerror(err))
def _mount_proc(target):
    """Mount a procfs on *target* and adjust its mount propagation.

    Same three-call sequence as mount_sys: NOSUID|NODEV|NOEXEC, then
    MS_PRIVATE, then MS_SLAVE. Raises OSError on failure.
    """
    MS_NOSUID, MS_NODEV, MS_NOEXEC = 2, 4, 8
    MS_PRIVATE, MS_SLAVE = 1 << 18, 1 << 19
    for flag in (MS_NOSUID | MS_NODEV | MS_NOEXEC, MS_PRIVATE, MS_SLAVE):
        if libc.mount(b"proc", target.encode('ascii'), b"proc", flag, None) == -1:
            err = ctypes.get_errno()
            raise OSError(err, os.strerror(err))
def mount_proc(target="/proc"):
    """Ensure a correct /proc by mounting it from a helper process.

    The mount is done in a separate process because mounting proc
    directly does not play well with setns().
    """
    if not os.path.isdir(target):
        os.mkdir(target)
    helper = multiprocessing.Process(target=_mount_proc, args=(target,))
    helper.start()
    helper.join()
class Namespace(object):
    """Combine several namespaces into one.
    This gets a list of namespace types to create and combine into one. The
    combined namespace can be used as a context manager to enter all the
    created namespaces and exit them at the end.
    """
    def __init__(self, *namespaces):
        # Create a short-lived child via clone(2) with the requested
        # CLONE_NEW* flags, grab fds to its /proc/<pid>/ns/* entries, then
        # let the child exit; the fds keep the namespaces alive.
        self.namespaces = namespaces
        for ns in namespaces:
            assert ns in NAMESPACE_FLAGS
        # Get a pipe to signal the future child to exit
        self.pipe = os.pipe()
        # First, create a child in the given namespaces
        child = ctypes.CFUNCTYPE(ctypes.c_int)(self.child)
        child_stack = ctypes.create_string_buffer(STACKSIZE)
        # clone(2) expects a pointer to the *top* of the child's stack.
        child_stack_pointer = ctypes.c_void_p(
            ctypes.cast(child_stack,
                        ctypes.c_void_p).value + STACKSIZE)
        flags = signal.SIGCHLD
        for ns in namespaces:
            flags |= NAMESPACE_FLAGS[ns]
        pid = libc.clone(child, child_stack_pointer, flags)
        if pid == -1:
            e = ctypes.get_errno()
            raise OSError(e, os.strerror(e))
        # If a user namespace, map UID 0 to the current one
        if 'user' in namespaces:
            uid_map = '0 {} 1'.format(os.getuid())
            gid_map = '0 {} 1'.format(os.getgid())
            with open('/proc/{}/uid_map'.format(pid), 'w') as f:
                f.write(uid_map)
            # setgroups must be denied before gid_map can be written.
            with open('/proc/{}/setgroups'.format(pid), 'w') as f:
                f.write('deny')
            with open('/proc/{}/gid_map'.format(pid), 'w') as f:
                f.write(gid_map)
        # Retrieve a file descriptor to this new namespace
        self.next = [os.open('/proc/{}/ns/{}'.format(pid, x),
                             os.O_RDONLY) for x in namespaces]
        # Keep a file descriptor to our old namespaces
        self.previous = [os.open('/proc/self/ns/{}'.format(x),
                                 os.O_RDONLY) for x in namespaces]
        # Tell the child all is done and let it die
        os.close(self.pipe[0])
        if 'pid' not in namespaces:
            os.close(self.pipe[1])
            self.pipe = None
        os.waitpid(pid, 0)
    def __del__(self):
        # Release the namespace fds (old and new) and any leftover pipe end.
        for fd in self.next:
            os.close(fd)
        for fd in self.previous:
            os.close(fd)
        if self.pipe is not None:
            os.close(self.pipe[1])
    def child(self):
        """Cloned child.
        Just be here until our parent extract the file descriptor from
        us.
        """
        os.close(self.pipe[1])
        # For a network namespace, enable lo
        if 'net' in self.namespaces:
            ipr = pyroute2.IPRoute()
            lo = ipr.link_lookup(ifname='lo')[0]
            ipr.link('set', index=lo, state='up')
        # For a mount namespace, make it private
        if 'mnt' in self.namespaces:
            libc.mount(b"none", b"/", None,
                       # MS_REC | MS_PRIVATE
                       16384 | (1 << 18),
                       None)
        # Wait on the pipe until the parent is done with us, retrying on
        # EAGAIN/EINTR; any other error ends the wait.
        while True:
            try:
                os.read(self.pipe[0], 1)
            except OSError as e:
                if e.errno in [errno.EAGAIN, errno.EINTR]:
                    continue
                break
        # _exit: skip Python cleanup, we share state with the parent.
        os._exit(0)
    def fd(self, namespace):
        """Return the file descriptor associated to a namespace"""
        assert namespace in self.namespaces
        return self.next[self.namespaces.index(namespace)]
    def __enter__(self):
        # Enter every stored namespace via setns(2); cwd is restored since
        # entering a mount namespace can change it.
        with keep_directory():
            for n in self.next:
                if libc.setns(n, 0) == -1:
                    ns = self.namespaces[self.next.index(n)]  # NOQA
                    e = ctypes.get_errno()
                    raise OSError(e, os.strerror(e))
    def __exit__(self, *exc):
        # Return to the original namespaces, in reverse order; the first
        # failure is remembered and re-raised after trying all of them.
        with keep_directory():
            err = None
            for p in reversed(self.previous):
                if libc.setns(p, 0) == -1 and err is None:
                    ns = self.namespaces[self.previous.index(p)]  # NOQA
                    e = ctypes.get_errno()
                    err = OSError(e, os.strerror(e))
            if err:
                raise err
    def __repr__(self):
        return 'Namespace({})'.format(", ".join(self.namespaces))
class NamespaceFactory(object):
    """Create and cache combined namespaces on demand.

    Namespaces are created for IPC, net, mount and UTS. PID is a bit
    special as we would have to keep a process for it, which we avoid so
    everything is cleaned automatically; a user namespace is not used
    either because we are unlikely to be able to exit it.
    """

    def __init__(self, tmpdir):
        self.namespaces = {}
        self.tmpdir = tmpdir

    def __call__(self, ns):
        """Return the namespace registered under *ns*, creating it on first use."""
        try:
            return self.namespaces[ns]
        except KeyError:
            pass
        created = Namespace('ipc', 'net', 'mnt', 'uts')
        self.namespaces[ns] = created
        with created:
            mount_proc()
            mount_sys()
            # Also set up the "namespace-dependent" scratch directory.
            ns_dir = self.tmpdir.join("ns")
            ns_dir.ensure(dir=True)
            mount_tmpfs(str(ns_dir), private=True)
        return created
@pytest.fixture
def namespaces(tmpdir):
    """Pytest fixture: a NamespaceFactory bound to this test's tmpdir."""
    return NamespaceFactory(tmpdir)
|
wsdump.py
|
#!/Users/Rahul/Desktop/pollr-backend2/pollr-eb2/bin/python3
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
    """Return stdin's encoding, lowercased, defaulting to utf-8."""
    encoding = getattr(sys.stdin, "encoding", "")
    return encoding.lower() if encoding else "utf-8"
# Opcodes that carry user payload (text and binary frames).
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
# Console encoding used when re-encoding interactive input.
ENCODING = get_encoding()
class VAction(argparse.Action):
    """argparse action turning -v / -vv / -v2 into an integer verbosity level."""

    def __call__(self, parser, args, values, option_string=None):
        raw = "1" if values is None else values
        try:
            level = int(raw)
        except ValueError:
            # Non-numeric form such as "vv": count the v's (plus the flag itself).
            level = raw.count("v") + 1
        setattr(args, self.dest, level)
def parse_args():
    """Build the CLI parser for the dump tool and parse sys.argv."""
    ap = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
    ap.add_argument("url", metavar="ws_url",
                    help="websocket url. ex. ws://echo.websocket.org/")
    ap.add_argument("-p", "--proxy",
                    help="proxy url. ex. http://127.0.0.1:8080")
    ap.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
                    dest="verbose",
                    help="set verbose mode. If set to 1, show opcode. "
                    "If set to 2, enable to trace websocket module")
    ap.add_argument("-n", "--nocert", action='store_true',
                    help="Ignore invalid SSL cert")
    ap.add_argument("-r", "--raw", action="store_true",
                    help="raw output")
    ap.add_argument("-s", "--subprotocols", nargs='*',
                    help="Set subprotocols")
    ap.add_argument("-o", "--origin",
                    help="Set origin")
    ap.add_argument("--eof-wait", default=0, type=int,
                    help="wait time(second) after 'EOF' received.")
    ap.add_argument("-t", "--text",
                    help="Send initial text")
    ap.add_argument("--timings", action="store_true",
                    help="Print timings in seconds")
    ap.add_argument("--headers",
                    help="Set custom headers. Use ',' as separator")
    return ap.parse_args()
class RawInput:
    # Prompt helper shared by both console types; normalizes whatever the
    # user types to UTF-8 bytes on Python 2 and 3.
    def raw_input(self, prompt):
        # Pick the right input builtin for the running Python major version.
        if six.PY3:
            line = input(prompt)
        else:
            line = raw_input(prompt)
        # Byte input in a non-UTF-8 console encoding: transcode to UTF-8.
        if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
            line = line.decode(ENCODING).encode("utf-8")
        # Text input: encode to UTF-8 bytes.
        elif isinstance(line, six.text_type):
            line = line.encode("utf-8")
        return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
    """Interactive console: shows incoming frames in blue above the prompt."""

    def write(self, data):
        # Clear the current line, print the message, then redraw the prompt.
        out = sys.stdout
        for piece in ("\033[2K\033[E", "\033[34m< " + data + "\033[39m", "\n> "):
            out.write(piece)
        out.flush()

    def read(self):
        return self.raw_input("> ")
class NonInteractive(RawInput):
    """Plain line-oriented console used for --raw mode (no prompt, no ANSI)."""

    def write(self, data):
        stream = sys.stdout
        stream.write(data)
        stream.write("\n")
        stream.flush()

    def read(self):
        return self.raw_input("")
def main():
    # Entry point: open the websocket, spawn a reader thread that prints
    # incoming frames, and forward console input to the socket.
    start_time = time.time()
    args = parse_args()
    if args.verbose > 1:
        websocket.enableTrace(True)
    options = {}
    if args.proxy:
        p = urlparse(args.proxy)
        options["http_proxy_host"] = p.hostname
        options["http_proxy_port"] = p.port
    if args.origin:
        options["origin"] = args.origin
    if args.subprotocols:
        options["subprotocols"] = args.subprotocols
    opts = {}
    if args.nocert:
        # --nocert: skip certificate validation entirely.
        opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
    if args.headers:
        # NOTE(review): map() is lazy on Python 3; confirm websocket-client
        # accepts an iterator for the 'header' option.
        options['header'] = map(str.strip, args.headers.split(','))
    ws = websocket.create_connection(args.url, sslopt=opts, **options)
    if args.raw:
        console = NonInteractive()
    else:
        console = InteractiveConsole()
        print("Press Ctrl+C to quit")
    def recv():
        # Receive one frame; answer pings and acknowledge close frames.
        # Returns (opcode, data); data is None on close/error.
        try:
            frame = ws.recv_frame()
        except websocket.WebSocketException:
            return websocket.ABNF.OPCODE_CLOSE, None
        if not frame:
            raise websocket.WebSocketException("Not a valid frame %s" % frame)
        elif frame.opcode in OPCODE_DATA:
            return frame.opcode, frame.data
        elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
            ws.send_close()
            return frame.opcode, None
        elif frame.opcode == websocket.ABNF.OPCODE_PING:
            ws.pong(frame.data)
            return frame.opcode, frame.data
        return frame.opcode, frame.data
    def recv_ws():
        # Reader loop: print each received frame until the peer closes.
        while True:
            opcode, data = recv()
            msg = None
            if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
                data = str(data, "utf-8")
            if not args.verbose and opcode in OPCODE_DATA:
                msg = data
            elif args.verbose:
                # Verbose mode prefixes the opcode name.
                msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
            if msg is not None:
                if args.timings:
                    console.write(str(time.time() - start_time) + ": " + msg)
                else:
                    console.write(msg)
            if opcode == websocket.ABNF.OPCODE_CLOSE:
                break
    thread = threading.Thread(target=recv_ws)
    thread.daemon = True
    thread.start()
    if args.text:
        ws.send(args.text)
    # Writer loop: read from the console and send until interrupted or EOF.
    while True:
        try:
            message = console.read()
            ws.send(message)
        except KeyboardInterrupt:
            return
        except EOFError:
            time.sleep(args.eof_wait)
            return
if __name__ == "__main__":
    try:
        main()
    except Exception as e:
        # Print the error rather than a traceback for a friendlier CLI.
        print(e)
|
fixmp4.py
|
#!/usr/bin/python3
"""Fix problems with improperly encoded video files.
For some reason a lot of my video files do not have proper
encoding and this results in errors during playback. This
script goes through each of the files uses ffmpeg to convert
the file to one that is wrapped appropriately.
"""
import argparse
import datetime
import hashlib
import json
import os
import re
import tempfile
import subprocess
import sys
from multiprocessing import Process
from typing import Dict, Any, List
from tqdm import tqdm
import coloredlogs
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)  # module-level logger
coloredlogs.install(level="DEBUG", logger=logger)
# Chunk size (bytes) for incremental md5 hashing.
MD5_BLOCKSIZE = 4 * 1024
# Presumably a polling delay in seconds; not referenced in this chunk — verify.
MONITOR_DELAY = 1.0
# Status marker stored in the per-directory JSON metadata file.
STATUS_COMPLETE = "complete"
def md5(fname: str, desc: str = None) -> str:
    """Calculate the md5 sum of a large file, showing a progress bar.

    see: https://stackoverflow.com/a/3431838/57626

    :param fname: path of the file to hash
    :param desc: progress-bar label; defaults to "md5 <basename>"
    :return: hex digest of the file's contents
    """
    hash_md5 = hashlib.md5()
    if desc is None:
        desc = f"md5 {os.path.basename(fname)}"
    with tqdm(
        total=os.path.getsize(fname), unit="B", unit_scale=True, miniters=1, desc=desc, leave=False,
    ) as pbar:
        with open(fname, "rb") as f:
            # Stream the file in fixed-size chunks so memory stays bounded.
            # (Removed the unused `ctr` chunk counter from the original.)
            for chunk in iter(lambda: f.read(MD5_BLOCKSIZE), b""):
                pbar.update(len(chunk))
                hash_md5.update(chunk)
    return hash_md5.hexdigest()
def get_ffmpeg_version() -> str:
    """Invoke ffmpeg and return its version banner.

    Weird behavior differences were traced to differing ffmpeg versions,
    so the version text is captured here and recorded in the metadata later.
    """
    command = ["ffmpeg", "-version"]
    logger.info("command: %s", " ".join(command))
    completed = subprocess.run(command, capture_output=True)
    return completed.stdout.decode("utf-8")
def get_blocked_streams(filename: str) -> List[str]:
    """Get a list of streams that are eia_608.

    These are typically closed captioning tracks, but they're not officially
    supported in mp4 containers and it turns out that these tracks have been
    the problematic streams.

    This method could be easily expanded to add additional codecs by adding
    them to the `BLOCKED_CODECS` list.
    """
    BLOCKED_CODECS = ["eia_608"]
    command = ["ffprobe", "-show_streams", "-of", "json", filename]
    logger.info("command: %s", " ".join(command))
    probe = subprocess.run(command, capture_output=True)
    if probe.returncode != 0:
        logger.fatal("ffprobe exit code is %d - this indicates an error", probe.returncode)
        sys.exit(1)
    streams = json.loads(probe.stdout.decode("utf-8")).get("streams", [])
    # Build ffmpeg "-map" specifiers ("0:<index>") for every blocked stream.
    return [
        f"0:{stream.get('index')}"
        for stream in streams
        if stream.get("codec_name", "") in BLOCKED_CODECS
    ]
def process_dir(dirname: str) -> None:
    """Remux the single video file in a directory into a clean mp4.

    Progress is tracked in a per-directory JSON metadata file so completed
    directories are skipped on re-runs. The original file is hashed, remuxed
    with ffmpeg (dropping blocked streams such as eia_608 captions), hashed
    again, and both files are renamed into their final locations.

    Args:
        dirname: Directory expected to hold exactly one .mp4/.m4v file.
    """
    logger.info("processing: %s", dirname)
    basename = os.path.basename(dirname)
    json_metafile = os.path.join(dirname, f"{basename}.json")
    json_metadata = {}  # type: Dict[str, Any]
    # check to see if JSON metadata is present
    if os.path.isfile(json_metafile):
        logger.info("metafile '%s' is present", json_metafile)
        with open(json_metafile) as meta_fp:
            json_metadata = json.load(meta_fp)
        if json_metadata.get("status", "") == STATUS_COMPLETE:
            logger.info("%s is complete", dirname)
            return
    files = os.listdir(dirname)
    videos = [x for x in files if os.path.splitext(x)[1].lower() in [".m4v", ".mp4"]]
    if len(videos) == 0:
        logger.warning("no video files present")
        return
    if len(videos) > 1:
        logger.warning("multiple video files: %s", videos)
        return
    video = videos[0]
    logger.info("working on %s", video)
    # calculate md5sum of original file (skipped if already recorded)
    if "md5" not in json_metadata.get("original", {}):
        md5sum = md5(os.path.join(dirname, video), desc=f"original md5 {video}")
        json_metadata["original"] = {
            "filename": video,
            "md5": md5sum,
        }
        with open(json_metafile, "w") as f:
            json.dump(json_metadata, f)
    # run the conversion command, writing to a random temporary name so a
    # failed run never clobbers the original file
    outfilename = uuid.uuid4().hex + ".mp4"
    outpath = os.path.join(dirname, outfilename)
    blocked_tracks = get_blocked_streams(os.path.join(dirname, video))
    logger.info("eia_608 tracks: %s", blocked_tracks)
    # Record the ffmpeg version here, in the parent: the transcode runs in a
    # child Process, so any mutation of json_metadata made there is invisible
    # to this process and would be lost.
    json_metadata["ffmpeg"] = get_ffmpeg_version()

    def transcode():
        """Run ffmpeg in stream-copy mode, dropping the blocked tracks."""
        logger.info("writing remuxed file to: %s", outfilename)
        outcommand = [
            "ffmpeg",
            "-v",
            "warning",
            "-i",
            os.path.join(dirname, video),
            "-c",
            "copy",
            "-map",
            "0",
        ]
        for track in blocked_tracks:
            outcommand = outcommand + ["-map", f"-{track}"]
        outcommand.append(outpath)
        logger.info("command: %s", " ".join(outcommand))
        subprocess.run(outcommand, stdout=sys.stdout.buffer, stderr=sys.stdout.buffer)

    p = Process(target=transcode)
    p.start()
    # Watch the growing output file to drive a progress bar while the child
    # process works; the copy should end near the size of the input.
    with tqdm(
        total=os.path.getsize(os.path.join(dirname, video)),
        unit="B",
        unit_scale=True,
        miniters=1,
        desc=f"ffmpeg {video}",
        leave=False,
    ) as pbar:
        old_filesize = 0
        while True:
            p.join(MONITOR_DELAY)
            if os.path.isfile(outpath):
                filesize = os.path.getsize(outpath)
                pbar.update(filesize - old_filesize)
                old_filesize = filesize
            if p.exitcode is not None:
                break
    logger.warning("exitcode %d", p.exitcode)
    # bail out if ffmpeg produced nothing usable (missing or empty file)
    if not os.path.isfile(outpath) or os.path.getsize(outpath) == 0:
        if os.path.isfile(outpath):
            os.remove(outpath)
        logger.fatal("Conversion error - output file size of %s is 0 bytes", outfilename)
        sys.exit(1)
    json_metadata["new"] = {"filename": outfilename}
    # incrementally save the metadata
    with open(json_metafile, "w") as f:
        json.dump(json_metadata, f)
    logger.info("calculating remuxed md5")
    md5sum = md5(outpath, desc=f"converted md5 {video}")
    json_metadata["new"]["md5"] = md5sum
    # rename the files: the original keeps a ".orig" suffix, the new file
    # takes the directory's name
    original_moved = video + ".orig"
    os.rename(os.path.join(dirname, video), os.path.join(dirname, original_moved))
    json_metadata["original"]["target"] = original_moved
    new_moved = os.path.join(dirname, os.path.basename(dirname) + ".mp4")
    os.rename(outpath, new_moved)
    logger.info("renamed %s to %s", outpath, new_moved)
    json_metadata["new"]["target"] = os.path.basename(new_moved)
    # add in the metadata to show when the task was completed
    # this stores the timestamp as an int and an isoformat string with timezone
    completion_time = datetime.datetime.now(datetime.timezone.utc)
    json_metadata["status"] = STATUS_COMPLETE
    json_metadata["timestamp"] = int(completion_time.timestamp())
    json_metadata["isotimestamp"] = completion_time.astimezone().isoformat(timespec="seconds")
    with open(json_metafile, "w") as f:
        json.dump(json_metadata, f)
def main(dirname: str = ".") -> None:
    """Iterate through all the directories.

    Because there isn't a way to know ahead of time if we need to look at the
    files in a directory, the directories are iterated through. This means that
    even junk directories are counted as part of the list.
    """
    if not os.path.isdir(dirname):
        logger.error("%s is not a directory", dirname)
        sys.exit(1)
    logger.info("working in directory %s", dirname)
    # ANSI escapes used to decorate the overall progress-bar label.
    ansi = {"BOLD": "\033[1m", "RED": "\033[91m", "END": "\033[0m"}
    subdirs = sorted(
        os.path.join(dirname, entry)
        for entry in os.listdir(dirname)
        if os.path.isdir(os.path.join(dirname, entry))
    )
    for subdir in tqdm(subdirs, desc="{BOLD}{RED}Total Progress{END}".format(**ansi)):
        process_dir(subdir)
if __name__ == "__main__":
    # Single optional positional argument; defaults to the current directory.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "dirname",
        action="store",
        default=os.getcwd(),
        nargs="?",
        help="directory to work within",
    )
    parsed = arg_parser.parse_args()
    main(dirname=parsed.dirname)
|
toolbar.py
|
"""Module for dealing with the toolbar.
"""
import math
import os
import ipyevents
import ipyleaflet
import ipywidgets as widgets
from ipyfilechooser import FileChooser
from .common import *
from .pc import *
def tool_template(m=None):
    """Generates a tool GUI template using ipywidgets.

    Builds a collapsible panel (toggled by a gear button) containing one of
    each common widget type, wires up mouse-enter/leave expansion and
    Apply/Reset/Close behavior, and either attaches the panel to the map as a
    WidgetControl or returns it.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget (only when `m` is None; otherwise the
        panel is added to the map and nothing is returned).
    """
    # Shared layout constants for the widgets in this panel.
    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}
    # Gear button that expands/collapses the panel.
    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="gear",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    # "times" button that removes the tool from the map entirely.
    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )
    # --- sample body widgets: one of each common input type ---
    checkbox = widgets.Checkbox(
        description="Checkbox",
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )
    dropdown = widgets.Dropdown(
        options=["Option 1", "Option 2", "Option 3"],
        value=None,
        description="Dropdown:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )
    int_slider = widgets.IntSlider(
        min=1,
        max=100,
        description="Int Slider: ",
        readout=False,
        continuous_update=True,
        layout=widgets.Layout(width="220px", padding=padding),
        style=style,
    )
    # Label mirrors the slider value via a browser-side (js) link.
    int_slider_label = widgets.Label()
    widgets.jslink((int_slider, "value"), (int_slider_label, "value"))
    float_slider = widgets.FloatSlider(
        min=1,
        max=100,
        description="Float Slider: ",
        readout=False,
        continuous_update=True,
        layout=widgets.Layout(width="220px", padding=padding),
        style=style,
    )
    float_slider_label = widgets.Label()
    widgets.jslink((float_slider, "value"), (float_slider_label, "value"))
    color = widgets.ColorPicker(
        concise=False,
        description="Color:",
        value="white",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )
    text = widgets.Text(
        value="",
        description="Textbox:",
        placeholder="Placeholder",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )
    textarea = widgets.Textarea(
        placeholder="Placeholder",
        layout=widgets.Layout(width=widget_width),
    )
    # Apply/Reset/Close action buttons at the bottom of the panel.
    buttons = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"
    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
    # --- panel assembly: collapsed state shows only the gear button ---
    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        checkbox,
        widgets.HBox([int_slider, int_slider_label]),
        widgets.HBox([float_slider, float_slider_label]),
        dropdown,
        text,
        color,
        textarea,
        buttons,
        output,
    ]
    # Expand on hover, collapse on leave (unless pinned open by the button).
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )
    def handle_toolbar_event(event):
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False
    toolbar_event.on_dom_event(handle_toolbar_event)
    # Gear button pins the panel open; untoggling collapses it.
    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]
    toolbar_button.observe(toolbar_btn_click, "value")
    # Close button detaches the control from the map and destroys the panel.
    def close_btn_click(change):
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()
    close_button.observe(close_btn_click, "value")
    # Apply/Reset/Close dispatcher; resets its own toggle state afterwards.
    def button_clicked(change):
        if change["new"] == "Apply":
            with output:
                output.clear_output()
                print("Running ...")
        elif change["new"] == "Reset":
            textarea.value = ""
            output.clear_output()
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()
        buttons.value = None
    buttons.observe(button_clicked, "value")
    # Start expanded; attach to the map when one was provided.
    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )
        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def main_toolbar(m):
    """Creates the main toolbar and adds it to the map.

    Builds a grid of icon ToggleButtons (one per tool), a layers panel for
    toggling layer visibility/opacity, and the hover-to-expand wrapper, then
    attaches everything to the map as a WidgetControl.

    Args:
        m (leafmap.Map): The leafmap Map object.
    """
    # Maps a Font-Awesome icon name to the tool it triggers and its tooltip;
    # tool_callback below dispatches on tools[icon]["name"].
    tools = {
        "map": {
            "name": "basemap",
            "tooltip": "Change basemap",
        },
        "globe": {
            "name": "split_map",
            "tooltip": "Split-panel map",
        },
        "adjust": {
            "name": "planet",
            "tooltip": "Planet imagery",
        },
        "folder-open": {
            "name": "open_data",
            "tooltip": "Open local vector/raster data",
        },
        "gears": {
            "name": "whitebox",
            "tooltip": "WhiteboxTools for local geoprocessing",
        },
        "fast-forward": {
            "name": "timeslider",
            "tooltip": "Activate the time slider",
        },
        "eraser": {
            "name": "eraser",
            "tooltip": "Remove all drawn features",
        },
        "camera": {
            "name": "save_map",
            "tooltip": "Save map as HTML or image",
        },
        "address-book": {
            "name": "census",
            "tooltip": "Get US Census data",
        },
        "info": {
            "name": "inspector",
            "tooltip": "Get COG/STAC pixel value",
        },
        "search": {
            "name": "search_xyz",
            "tooltip": "Search XYZ tile services",
        },
        "download": {
            "name": "download_osm",
            "tooltip": "Download OSM data",
        },
        "picture-o": {
            "name": "raster",
            "tooltip": "Open COG/STAC dataset",
        },
        "spinner": {
            "name": "placeholder2",
            "tooltip": "This is a placeholder",
        },
        "question": {
            "name": "help",
            "tooltip": "Get help",
        },
    }
    # if m.sandbox_path is None and (os.environ.get("USE_VOILA") is not None):
    # voila_tools = ["camera", "folder-open", "gears"]
    # for item in voila_tools:
    # if item in tools.keys():
    # del tools[item]
    icons = list(tools.keys())
    tooltips = [item["tooltip"] for item in list(tools.values())]
    # Fixed-size icon grid, 3 columns wide.
    icon_width = "32px"
    icon_height = "32px"
    n_cols = 3
    n_rows = math.ceil(len(icons) / n_cols)
    toolbar_grid = widgets.GridBox(
        children=[
            widgets.ToggleButton(
                layout=widgets.Layout(
                    width="auto", height="auto", padding="0px 0px 0px 4px"
                ),
                button_style="primary",
                icon=icons[i],
                tooltip=tooltips[i],
            )
            for i in range(len(icons))
        ],
        layout=widgets.Layout(
            width="109px",
            grid_template_columns=(icon_width + " ") * n_cols,
            grid_template_rows=(icon_height + " ") * n_rows,
            grid_gap="1px 1px",
            padding="5px",
        ),
    )
    m.toolbar = toolbar_grid
    # Fired whenever any grid button toggles; deselects the other buttons and
    # dispatches to the selected tool's handler.
    def tool_callback(change):
        if change["new"]:
            current_tool = change["owner"]
            # enforce radio-button behavior: only one tool active at a time
            for tool in toolbar_grid.children:
                if tool is not current_tool:
                    tool.value = False
            tool = change["owner"]
            tool_name = tools[tool.icon]["name"]
            if tool_name == "basemap":
                change_basemap(m)
            if tool_name == "split_map":
                split_basemaps(m)
            if tool_name == "planet":
                split_basemaps(m, layers_dict=planet_tiles())
            elif tool_name == "open_data":
                open_data_widget(m)
            elif tool_name == "eraser":
                if m.draw_control is not None:
                    m.draw_control.clear()
                    m.user_roi = None
                    m.user_rois = None
                    m.draw_features = []
            elif tool_name == "whitebox":
                # imported lazily: whiteboxgui is only needed for this tool
                import whiteboxgui.whiteboxgui as wbt
                tools_dict = wbt.get_wbt_dict()
                wbt_toolbox = wbt.build_toolbox(
                    tools_dict,
                    max_width="800px",
                    max_height="500px",
                    sandbox_path=m.sandbox_path,
                )
                wbt_control = ipyleaflet.WidgetControl(
                    widget=wbt_toolbox, position="bottomright"
                )
                m.whitebox = wbt_control
                m.add_control(wbt_control)
            elif tool_name == "timeslider":
                m.add_time_slider()
            elif tool_name == "save_map":
                save_map((m))
            elif tool_name == "census":
                census_widget(m)
            elif tool_name == "inspector":
                inspector_gui(m)
            elif tool_name == "search_xyz":
                search_basemaps(m)
            elif tool_name == "download_osm":
                download_osm(m)
            elif tool_name == "raster":
                open_raster_gui(m)
            elif tool_name == "help":
                import webbrowser
                webbrowser.open_new_tab("https://leafmap.org")
                current_tool.value = False
        else:
            # tool = change["owner"]
            # tool_name = tools[tool.icon]["name"]
            pass
        m.toolbar_reset()
    for tool in toolbar_grid.children:
        tool.observe(tool_callback, "value")
    # Wrench button that expands/collapses the whole toolbar.
    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="wrench",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    m.toolbar_button = toolbar_button
    # Button that swaps the panel body for the layer-visibility list.
    layers_button = widgets.ToggleButton(
        value=False,
        tooltip="Layers",
        icon="server",
        layout=widgets.Layout(height="28px", width="72px"),
    )
    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [layers_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [toolbar_grid]
    # Expand on hover, collapse on leave (unless pinned open by the button).
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )
    def handle_toolbar_event(event):
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                layers_button.value = False
    toolbar_event.on_dom_event(handle_toolbar_event)
    def toolbar_btn_click(change):
        if change["new"]:
            layers_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not layers_button.value:
                toolbar_widget.children = [toolbar_button]
    toolbar_button.observe(toolbar_btn_click, "value")
    # Builds the layer list on demand each time the layers button is toggled
    # on, so it always reflects the map's current layers.
    def layers_btn_click(change):
        if change["new"]:
            layers_hbox = []
            all_layers_chk = widgets.Checkbox(
                value=False,
                description="All layers on/off",
                indent=False,
                layout=widgets.Layout(height="18px", padding="0px 8px 25px 8px"),
            )
            all_layers_chk.layout.width = "30ex"
            layers_hbox.append(all_layers_chk)
            def all_layers_chk_changed(change):
                if change["new"]:
                    for layer in m.layers:
                        layer.visible = True
                else:
                    for layer in m.layers:
                        layer.visible = False
            all_layers_chk.observe(all_layers_chk_changed, "value")
            # only tile-like layers get individual checkbox/opacity rows
            layers = [
                lyr
                for lyr in m.layers
                if (
                    isinstance(lyr, ipyleaflet.TileLayer)
                    or isinstance(lyr, ipyleaflet.WMSLayer)
                )
            ]
            # if the layers contain unsupported layers (e.g., GeoJSON, GeoData), adds the ipyleaflet built-in LayerControl
            if len(layers) < (len(m.layers) - 1):
                if m.layer_control is None:
                    layer_control = ipyleaflet.LayersControl(position="topright")
                    m.layer_control = layer_control
                if m.layer_control not in m.controls:
                    m.add_control(m.layer_control)
            # for non-TileLayer, use layer.style={'opacity':0, 'fillOpacity': 0} to turn layer off.
            for layer in layers:
                layer_chk = widgets.Checkbox(
                    value=layer.visible,
                    description=layer.name,
                    indent=False,
                    layout=widgets.Layout(height="18px"),
                )
                layer_chk.layout.width = "25ex"
                layer_opacity = widgets.FloatSlider(
                    value=layer.opacity,
                    min=0,
                    max=1,
                    step=0.01,
                    readout=False,
                    layout=widgets.Layout(width="80px"),
                )
                layer_settings = widgets.ToggleButton(
                    icon="gear",
                    tooltip=layer.name,
                    layout=widgets.Layout(
                        width="25px", height="25px", padding="0px 0px 0px 5px"
                    ),
                )
                # def layer_vis_on_click(change):
                # if change["new"]:
                # layer_name = change["owner"].tooltip
                # change["owner"].value = False
                # layer_settings.observe(layer_vis_on_click, "value")
                # def layer_chk_changed(change):
                # layer_name = change["owner"].description
                # layer_chk.observe(layer_chk_changed, "value")
                # browser-side links keep the checkbox/slider and the layer
                # traits in sync without round-tripping through Python
                widgets.jslink((layer_chk, "value"), (layer, "visible"))
                widgets.jsdlink((layer_opacity, "value"), (layer, "opacity"))
                hbox = widgets.HBox(
                    [layer_chk, layer_settings, layer_opacity],
                    layout=widgets.Layout(padding="0px 8px 0px 8px"),
                )
                layers_hbox.append(hbox)
            toolbar_footer.children = layers_hbox
            toolbar_button.value = False
        else:
            toolbar_footer.children = [toolbar_grid]
    layers_button.observe(layers_btn_click, "value")
    toolbar_control = ipyleaflet.WidgetControl(
        widget=toolbar_widget, position="topright"
    )
    m.add_control(toolbar_control)
def open_data_widget(m):
    """A widget for opening local vector/raster data.

    Presents a file-type selector, a file chooser, and per-type options
    (lon/lat columns for CSV, band/palette settings for rasters), then adds
    the chosen dataset to the map when Apply is clicked.

    Args:
        m (object): leafmap.Map
    """
    import warnings

    warnings.filterwarnings("ignore")
    padding = "0px 0px 0px 5px"
    style = {"description_width": "initial"}
    # tooltips parallel options one-to-one (the original list had six
    # mis-ordered entries for five options)
    file_type = widgets.ToggleButtons(
        options=["Shapefile", "GeoJSON", "CSV", "Vector", "Raster"],
        tooltips=[
            "Open a shapefile",
            "Open a GeoJSON file",
            "Create points from CSV",
            "Open a vector dataset",
            "Open a raster dataset",
        ],
    )
    file_type.style.button_width = "88px"
    filepath = widgets.Text(
        value="",
        description="File path or http URL:",
        tooltip="Enter a file path or http URL to vector data",
        style=style,
        layout=widgets.Layout(width="454px", padding=padding),
    )
    http_widget = widgets.HBox()
    file_chooser = FileChooser(
        os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px")
    )
    file_chooser.filter_pattern = "*.shp"
    file_chooser.use_dir_icons = True
    layer_name = widgets.Text(
        value="Shapefile",
        description="Enter a layer name:",
        tooltip="Enter a layer name for the selected file",
        style=style,
        layout=widgets.Layout(width="454px", padding=padding),
    )
    # CSV column selectors (populated from the file header on selection).
    longitude = widgets.Dropdown(
        options=[],
        value=None,
        description="Longitude:",
        layout=widgets.Layout(width="149px", padding=padding),
        style=style,
    )
    latitude = widgets.Dropdown(
        options=[],
        value=None,
        description="Latitude:",
        layout=widgets.Layout(width="149px", padding=padding),
        style=style,
    )
    label = widgets.Dropdown(
        options=[],
        value=None,
        description="Label:",
        layout=widgets.Layout(width="149px", padding=padding),
        style=style,
    )
    point_check = widgets.Checkbox(
        description="Is it a point layer?",
        indent=False,
        layout=widgets.Layout(padding=padding, width="150px"),
        style=style,
    )
    point_popup = widgets.SelectMultiple(
        options=[
            "None",
        ],
        value=["None"],
        description="Popup attributes:",
        disabled=False,
        style=style,
    )
    csv_widget = widgets.HBox()
    point_widget = widgets.HBox()
    # Show the popup-attribute selector only when the layer is a point layer.
    def point_layer_check(change):
        if point_check.value:
            if filepath.value.strip() != "":
                m.default_style = {"cursor": "wait"}
                point_popup.options = vector_col_names(filepath.value)
                point_popup.value = [point_popup.options[0]]
            point_widget.children = [point_check, point_popup]
        else:
            point_widget.children = [point_check]
    point_check.observe(point_layer_check)
    ok_cancel = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    # ok_cancel.style.button_width = "50px"
    # Raster visualization options.
    bands = widgets.Text(
        value=None,
        description="Band:",
        tooltip="Enter a list of band indices",
        style=style,
        layout=widgets.Layout(width="150px", padding=padding),
    )
    vmin = widgets.Text(
        value=None,
        description="vmin:",
        tooltip="Minimum value of the raster to visualize",
        style=style,
        layout=widgets.Layout(width="148px"),
    )
    vmax = widgets.Text(
        value=None,
        description="vmax:",
        tooltip="Maximum value of the raster to visualize",
        style=style,
        layout=widgets.Layout(width="148px"),
    )
    nodata = widgets.Text(
        value=None,
        description="Nodata:",
        tooltip="Nodata the raster to visualize",
        style=style,
        layout=widgets.Layout(width="150px", padding=padding),
    )
    palette = widgets.Dropdown(
        options=[],
        value=None,
        description="palette:",
        layout=widgets.Layout(width="300px"),
        style=style,
    )
    raster_options = widgets.VBox()
    # Placeholder observer kept for future per-type filepath handling.
    def filepath_change(change):
        if file_type.value == "Raster":
            pass
    filepath.observe(filepath_change, "value")
    tool_output = widgets.Output(
        layout=widgets.Layout(max_height="150px", max_width="500px", overflow="auto")
    )
    main_widget = widgets.VBox(
        [
            file_type,
            file_chooser,
            http_widget,
            csv_widget,
            layer_name,
            point_widget,
            raster_options,
            ok_cancel,
            tool_output,
        ]
    )
    tool_output_ctrl = ipyleaflet.WidgetControl(widget=main_widget, position="topright")
    if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
        m.remove_control(m.tool_output_ctrl)
    # Palettes only make sense for single-band rasters; disable the palette
    # dropdown when multiple bands are requested.
    def bands_changed(change):
        if change["new"] and "," in change["owner"].value:
            palette.value = None
            palette.disabled = True
        else:
            palette.disabled = False
    bands.observe(bands_changed, "value")
    # Sync the text box with the chooser; for CSV, pre-populate the column
    # dropdowns from the file header.
    def chooser_callback(chooser):
        filepath.value = file_chooser.selected
        if file_type.value == "CSV":
            import pandas as pd

            df = pd.read_csv(filepath.value)
            col_names = df.columns.values.tolist()
            longitude.options = col_names
            latitude.options = col_names
            label.options = col_names
            if "longitude" in col_names:
                longitude.value = "longitude"
            if "latitude" in col_names:
                latitude.value = "latitude"
            if "name" in col_names:
                label.value = "name"
    file_chooser.register_callback(chooser_callback)
    # Reconfigure the chooser filter and visible option rows per file type.
    def file_type_changed(change):
        ok_cancel.value = None
        file_chooser.default_path = os.getcwd()
        file_chooser.reset()
        layer_name.value = file_type.value
        csv_widget.children = []
        filepath.value = ""
        tool_output.clear_output()
        if change["new"] == "Shapefile":
            file_chooser.filter_pattern = "*.shp"
            raster_options.children = []
            point_widget.children = []
            point_check.value = False
            http_widget.children = []
        elif change["new"] == "GeoJSON":
            file_chooser.filter_pattern = ["*.geojson", "*.json"]
            raster_options.children = []
            point_widget.children = []
            point_check.value = False
            http_widget.children = [filepath]
        elif change["new"] == "Vector":
            file_chooser.filter_pattern = "*.*"
            raster_options.children = []
            point_widget.children = [point_check]
            point_check.value = False
            http_widget.children = [filepath]
        elif change["new"] == "CSV":
            file_chooser.filter_pattern = ["*.csv", "*.CSV"]
            csv_widget.children = [longitude, latitude, label]
            raster_options.children = []
            point_widget.children = []
            point_check.value = False
            http_widget.children = [filepath]
        elif change["new"] == "Raster":
            file_chooser.filter_pattern = ["*.tif", "*.img"]
            palette.options = get_palettable(types=["matplotlib", "cartocolors"])
            palette.value = None
            raster_options.children = [
                widgets.HBox([bands, vmin, vmax]),
                widgets.HBox([nodata, palette]),
            ]
            point_widget.children = []
            point_check.value = False
            http_widget.children = [filepath]
    # Apply: add the dataset to the map using the handler that matches the
    # selected type / file extension.
    def ok_cancel_clicked(change):
        if change["new"] == "Apply":
            m.default_style = {"cursor": "wait"}
            file_path = filepath.value
            with tool_output:
                tool_output.clear_output()
                if file_path.strip() != "":
                    ext = os.path.splitext(file_path)[1]
                    if point_check.value:
                        popup = list(point_popup.value)
                        if len(popup) == 1:
                            popup = popup[0]
                        m.add_point_layer(
                            file_path,
                            popup=popup,
                            layer_name=layer_name.value,
                        )
                    elif ext.lower() == ".shp":
                        m.add_shp(file_path, style={}, layer_name=layer_name.value)
                    elif ext.lower() == ".geojson":
                        m.add_geojson(file_path, style={}, layer_name=layer_name.value)
                    elif ext.lower() == ".csv" and file_type.value == "CSV":
                        m.add_xy_data(
                            file_path,
                            x=longitude.value,
                            y=latitude.value,
                            label=label.value,
                            layer_name=layer_name.value,
                        )
                    elif (
                        # fixed: splitext keeps the dot, so "img" could never match
                        ext.lower() in [".tif", ".img"]
                    ) and file_type.value == "Raster":
                        band = None
                        vis_min = None
                        vis_max = None
                        vis_nodata = None
                        try:
                            if len(bands.value) > 0:
                                band = int(bands.value)
                            if len(vmin.value) > 0:
                                vis_min = float(vmin.value)
                            if len(vmax.value) > 0:
                                vis_max = float(vmax.value)
                            if len(nodata.value) > 0:
                                vis_nodata = float(nodata.value)
                        except (TypeError, ValueError):
                            # leave any unparsable visualization field as None
                            pass
                        m.add_local_tile(
                            file_path,
                            layer_name=layer_name.value,
                            band=band,
                            palette=palette.value,
                            vmin=vis_min,
                            vmax=vis_max,
                            nodata=vis_nodata,
                        )
                else:
                    print("Please select a file to open.")
            m.toolbar_reset()
            m.default_style = {"cursor": "default"}
        elif change["new"] == "Reset":
            file_chooser.reset()
            tool_output.clear_output()
            filepath.value = ""
            m.toolbar_reset()
        elif change["new"] == "Close":
            if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
                m.remove_control(m.tool_output_ctrl)
                m.tool_output_ctrl = None
                m.toolbar_reset()
        ok_cancel.value = None
    file_type.observe(file_type_changed, names="value")
    ok_cancel.observe(ok_cancel_clicked, names="value")
    # file_chooser.register_callback(chooser_callback)
    m.add_control(tool_output_ctrl)
    m.tool_output_ctrl = tool_output_ctrl
def open_raster_gui(m):
"""A widget for opening local/remote COG/STAC data.
Args:
m (object): leafmap.Map
"""
import json
from .colormaps import list_colormaps, get_palette
padding = "0px 0px 0px 5px"
style = {"description_width": "initial"}
tool_output = widgets.Output(
layout=widgets.Layout(max_height="150px", max_width="500px", overflow="auto")
)
file_type = widgets.ToggleButtons(
options=["GeoTIFF", "COG", "STAC", "Microsoft"],
tooltips=[
"Open a local GeoTIFF file",
"Open a remote COG file",
"Open a remote STAC item",
"Create COG from Microsoft Planetary Computer",
],
)
file_type.style.button_width = "110px"
file_chooser = FileChooser(
os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px")
)
file_chooser.filter_pattern = ["*.tif", "*.tiff"]
file_chooser.use_dir_icons = True
source_widget = widgets.VBox([file_chooser])
http_url = widgets.Text(
value="",
description="HTTP URL:",
tooltip="Enter an http URL to COG file",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
collection = widgets.Dropdown(
options=["landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2"],
value="landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2",
description="PC Collection:",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
items = widgets.Text(
value="LC08_L2SP_047027_20201204_02_T1",
description="STAC Items:",
tooltip="STAC Item ID",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
assets = widgets.Text(
value="SR_B7,SR_B5,SR_B4",
description="STAC Assets:",
tooltip="STAC Asset ID",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
layer_name = widgets.Text(
value="GeoTIFF",
description="Enter a layer name:",
tooltip="Enter a layer name for the selected file",
style=style,
layout=widgets.Layout(width="454px", padding=padding),
)
ok_cancel = widgets.ToggleButtons(
value=None,
options=["Apply", "Reset", "Close"],
tooltips=["Apply", "Reset", "Close"],
button_style="primary",
)
# ok_cancel.style.button_width = "50px"
bands = widgets.Text(
value=None,
description="Band:",
tooltip="Enter a list of band indices",
style=style,
layout=widgets.Layout(width="150px", padding=padding),
)
band_width = width = "149px"
red = widgets.Dropdown(
value=None,
options=[],
description="Red:",
tooltip="Select a band for the red channel",
style=style,
layout=widgets.Layout(width=band_width, padding=padding),
)
green = widgets.Dropdown(
value=None,
options=[],
description="Green:",
tooltip="Select a band for the green channel",
style=style,
layout=widgets.Layout(width="148px", padding=padding),
)
blue = widgets.Dropdown(
value=None,
options=[],
description="Blue:",
tooltip="Select a band for the blue channel",
style=style,
layout=widgets.Layout(width=band_width, padding=padding),
)
vmin = widgets.Text(
value=None,
description="vmin:",
tooltip="Minimum value of the raster to visualize",
style=style,
layout=widgets.Layout(width="148px", padding=padding),
)
vmax = widgets.Text(
value=None,
description="vmax:",
tooltip="Maximum value of the raster to visualize",
style=style,
layout=widgets.Layout(width="148px", padding=padding),
)
nodata = widgets.Text(
value=None,
description="Nodata:",
tooltip="Nodata the raster to visualize",
style=style,
layout=widgets.Layout(width="150px", padding=padding),
)
local_tile_palettes = list_colormaps(add_extra=True)
cog_stac_palettes = list_colormaps(lowercase=True)
palette_options = local_tile_palettes
palette = widgets.Dropdown(
options=palette_options,
value=None,
description="palette:",
layout=widgets.Layout(width="300px", padding=padding),
style=style,
)
checkbox = widgets.Checkbox(
value=False,
description="Additional params",
indent=False,
layout=widgets.Layout(width="154px", padding=padding),
style=style,
)
add_params_text1 = "Additional parameters in the format of a dictionary, for example, \n {'palette': ['#006633', '#E5FFCC', '#662A00', '#D8D8D8', '#F5F5F5']}"
add_params_text2 = "Additional parameters in the format of a dictionary, for example, \n {'expression': '(SR_B5-SR_B4)/(SR_B5+SR_B4)'}"
add_params = widgets.Textarea(
value="",
placeholder=add_params_text1,
layout=widgets.Layout(width="454px", padding=padding),
style=style,
)
params_widget = widgets.HBox()
raster_options = widgets.VBox()
raster_options.children = [
widgets.HBox([red, green, blue]),
widgets.HBox([vmin, vmax, nodata]),
widgets.HBox([palette, checkbox]),
params_widget,
]
def collection_changed(change):
if change["new"]:
if not hasattr(m, "pc_inventory"):
setattr(m, "pc_inventory", get_pc_inventory())
col_name = change["new"].split(" - ")[0]
items.value = m.pc_inventory[col_name]["first_item"]
band_names = m.pc_inventory[col_name]["bands"]
red.options = band_names
green.options = band_names
blue.options = band_names
if change["new"] == "landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2":
items.value = "LC08_L2SP_047027_20201204_02_T1"
assets.value = "SR_B7,SR_B5,SR_B4"
red.value = "SR_B7"
green.value = "SR_B5"
blue.value = "SR_B4"
elif change["new"] == "sentinel-2-l2a - Sentinel-2 Level-2A":
items.value = "S2B_MSIL2A_20190629T212529_R043_T06VVN_20201006T080531"
assets.value = "B08,B04,B03"
red.value = "B08"
green.value = "B04"
blue.value = "B03"
else:
if len(band_names) > 2:
assets.value = ",".join(band_names[:3])
red.value = band_names[0]
green.value = band_names[1]
blue.value = band_names[2]
else:
assets.value = band_names[0]
red.value = band_names[0]
green.value = band_names[0]
blue.value = band_names[0]
collection.observe(collection_changed, names="value")
def band_changed(change):
if change["name"]:
if not checkbox.value:
if file_type.value == "GeoTIFF":
if hasattr(m, "tile_client"):
min_max = local_tile_vmin_vmax(
m.tile_client, bands=[red.value, green.value, blue.value]
)
vmin.value = str(min_max[0])
vmax.value = str(min_max[1])
elif file_type.value == "Microsoft":
if len(set([red.value, green.value, blue.value])) == 1:
assets.value = f"{red.value}"
else:
assets.value = f"{red.value},{green.value},{blue.value}"
red.observe(band_changed, names="value")
green.observe(band_changed, names="value")
blue.observe(band_changed, names="value")
def checkbox_changed(change):
if change["new"]:
params_widget.children = [add_params]
else:
params_widget.children = []
checkbox.observe(checkbox_changed, names="value")
def url_change(change):
if change["new"] and change["new"].startswith("http"):
with tool_output:
try:
print("Retrieving band names...")
if file_type.value == "COG":
bandnames = cog_bands(change["new"])
elif file_type.value == "STAC":
bandnames = stac_bands(change["new"])
red.options = bandnames
green.options = bandnames
blue.options = bandnames
if len(bandnames) > 2:
red.value = bandnames[0]
green.value = bandnames[1]
blue.value = bandnames[2]
else:
red.value = bandnames[0]
green.value = bandnames[0]
blue.value = bandnames[0]
tool_output.clear_output()
except Exception as e:
print(e)
print("Error loading URL.")
return
else:
red.options = []
green.options = []
blue.options = []
vmin.value = ""
vmax.value = ""
nodata.value = ""
palette.value = None
http_url.observe(url_change, names="value")
main_widget = widgets.VBox(
[
file_type,
source_widget,
layer_name,
raster_options,
ok_cancel,
tool_output,
]
)
tool_output_ctrl = ipyleaflet.WidgetControl(widget=main_widget, position="topright")
if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
m.remove_control(m.tool_output_ctrl)
def bands_changed(change):
if change["new"] and "," in change["owner"].value:
palette.value = None
palette.disabled = True
else:
palette.disabled = False
bands.observe(bands_changed, "value")
def chooser_callback(chooser):
try:
source = file_chooser.selected
tile_layer, tile_client = get_local_tile_layer(source, return_client=True)
if not hasattr(m, "tile_client"):
setattr(m, "tile_client", tile_client)
bandnames = local_tile_bands(tile_client)
red.options = bandnames
green.options = bandnames
blue.options = bandnames
if len(bandnames) > 2:
red.value = bandnames[0]
green.value = bandnames[1]
blue.value = bandnames[2]
min_max = local_tile_vmin_vmax(
tile_client, bands=[red.value, green.value, blue.value]
)
vmin.value = str(min_max[0])
vmax.value = str(min_max[1])
else:
red.value = bandnames[0]
green.value = bandnames[0]
blue.value = bandnames[0]
min_max = local_tile_vmin_vmax(tile_client)
vmin.value = str(min_max[0])
vmax.value = str(min_max[1])
except Exception as e:
with tool_output:
print(e)
file_chooser.register_callback(chooser_callback)
def file_type_changed(change):
ok_cancel.value = None
file_chooser.default_path = os.getcwd()
file_chooser.reset()
layer_name.value = file_type.value
http_url.value = ""
tool_output.clear_output()
red.value = None
green.value = None
blue.value = None
vmin.value = ""
vmax.value = ""
nodata.value = ""
palette.value = None
if change["new"] == "GeoTIFF":
source_widget.children = [file_chooser]
file_chooser.filter_pattern = ["*.tif", "*.tiff"]
palette.options = local_tile_palettes
palette.value = None
add_params.placeholder = add_params_text1
raster_options.children = [
widgets.HBox([red, green, blue]),
widgets.HBox([vmin, vmax, nodata]),
widgets.HBox([palette, checkbox]),
params_widget,
]
elif change["new"] == "COG":
http_url.value = "https://opendata.digitalglobe.com/events/california-fire-2020/post-event/2020-08-14/pine-gulch-fire20/10300100AAC8DD00.tif"
source_widget.children = [http_url]
palette.options = cog_stac_palettes
palette.value = None
add_params.placeholder = add_params_text2
elif change["new"] == "STAC":
http_url.value = "https://canada-spot-ortho.s3.amazonaws.com/canada_spot_orthoimages/canada_spot5_orthoimages/S5_2007/S5_11055_6057_20070622/S5_11055_6057_20070622.json"
source_widget.children = [http_url]
palette.options = cog_stac_palettes
palette.value = None
red.value = "B3"
green.value = "B2"
blue.value = "B1"
add_params.placeholder = add_params_text2
elif change["new"] == "Microsoft":
source_widget.children = [collection, items, assets]
palette.options = cog_stac_palettes
palette.value = None
add_params.placeholder = add_params_text2
collection.options = get_collection_list()
collection.value = "landsat-8-c2-l2 - Landsat 8 Collection 2 Level-2"
if not hasattr(m, "pc_inventory"):
setattr(m, "pc_inventory", get_pc_inventory())
items.value = "LC08_L2SP_047027_20201204_02_T1"
assets.value = "SR_B7,SR_B5,SR_B4"
def ok_cancel_clicked(change):
if change["new"] == "Apply":
m.default_style = {"cursor": "wait"}
# file_path = http_url.value
with tool_output:
tool_output.clear_output()
print("Loading data...")
if file_type.value == "GeoTIFF" and file_chooser.selected:
band = None
vis_min = None
vis_max = None
vis_nodata = None
vis_palette = None
try:
if len(red.options) > 2:
band = [red.value, green.value, blue.value]
if len(set(band)) > 1:
palette.value = None
else:
band = [red.value]
else:
band = [red.value]
if len(vmin.value) > 0:
vis_min = float(vmin.value)
if len(vmax.value) > 0:
vis_max = float(vmax.value)
if len(nodata.value) > 0:
vis_nodata = float(nodata.value)
if (
checkbox.value
and add_params.value.strip().startswith("{")
and add_params.value.strip().endswith("}")
):
vis_params = eval(add_params.value)
if "palette" in vis_params:
vis_palette = vis_params["palette"]
else:
vis_palette = get_palette(palette.value, hashtag=True)
elif palette.value is not None:
vis_palette = get_palette(palette.value, hashtag=True)
except:
pass
m.add_local_tile(
file_chooser.selected,
layer_name=layer_name.value,
band=band,
palette=vis_palette,
vmin=vis_min,
vmax=vis_max,
nodata=vis_nodata,
)
tool_output.clear_output()
elif file_type.value in ["COG", "STAC"] and http_url.value:
try:
tool_output.clear_output()
print("Loading data...")
if (
checkbox.value
and add_params.value.strip().startswith("{")
and add_params.value.strip().endswith("}")
):
vis_params = eval(add_params.value)
else:
vis_params = {}
if (
palette.value
and len(set([red.value, green.value, blue.value])) == 1
):
vis_params["colormap_name"] = palette.value
elif (
palette.value
and len(set([red.value, green.value, blue.value])) > 1
):
palette.value = None
print("Palette can only be set for single band images.")
if vmin.value and vmax.value:
vis_params["rescale"] = f"{vmin.value},{vmax.value}"
if nodata.value:
vis_params["nodata"] = nodata.value
if file_type.value == "COG":
m.add_cog_layer(
http_url.value,
name=layer_name.value,
bands=[red.value, green.value, blue.value],
**vis_params,
)
elif file_type.value == "STAC":
m.add_stac_layer(
http_url.value,
bands=[red.value, green.value, blue.value],
name=layer_name.value,
**vis_params,
)
tool_output.clear_output()
except Exception as e:
print(e)
print("Error loading data.")
return
elif file_type.value == "Microsoft":
try:
tool_output.clear_output()
print("Loading data...")
if (
checkbox.value
and add_params.value.strip().startswith("{")
and add_params.value.strip().endswith("}")
):
vis_params = eval(add_params.value)
else:
vis_params = {}
if (
palette.value
and len(set([red.value, green.value, blue.value])) == 1
) or (palette.value and "expression" in vis_params):
vis_params["colormap_name"] = palette.value
elif (
palette.value
and len(set([red.value, green.value, blue.value])) > 1
and "expression" not in vis_params
):
palette.value = None
print("Palette can only be set for single band images.")
if vmin.value and vmax.value:
vis_params["rescale"] = f"{vmin.value},{vmax.value}"
if nodata.value:
vis_params["nodata"] = nodata.value
col = collection.value.split(" - ")[0]
m.add_stac_layer(
collection=col,
items=items.value,
assets=assets.value,
name=layer_name.value,
**vis_params,
)
tool_output.clear_output()
except Exception as e:
print(e)
print("Error loading data.")
return
else:
tool_output.clear_output()
print("Please select a file and enter an http URL.")
m.toolbar_reset()
m.default_style = {"cursor": "default"}
elif change["new"] == "Reset":
file_chooser.reset()
tool_output.clear_output()
http_url.value = ""
add_params.value = ""
checkbox.value = False
palette.value = None
red.value = None
green.value = None
blue.value = None
vmin.value = ""
vmax.value = ""
nodata.value = ""
collection.value = None
items.value = ""
assets.value = ""
m.toolbar_reset()
elif change["new"] == "Close":
if m.tool_output_ctrl is not None and m.tool_output_ctrl in m.controls:
m.remove_control(m.tool_output_ctrl)
m.tool_output_ctrl = None
m.toolbar_reset()
ok_cancel.value = None
file_type.observe(file_type_changed, names="value")
ok_cancel.observe(ok_cancel_clicked, names="value")
m.add_control(tool_output_ctrl)
m.tool_output_ctrl = tool_output_ctrl
def change_basemap(m):
    """Widget for changing basemaps.

    Adds a dropdown of available basemaps plus a close button to the map.
    Selecting an entry swaps out the map's second layer (kept as the
    OpenStreetMap basemap slot) and, when bounds metadata is available,
    zooms to the new basemap's extent.

    Args:
        m (object): leafmap.Map.
    """
    from .basemaps import get_xyz_dict
    from .leafmap import leafmap_basemaps

    xyz_dict = get_xyz_dict()

    # Make sure layer index 1 is always the OpenStreetMap basemap so the
    # dropdown has a well-known layer to substitute.
    current_layers = list(m.layers)
    osm_layer = leafmap_basemaps["OpenStreetMap"]
    if len(current_layers) == 1:
        current_layers = [current_layers[0], osm_layer]
    elif len(current_layers) > 1 and current_layers[1].name != "OpenStreetMap":
        current_layers = [current_layers[0], osm_layer] + current_layers[1:]
    m.layers = current_layers

    dropdown = widgets.Dropdown(
        options=list(leafmap_basemaps.keys()),
        value="OpenStreetMap",
        layout=widgets.Layout(width="200px"),
    )

    close_btn = widgets.Button(
        icon="times",
        tooltip="Close the basemap widget",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )

    basemap_widget = widgets.HBox([dropdown, close_btn])

    def on_selection(change):
        # Replace the current basemap (slot 1) with the chosen one.
        chosen = change["new"]
        previous = m.layers[1]
        m.substitute_layer(previous, leafmap_basemaps[chosen])
        if chosen in xyz_dict:
            if "bounds" in xyz_dict[chosen]:
                extent = xyz_dict[chosen]["bounds"]
                # Reorder [[lat, lon], [lat, lon]] into [lon, lat, lon, lat].
                extent = [extent[0][1], extent[0][0], extent[1][1], extent[1][0]]
                m.zoom_to_bounds(extent)

    dropdown.observe(on_selection, "value")

    def on_close(change):
        # Remove the control from the map (if present) and dispose the widget.
        m.toolbar_reset()
        if m.basemap_ctrl is not None and m.basemap_ctrl in m.controls:
            m.remove_control(m.basemap_ctrl)
        basemap_widget.close()

    close_btn.on_click(on_close)

    basemap_control = ipyleaflet.WidgetControl(
        widget=basemap_widget, position="topright"
    )
    m.add_control(basemap_control)
    m.basemap_ctrl = basemap_control
def save_map(m):
    """Saves the map as HTML, JPG, or PNG.

    Adds a widget control with an export-type selector and a file chooser.
    HTML export calls ``m.to_html``; PNG and JPG exports take a browser
    screenshot via ``screen_capture`` after hiding the toolbar widgets.

    Args:
        m (leafmap.Map): The leafmap Map object.
    """
    import time

    tool_output = widgets.Output()
    m.tool_output = tool_output
    tool_output.clear_output(wait=True)
    save_map_widget = widgets.VBox()

    save_type = widgets.ToggleButtons(
        options=["HTML", "PNG", "JPG"],
        tooltips=[
            "Save the map as an HTML file",
            "Take a screenshot and save as a PNG file",
            "Take a screenshot and save as a JPG file",
        ],
    )

    file_chooser = FileChooser(
        os.getcwd(), sandbox_path=m.sandbox_path, layout=widgets.Layout(width="454px")
    )
    file_chooser.default_filename = "my_map.html"
    file_chooser.use_dir_icons = True

    ok_cancel = widgets.ToggleButtons(
        value=None,
        options=["OK", "Cancel", "Close"],
        tooltips=["OK", "Cancel", "Close"],
        button_style="primary",
    )

    def save_type_changed(change):
        # Reset the confirmation buttons and suggest a default filename
        # that matches the newly selected export type.
        ok_cancel.value = None
        # file_chooser.reset()
        file_chooser.default_path = os.getcwd()
        if change["new"] == "HTML":
            file_chooser.default_filename = "my_map.html"
        elif change["new"] == "PNG":
            file_chooser.default_filename = "my_map.png"
        elif change["new"] == "JPG":
            file_chooser.default_filename = "my_map.jpg"
        save_map_widget.children = [save_type, file_chooser]

    def chooser_callback(chooser):
        # Reveal the OK/Cancel/Close buttons once a file has been selected.
        save_map_widget.children = [save_type, file_chooser, ok_cancel]

    def ok_cancel_clicked(change):
        if change["new"] == "OK":
            file_path = file_chooser.selected
            ext = os.path.splitext(file_path)[1]
            if save_type.value == "HTML" and ext.upper() == ".HTML":
                tool_output.clear_output()
                m.to_html(file_path)
            elif (
                save_type.value in ("PNG", "JPG")
                and ext.upper() == f".{save_type.value}"
            ):
                # Screenshot export (identical for PNG and JPG): hide the
                # toolbar and this widget so they don't appear in the capture,
                # then wait briefly for the UI to redraw before capturing.
                tool_output.clear_output()
                m.toolbar_button.value = False
                if m.save_map_control is not None:
                    m.remove_control(m.save_map_control)
                    m.save_map_control = None
                time.sleep(2)
                screen_capture(outfile=file_path)
            else:
                label = widgets.Label(
                    value="The selected file extension does not match the selected exporting type."
                )
                save_map_widget.children = [save_type, file_chooser, label]
        elif change["new"] == "Cancel":
            tool_output.clear_output()
            file_chooser.reset()
        elif change["new"] == "Close":
            if m.save_map_control is not None:
                m.remove_control(m.save_map_control)
                m.save_map_control = None
        ok_cancel.value = None
        m.toolbar_reset()

    save_type.observe(save_type_changed, names="value")
    ok_cancel.observe(ok_cancel_clicked, names="value")
    file_chooser.register_callback(chooser_callback)
    save_map_widget.children = [save_type, file_chooser]
    save_map_control = ipyleaflet.WidgetControl(
        widget=save_map_widget, position="topright"
    )
    m.add_control(save_map_control)
    m.save_map_control = save_map_control
def split_basemaps(
    m, layers_dict=None, left_name=None, right_name=None, width="120px", **kwargs
):
    """Create a split-panel map for visualizing two maps.

    Replaces the map's controls with a SplitMapControl plus two dropdowns
    (one per panel) for choosing the layers to compare. A close button
    restores the original controls and layers.

    Args:
        m (ipyleaflet.Map): An ipyleaflet map object.
        layers_dict (dict, optional): A dictionary of TileLayers. Defaults to None.
        left_name (str, optional): The default value of the left dropdown list. Defaults to None.
        right_name (str, optional): The default value of the right dropdown list. Defaults to None.
        width (str, optional): The width of the dropdown list. Defaults to "120px".
    """
    from .leafmap import leafmap_basemaps

    # Snapshot the current controls/layers so the close button can restore them.
    controls = m.controls
    layers = m.layers
    # m.layers = [m.layers[0]]
    m.clear_controls()

    add_zoom = True
    add_fullscreen = True

    if layers_dict is None:
        # Default to all non-WMS basemaps (SplitMapControl works with tile URLs).
        layers_dict = {}
        keys = dict(leafmap_basemaps).keys()
        for key in keys:
            if isinstance(leafmap_basemaps[key], ipyleaflet.WMSLayer):
                pass
            else:
                layers_dict[key] = leafmap_basemaps[key]

    keys = list(layers_dict.keys())
    if left_name is None:
        left_name = keys[0]
    if right_name is None:
        right_name = keys[-1]

    left_layer = layers_dict[left_name]
    right_layer = layers_dict[right_name]

    control = ipyleaflet.SplitMapControl(left_layer=left_layer, right_layer=right_layer)
    m.add_control(control)

    left_dropdown = widgets.Dropdown(
        options=keys, value=left_name, layout=widgets.Layout(width=width)
    )

    left_control = ipyleaflet.WidgetControl(widget=left_dropdown, position="topleft")
    m.add_control(left_control)

    right_dropdown = widgets.Dropdown(
        options=keys, value=right_name, layout=widgets.Layout(width=width)
    )

    right_control = ipyleaflet.WidgetControl(widget=right_dropdown, position="topright")
    m.add_control(right_control)

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        # button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    def close_btn_click(change):
        # Restore the controls and layers captured before the split view.
        if change["new"]:
            m.controls = controls
            m.clear_layers()
            m.layers = layers

    close_button.observe(close_btn_click, "value")

    close_control = ipyleaflet.WidgetControl(
        widget=close_button, position="bottomright"
    )
    m.add_control(close_control)

    if add_zoom:
        m.add_control(ipyleaflet.ZoomControl())
    if add_fullscreen:
        m.add_control(ipyleaflet.FullScreenControl())
    m.add_control(ipyleaflet.ScaleControl(position="bottomleft"))

    # Locate the SplitMapControl just added so the dropdown observers can
    # update its left/right layer URLs in place.
    split_control = None
    for ctrl in m.controls:
        if isinstance(ctrl, ipyleaflet.SplitMapControl):
            split_control = ctrl
            break

    def left_change(change):
        split_control.left_layer.url = layers_dict[left_dropdown.value].url

    left_dropdown.observe(left_change, "value")

    def right_change(change):
        split_control.right_layer.url = layers_dict[right_dropdown.value].url

    right_dropdown.observe(right_change, "value")
def time_slider(
    m,
    layers_dict=None,
    labels=None,
    time_interval=1,
    position="bottomright",
    slider_length="150px",
):
    """Adds a time slider to the map for animating a set of tile layers.

    Args:
        m (leafmap.Map): The map to attach the slider control to.
        layers_dict (dict, optional): The dictionary containing a set of XYZ
            tile layers keyed by label. Defaults to None, in which case
            Planet monthly tiles are used.
        labels (list, optional): The list of labels to be used for the time
            series. Defaults to None, which uses the keys of ``layers_dict``.
        time_interval (int, optional): Time interval in seconds between frames
            while playing. Defaults to 1.
        position (str, optional): Position to place the time slider, can be
            any of ['topleft', 'topright', 'bottomleft', 'bottomright'].
            Defaults to "bottomright".
        slider_length (str, optional): Length of the time slider.
            Defaults to "150px".

    Raises:
        TypeError: If layers_dict is not a dictionary.
        ValueError: If the number of labels differs from the number of layers.
    """
    import time
    import threading

    # Use None instead of a mutable {} default to avoid the shared
    # mutable-default-argument pitfall.
    if layers_dict is None:
        layers_dict = {}
    if not isinstance(layers_dict, dict):
        raise TypeError("The layers_dict must be a dictionary.")

    if len(layers_dict) == 0:
        layers_dict = planet_monthly_tiles()

    if labels is None:
        labels = list(layers_dict.keys())
    if len(labels) != len(layers_dict):
        raise ValueError("The length of labels is not equal to that of layers_dict.")

    slider = widgets.IntSlider(
        min=1,
        max=len(labels),
        readout=False,
        continuous_update=False,
        layout=widgets.Layout(width=slider_length),
    )

    label = widgets.Label(
        value=labels[0], layout=widgets.Layout(padding="0px 5px 0px 5px")
    )

    play_btn = widgets.Button(
        icon="play",
        tooltip="Play the time slider",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )

    pause_btn = widgets.Button(
        icon="pause",
        tooltip="Pause the time slider",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )

    close_btn = widgets.Button(
        icon="times",
        tooltip="Close the time slider",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )

    # Hidden checkbox used as a thread-visible play/pause flag.
    play_chk = widgets.Checkbox(value=False)

    slider_widget = widgets.HBox([label, slider, play_btn, pause_btn, close_btn])

    def play_click(b):
        # Guard: if the animation is already playing, do nothing. Without
        # this, each extra click spawned another worker thread and the
        # slider advanced at a multiple of the intended speed.
        if play_chk.value:
            return

        play_chk.value = True

        def work(slider):
            # Advance the slider once per time_interval, wrapping around.
            while play_chk.value:
                if slider.value < len(labels):
                    slider.value += 1
                else:
                    slider.value = 1
                time.sleep(time_interval)

        thread = threading.Thread(target=work, args=(slider,))
        thread.start()

    def pause_click(b):
        play_chk.value = False

    play_btn.on_click(play_click)
    pause_btn.on_click(pause_click)

    keys = list(layers_dict.keys())
    layer = layers_dict[keys[0]]
    m.add_layer(layer)

    def slider_changed(change):
        # Swap the displayed layer in place by updating its URL and name.
        m.default_style = {"cursor": "wait"}
        index = slider.value - 1
        label.value = labels[index]
        layer.url = layers_dict[label.value].url
        layer.name = layers_dict[label.value].name
        m.default_style = {"cursor": "default"}

    slider.observe(slider_changed, "value")

    def close_click(b):
        # Stop the animation thread loop, then remove the control.
        play_chk.value = False
        m.toolbar_reset()

        if m.slider_ctrl is not None and m.slider_ctrl in m.controls:
            m.remove_control(m.slider_ctrl)
        slider_widget.close()

    close_btn.on_click(close_click)

    slider_ctrl = ipyleaflet.WidgetControl(widget=slider_widget, position=position)
    m.add_control(slider_ctrl)
    m.slider_ctrl = slider_ctrl
def census_widget(m=None):
    """Widget for adding US Census data.

    Immediately adds the "Census 2020" / "States" WMS layer, then shows a
    collapsible toolbar for switching WMS service and layer.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget (only when m is None; otherwise the
            widget is added to the map as a control).
    """
    # NOTE(review): WebMapService is only referenced by the commented-out
    # abstract-printing code below — confirm whether this import is still needed.
    from owslib.wms import WebMapService

    census_dict = get_census_dict()
    # Add a default census layer right away so the map is not empty.
    m.add_census_data("Census 2020", "States")

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="address-book",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    wms = widgets.Dropdown(
        options=census_dict.keys(),
        value="Census 2020",
        description="WMS:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )

    layer = widgets.Dropdown(
        options=census_dict["Census 2020"]["layers"],
        value="States",
        description="Layer:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )

    checkbox = widgets.Checkbox(
        description="Replace existing census data layer",
        value=True,
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )

    # output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        wms,
        layer,
        checkbox,
        # output,
    ]

    # Expand the toolbar on hover, collapse it again on mouse-leave.
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Toggling the main button pins the toolbar open or collapses it.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Remove the control from the map and dispose the widget.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def wms_change(change):
        # Switching WMS service repopulates the layer dropdown; assigning
        # layer.value triggers layer_change below.
        layer.options = census_dict[change["new"]]["layers"]
        layer.value = layer.options[0]

    wms.observe(wms_change, "value")

    def layer_change(change):
        if change["new"] != "":
            if checkbox.value:
                # Drop the most recently added layer before adding the new one.
                m.layers = m.layers[:-1]
            m.add_census_data(wms.value, layer.value)

            # with output:
            #     w = WebMapService(census_dict[wms.value]["url"])
            #     output.clear_output()
            #     print(w[layer.value].abstract)

    layer.observe(layer_change, "value")

    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )

        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def search_basemaps(m=None):
    """The widget for search XYZ tile services.

    Searches xyzservices providers (and optionally Quick Map Services) by
    keyword and adds the selected tile service to the map.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget (only when m is None; otherwise the
            widget is added to the map as a control).
    """
    import xyzservices.providers as xyz
    from xyzservices import TileProvider

    # Snapshot the current layers so adding a provider replaces any layer
    # added by a previous selection rather than stacking them up.
    layers = m.layers

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="search",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    checkbox = widgets.Checkbox(
        description="Search Quick Map Services (QMS)",
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )

    providers = widgets.Dropdown(
        options=[],
        value=None,
        description="XYZ Tile:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )

    keyword = widgets.Text(
        value="",
        description="Search keyword:",
        placeholder="OpenStreetMap",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    def search_callback(change):
        # Populate the provider dropdown from the keyword; optionally
        # include QMS results.
        providers.options = []
        if keyword.value != "":
            tiles = search_xyz_services(keyword=keyword.value)
            if checkbox.value:
                tiles = tiles + search_qms(keyword=keyword.value)
            providers.options = tiles

    # NOTE(review): Text.on_submit is deprecated in newer ipywidgets in
    # favor of continuous_update=False + observe — confirm target version.
    keyword.on_submit(search_callback)

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Search", "Reset", "Close"],
        tooltips=["Search", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    def providers_change(change):
        if change["new"] != "":
            provider = change["new"]
            if provider is not None:
                # Option strings are prefixed "qms:"/"xyz:" style; strip the
                # 4-character prefix to get the provider name.
                if provider.startswith("qms"):
                    with output:
                        output.clear_output()
                        print("Adding data. Please wait...")
                    name = provider[4:]
                    qms_provider = TileProvider.from_qms(name)
                    url = qms_provider.build_url()
                    attribution = qms_provider.attribution
                    # Reset to the original layers before adding the new one.
                    m.layers = layers
                    m.add_tile_layer(url, name, attribution)
                    output.clear_output()
                elif provider.startswith("xyz"):
                    name = provider[4:]
                    xyz_provider = xyz.flatten()[name]
                    url = xyz_provider.build_url()
                    attribution = xyz_provider.attribution
                    m.layers = layers
                    if xyz_provider.requires_token():
                        with output:
                            output.clear_output()
                            print(f"{provider} requires an API Key.")
                    m.add_tile_layer(url, name, attribution)

    providers.observe(providers_change, "value")

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        checkbox,
        keyword,
        providers,
        buttons,
        output,
    ]

    # Expand the toolbar on hover, collapse it again on mouse-leave.
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Toggling the main button pins the toolbar open or collapses it.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Remove the control from the map and dispose the widget.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Search":
            # Same search as pressing Enter in the keyword box.
            providers.options = []
            if keyword.value != "":
                tiles = search_xyz_services(keyword=keyword.value)
                if checkbox.value:
                    tiles = tiles + search_qms(keyword=keyword.value)
                providers.options = tiles
            with output:
                output.clear_output()
                # print("Running ...")
        elif change["new"] == "Reset":
            keyword.value = ""
            providers.options = []
            output.clear_output()
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()
        # Clear the toggle so the same button can be clicked again.
        buttons.value = None

    buttons.observe(button_clicked, "value")

    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )

        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def download_osm(m=None):
    """Widget for downloading OSM data.

    NOTE(review): despite the name, this is currently a GUI scaffold — the
    widgets are generic placeholders ("Checkbox", "Option 1", etc.) and the
    Apply button only prints "Running ..."; no OSM download is implemented.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget (only when m is None; otherwise the
            widget is added to the map as a control).
    """
    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="gear",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    # Placeholder inputs below — to be replaced with real OSM download options.
    checkbox = widgets.Checkbox(
        description="Checkbox",
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )

    dropdown = widgets.Dropdown(
        options=["Option 1", "Option 2", "Option 3"],
        value=None,
        description="Dropdown:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )

    int_slider = widgets.IntSlider(
        min=1,
        max=100,
        description="Int Slider: ",
        readout=False,
        continuous_update=True,
        layout=widgets.Layout(width="220px", padding=padding),
        style=style,
    )

    # Mirror the slider value into a label on the front end (no Python round-trip).
    int_slider_label = widgets.Label()
    widgets.jslink((int_slider, "value"), (int_slider_label, "value"))

    float_slider = widgets.FloatSlider(
        min=1,
        max=100,
        description="Float Slider: ",
        readout=False,
        continuous_update=True,
        layout=widgets.Layout(width="220px", padding=padding),
        style=style,
    )

    float_slider_label = widgets.Label()
    widgets.jslink((float_slider, "value"), (float_slider_label, "value"))

    color = widgets.ColorPicker(
        concise=False,
        description="Color:",
        value="white",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    text = widgets.Text(
        value="",
        description="Textbox:",
        placeholder="Placeholder",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    textarea = widgets.Textarea(
        placeholder="Placeholder",
        layout=widgets.Layout(width=widget_width),
    )

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Apply", "Reset", "Close"],
        tooltips=["Apply", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"
    buttons.style.button_padding = "0px"

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        checkbox,
        widgets.HBox([int_slider, int_slider_label]),
        widgets.HBox([float_slider, float_slider_label]),
        dropdown,
        text,
        color,
        textarea,
        buttons,
        output,
    ]

    # Expand the toolbar on hover, collapse it again on mouse-leave.
    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        # Toggling the main button pins the toolbar open or collapses it.
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Remove the control from the map and dispose the widget.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Apply":
            # Placeholder action — no download logic implemented yet.
            with output:
                output.clear_output()
                print("Running ...")
        elif change["new"] == "Reset":
            textarea.value = ""
            output.clear_output()
        elif change["new"] == "Close":
            if m is not None:
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
            toolbar_widget.close()
        # Clear the toggle so the same button can be clicked again.
        buttons.value = None

    buttons.observe(button_clicked, "value")

    toolbar_button.value = True
    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )

        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control
    else:
        return toolbar_widget
def inspector_gui(m=None):
    """Generates the raster Inspector tool GUI using ipywidgets.

    The tool lets the user click on the map to query pixel values from
    COG/STAC/local-tile layers, optionally dropping a marker per click,
    and to download the collected values as CSV.

    Args:
        m (leafmap.Map, optional): The leaflet Map object. Defaults to None.

    Returns:
        ipywidgets: The tool GUI widget (only when ``m`` is None; otherwise
            the widget is attached to the map as a WidgetControl).
    """
    import pandas as pd

    widget_width = "250px"
    padding = "0px 5px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    if m is not None:
        # Per-map state lives as attributes on the Map object so that the
        # widgets survive closing/reopening the tool.
        marker_cluster = ipyleaflet.MarkerCluster(name="Inspector Markers")
        setattr(m, "pixel_values", [])
        setattr(m, "marker_cluster", marker_cluster)

        if not hasattr(m, "interact_mode"):
            setattr(m, "interact_mode", False)

        if not hasattr(m, "inspector_output"):
            inspector_output = widgets.Output(
                layout=widgets.Layout(width=widget_width, padding="0px 5px 5px 5px")
            )
            setattr(m, "inspector_output", inspector_output)

    output = m.inspector_output
    output.clear_output()

    if not hasattr(m, "inspector_add_marker"):
        inspector_add_marker = widgets.Checkbox(
            description="Add Marker at clicked location",
            value=True,
            indent=False,
            layout=widgets.Layout(padding=padding, width=widget_width),
        )
        setattr(m, "inspector_add_marker", inspector_add_marker)
    add_marker = m.inspector_add_marker

    if not hasattr(m, "inspector_bands_chk"):
        inspector_bands_chk = widgets.Checkbox(
            description="Get pixel value for visible bands only",
            indent=False,
            layout=widgets.Layout(padding=padding, width=widget_width),
        )
        setattr(m, "inspector_bands_chk", inspector_bands_chk)
    bands_chk = m.inspector_bands_chk

    if not hasattr(m, "inspector_class_label"):
        inspector_label = widgets.Text(
            value="",
            description="Class label:",
            placeholder="Add a label to the marker",
            style=style,
            layout=widgets.Layout(width=widget_width, padding=padding),
        )
        setattr(m, "inspector_class_label", inspector_label)
    label = m.inspector_class_label

    # Only layers registered in cog_layer_dict (COG/STAC/local tiles) can
    # be inspected.
    options = []
    if hasattr(m, "cog_layer_dict"):
        options = list(m.cog_layer_dict.keys())
        options.sort()
    if len(options) == 0:
        default_option = None
    else:
        default_option = options[0]
    if not hasattr(m, "inspector_dropdown"):
        inspector_dropdown = widgets.Dropdown(
            options=options,
            value=default_option,
            description="Select a layer:",
            layout=widgets.Layout(width=widget_width, padding=padding),
            style=style,
        )
        setattr(m, "inspector_dropdown", inspector_dropdown)
    dropdown = m.inspector_dropdown

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="info",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Download", "Reset", "Close"],
        tooltips=["Download", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"

    if len(options) == 0:
        with output:
            print("No COG/STAC layers available")

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        add_marker,
        label,
        dropdown,
        bands_chk,
        buttons,
        output,
    ]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def chk_change(change):
        # Toggling the bands checkbox invalidates collected values/markers.
        if hasattr(m, "pixel_values"):
            m.pixel_values = []
        if hasattr(m, "marker_cluster"):
            m.marker_cluster.markers = []
        output.clear_output()

    bands_chk.observe(chk_change, "value")

    def handle_toolbar_event(event):
        # Expand the panel on hover; collapse it when the pointer leaves
        # and the toolbar toggle is not pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        # Full teardown: remove control, markers layer, and per-map state.
        if change["new"]:
            toolbar_button.value = False
            if m is not None:
                if hasattr(m, "inspector_mode"):
                    delattr(m, "inspector_mode")
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                m.default_style = {"cursor": "default"}

                m.marker_cluster.markers = []
                m.pixel_values = []
                marker_cluster_layer = m.find_layer("Inspector Markers")
                if marker_cluster_layer is not None:
                    m.remove_layer(marker_cluster_layer)

                if hasattr(m, "pixel_values"):
                    delattr(m, "pixel_values")
                if hasattr(m, "marker_cluster"):
                    delattr(m, "marker_cluster")

            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Download":
            with output:
                output.clear_output()
                if len(m.pixel_values) == 0:
                    print(
                        "No pixel values available. Click on the map to start collection data."
                    )
                else:
                    print("Downloading pixel values...")
                    df = pd.DataFrame(m.pixel_values)
                    temp_csv = temp_file_path("csv")
                    df.to_csv(temp_csv, index=False)
                    link = create_download_link(temp_csv)
                    with output:
                        output.clear_output()
                        display(link)
        elif change["new"] == "Reset":
            label.value = ""
            output.clear_output()
            if hasattr(m, "pixel_values"):
                m.pixel_values = []
            if hasattr(m, "marker_cluster"):
                m.marker_cluster.markers = []
        elif change["new"] == "Close":
            if m is not None:
                if hasattr(m, "inspector_mode"):
                    delattr(m, "inspector_mode")
                m.toolbar_reset()
                if m.tool_control is not None and m.tool_control in m.controls:
                    m.remove_control(m.tool_control)
                    m.tool_control = None
                m.default_style = {"cursor": "default"}
                m.marker_cluster.markers = []
                marker_cluster_layer = m.find_layer("Inspector Markers")
                if marker_cluster_layer is not None:
                    m.remove_layer(marker_cluster_layer)
                m.pixel_values = []

                if hasattr(m, "pixel_values"):
                    delattr(m, "pixel_values")
                if hasattr(m, "marker_cluster"):
                    delattr(m, "marker_cluster")

            toolbar_widget.close()
        # Reset the toggle so the same action can be clicked again.
        buttons.value = None

    buttons.observe(button_clicked, "value")

    toolbar_button.value = True

    def handle_interaction(**kwargs):
        # Map-click handler: query the selected layer at the clicked
        # lat/lon and report the pixel value(s) in the output widget.
        latlon = kwargs.get("coordinates")
        lat = round(latlon[0], 4)
        lon = round(latlon[1], 4)
        if (
            kwargs.get("type") == "click"
            and hasattr(m, "inspector_mode")
            and m.inspector_mode
        ):
            m.default_style = {"cursor": "wait"}

            with output:
                output.clear_output()
                print("Getting pixel value ...")

            layer_dict = m.cog_layer_dict[dropdown.value]

            if layer_dict["type"] == "STAC":
                if bands_chk.value:
                    assets = layer_dict["assets"]
                else:
                    assets = None

                result = stac_pixel_value(
                    lon,
                    lat,
                    layer_dict["url"],
                    layer_dict["collection"],
                    layer_dict["items"],
                    assets,
                    layer_dict["titiler_endpoint"],
                    verbose=False,
                )
                if result is not None:
                    with output:
                        output.clear_output()
                        print(f"lat/lon: {lat:.4f}, {lon:.4f}\n")
                        for key in result:
                            print(f"{key}: {result[key]}")

                        result["latitude"] = lat
                        result["longitude"] = lon
                        result["label"] = label.value
                        m.pixel_values.append(result)
                    if add_marker.value:
                        # Replace the whole tuple: ipyleaflet does not
                        # notice in-place mutation of markers.
                        markers = list(m.marker_cluster.markers)
                        markers.append(ipyleaflet.Marker(location=latlon))
                        m.marker_cluster.markers = markers
                else:
                    with output:
                        output.clear_output()
                        print("No pixel value available")
                        bounds = m.cog_layer_dict[m.inspector_dropdown.value]["bounds"]
                        m.zoom_to_bounds(bounds)
            elif layer_dict["type"] == "COG":
                result = cog_pixel_value(lon, lat, layer_dict["url"], verbose=False)
                if result is not None:
                    with output:
                        output.clear_output()
                        print(f"lat/lon: {lat:.4f}, {lon:.4f}\n")
                        for key in result:
                            print(f"{key}: {result[key]}")

                        result["latitude"] = lat
                        result["longitude"] = lon
                        result["label"] = label.value
                        m.pixel_values.append(result)
                    if add_marker.value:
                        markers = list(m.marker_cluster.markers)
                        markers.append(ipyleaflet.Marker(location=latlon))
                        m.marker_cluster.markers = markers
                else:
                    with output:
                        output.clear_output()
                        print("No pixel value available")
                        bounds = m.cog_layer_dict[m.inspector_dropdown.value]["bounds"]
                        m.zoom_to_bounds(bounds)
            elif layer_dict["type"] == "LOCAL":
                result = local_tile_pixel_value(
                    lon, lat, layer_dict["tile_client"], verbose=False
                )
                if result is not None:
                    if m.inspector_bands_chk.value:
                        # Restrict the result to the currently visible bands.
                        band = m.cog_layer_dict[m.inspector_dropdown.value]["band"]
                        band_names = m.cog_layer_dict[m.inspector_dropdown.value][
                            "band_names"
                        ]
                        if band is not None:
                            sel_bands = [band_names[b - 1] for b in band]
                            result = {k: v for k, v in result.items() if k in sel_bands}
                    with output:
                        output.clear_output()
                        print(f"lat/lon: {lat:.4f}, {lon:.4f}\n")
                        for key in result:
                            print(f"{key}: {result[key]}")

                        result["latitude"] = lat
                        result["longitude"] = lon
                        result["label"] = label.value
                        m.pixel_values.append(result)
                    if add_marker.value:
                        markers = list(m.marker_cluster.markers)
                        markers.append(ipyleaflet.Marker(location=latlon))
                        m.marker_cluster.markers = markers
                else:
                    with output:
                        output.clear_output()
                        print("No pixel value available")
                        bounds = m.cog_layer_dict[m.inspector_dropdown.value]["bounds"]
                        m.zoom_to_bounds(bounds)

            m.default_style = {"cursor": "crosshair"}

    if m is not None:
        if not hasattr(m, "marker_cluster"):
            setattr(m, "marker_cluster", marker_cluster)
        m.add_layer(marker_cluster)

        # Register the click handler only once per map.
        if not m.interact_mode:
            m.on_interaction(handle_interaction)
            m.interact_mode = True

    if m is not None:
        toolbar_control = ipyleaflet.WidgetControl(
            widget=toolbar_widget, position="topright"
        )

        if toolbar_control not in m.controls:
            m.add_control(toolbar_control)
            m.tool_control = toolbar_control

        # inspector_mode gates handle_interaction; it is only enabled when
        # there are inspectable layers.
        if not hasattr(m, "inspector_mode"):
            if hasattr(m, "cog_layer_dict"):
                setattr(m, "inspector_mode", True)
            else:
                setattr(m, "inspector_mode", False)

    else:
        return toolbar_widget
def plotly_toolbar(
    canvas,
):
    """Creates the main toolbar and adds it to the map.

    Args:
        canvas (plotlymap.Canvas): The canvas wrapping the plotly Map object
            (exposes .map, .map_widget, .container_widget, width settings).
    """
    m = canvas.map
    map_min_width = canvas.map_min_width
    map_max_width = canvas.map_max_width
    map_refresh = canvas.map_refresh
    map_widget = canvas.map_widget

    if not map_refresh:
        # Without forced refreshes the map cannot shrink below 90% reliably.
        width = int(map_min_width.replace("%", ""))
        if width > 90:
            map_min_width = "90%"

    # Maps FontAwesome icon name -> tool id and tooltip.
    tools = {
        "map": {
            "name": "basemap",
            "tooltip": "Change basemap",
        },
        "search": {
            "name": "search_xyz",
            "tooltip": "Search XYZ tile services",
        },
        "gears": {
            "name": "whitebox",
            "tooltip": "WhiteboxTools for local geoprocessing",
        },
        "folder-open": {
            "name": "vector",
            "tooltip": "Open local vector/raster data",
        },
        "picture-o": {
            "name": "raster",
            "tooltip": "Open COG/STAC dataset",
        },
        "question": {
            "name": "help",
            "tooltip": "Get help",
        },
    }

    icons = list(tools.keys())
    tooltips = [item["tooltip"] for item in list(tools.values())]

    icon_width = "32px"
    icon_height = "32px"
    n_cols = 3
    n_rows = math.ceil(len(icons) / n_cols)

    toolbar_grid = widgets.GridBox(
        children=[
            widgets.ToggleButton(
                layout=widgets.Layout(
                    width="auto", height="auto", padding="0px 0px 0px 4px"
                ),
                button_style="primary",
                icon=icons[i],
                tooltip=tooltips[i],
            )
            for i in range(len(icons))
        ],
        layout=widgets.Layout(
            width="115px",
            grid_template_columns=(icon_width + " ") * n_cols,
            grid_template_rows=(icon_height + " ") * n_rows,
            grid_gap="1px 1px",
            padding="5px",
        ),
    )
    canvas.toolbar = toolbar_grid

    def tool_callback(change):
        # Enforce radio-button behavior (one tool active at a time) and
        # dispatch to the matching tool GUI.
        if change["new"]:
            current_tool = change["owner"]
            for tool in toolbar_grid.children:
                if tool is not current_tool:
                    tool.value = False
            tool = change["owner"]
            tool_name = tools[tool.icon]["name"]
            canvas.container_widget.children = []

            if tool_name == "basemap":
                plotly_basemap_gui(canvas)
            elif tool_name == "search_xyz":
                plotly_search_basemaps(canvas)
            elif tool_name == "whitebox":
                plotly_whitebox_gui(canvas)
            elif tool_name == "vector":
                plotly_tool_template(canvas)
            elif tool_name == "raster":
                plotly_tool_template(canvas)
            elif tool_name == "help":
                import webbrowser

                webbrowser.open_new_tab("https://leafmap.org")
                tool.value = False
        else:
            canvas.container_widget.children = []
            map_widget.layout.width = map_max_width

    for tool in toolbar_grid.children:
        tool.observe(tool_callback, "value")

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="wrench",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    canvas.toolbar_button = toolbar_button

    layers_button = widgets.ToggleButton(
        value=False,
        tooltip="Layers",
        icon="server",
        layout=widgets.Layout(height="28px", width="72px"),
    )
    canvas.layers_button = layers_button

    toolbar_widget = widgets.VBox(layout=widgets.Layout(overflow="hidden"))
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox(layout=widgets.Layout(overflow="hidden"))
    toolbar_header.children = [layers_button, toolbar_button]
    toolbar_footer = widgets.VBox(layout=widgets.Layout(overflow="hidden"))
    toolbar_footer.children = [toolbar_grid]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand the toolbar on hover; collapse on leave unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
            # map_widget.layout.width = "85%"
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                layers_button.value = False
                # map_widget.layout.width = map_max_width

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            map_widget.layout.width = map_min_width
            if map_refresh:
                # Some environments need an explicit re-display for the
                # plotly figure to pick up the new width.
                with map_widget:
                    map_widget.clear_output()
                    display(m)
            layers_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            canvas.toolbar_reset()
            map_widget.layout.width = map_max_width
            if not layers_button.value:
                toolbar_widget.children = [toolbar_button]
            if map_refresh:
                with map_widget:
                    map_widget.clear_output()
                    display(m)

    toolbar_button.observe(toolbar_btn_click, "value")

    def layers_btn_click(change):
        # Build the layer-manager panel on demand from the map's current
        # tile and data layers.
        if change["new"]:
            layer_names = list(m.get_layers().keys())
            layers_hbox = []
            all_layers_chk = widgets.Checkbox(
                value=True,
                description="All layers on/off",
                indent=False,
                layout=widgets.Layout(height="18px", padding="0px 8px 25px 8px"),
            )
            all_layers_chk.layout.width = "30ex"
            layers_hbox.append(all_layers_chk)

            layer_chk_dict = {}

            for name in layer_names:
                if name in m.get_tile_layers():
                    index = m.find_layer_index(name)
                    layer = m.layout.mapbox.layers[index]
                elif name in m.get_data_layers():
                    index = m.find_layer_index(name)
                    layer = m.data[index]

                layer_chk = widgets.Checkbox(
                    value=layer.visible,
                    description=name,
                    indent=False,
                    layout=widgets.Layout(height="18px"),
                )
                layer_chk.layout.width = "25ex"
                layer_chk_dict[name] = layer_chk

                if hasattr(layer, "opacity"):
                    opacity = layer.opacity
                elif hasattr(layer, "marker"):
                    # Scatter-style data layers keep opacity on the marker.
                    opacity = layer.marker.opacity
                else:
                    opacity = 1.0

                layer_opacity = widgets.FloatSlider(
                    value=opacity,
                    description_tooltip=name,
                    min=0,
                    max=1,
                    step=0.01,
                    readout=False,
                    layout=widgets.Layout(width="80px"),
                )

                layer_settings = widgets.ToggleButton(
                    icon="gear",
                    tooltip=name,
                    layout=widgets.Layout(
                        width="25px", height="25px", padding="0px 0px 0px 5px"
                    ),
                )

                def layer_chk_change(change):
                    # change["owner"].description carries the layer name.
                    if change["new"]:
                        m.set_layer_visibility(change["owner"].description, True)
                    else:
                        m.set_layer_visibility(change["owner"].description, False)

                layer_chk.observe(layer_chk_change, "value")

                def layer_opacity_change(change):
                    if change["new"]:
                        m.set_layer_opacity(
                            change["owner"].description_tooltip, change["new"]
                        )

                layer_opacity.observe(layer_opacity_change, "value")

                hbox = widgets.HBox(
                    [layer_chk, layer_settings, layer_opacity],
                    layout=widgets.Layout(padding="0px 8px 0px 8px"),
                )
                layers_hbox.append(hbox)

            def all_layers_chk_changed(change):
                if change["new"]:
                    for name in layer_names:
                        m.set_layer_visibility(name, True)
                        layer_chk_dict[name].value = True
                else:
                    for name in layer_names:
                        m.set_layer_visibility(name, False)
                        layer_chk_dict[name].value = False

            all_layers_chk.observe(all_layers_chk_changed, "value")

            toolbar_footer.children = layers_hbox
            toolbar_button.value = False
        else:
            toolbar_footer.children = [toolbar_grid]

    layers_button.observe(layers_btn_click, "value")

    return toolbar_widget
def plotly_tool_template(canvas):
    """Placeholder tool panel for the plotly toolbar.

    Shrinks the map, shows a collapsible panel whose only content is a
    "To be implemented" message, and installs the panel into the canvas
    container widget.

    Args:
        canvas (plotlymap.Canvas): The canvas hosting the plotly map.
    """
    container_widget = canvas.container_widget
    map_widget = canvas.map_widget
    map_width = "70%"
    map_widget.layout.width = map_width

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    open_btn = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="gears",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )
    dismiss_btn = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))
    with output:
        print("To be implemented")

    panel = widgets.VBox(children=[open_btn])
    header = widgets.HBox(children=[dismiss_btn, open_btn])
    footer = widgets.VBox(children=[output])

    hover_events = ipyevents.Event(
        source=panel, watched_events=["mouseenter", "mouseleave"]
    )

    def _on_hover(event):
        # Expand on hover; collapse and restore the map width on leave
        # unless the panel has been pinned open.
        if event["type"] == "mouseenter":
            panel.children = [header, footer]
            map_widget.layout.width = map_width
        elif event["type"] == "mouseleave":
            if not open_btn.value:
                panel.children = [open_btn]
                open_btn.value = False
                dismiss_btn.value = False
                map_widget.layout.width = canvas.map_max_width

    hover_events.on_dom_event(_on_hover)

    def _on_toggle(change):
        if change["new"]:
            dismiss_btn.value = False
            panel.children = [header, footer]
            map_widget.layout.width = map_width
        else:
            if not dismiss_btn.value:
                panel.children = [open_btn]
                map_widget.layout.width = canvas.map_max_width

    open_btn.observe(_on_toggle, "value")

    def _on_close(change):
        if change["new"]:
            open_btn.value = False
            canvas.toolbar_reset()
            panel.close()

    dismiss_btn.observe(_on_close, "value")

    open_btn.value = True
    container_widget.children = [panel]
def plotly_basemap_gui(canvas, map_min_width="78%", map_max_width="98%"):
    """Widget for changing basemaps.

    Args:
        canvas (plotlymap.Canvas): The canvas hosting the plotly Map.
        map_min_width (str, optional): Map width while the widget is open.
        map_max_width (str, optional): Map width restored after closing.
    """
    from .plotlymap import plotly_basemaps

    m = canvas.map
    # Number of layers before any basemap is added; used to truncate back
    # to this baseline when switching basemaps.
    layer_count = len(m.layout.mapbox.layers)
    container_widget = canvas.container_widget
    map_widget = canvas.map_widget
    map_widget.layout.width = map_min_width

    value = "Stamen.Terrain"
    m.add_basemap(value)

    dropdown = widgets.Dropdown(
        options=list(plotly_basemaps.keys()),
        value=value,
        layout=widgets.Layout(width="200px"),
    )

    close_btn = widgets.Button(
        icon="times",
        tooltip="Close the basemap widget",
        button_style="primary",
        layout=widgets.Layout(width="32px"),
    )

    basemap_widget = widgets.HBox([dropdown, close_btn])
    container_widget.children = [basemap_widget]

    def on_click(change):
        basemap_name = change["new"]
        # Drop any previously added basemap layer(s), then add the new one.
        m.layout.mapbox.layers = m.layout.mapbox.layers[:layer_count]
        m.add_basemap(basemap_name)

    dropdown.observe(on_click, "value")

    def close_click(change):
        container_widget.children = []
        basemap_widget.close()
        map_widget.layout.width = map_max_width
        canvas.toolbar_reset()
        canvas.toolbar_button.value = False

    close_btn.on_click(close_click)
def plotly_search_basemaps(canvas):
    """The widget for search XYZ tile services.

    Args:
        canvas (plotlymap.Canvas): The canvas hosting the plotly Map object.

    Returns:
        ipywidgets: The tool GUI widget.
    """
    import xyzservices.providers as xyz
    from xyzservices import TileProvider

    m = canvas.map
    container_widget = canvas.container_widget
    map_widget = canvas.map_widget
    map_widget.layout.width = "75%"
    # map_widget.layout.width = map_min_width

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="search",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    checkbox = widgets.Checkbox(
        description="Search Quick Map Services (QMS)",
        indent=False,
        layout=widgets.Layout(padding=padding, width=widget_width),
    )

    providers = widgets.Dropdown(
        options=[],
        value=None,
        description="XYZ Tile:",
        layout=widgets.Layout(width=widget_width, padding=padding),
        style=style,
    )

    keyword = widgets.Text(
        value="",
        description="Search keyword:",
        placeholder="OpenStreetMap",
        style=style,
        layout=widgets.Layout(width=widget_width, padding=padding),
    )

    def search_callback(change):
        providers.options = []
        if keyword.value != "":
            tiles = search_xyz_services(keyword=keyword.value)
            if checkbox.value:
                tiles = tiles + search_qms(keyword=keyword.value)
            providers.options = tiles

    # NOTE(review): Text.on_submit is deprecated in newer ipywidgets in
    # favor of observing "value" / using Text.continuous_update — confirm
    # the ipywidgets version pinned by this project before changing.
    keyword.on_submit(search_callback)

    buttons = widgets.ToggleButtons(
        value=None,
        options=["Search", "Reset", "Close"],
        tooltips=["Search", "Reset", "Close"],
        button_style="primary",
    )
    buttons.style.button_width = "80px"

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    def providers_change(change):
        # Dropdown values are prefixed "qms:<name>" or "xyz:<name>";
        # provider[4:] strips the 4-character prefix.
        if change["new"] != "":
            provider = change["new"]
            if provider is not None:
                if provider.startswith("qms"):
                    with output:
                        output.clear_output()
                        print("Adding data. Please wait...")
                    name = provider[4:]
                    qms_provider = TileProvider.from_qms(name)
                    url = qms_provider.build_url()
                    attribution = qms_provider.attribution
                    m.add_tile_layer(url, name, attribution)
                    output.clear_output()
                elif provider.startswith("xyz"):
                    name = provider[4:]
                    xyz_provider = xyz.flatten()[name]
                    url = xyz_provider.build_url()
                    attribution = xyz_provider.attribution
                    if xyz_provider.requires_token():
                        with output:
                            output.clear_output()
                            print(f"{provider} requires an API Key.")
                    m.add_tile_layer(url, name, attribution)

    providers.observe(providers_change, "value")

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        checkbox,
        keyword,
        providers,
        buttons,
        output,
    ]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand on hover; collapse on leave unless pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        if change["new"]:
            toolbar_button.value = False
            canvas.toolbar_reset()
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    def button_clicked(change):
        if change["new"] == "Search":
            providers.options = []
            output.clear_output()
            if keyword.value != "":
                tiles = search_xyz_services(keyword=keyword.value)
                if checkbox.value:
                    tiles = tiles + search_qms(keyword=keyword.value)
                providers.options = tiles
            else:
                with output:
                    print("Please enter a search keyword.")
        elif change["new"] == "Reset":
            keyword.value = ""
            providers.options = []
            output.clear_output()
        elif change["new"] == "Close":
            canvas.toolbar_reset()
            toolbar_widget.close()
        # Reset the toggle so the same action can be clicked again.
        buttons.value = None

    buttons.observe(button_clicked, "value")

    toolbar_button.value = True
    container_widget.children = [toolbar_widget]
def plotly_whitebox_gui(canvas):
    """Builds the WhiteboxTools panel for the plotly toolbar.

    Embeds the whiteboxgui toolbox so WhiteboxTools geoprocessing can be
    run locally; the map is shrunk to make room for the wide panel.

    Args:
        canvas (plotlymap.Canvas): The canvas hosting the plotly map.
    """
    import whiteboxgui.whiteboxgui as wbt

    container_widget = canvas.container_widget
    map_widget = canvas.map_widget
    map_width = "25%"
    map_widget.layout.width = map_width

    widget_width = "250px"
    padding = "0px 0px 0px 5px"  # upper, right, bottom, left
    style = {"description_width": "initial"}

    toolbar_button = widgets.ToggleButton(
        value=False,
        tooltip="Toolbar",
        icon="gears",
        layout=widgets.Layout(width="28px", height="28px", padding="0px 0px 0px 4px"),
    )

    close_button = widgets.ToggleButton(
        value=False,
        tooltip="Close the tool",
        icon="times",
        button_style="primary",
        layout=widgets.Layout(height="28px", width="28px", padding="0px 0px 0px 4px"),
    )

    output = widgets.Output(layout=widgets.Layout(width=widget_width, padding=padding))

    # Build the full WhiteboxTools toolbox widget; file pickers are
    # sandboxed to the current working directory.
    tools_dict = wbt.get_wbt_dict()
    wbt_toolbox = wbt.build_toolbox(
        tools_dict,
        max_width="800px",
        max_height="500px",
        sandbox_path=os.getcwd(),
    )

    toolbar_widget = widgets.VBox()
    toolbar_widget.children = [toolbar_button]
    toolbar_header = widgets.HBox()
    toolbar_header.children = [close_button, toolbar_button]
    toolbar_footer = widgets.VBox()
    toolbar_footer.children = [
        wbt_toolbox,
        output,
    ]

    toolbar_event = ipyevents.Event(
        source=toolbar_widget, watched_events=["mouseenter", "mouseleave"]
    )

    def handle_toolbar_event(event):
        # Expand on hover; collapse and restore the map width on leave
        # unless the panel is pinned open.
        if event["type"] == "mouseenter":
            toolbar_widget.children = [toolbar_header, toolbar_footer]
            map_widget.layout.width = map_width
        elif event["type"] == "mouseleave":
            if not toolbar_button.value:
                toolbar_widget.children = [toolbar_button]
                toolbar_button.value = False
                close_button.value = False
                map_widget.layout.width = canvas.map_max_width

    toolbar_event.on_dom_event(handle_toolbar_event)

    def toolbar_btn_click(change):
        if change["new"]:
            close_button.value = False
            toolbar_widget.children = [toolbar_header, toolbar_footer]
            map_widget.layout.width = map_width
        else:
            if not close_button.value:
                toolbar_widget.children = [toolbar_button]
                map_widget.layout.width = canvas.map_max_width

    toolbar_button.observe(toolbar_btn_click, "value")

    def close_btn_click(change):
        if change["new"]:
            toolbar_button.value = False
            canvas.toolbar_reset()
            toolbar_widget.close()

    close_button.observe(close_btn_click, "value")

    toolbar_button.value = True
    container_widget.children = [toolbar_widget]
|
arrowhead_test.py
|
#!/usr/bin/env python3
import rospy
import threading
from arm_control.srv import *
def get_keyboard_input():
    """Read commands from stdin in a loop until the user types "quit".

    Intended as a background-thread target (see the commented-out
    threading code under ``__main__``). The recognized commands are
    currently placeholders that do nothing.
    """
    # Known commands; each is a no-op until wired to the arm controller.
    commands = ("follow", "forward", "left", "right", "back", "pickup")
    while True:
        inp = input("type something ")
        if inp in commands:
            pass  # TODO: dispatch the command to the arm controller
        print("key: " + inp)
        if inp == "quit":
            return
        # BUG FIX: the original ended each iteration with `self.n = int(inp)`,
        # which raised NameError (`self` is undefined in a module-level
        # function) on the first non-"quit" input, and would raise
        # ValueError for non-numeric input even inside a class. Nothing
        # consumed the value, so the line was removed.
def arrowhead_spoof(request):
    """Service handler that lets a human operator answer arrowhead requests.

    Prints the incoming request payload, prompts on stdin, and returns
    whatever the operator typed as the service response.
    """
    print(request)
    reply = input("arrowhead input requested. What to do? ")
    print("key: " + reply)
    return reply
if __name__ == '__main__':
    # Stand-alone spoof node: registers the 'ah_req' service so other nodes
    # can exercise the arrowhead workflow with a human typing the answers.
    rospy.init_node('arrowhead', anonymous=True)
    # The keyboard-input thread is disabled; the service handler prompts
    # on stdin directly instead.
    #x = threading.Thread(target=get_keyboard_input)
    #x.start()
    #x.join()
    rospy.Service('ah_req', ah_request, arrowhead_spoof)
    rospy.spin()
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import absolute_import, unicode_literals
import datetime
from decimal import Decimal
import threading
from django.conf import settings
from django.core.management.color import no_style
from django.db import (connection, connections, DEFAULT_DB_ALIAS,
DatabaseError, IntegrityError, transaction)
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.util import format_number
from django.db.models import Sum, Avg, Variance, StdDev
from django.db.utils import ConnectionHandler
from django.test import (TestCase, skipUnlessDBFeature, skipIfDBFeature,
TransactionTestCase)
from django.test.utils import override_settings, str_prefix
from django.utils import six, unittest
from django.utils.six.moves import xrange
from . import models
class DummyBackendTest(TestCase):
    def test_no_databases(self):
        """
        An empty DATABASES setting must fall back to the dummy backend.
        """
        handler = ConnectionHandler({})
        engine = handler[DEFAULT_DB_ALIAS].settings_dict['ENGINE']
        self.assertEqual(engine, 'django.db.backends.dummy')
class OracleChecks(unittest.TestCase):
    # Every test self-skips unless the active backend is Oracle, so the
    # class is safe to run under any configured database.

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle quote_name semantics")
    def test_quote_name(self):
        # Check that '%' chars are escaped for query execution.
        name = '"SOME%NAME"'
        quoted_name = connection.ops.quote_name(name)
        # quoted_name % () would raise if a bare '%' survived quoting.
        self.assertEqual(quoted_name % (), name)

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle cursor semantics")
    def test_dbms_session(self):
        # If the backend is Oracle, test that we can call a standard
        # stored procedure through our cursor wrapper.
        from django.db.backends.oracle.base import convert_unicode

        cursor = connection.cursor()
        cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
                        [convert_unicode('_django_testing!')])

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle cursor semantics")
    def test_cursor_var(self):
        # If the backend is Oracle, test that we can pass cursor variables
        # as query parameters.
        from django.db.backends.oracle.base import Database

        cursor = connection.cursor()
        var = cursor.var(Database.STRING)
        cursor.execute("BEGIN %s := 'X'; END; ", [var])
        self.assertEqual(var.getvalue(), 'X')

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle cursor semantics")
    def test_long_string(self):
        # If the backend is Oracle, test that we can save a text longer
        # than 4000 chars and read it properly
        c = connection.cursor()
        c.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
        long_str = ''.join([six.text_type(x) for x in xrange(4000)])
        c.execute('INSERT INTO ltext VALUES (%s)', [long_str])
        c.execute('SELECT text FROM ltext')
        row = c.fetchone()
        # NCLOB columns come back as LOB handles, hence .read().
        self.assertEqual(long_str, row[0].read())
        c.execute('DROP TABLE ltext')

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle connection semantics")
    def test_client_encoding(self):
        # If the backend is Oracle, test that the client encoding is set
        # correctly. This was broken under Cygwin prior to r14781.
        connection.cursor()  # Ensure the connection is initialized.
        self.assertEqual(connection.connection.encoding, "UTF-8")
        self.assertEqual(connection.connection.nencoding, "UTF-8")

    @unittest.skipUnless(connection.vendor == 'oracle',
                         "No need to check Oracle connection semantics")
    def test_order_of_nls_parameters(self):
        # an 'almost right' datetime should work with configured
        # NLS parameters as per #18465.
        c = connection.cursor()
        query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
        # Test that the query succeeds without errors - pre #18465 this
        # wasn't the case.
        c.execute(query)
        self.assertEqual(c.fetchone()[0], 1)
class MySQLTests(TestCase):
    @unittest.skipUnless(connection.vendor == 'mysql',
                         "Test valid only for MySQL")
    def test_autoincrement(self):
        """
        Check that auto_increment fields are reset correctly by sql_flush().
        Before MySQL version 5.0.13 TRUNCATE did not do auto_increment reset.
        Refs #16961.
        """
        statements = connection.ops.sql_flush(
            no_style(),
            tables=['test'],
            sequences=[{'table': 'test', 'col': 'somecol'}],
        )
        found_reset = any('ALTER TABLE' in sql for sql in statements)
        if connection.mysql_version < (5, 0, 13):
            # Old MySQL: an explicit ALTER TABLE is needed for the reset.
            self.assertTrue(found_reset)
        else:
            # Newer MySQL: TRUNCATE already resets auto_increment.
            self.assertFalse(found_reset)
class DateQuotingTest(TestCase):
    def test_django_date_trunc(self):
        """
        Test the custom ``django_date_trunc method``, in particular against
        fields which clash with strings passed to it (e.g. 'year') - see
        #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=updated)
        # The model has a field literally named "year"; the dates() lookup
        # must not confuse it with the 'year' truncation kind.
        years = models.SchoolClass.objects.dates('last_updated', 'year')
        self.assertEqual(list(years), [datetime.date(2010, 1, 1)])

    def test_django_date_extract(self):
        """
        Test the custom ``django_date_extract method``, in particular against fields
        which clash with strings passed to it (e.g. 'day') - see #12818__.

        __: http://code.djangoproject.com/ticket/12818
        """
        updated = datetime.datetime(2010, 2, 20)
        models.SchoolClass.objects.create(year=2009, last_updated=updated)
        classes = models.SchoolClass.objects.filter(last_updated__day=20)
        self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
    # DEBUG=True is required so executed queries are recorded in
    # connection.queries.

    def test_debug_sql(self):
        list(models.Reporter.objects.filter(first_name="test"))
        sql = connection.queries[-1]['sql'].lower()
        self.assertIn("select", sql)
        self.assertIn(models.Reporter._meta.db_table, sql)

    def test_query_encoding(self):
        """
        Test that last_executed_query() returns an Unicode string
        """
        # Mixes non-ASCII bytes in params with a non-ASCII alias to stress
        # the backend's query reconstruction.
        persons = models.Reporter.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
        sql, params = persons.query.sql_with_params()
        cursor = persons.query.get_compiler('default').execute_sql(None)
        last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
        self.assertIsInstance(last_sql, six.text_type)

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This test is specific to SQLite.")
    def test_no_interpolation_on_sqlite(self):
        # Regression for #17158
        # This shouldn't raise an exception
        query = "SELECT strftime('%Y', 'now');"
        connection.cursor().execute(query)
        self.assertEqual(connection.queries[-1]['sql'],
                         str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):

    def test_bad_parameter_count(self):
        "An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
        cursor = connection.cursor()
        table = connection.introspection.table_name_converter('backends_square')
        root_col = connection.ops.quote_name('root')
        square_col = connection.ops.quote_name('square')
        query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (table, root_col, square_col)
        # Three values for two placeholders...
        with self.assertRaises(Exception):
            cursor.executemany(query, [(1, 2, 3)])
        # ...and one value for two placeholders must both fail.
        with self.assertRaises(Exception):
            cursor.executemany(query, [(1,)])
# Unfortunately, the following tests would be a good test to run on all
# backends, but it breaks MySQL hard. Until #13711 is fixed, it can't be run
# everywhere (although it would be an effective test of #13711).
class LongNameTest(TestCase):
    """Long primary keys and model names can result in a sequence name
    that exceeds the database limits, which will result in truncation
    on certain databases (e.g., Postgres). The backend needs to use
    the correct sequence name in last_insert_id and other places, so
    check it is. Refs #8901.
    """

    @skipUnlessDBFeature('supports_long_model_names')
    def test_sequence_name_length_limits_create(self):
        """Creating a model with a long name and long pk name must not error. Refs #8901."""
        models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()

    @skipUnlessDBFeature('supports_long_model_names')
    def test_sequence_name_length_limits_m2m(self):
        """An m2m add on a model with a long name and a long m2m field name must not error, as on Django >=1.2 this now uses object saves. Refs #8901."""
        long_named = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
        person = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
        long_named.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(person)

    @skipUnlessDBFeature('supports_long_model_names')
    def test_sequence_name_length_limits_flush(self):
        """Sequence resetting during a flush of long-named models must not error. Refs #8901."""
        # A full flush is expensive to the full test, so we dig into the
        # internals to generate the likely offending SQL and run it manually.
        long_model = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
        m2m_through = long_model.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
        tables = [long_model._meta.db_table, m2m_through._meta.db_table]
        sequences = [{'column': long_model._meta.pk.column, 'table': long_model._meta.db_table}]
        cursor = connection.cursor()
        for statement in connection.ops.sql_flush(no_style(), tables, sequences):
            cursor.execute(statement)
class SequenceResetTest(TestCase):

    def test_generic_relation(self):
        "Sequence names are correct when resetting generic relations (Ref #13941)"
        # Create an object with a manually specified PK...
        models.Post.objects.create(id=10, name='1st post', text='hello world')
        # ...then reset the sequences for the database.
        cursor = connection.cursor()
        reset_sql = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
        for statement in reset_sql:
            cursor.execute(statement)
        # A freshly created object must now receive a PK greater than the
        # one specified manually above.
        post = models.Post.objects.create(name='New post', text='goodbye world')
        self.assertTrue(post.pk > 10)
class PostgresVersionTest(TestCase):

    def assert_parses(self, version_string, version):
        self.assertEqual(pg_version._parse_version(version_string), version)

    def test_parsing(self):
        """Test PostgreSQL version parsing from `SELECT version()` output"""
        cases = [
            ("PostgreSQL 8.3 beta4", 80300),
            ("PostgreSQL 8.3", 80300),
            ("EnterpriseDB 8.3", 80300),
            ("PostgreSQL 8.3.6", 80306),
            ("PostgreSQL 8.4beta1", 80400),
            ("PostgreSQL 8.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 80301),
        ]
        for version_string, expected in cases:
            self.assert_parses(version_string, expected)

    def test_version_detection(self):
        """Test PostgreSQL version detection"""

        class CursorMock(object):
            "Very simple mock of DB-API cursor"
            def execute(self, arg):
                pass

            def fetchone(self):
                return ["PostgreSQL 8.3"]

        class OlderConnectionMock(object):
            "Mock of psycopg2 (< 2.0.12) connection"
            def cursor(self):
                return CursorMock()

        # Exercise the psycopg2 < 2.0.12 code path.
        conn = OlderConnectionMock()
        self.assertEqual(pg_version.get_version(conn), 80300)
class PostgresNewConnectionTest(TestCase):
    """
    #17062: PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
    transaction is rolled back.
    """

    @unittest.skipUnless(
        connection.vendor == 'postgresql',
        "This test applies only to PostgreSQL")
    def test_connect_and_rollback(self):
        handler = ConnectionHandler(settings.DATABASES)
        fresh_connection = handler[DEFAULT_DB_ALIAS]
        try:
            # Ensure the database default time zone differs from the one in
            # fresh_connection.settings_dict; the default is obtained via
            # RESET followed by SHOW.
            cursor = fresh_connection.cursor()
            cursor.execute("RESET TIMEZONE")
            cursor.execute("SHOW TIMEZONE")
            db_default_tz = cursor.fetchone()[0]
            new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
            fresh_connection.close()

            # Open a new connection with new_tz as the default time zone,
            # run a query, and roll back.
            fresh_connection.settings_dict['TIME_ZONE'] = new_tz
            fresh_connection.enter_transaction_management()
            cursor = fresh_connection.cursor()
            fresh_connection.rollback()

            # The rollback must not have rolled back the SET TIME ZONE.
            cursor.execute("SHOW TIMEZONE")
            self.assertEqual(new_tz, cursor.fetchone()[0])
        finally:
            try:
                fresh_connection.close()
            except DatabaseError:
                pass
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):

    # Unfortunately with sqlite3 the in-memory test database cannot be closed,
    # and so it cannot be re-opened during testing.
    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_signal(self):
        seen = {}

        # NOTE: the 'connection' parameter name is part of the signal's
        # keyword-argument contract and must not be renamed.
        def receiver(sender, connection, **kwargs):
            seen["connection"] = connection

        connection_created.connect(receiver)
        connection.close()
        connection.cursor()
        self.assertTrue(seen["connection"].connection is connection.connection)

        # After disconnecting, reopening must no longer fire the receiver.
        connection_created.disconnect(receiver)
        seen.clear()
        connection.cursor()
        self.assertTrue(seen == {})
class EscapingChecks(TestCase):
    """
    All tests in this test case are also run with settings.DEBUG=True in
    EscapingChecksDebug test case, to also test CursorDebugWrapper.
    """

    # For Oracle, when you want to select a value, you need to specify the
    # special pseudo-table 'dual'; a select with no from clause is invalid.
    bare_select_suffix = " FROM DUAL" if connection.vendor == 'oracle' else ""

    def test_paramless_no_escaping(self):
        # A literal '%s' with no params must pass through unmodified.
        cursor = connection.cursor()
        cursor.execute("SELECT '%s'" + self.bare_select_suffix)
        self.assertEqual(cursor.fetchall()[0][0], '%s')

    def test_parameter_escaping(self):
        # '%%' collapses to '%' while the bound parameter stays verbatim.
        cursor = connection.cursor()
        cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
        self.assertEqual(cursor.fetchall()[0], ('%', '%d'))

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "This is a sqlite-specific issue")
    def test_sqlite_parameter_escaping(self):
        # #13648: '%s' escaping support for sqlite3
        cursor = connection.cursor()
        cursor.execute("select strftime('%s', date('now'))")
        value = cursor.fetchall()[0][0]
        # The result should be a non-zero integer (a unix timestamp).
        self.assertTrue(int(value))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
    # Re-runs every EscapingChecks test with DEBUG=True so that
    # CursorDebugWrapper is exercised as well; no extra tests needed.
    pass
class SqlliteAggregationTests(TestCase):
    """
    #19360: Raise NotImplementedError when aggregating on date/time fields.
    """

    @unittest.skipUnless(connection.vendor == 'sqlite',
                         "No need to check SQLite aggregation semantics")
    def test_aggregation(self):
        # Every aggregate x every temporal field must refuse to run.
        for aggregate in (Sum, Avg, Variance, StdDev):
            for field_name in ('time', 'date', 'last_modified'):
                with self.assertRaises(NotImplementedError):
                    models.Item.objects.all().aggregate(aggregate(field_name))
class BackendTestCase(TestCase):

    def create_squares_with_executemany(self, args):
        """Insert (root, square) rows into backends_square via executemany."""
        cursor = connection.cursor()
        opts = models.Square._meta
        table = connection.introspection.table_name_converter(opts.db_table)
        root_col = connection.ops.quote_name(opts.get_field('root').column)
        square_col = connection.ops.quote_name(opts.get_field('square').column)
        query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (table, root_col, square_col)
        cursor.executemany(query, args)

    def test_cursor_executemany(self):
        # #4896: Test cursor.executemany
        rows = [(i, i ** 2) for i in range(-5, 6)]
        self.create_squares_with_executemany(rows)
        self.assertEqual(models.Square.objects.count(), 11)
        for i in range(-5, 6):
            square = models.Square.objects.get(root=i)
            self.assertEqual(square.square, i ** 2)

    def test_cursor_executemany_with_empty_params_list(self):
        # #4765: executemany with params=[] does nothing
        self.create_squares_with_executemany([])
        self.assertEqual(models.Square.objects.count(), 0)

    def test_cursor_executemany_with_iterator(self):
        # #10320: executemany accepts iterators
        self.create_squares_with_executemany(iter((i, i ** 2) for i in range(-3, 2)))
        self.assertEqual(models.Square.objects.count(), 5)
        with override_settings(DEBUG=True):
            # Same check for DebugCursorWrapper.
            self.create_squares_with_executemany(iter((i, i ** 2) for i in range(3, 7)))
        self.assertEqual(models.Square.objects.count(), 9)

    def test_unicode_fetches(self):
        # #6254: fetchone, fetchmany, fetchall return strings as unicode objects
        qn = connection.ops.quote_name
        for first, last in [("John", "Doe"), ("Jane", "Doe"),
                            ("Mary", "Agnelline"), ("Peter", "Parker"),
                            ("Clark", "Kent")]:
            models.Person(first_name=first, last_name=last).save()
        opts = models.Person._meta
        first_field = opts.get_field('first_name')
        last_field = opts.get_field('last_name')
        table = connection.introspection.table_name_converter(opts.db_table)
        query = ('SELECT %s, %s FROM %s ORDER BY %s'
                 % (qn(first_field.column), qn(last_field.column), table,
                    qn(first_field.column)))
        cursor = connection.cursor()
        cursor.execute(query)
        self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
        self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
        self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])

    def test_unicode_password(self):
        old_password = connection.settings_dict['PASSWORD']
        connection.settings_dict['PASSWORD'] = "françois"
        try:
            connection.cursor()
        except DatabaseError:
            # As the password is probably wrong, a database exception is expected.
            pass
        except Exception as exc:
            self.fail("Unexpected error raised with unicode password: %s" % exc)
        finally:
            connection.settings_dict['PASSWORD'] = old_password

    def test_database_operations_helper_class(self):
        # Ticket #13630
        self.assertTrue(hasattr(connection, 'ops'))
        self.assertTrue(hasattr(connection.ops, 'connection'))
        self.assertEqual(connection, connection.ops.connection)

    def test_cached_db_features(self):
        self.assertIn(connection.features.supports_transactions, (True, False))
        self.assertIn(connection.features.supports_stddev, (True, False))
        self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))

    def test_duplicate_table_error(self):
        """Creating an already-existing table must raise a DatabaseError."""
        cursor = connection.cursor()
        query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
        with self.assertRaises(DatabaseError):
            cursor.execute(query)
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
#   on or not, something that would be controlled by runtime support and user
#   preference.
# Instead, tests that hit a backend without FK enforcement skip themselves.
class FkConstraintsTests(TransactionTestCase):

    def setUp(self):
        # Create a Reporter for articles to reference.
        self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')

    def test_integrity_checks_on_creation(self):
        """
        Try to create a model instance that violates a FK constraint. If it
        fails it should fail with IntegrityError.
        """
        article = models.Article(headline="This is a test",
                                 pub_date=datetime.datetime(2005, 7, 27),
                                 reporter_id=30)
        try:
            article.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")

    def test_integrity_checks_on_update(self):
        """
        Try to update a model instance introducing a FK constraint violation.
        If it fails it should fail with IntegrityError.
        """
        # Create an Article, fetch it back, then point it at a missing Reporter.
        models.Article.objects.create(headline="Test article",
                                      pub_date=datetime.datetime(2010, 9, 4),
                                      reporter=self.r)
        article = models.Article.objects.get(headline="Test article")
        article.reporter_id = 30
        try:
            article.save()
        except IntegrityError:
            pass
        else:
            self.skipTest("This backend does not support integrity checks.")

    def test_disable_constraint_checks_manually(self):
        """
        When constraint checks are disabled, should be able to write bad data
        without IntegrityErrors.
        """
        transaction.set_autocommit(False)
        try:
            models.Article.objects.create(headline="Test article",
                                          pub_date=datetime.datetime(2010, 9, 4),
                                          reporter=self.r)
            article = models.Article.objects.get(headline="Test article")
            article.reporter_id = 30
            try:
                connection.disable_constraint_checking()
                article.save()
                connection.enable_constraint_checking()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            finally:
                transaction.rollback()
        finally:
            transaction.set_autocommit(True)

    def test_disable_constraint_checks_context_manager(self):
        """
        When constraint checks are disabled (using context manager), should be
        able to write bad data without IntegrityErrors.
        """
        transaction.set_autocommit(False)
        try:
            models.Article.objects.create(headline="Test article",
                                          pub_date=datetime.datetime(2010, 9, 4),
                                          reporter=self.r)
            article = models.Article.objects.get(headline="Test article")
            article.reporter_id = 30
            try:
                with connection.constraint_checks_disabled():
                    article.save()
            except IntegrityError:
                self.fail("IntegrityError should not have occurred.")
            finally:
                transaction.rollback()
        finally:
            transaction.set_autocommit(True)

    def test_check_constraints(self):
        """
        Constraint checks should raise an IntegrityError when bad data is in
        the DB.
        """
        try:
            transaction.set_autocommit(False)
            models.Article.objects.create(headline="Test article",
                                          pub_date=datetime.datetime(2010, 9, 4),
                                          reporter=self.r)
            article = models.Article.objects.get(headline="Test article")
            article.reporter_id = 30
            try:
                with connection.constraint_checks_disabled():
                    article.save()
                with self.assertRaises(IntegrityError):
                    connection.check_constraints()
            finally:
                transaction.rollback()
        finally:
            transaction.set_autocommit(True)
class ThreadTests(TestCase):
    """Connection/thread-affinity behavior of django.db connections (refs #17258)."""

    def test_default_connection_thread_local(self):
        """
        Ensure that the default connection (i.e. django.db.connection) is
        different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        connection.cursor()
        connections_dict[id(connection)] = connection

        def runner():
            # Passing django.db.connection between threads doesn't work while
            # connections[DEFAULT_DB_ALIAS] does.
            from django.db import connections
            connection = connections[DEFAULT_DB_ALIAS]
            # Allow thread sharing so the connection can be closed by the
            # main thread.
            connection.allow_thread_sharing = True
            connection.cursor()
            connections_dict[id(connection)] = connection
        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # Check that each created connection got different inner connection:
        # the main thread's plus one per worker thread = 3 distinct objects.
        self.assertEqual(
            len(set(conn.connection for conn in connections_dict.values())),
            3)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_connections_thread_local(self):
        """
        Ensure that the connections are different for each thread.
        Refs #17258.
        """
        # Map connections by id because connections with identical aliases
        # have the same hash.
        connections_dict = {}
        for conn in connections.all():
            connections_dict[id(conn)] = conn

        def runner():
            from django.db import connections
            for conn in connections.all():
                # Allow thread sharing so the connection can be closed by the
                # main thread.
                conn.allow_thread_sharing = True
                connections_dict[id(conn)] = conn
        for x in range(2):
            t = threading.Thread(target=runner)
            t.start()
            t.join()
        # NOTE(review): 6 == 3 threads (main + 2 workers) x 2 configured
        # aliases — confirm against the test-suite DATABASES settings.
        self.assertEqual(len(connections_dict), 6)
        # Finish by closing the connections opened by the other threads (the
        # connection opened in the main thread will automatically be closed on
        # teardown).
        for conn in connections_dict.values():
            if conn is not connection:
                conn.close()

    def test_pass_connection_between_threads(self):
        """
        Ensure that a connection can be passed from one thread to the other.
        Refs #17258.
        """
        models.Person.objects.create(first_name="John", last_name="Doe")

        def do_thread():
            def runner(main_thread_connection):
                from django.db import connections
                connections['default'] = main_thread_connection
                try:
                    models.Person.objects.get(first_name="John", last_name="Doe")
                except Exception as e:
                    exceptions.append(e)
            t = threading.Thread(target=runner, args=[connections['default']])
            t.start()
            t.join()
        # Without touching allow_thread_sharing, which should be False by default.
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)
        # If explicitly setting allow_thread_sharing to False
        connections['default'].allow_thread_sharing = False
        exceptions = []
        do_thread()
        # Forbidden!
        self.assertIsInstance(exceptions[0], DatabaseError)
        # If explicitly setting allow_thread_sharing to True
        connections['default'].allow_thread_sharing = True
        exceptions = []
        do_thread()
        # All good
        self.assertEqual(exceptions, [])

    def test_closing_non_shared_connections(self):
        """
        Ensure that a connection that is not explicitly shareable cannot be
        closed by another thread.
        Refs #17258.
        """
        # First, without explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()
        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # The exception was raised
        self.assertEqual(len(exceptions), 1)
        # Then, with explicitly enabling the connection for sharing.
        exceptions = set()

        def runner1():
            def runner2(other_thread_connection):
                try:
                    other_thread_connection.close()
                except DatabaseError as e:
                    exceptions.add(e)
            # Enable thread sharing
            connections['default'].allow_thread_sharing = True
            t2 = threading.Thread(target=runner2, args=[connections['default']])
            t2.start()
            t2.join()
        t1 = threading.Thread(target=runner1)
        t1.start()
        t1.join()
        # No exception was raised
        self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
    """
    Zero as id for AutoField should raise exception in MySQL, because MySQL
    does not allow zero for automatic primary key.
    """

    @skipIfDBFeature('allows_primary_key_0')
    def test_zero_as_autoval(self):
        # On backends that forbid pk=0, the ORM itself raises ValueError
        # before the query reaches the database.
        with self.assertRaises(ValueError):
            models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TransactionTestCase):
    """Behavior of FK references when DB-level constraints may be absent."""

    def test_can_reference_existant(self):
        target = models.Object.objects.create()
        ref = models.ObjectReference.objects.create(obj=target)
        self.assertEqual(ref.obj, target)
        ref = models.ObjectReference.objects.get(obj=target)
        self.assertEqual(ref.obj, target)

    def test_can_reference_non_existant(self):
        # A reference row pointing at a missing Object can be written...
        self.assertFalse(models.Object.objects.filter(id=12345).exists())
        ref = models.ObjectReference.objects.create(obj_id=12345)
        ref_new = models.ObjectReference.objects.get(obj_id=12345)
        self.assertEqual(ref, ref_new)
        # ...but dereferencing it raises DoesNotExist.
        with self.assertRaises(models.Object.DoesNotExist):
            ref.obj

    def test_many_to_many(self):
        target = models.Object.objects.create()
        target.related_objects.create()
        self.assertEqual(models.Object.objects.count(), 2)
        self.assertEqual(target.related_objects.count(), 1)
        # Insert a through-table row that points at a missing Object.
        through_model = models.Object._meta.get_field_by_name("related_objects")[0].rel.through
        through_model.objects.create(from_object_id=target.id, to_object_id=12345)
        self.assertEqual(target.related_objects.count(), 1)
        self.assertEqual(through_model.objects.count(), 2)
class BackendUtilTests(TestCase):

    def test_format_number(self):
        """
        Test the format_number converter utility
        """
        # (value, max_digits, decimal_places, expected)
        cases = [
            ('0', 12, 3, '0.000'),
            ('0', 12, 8, '0.00000000'),
            ('1', 12, 9, '1.000000000'),
            ('0.00000000', 12, 8, '0.00000000'),
            ('0.000000004', 12, 8, '0.00000000'),
            ('0.000000008', 12, 8, '0.00000001'),
            ('0.000000000000000000999', 10, 8, '0.00000000'),
            ('0.1234567890', 12, 10, '0.1234567890'),
            ('0.1234567890', 12, 9, '0.123456789'),
            ('0.1234567890', 12, 8, '0.12345679'),
            ('0.1234567890', 12, 5, '0.12346'),
            ('0.1234567890', 12, 3, '0.123'),
            ('0.1234567890', 12, 1, '0.1'),
            ('0.1234567890', 12, 0, '0'),
        ]
        for value, max_digits, places, expected in cases:
            self.assertEqual(format_number(Decimal(value), max_digits, places),
                             expected)
|
simulation_1.py
|
'''
Created on Oct 12, 2016
@author: mwittie
'''
import network_1 as network
import link_1 as link
import threading
from time import sleep
## configuration parameters
router_queue_size = 0  # 0 means unlimited
simulation_time = 2  # seconds to let the network drain before shutting down

if __name__ == '__main__':
    object_L = []  # keeps track of objects, so we can kill their threads

    # Build the network nodes: one client host, one server host, one router.
    client = network.Host(1)
    object_L.append(client)
    server = network.Host(2)
    object_L.append(server)
    router_a = network.Router(name='A', intf_count=1, max_queue_size=router_queue_size)
    object_L.append(router_a)

    # The link layer keeps track of links between network nodes.
    link_layer = link.LinkLayer()
    object_L.append(link_layer)

    # Wire everything up.
    # Link parameters: from_node, from_intf_num, to_node, to_intf_num, mtu
    link_layer.add_link(link.Link(client, 0, router_a, 0, 50))
    link_layer.add_link(link.Link(router_a, 0, server, 0, 50))

    # One worker thread per node, plus one for the link layer itself.
    thread_L = [
        threading.Thread(name=str(client), target=client.run),
        threading.Thread(name=str(server), target=server.run),
        threading.Thread(name=str(router_a), target=router_a.run),
        threading.Thread(name="Network", target=link_layer.run),
    ]
    for worker in thread_L:
        worker.start()

    # Send the sample message from the client to host address 2 (the server).
    sampledata = 'Far far away, behind the word mountains, far from the countries Vokalia.........'
    print("Length of sample data:", len(sampledata))
    for _ in range(1):  # here we configure the message count
        client.udt_send(2, sampledata)

    # Give the network sufficient time to transfer all packets before quitting.
    sleep(simulation_time)

    # Signal every object to stop, then join all worker threads.
    for o in object_L:
        o.stop = True
    for worker in thread_L:
        worker.join()

    print("All simulation threads joined")
# writes to host periodically
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
import time
import ast
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from six.moves.urllib.request import urlopen # pylint: disable=import-error, ungrouped-imports
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
HybridConnection, RampUpRule, UnauthenticatedClientAction,
ManagedServiceIdentity, DeletedAppRestoreRequest,
DefaultErrorResponseException, SnapshotRestoreRequest,
SnapshotRecoverySource, SwiftVirtualNetwork)
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.storage.blob import BlockBlobService, BlobPermissions
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.mgmt.network.models import Subnet, Delegation
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter
from azure.cli.core.commands.client_factory import UA_AGENT
from .tunnel import TunnelServer
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES, LINUX_RUNTIMES, WINDOWS_RUNTIMES
from ._client_factory import web_client_factory, ex_handler_factory
from ._appservice_utils import _generic_site_operation
from .utils import _normalize_sku, get_sku_name
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group,
should_create_new_rg, set_location, should_create_new_app,
get_lang_from_content, get_num_apps_in_asp)
from ._constants import (NODE_RUNTIME_NAME, OS_DEFAULT, STATIC_RUNTIME_NAME, PYTHON_RUNTIME_NAME,
RUNTIME_TO_IMAGE, NODE_VERSION_DEFAULT)
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None,  # pylint: disable=too-many-statements
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, multicontainer_config_type=None, multicontainer_config_file=None,
                  tags=None):
    """
    Create a web app inside an existing App Service plan.

    The plan decides the OS branch: Linux plans accept exactly one of
    --runtime / --deployment-container-image-name / multicontainer config,
    Windows plans accept an optional --runtime. Returns the created Site.
    Raises CLIError on conflicting or missing options.
    """
    # --deployment-source-url and --deployment-local-git are mutually exclusive.
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    client = web_client_factory(cmd.cli_ctx)
    # The plan may be given as a full ARM resource id or as a bare name in
    # the target resource group.
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(resource_group_name, plan)
    if not plan_info:
        raise CLIError("The plan '{}' doesn't exist".format(plan))
    # 'reserved' is the ARM flag for a Linux plan.
    is_linux = plan_info.reserved
    node_default_version = NODE_VERSION_DEFAULT
    location = plan_info.location
    site_config = SiteConfig(app_settings=[])
    # Always-on is not available on free/shared/basic tiers.
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True
    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags)
    helper = _StackRuntimeHelper(client, linux=is_linux)
    if is_linux:
        # Exactly one of runtime / container image / multicontainer config.
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise CLIError("usage error: --runtime | --deployment-container-image-name |"
                           " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file
        if runtime:
            site_config.linux_fx_version = runtime
            match = helper.resolve(runtime)
            if not match:
                raise CLIError("Linux Runtime '{}' is not supported."
                               "Please invoke 'list-runtimes' to cross check".format(runtime))
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            # Custom containers bring their own filesystem; disable the
            # shared App Service storage mount.
            site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                          value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
    elif plan_info.is_xenon:  # windows container webapp
        site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
    elif runtime:  # windows webapp with runtime specified
        # The Linux-only options are invalid here.
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
                           "--multicontainer-config-type and --multicontainer-config-file is "
                           "only appliable on linux webapp")
        match = helper.resolve(runtime)
        if not match:
            raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime))  # pylint: disable=line-too-long
        match['setter'](match, site_config)
        # Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack
        if not match['displayName'].startswith('node'):
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))
    else:  # windows webapp without runtime specified
        site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                      value=node_default_version))
    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)
    poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)
    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
    return webapp
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """
    Return True when exactly one way of specifying the app's stack was given:
    a runtime, a container image, or a multicontainer config (which requires
    both its type and its file).
    """
    # Multicontainer type and file must be supplied together or not at all.
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    chosen = sum(1 for option in (runtime,
                                  deployment_container_image_name,
                                  multicontainer_config_type) if option)
    return chosen == 1
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Merge new app settings into the site configuration.

    Each entry in *settings* / *slot_settings* is either 'NAME=value' or JSON in
    the shape produced by the corresponding 'list' command. Entries given via
    --slot-settings (or JSON items with slotSetting true) are also recorded as
    sticky to the slot. Returns the combined settings in list-output shape.
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    # result collects plain settings, slot_result the slot-sticky ones
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest in [(settings, result), (slot_settings, slot_result)]:
        for s in src:
            try:
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        if t.get('slotSetting', True):
                            slot_result[t['name']] = t['value']
                        else:
                            result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                # not JSON: fall back to NAME=value parsing (split on first '=')
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value
    result.update(slot_result)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    app_settings_slot_cfg_names = []
    if slot_result:
        # persist the new sticky names so they survive slot swaps
        new_slot_setting_names = slot_result.keys()
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        slot_cfg_names.app_setting_names += new_slot_setting_names
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Register a new Azure storage mount on the site under the id *custom_id*."""
    from azure.mgmt.web.models import AzureStorageInfoValue
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    if custom_id in accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))
    accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                                           share_name=share_name, access_key=access_key,
                                                           mount_path=mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    if slot_setting:
        # mark the new mount as sticky to the slot
        cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        cfg_names.azure_storage_config_names = cfg_names.azure_storage_config_names or []
        if custom_id not in cfg_names.azure_storage_config_names:
            cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, cfg_names)
    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Update an existing Azure storage mount; unspecified fields keep their current values."""
    from azure.mgmt.web.models import AzureStorageInfoValue
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    current = accounts.properties.pop(custom_id, None)
    if not current:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))
    # fall back to the existing value for every field left unset
    accounts.properties[custom_id] = AzureStorageInfoValue(
        type=storage_type or current.type,
        account_name=account_name or current.account_name,
        share_name=share_name or current.share_name,
        access_key=access_key or current.access_key,
        mount_path=mount_path or current.mount_path
    )
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    if slot_setting:
        # mark the mount as sticky to the slot
        cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        cfg_names.azure_storage_config_names = cfg_names.azure_storage_config_names or []
        if custom_id not in cfg_names.azure_storage_config_names:
            cfg_names.azure_storage_config_names.append(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, cfg_names)
    return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Zip-deploy a function app, routing Linux consumption apps through storage upload.

    Fix: previously, if the plan lookup never succeeded after the retries,
    plan_info stayed None and is_plan_consumption(None) failed with an
    AttributeError; now a clear CLIError is raised instead.
    """
    client = web_client_factory(cmd.cli_ctx)
    app = client.web_apps.get(resource_group_name, name)
    parse_plan_id = parse_resource_id(app.server_farm_id)
    plan_info = None
    retry_delay = 10  # seconds
    # We need to retry getting the plan because sometimes if the plan is created as part of function app,
    # it can take a couple of tries before it gets the plan
    for _ in range(5):
        plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
                                                 parse_plan_id['name'])
        if plan_info is not None:
            break
        time.sleep(retry_delay)
    if plan_info is None:
        raise CLIError('Could not determine the plan of the functionapp. Please try again later.')
    if is_plan_consumption(plan_info) and app.reserved:
        # Linux consumption apps cannot run zip deploy directly; run the package from storage instead
        return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Push a local zip to the Kudu zipdeploy endpoint, then poll the async deployment."""
    logger.warning("Getting scm site credentials for zip deployment")
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'
    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # note: headers aliases the auth dict on purpose; the status poll reuses it
    headers = authorization
    headers['content-type'] = 'application/octet-stream'
    headers['User-Agent'] = UA_AGENT
    import requests
    import os
    from azure.cli.core.util import should_disable_connection_verify
    # Read file content
    src_path = os.path.realpath(os.path.expanduser(src))
    with open(src_path, 'rb') as fs:
        zip_content = fs.read()
        logger.warning("Starting zip deployment. This operation can take a while to complete ...")
        requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
    # check the status of async deployment
    return _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
                                        authorization, timeout)
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
    """Upload the zip to the 'function-releases' blob container, point
    WEBSITE_RUN_FROM_PACKAGE at a long-lived read-only SAS URL, then sync triggers.

    Used for Linux consumption function apps, which cannot zip-deploy directly.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    storage_connection = None
    for keyval in settings:
        if keyval['name'] == 'AzureWebJobsStorage':
            storage_connection = str(keyval['value'])
    if storage_connection is None:
        raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')
    container_name = "function-releases"
    blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
    block_blob_service = BlockBlobService(connection_string=storage_connection)
    if not block_blob_service.exists(container_name):
        block_blob_service.create_container(container_name)
    # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    def progress_callback(current, total):
        total_length = 30
        # fix: round the whole ratio -- the closing paren previously sat after
        # round(total_length * current), which truncated instead of rounding
        filled_length = int(round(total_length * current / float(total)))
        percents = round(100.0 * current / float(total), 1)
        progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
        progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
        cmd.cli_ctx.get_progress_controller().add(message=progress_message)
    block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
                                             progress_callback=progress_callback)
    # SAS valid from 10 minutes ago (clock-skew slack) to ~10 years out
    now = datetime.datetime.now()
    blob_start = now - datetime.timedelta(minutes=10)
    blob_end = now + datetime.timedelta(weeks=520)
    blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
                                                                          blob_name,
                                                                          permission=BlobPermissions(read=True),
                                                                          expiry=blob_end,
                                                                          start=blob_start)
    blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
    website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
    update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
    client = web_client_factory(cmd.cli_ctx)
    try:
        logger.info('\nSyncing Triggers...')
        if slot is not None:
            client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
        else:
            client.web_apps.sync_function_triggers(resource_group_name, name)
    except CloudError as ce:
        # This SDK function throws an error if Status Code is 200
        if ce.status_code != 200:
            raise ce
def _generic_settings_operation(cli_ctx, resource_group_name, name, operation_name,
                                setting_properties, slot=None, client=None):
    """Invoke a client.web_apps settings operation, appending '_slot' when a slot is given."""
    client = client or web_client_factory(cli_ctx)
    operation = getattr(client.web_apps, operation_name if slot is None else operation_name + '_slot')
    if slot is None:
        # NOTE(review): the bare builtin `str` fills a positional SDK parameter
        # (presumably `kind`); looks odd but is passed through unchanged --
        # confirm against the installed azure-mgmt-web operation signature.
        return operation(resource_group_name, name, str, setting_properties)
    return operation(resource_group_name, name, slot, str, setting_properties)
def show_webapp(cmd, resource_group_name, name, slot=None, app_instance=None):
    """Return the webapp resource, enriched with its FTP publishing URL."""
    # when the routine is invoked as a help method, app_instance is pre-fetched
    webapp = app_instance or _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    _rename_server_farm_props(webapp)
    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot)
    return webapp
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Fetch the site object; getter entry point for the generic updater."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):
    """Generic-updater setter: push the updated site envelope through create_or_update."""
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    call_args = {
        'resource_group_name': resource_group_name,
        'name': name,
        'site_envelope': instance,
        'skip_dns_registration': skip_dns_registration,
        'skip_custom_domain_verification': skip_custom_domain_verification,
        'force_dns_registration': force_dns_registration,
        'ttl_in_seconds': ttl_in_seconds,
    }
    if slot:
        call_args['slot'] = slot
        return client.web_apps.create_or_update_slot(**call_args)
    return client.web_apps.create_or_update(**call_args)
def update_webapp(instance, client_affinity_enabled=None, https_only=None):
    """Apply simple property updates to *instance* in place and return it.

    Flags arrive as the strings 'true'/'false' and are converted to booleans.
    """
    if 'function' in instance.kind:
        raise CLIError("please use 'az functionapp update' to update this function app")
    for attr, flag in (('client_affinity_enabled', client_affinity_enabled),
                       ('https_only', https_only)):
        if flag is not None:
            setattr(instance, attr, flag == 'true')
    return instance
def update_functionapp(cmd, instance, plan=None):
    """Optionally re-home the function app onto the app service plan *plan*."""
    client = web_client_factory(cmd.cli_ctx)
    if plan is not None:
        if is_valid_resource_id(plan):
            parts = parse_resource_id(plan)
            dest_plan_info = client.app_service_plans.get(parts['resource_group'], parts['name'])
        else:
            # bare plan name: look it up in the app's own resource group
            dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
        if dest_plan_info is None:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        validate_plan_switch_compatibility(client, instance, dest_plan_info)
        instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(client, src_functionapp_instance, dest_plan_instance):
    """Raise CLIError unless both source and destination plans are Consumption or Elastic Premium."""
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
    src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
                                                 src_parse_result['name'])
    if src_plan_info is None:
        raise CLIError('Could not determine the current plan of the functionapp')

    def _is_switchable(plan):
        # only these two plan families may take part in the switch
        return is_plan_consumption(plan) or is_plan_elastic_premium(plan)

    if not _is_switchable(src_plan_info):
        raise CLIError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' + general_switch_msg)
    if not _is_switchable(dest_plan_instance):
        raise CLIError('You are trying to move to a plan that is not a Consumption or an Elastic Premium plan. ' +
                       general_switch_msg)
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Generic-updater setter for function apps; rejects non-function sites."""
    site = kwargs['parameters']
    if 'function' not in site.kind:
        raise CLIError('Not a function app to update')
    client = web_client_factory(cmd.cli_ctx)
    return client.web_apps.create_or_update(resource_group_name, name, site_envelope=site)
def list_webapp(cmd, resource_group_name=None):
    """List sites, excluding function apps."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' not in app.kind]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List deleted sites matching the optional filters, ordered by deleted-site id."""
    deleted = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    deleted.sort(key=lambda site: site.deleted_site_id)
    return deleted
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a previously deleted app into the given site/slot."""
    # restore configuration too unless the caller asked for content only
    recover_configuration = not restore_content_only
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=recover_configuration)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restore_from_deleted_app', slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List sites, keeping only function apps."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in apps if 'function' in app.kind]
def _list_app(cli_ctx, resource_group_name=None):
    """List all sites (optionally per resource group), with the plan property renamed."""
    client = web_client_factory(cli_ctx)
    if resource_group_name:
        apps = list(client.web_apps.list_by_resource_group(resource_group_name))
    else:
        apps = list(client.web_apps.list())
    for app in apps:
        _rename_server_farm_props(app)
    return apps
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Gather deleted apps across all supported locations, then filter client-side."""
    client = web_client_factory(cli_ctx)
    locations = _get_deleted_apps_locations(cli_ctx)
    deleted = []
    for location in locations:
        deleted.extend(client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        deleted = [d for d in deleted if d.resource_group == resource_group_name]
    if name:
        deleted = [d for d in deleted if d.deleted_site_name.lower() == name.lower()]
    if slot:
        deleted = [d for d in deleted if d.slot.lower() == slot.lower()]
    return deleted
def assign_identity(cmd, resource_group_name, name, role='Contributor', slot=None, scope=None):
    """Enable the system-assigned managed identity on the app; the shared ARM
    helper also grants it *role* over *scope* when a scope is supplied."""
    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        webapp.identity = ManagedServiceIdentity(type='SystemAssigned')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Return the managed service identity of the site."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot).identity
def remove_identity(cmd, resource_group_name, name, slot=None):
    """Disable the system-assigned managed identity on the app."""
    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        # the string 'None' tells ARM to drop the system-assigned identity
        webapp.identity = ManagedServiceIdentity(type='None')
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Return the site's authentication/authorization settings."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
                         token_refresh_extension_hours=None, # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
                         microsoft_account_client_secret=None, # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
    """Update site auth settings by reflecting over this function's own arguments.

    CAUTION: the inspect block below copies every truthy parameter from
    args[2:] onwards onto the auth-settings model attribute of the same name,
    so parameter names here must match the SDK model attributes exactly.
    (NOTE(review): args[2:] starts at `name`, which therefore also lands on the
    model when truthy -- appears harmless, but confirm it is intended.)
    """
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        # any other action redirects anonymous users to the chosen provider's login page
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    import inspect
    frame = inspect.currentframe()
    # these string flags ('true'/'false') are converted to real booleans below
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(cmd, linux=False):
    """Return the display names of the supported webapp runtime stacks."""
    helper = _StackRuntimeHelper(web_client_factory(cmd.cli_ctx), linux)
    return [stack['displayName'] for stack in helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete the function app (or one of its slots)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):
    """Delete a webapp (or one slot), honoring the keep-* opt-outs.

    For each flag: None leaves the service-side default, False turns the
    corresponding cleanup step off.
    """
    client = web_client_factory(cmd.cli_ctx)
    delete_kwargs = dict(delete_metrics=False if keep_metrics else None,
                         delete_empty_server_farm=False if keep_empty_plan else None,
                         skip_dns_registration=False if keep_dns_registration else None)
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot, **delete_kwargs)
    else:
        client.web_apps.delete(resource_group_name, name, **delete_kwargs)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop the webapp (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start the webapp (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart the webapp (or the given slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Return the site configuration object for the app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """Return app settings annotated with which of them are slot-sticky."""
    settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
    return _build_app_settings_output(settings.properties, sticky_names)
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """Return connection strings annotated with slot-stickiness."""
    strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .connection_string_names or []
    return [{'name': key,
             'value': strings.properties[key],
             'slotSetting': key in sticky_names} for key in strings.properties]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """Return the site's Azure storage mounts annotated with slot-stickiness."""
    client = web_client_factory(cmd.cli_ctx)
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .azure_storage_config_names or []
    return [{'name': account_id,
             'value': accounts.properties[account_id],
             'slotSetting': account_id in sticky_names} for account_id in accounts.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish URL (if any) to *webapp* as 'ftpPublishingUrl'.

    Fix: next() without a default raised StopIteration when the site exposes
    no FTP publishing profile; the attribute is now set to None instead.
    """
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    url = next((p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP'), None)
    setattr(webapp, 'ftpPublishingUrl', url)
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Write the container fx-version into the linux or windows site-config field."""
    fx_version = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    # `reserved` flags a Linux app, `is_xenon` a Windows container app (per the App Service API)
    linux_fx = fx_version if web_app.reserved else None
    windows_fx = fx_version if web_app.is_xenon else None
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Clear linux_fx_version by writing the single-space sentinel."""
    return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return whichever fx version (linux first, then windows) is set, else ''."""
    site_config = get_site_configs(cmd, resource_group_name, name, slot)
    for value in (site_config.linux_fx_version, site_config.windows_fx_version):
        if value:
            return value
    return ''
def url_validator(url):
    """Return True only for URLs that carry a scheme, a host AND a path."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    # note: a non-empty path is required, so a bare 'http://host' is rejected
    return bool(parts.scheme) and bool(parts.netloc) and bool(parts.path)
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Decode the base64 multicontainer config stored in linux_fx_version."""
    from base64 import b64decode
    linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
    if not any(linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    encoded_config = linux_fx_version.split('|')[1]
    return b64decode(encoded_config.encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multicontainer config (local path or URL) and return it base64-encoded."""
    from base64 import b64encode
    if url_validator(file_name):
        response = urlopen(file_name, context=_ssl_context())
        config_file_bytes = response.read()
    else:
        with open(file_name, 'rb') as f:
            config_file_bytes = f.read()
    # Encode the raw bytes as a base64 string
    return b64encode(config_file_bytes).decode('utf-8')
# For any modification to the non-optional parameters below, adjust the reflection
# logic inside the method accordingly (it inspects the argument list positionally).
def update_site_configs(cmd, resource_group_name, name, slot=None,
                        linux_fx_version=None, windows_fx_version=None, reserved_instance_count=None, php_version=None, # pylint: disable=unused-argument
                        python_version=None, net_framework_version=None, # pylint: disable=unused-argument
                        java_version=None, java_container=None, java_container_version=None, # pylint: disable=unused-argument
                        remote_debugging_enabled=None, web_sockets_enabled=None, # pylint: disable=unused-argument
                        always_on=None, auto_heal_enabled=None, # pylint: disable=unused-argument
                        use32_bit_worker_process=None, # pylint: disable=unused-argument
                        min_tls_version=None, # pylint: disable=unused-argument
                        http20_enabled=None, # pylint: disable=unused-argument
                        app_command_line=None, # pylint: disable=unused-argument
                        ftps_state=None, # pylint: disable=unused-argument
                        generic_configurations=None):
    """Update the site configuration by reflecting over this function's own arguments.

    CAUTION: the inspect block below copies every truthy parameter from
    args[3:] onwards onto the site-config model attribute of the same name,
    so parameter names must match SDK model attributes exactly.
    (NOTE(review): args[3:] starts at `slot`, which therefore also lands on
    the model when truthy -- appears harmless, but confirm it is intended.)
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if linux_fx_version:
        # a docker image needs the app-service-storage toggle off; any other stack clears it
        if linux_fx_version.strip().lower().startswith('docker|'):
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
    if reserved_instance_count is not None:
        reserved_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', reserved_instance_count,
                                                             min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    # these string flags ('true'/'false') are converted to real booleans below
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled']
    int_flags = ['reserved_instance_count']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    # generic configurations are free-form KEY=value or JSON blobs set verbatim
    generic_configurations = generic_configurations or []
    result = {}
    for s in generic_configurations:
        try:
            result.update(get_json_object(s))
        except CLIError:
            config_name, value = s.split('=', 1)
            result[config_name] = value
    for config_name, value in result.items():
        setattr(configs, config_name, value)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the given app settings, also unmarking any that were slot-sticky."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    removed_sticky = False
    for setting_name in setting_names:
        app_settings.properties.pop(setting_name, None)
        sticky = slot_cfg_names.app_setting_names
        if sticky and setting_name in sticky:
            sticky.remove(setting_name)
            removed_sticky = True
    if removed_sticky:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove the storage mount *custom_id*, unmarking it if it was slot-sticky."""
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    accounts.properties.pop(custom_id, None)
    sticky = slot_cfg_names.azure_storage_config_names
    if sticky and custom_id in sticky:
        sticky.remove(custom_id)
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts.properties,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Return an SSL context, downgrading for old Pythons / Cloud Shell on Windows.

    Fix: the original called sys.platform.system(); sys.platform is a str and
    has no system(), so the Cloud Shell branch raised AttributeError. The
    platform module provides system().
    """
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Shape raw settings into the CLI's list-of-dicts output, with credentials masked."""
    sticky_names = slot_cfg_names or []
    return [{'name': setting,
             'value': app_settings[setting],
             'slotSetting': setting in sticky_names}
            for setting in _mask_creds_related_appsettings(app_settings)]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add or update connection strings; each entry is 'NAME=value', value optionally quoted.

    Entries passed via --slot-settings are additionally marked sticky to the slot.
    """
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=', connection string should not have '=' in the name
        conn_string_name, value = name_value.split('=', 1)
        if value[0] in ["'", '"']: # strip away the quotes used as separators
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
                                                                            type=connection_string_type)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings.properties, slot, client)
    if slot_settings:
        # persist which connection strings are slot-sticky
        new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names += new_slot_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove the given connection strings, unmarking any that were slot-sticky."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    removed_sticky = False
    for setting_name in setting_names:
        conn_strings.properties.pop(setting_name, None)
        sticky = slot_cfg_names.connection_string_names
        if sticky and setting_name in sticky:
            sticky.remove(setting_name)
            removed_sticky = True
    if removed_sticky:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings.properties, slot, client)
# App settings that drive a site's container configuration.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Credential-bearing settings; presumably consumed by _mask_creds_related_appsettings
# when shaping output -- confirm against that helper.
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update the container (Docker) configuration of a web app or slot.

    Registry URL/user/password and the storage toggle are persisted as app
    settings; the custom image name and multi-container config go into the
    site's fx version. Returns the resulting container-related app settings
    with credentials masked.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    # ACR URL with no credentials supplied: attempt an automatic admin-credential
    # lookup; failure only warns so the rest of the update still proceeds.
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # registry name is the first label of the host (or path, when no scheme was given)
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex)  # consider throw if needed
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage:  # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
    # re-read so the returned values reflect the update that just happened
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        # both flags are required together; warn rather than fail
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Function-app flavor of update_container_settings: the storage toggle and
    multi-container options do not apply, so they are pinned to None."""
    return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                     docker_custom_image_name, docker_registry_server_user,
                                     websites_enable_app_service_storage=None,
                                     docker_registry_server_password=docker_registry_server_password,
                                     multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Return (username, password) for an Azure Container Registry's admin user.

    Raises CLIError when the registry cannot be uniquely located in the
    subscription or when its admin user is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    registries_client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    all_registries = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    matches = [item for item in all_registries if item.name.lower() == registry_name]
    if len(matches) != 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(matches[0].id)['resource_group']
    registry = registries_client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = registries_client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Remove container configuration: clear the fx version and delete the Docker app settings."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Show container-related app settings (credentials masked), optionally with
    the decoded multi-container configuration."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Function-app flavor of show_container_settings (no multi-container config)."""
    return show_container_settings(cmd, resource_group_name, name,
                                   show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Keep only container-related app settings and append the image name taken
    from the site's fx version (plus its decoded form when requested)."""
    filtered = [entry for entry in settings if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                         'value': fx_version})
        if show_multicontainer_config:
            decoded = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            filtered.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                             'value': decoded})
    return filtered
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Blank out sensitive values (see APPSETTINGS_TO_MASK) before display.

    Callers in this module pass the list of {'name': ..., 'value': ...} dicts
    produced by get_app_settings/_filter_for_container_settings; the previous
    implementation only handled a name->value mapping, so for list input the
    membership test compared dicts to strings and nothing was ever masked.
    Both shapes are now supported; the input is mutated and returned.
    """
    if isinstance(settings, dict):
        # mapping form: mask by key
        for key in [k for k in settings if k in APPSETTINGS_TO_MASK]:
            settings[key] = None
    else:
        # list-of-entries form: mask the 'value' of matching entries
        for entry in settings:
            if isinstance(entry, dict) and entry.get('name') in APPSETTINGS_TO_MASK:
                entry['value'] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind a custom hostname to a web app or to one of its slots."""
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(location=webapp.location, site_name=webapp.name)
    if slot is not None:
        return client.web_apps.create_or_update_host_name_binding_slot(
            resource_group_name, webapp.name, hostname, binding, slot)
    return client.web_apps.create_or_update_host_name_binding(
        resource_group_name, webapp.name, hostname, binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Unbind a custom hostname from a web app or from one of its slots."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is not None:
        return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
    return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming each name down to the bare hostname."""
    bindings = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        # binding names come back as 'site/hostname'; keep only the hostname part
        binding.name = binding.name.rsplit('/', 1)[-1]
    return bindings
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Resolve the inbound IP address of a web app.

    For apps hosted in an App Service Environment the address comes from the
    ASE's VIPs (internal IP when present, else the virtual IP of an
    IP-based-SSL binding, else the service IP). Otherwise the app's default
    hostname is resolved through DNS.
    """
    # logics here are ported from portal
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if webapp.hosting_environment_profile:
        address = client.app_service_environments.list_vips(
            resource_group_name, webapp.hosting_environment_profile.name)
        if address.internal_ip_address:
            ip_address = address.internal_ip_address
        else:
            # prefer the dedicated virtual IP of an IP-based SSL binding when one exists
            vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
            ip_address = vip.virtual_ip if vip else address.service_ip_address
    else:
        ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
    return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot for a web app, optionally cloning configuration
    from production or from another slot."""
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    if 'functionapp' in site.kind:
        raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    slot_def.site_config = SiteConfig()
    poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot,
                                              configuration_source)
    # trim 'site/slot' down to the bare slot name
    result.name = result.name.rsplit('/', 1)[-1]
    return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
    """Create a deployment slot for a function app, optionally cloning configuration."""
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' function app doesn't exist".format(name))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    poller = client.web_apps.create_or_update_slot(resource_group_name, name, slot_def, slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot,
                                              configuration_source)
    # trim 'site/slot' down to the bare slot name
    result.name = result.name.rsplit('/', 1)[-1]
    return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
    """Clone site config, app settings and connection strings into *slot*.

    The source is another slot, or production when *configuration_source*
    equals the app name. Slot-sticky settings are excluded so they do not
    propagate into the new slot.
    """
    clone_from_prod = configuration_source.lower() == webapp.lower()
    site_config = get_site_configs(cmd, resource_group_name, webapp,
                                   None if clone_from_prod else configuration_source)
    _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                            'update_configuration', slot, site_config)
    # slot create doesn't clone over the app-settings and connection-strings, so we do it here
    # also make sure slot settings don't get propagated.
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
    src_slot = None if clone_from_prod else configuration_source
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                           'list_application_settings',
                                           src_slot)
    # drop slot-sticky app settings before copying
    for a in slot_cfg_names.app_setting_names or []:
        app_settings.properties.pop(a, None)
    connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                 'list_connection_strings',
                                                 src_slot)
    # likewise for slot-sticky connection strings
    for a in slot_cfg_names.connection_string_names or []:
        connection_strings.properties.pop(a, None)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_application_settings',
                                app_settings.properties, slot, client)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_connection_strings',
                                connection_strings.properties, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None,  # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, cd_app_type=None,
                          app_working_dir=None, nodejs_task_runner=None, python_framework=None,
                          python_version=None, cd_account_create=None, cd_project_url=None, test=None,
                          slot_swap=None, private_repo_username=None, private_repo_password=None):
    """Configure source control for an app.

    Two disjoint paths: when *cd_project_url* is set, continuous delivery is
    set up through VSTS and the function returns early; otherwise a plain
    SiteSourceControl binding is created (with retries, since the SCM site
    may be restarting after earlier commands).
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    if cd_project_url:
        # Add default values
        cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
        python_framework = 'Django' if python_framework is None else python_framework
        python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(resource_group_name)
        vsts_provider = VstsContinuousDeliveryProvider()
        cd_app_type_details = {
            'cd_app_type': cd_app_type,
            'app_working_dir': app_working_dir,
            'nodejs_task_runner': nodejs_task_runner,
            'python_framework': python_framework,
            'python_version': python_version
        }
        try:
            status = vsts_provider.setup_continuous_delivery(cmd.cli_ctx, resource_group_name, name, repo_url,
                                                             branch, git_token, slot_swap, cd_app_type_details,
                                                             cd_project_url, cd_account_create, location, test,
                                                             private_repo_username, private_repo_password, webapp_list)
        except RuntimeError as ex:
            raise CLIError(ex)
        logger.warning(status.status_message)
        return status
    # VSTS-only options are meaningless without a cd_project_url
    non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
                       python_version, cd_account_create, test, slot_swap]
    if any(non_vsts_params):
        raise CLIError('Following parameters are of no use when cd_project_url is None: ' +
                       'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' +
                       'python_version, cd_account_create, test, slot_swap')
    from azure.mgmt.web.models import SiteSourceControl, SourceControl
    if git_token:
        # cache the GitHub token subscription-wide before binding the repo
        sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
        client.update_source_control('GitHub', sc)
    source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                       is_manual_integration=manual_integration,
                                       is_mercurial=(repository_type != 'git'))
    # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
    for i in range(5):
        try:
            poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                             'create_or_update_source_control',
                                             slot, source_control)
            return LongRunningOperation(cmd.cli_ctx)(poller)
        except Exception as ex:  # pylint: disable=broad-except
            import re
            ex = ex_handler_factory(no_throw=True)(ex)
            # for non server errors(50x), just throw; otherwise retry 4 times
            if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                raise
            logger.warning('retrying %s/4', i + 1)
            time.sleep(5)   # retry in a moment
def update_git_token(cmd, git_token=None):
    """Update the GitHub source-control token cached by App Service.

    Passing no token clears the one currently stored.
    """
    from azure.mgmt.web.models import SourceControl
    client = web_client_factory(cmd.cli_ctx)
    source_control = SourceControl(name='not-really-needed', source_control_name='GitHub',
                                   token=git_token or '')
    return client.update_source_control('GitHub', source_control)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Show the app's source-control binding (repo URL, branch, etc.)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Remove the app's source-control binding."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Switch the app's SCM to LocalGit and return the git URL to push to."""
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    site_config = SiteConfigResource(location=location)
    site_config.scm_type = 'LocalGit'
    if slot is not None:
        client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
                                                            site_config, slot)
    else:
        client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
    return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a sync of the app's configured source repository."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # Because of bad spec, sdk throws on 200. We capture it here
        # only real failures propagate; 200/204 are success despite the raise
        if ex.status_code not in [200, 204]:
            raise ex
def list_app_service_plans(cmd, resource_group_name=None):
    """List App Service plans across the subscription, or within one resource group."""
    client = web_client_factory(cmd.cli_ctx)
    if resource_group_name is not None:
        plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
    else:
        plans = list(client.app_service_plans.list(detailed=True))  # enables querying "numberOfSites"
    for plan in plans:
        # prune a few useless fields
        del plan.geo_region
        del plan.subscription
    return plans
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, sku='B1', number_of_workers=None,
                            location=None, tags=None):
    """Create an App Service plan; Linux and Hyper-V (Windows container) modes
    are mutually exclusive."""
    if is_linux and hyper_v:
        raise CLIError('usage error: --is-linux | --hyper-v')
    client = web_client_factory(cmd.cli_ctx)
    normalized_sku = _normalize_sku(sku)
    if location is None:
        # default to the resource group's region
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_name(normalized_sku), name=normalized_sku,
                             capacity=number_of_workers)
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None):
    """Apply sku and/or worker-count changes to a plan instance and return it.

    Arguments left as None leave the corresponding field untouched.
    """
    sku_def = instance.sku
    if sku is not None:
        normalized = _normalize_sku(sku)
        sku_def.tier = get_sku_name(normalized)
        sku_def.name = normalized
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    instance.sku = sku_def
    return instance
def update_functionapp_app_service_plan(instance, sku=None, number_of_workers=None, max_burst=None):
    """Update a function-app plan; --max-burst applies only to Elastic Premium plans."""
    # NOTE(review): update_app_service_plan runs twice — once here, so that
    # is_plan_elastic_premium below already sees the new sku, and once again
    # at the end with the validated worker count.
    instance = update_app_service_plan(instance, sku, number_of_workers)
    if max_burst is not None:
        if not is_plan_elastic_premium(instance):
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
        instance.maximum_elastic_worker_count = max_burst
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
                                                       number_of_workers, min_val=0, max_val=20)
    return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Fetch the scheduled-backup configuration, translating any failure into
    a friendly CLIError."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                       'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List existing backups of a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups',
                                   slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger a one-off backup of a web app into the given storage container."""
    client = web_client_factory(cmd.cli_ctx)
    if backup_name and backup_name.lower().endswith('.zip'):
        # the service appends .zip itself; avoid a double extension
        backup_name = backup_name[:-4]
    backup_request = BackupRequest(backup_request_name=backup_name,
                                   storage_account_url=storage_account_url,
                                   databases=_create_db_setting(db_name, db_type, db_connection_string))
    if slot:
        return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
    return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, backup_name=None, slot=None):
    """Create or update the scheduled-backup configuration of a web app.

    Arguments left as None fall back to the values already stored in the
    current configuration; when no configuration exists yet, all of
    storage URL, frequency, retention and retain-one must be supplied.
    """
    configuration = None
    if backup_name and backup_name.lower().endswith('.zip'):
        # the service appends .zip itself; avoid a double extension
        backup_name = backup_name[:-4]
    if not backup_name:
        backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except DefaultErrorResponseException:
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # CLI passes the flag as a string; normalize to bool
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    # carry over existing database settings for any db argument not supplied
    if configuration and configuration.databases:
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(db_name, db_type, db_connection_string)
    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a web app from a backup blob previously produced by create_backup."""
    client = web_client_factory(cmd.cli_ctx)
    # backups are stored as .zip blobs; tolerate the name given with or without the extension
    blob_name = backup_name if backup_name.lower().endswith('.zip') else backup_name + '.zip'
    restore_request = RestoreRequest(storage_account_url=storage_account_url,
                                     blob_name=blob_name, overwrite=overwrite,
                                     site_name=target_name,
                                     databases=_create_db_setting(db_name, db_type, db_connection_string),
                                     ignore_conflicting_host_names=ignore_hostname_conflict)
    if slot:
        return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
    return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List restorable snapshots of a web app (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
                                   slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False,  # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    """Restore a web app from a snapshot.

    With both --source-resource-group and --source-name the snapshot of the
    source app is restored into the target (non-overwriting); otherwise the
    app is overwritten with its own snapshot. Configuration is restored
    unless restore_content_only is set.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    if all([source_resource_group, source_name]):
        # Restore from source app to target app
        sub_id = get_subscription_id(cmd.cli_ctx)
        source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
                    "/providers/Microsoft.Web/sites/" + source_name
        if source_slot:
            source_id = source_id + "/slots/" + source_slot
        source = SnapshotRecoverySource(id=source_id)
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
                                         recover_configuration=recover_config)
        if slot:
            return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
        return client.web_apps.restore_snapshot(resource_group_name, name, request)
    if any([source_resource_group, source_name]):
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    # Overwrite app with its own snapshot
    request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(db_name, db_type, db_connection_string):
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(frequency):
    """Parse a backup frequency like '7d' or '12h' into (number, FrequencyUnit).

    Raises CLIError for a missing/unknown unit suffix, a non-numeric count,
    or a non-positive count.
    """
    unit_part = frequency.lower()[-1]
    if unit_part == 'd':
        frequency_unit = FrequencyUnit.day
    elif unit_part == 'h':
        frequency_unit = FrequencyUnit.hour
    else:
        raise CLIError('Frequency must end with d or h for "day" or "hour"')
    try:
        frequency_num = int(frequency[:-1])
    except ValueError:
        raise CLIError('Frequency must start with a number')
    # previously `< 0`, which let a zero frequency through despite the message
    if frequency_num <= 0:
        raise CLIError('Frequency must be positive')
    return frequency_num, frequency_unit
def _get_location_from_resource_group(cli_ctx, resource_group_name):
    """Return the Azure region of the given resource group."""
    from azure.mgmt.resource import ResourceManagementClient
    resource_client = get_mgmt_service_client(cli_ctx, ResourceManagementClient)
    return resource_client.resource_groups.get(resource_group_name).location
def _get_location_from_webapp(client, resource_group_name, webapp):
    """Return the region of an existing web app, failing if it does not exist.

    The original shadowed the *webapp* name parameter with the API result
    before formatting the error, so the message printed the falsy return
    value (e.g. 'None') instead of the app name.
    """
    site = client.web_apps.get(resource_group_name, webapp)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    return site.location
def _get_deleted_apps_locations(cli_ctx):
    """Return the locations where the 'deletedSites' resource type is offered ([] if none)."""
    from azure.mgmt.resource import ResourceManagementClient
    resource_client = get_mgmt_service_client(cli_ctx, ResourceManagementClient)
    web_provider = resource_client.providers.get('Microsoft.Web')
    for resource_type in web_provider.resource_types:
        if resource_type.resource_type == 'deletedSites':
            return resource_type.locations
    return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Compose the local-git remote URL, embedding the publishing user name."""
    user = client.get_publishing_user()
    source_control = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
    parsed = urlparse(source_control.repo_url)
    return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
                                      parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the https URL of the app's Kudu (SCM) host."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(cmd, resource_group_name, name, slot=slot)
    scm_host = next((h for h in webapp.host_name_ssl_states or []
                     if h.host_type == HostType.repository), None)
    if scm_host is None:
        # this should not happen, but throw anyway
        raise ValueError('Failed to retrieve Scm Uri')
    return "https://{}".format(scm_host.name)
def set_deployment_user(cmd, user_name, password=None):
    """Set the subscription-wide deployment (publishing) credentials.

    Note: every web app in the subscription is affected. When no password is
    given, prompts for one; in non-interactive mode this is an error.
    """
    client = web_client_factory(cmd.cli_ctx)
    if password is None:
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user = User(publishing_user_name=user_name)
    user.publishing_password = password
    return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    """Fetch the app's publishing credentials (blocks on the long-running call)."""
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_credentials', slot)
    return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None):
    """Return the app's publish profiles as plain dicts.

    The publishing-profile XML is streamed from the service, parsed with
    xmltodict, and each profile's XML attributes are flattened into a dict
    (dropping the '@' prefix xmltodict adds for attributes).
    """
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot)
    full_xml = ''
    for f in content:
        full_xml += f.decode()
    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    # xmltodict returns a bare dict (not a list) when the XML contains exactly
    # one <publishProfile>; the loop below would then iterate attribute names.
    if not isinstance(profiles, list):
        profiles = [profiles]
    converted = []
    for profile in profiles:
        new = {}
        for key in profile:
            # strip the leading '@' xmltodict put in for attributes
            new[key.lstrip('@')] = profile[key]
        converted.append(new)
    return converted
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Toggle Docker continuous deployment via the DOCKER_ENABLE_CI app setting
    and report the resulting webhook URL."""
    update_app_settings(cmd, resource_group_name, name,
                        ["DOCKER_ENABLE_CI=" + enable], slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Report whether Docker CI is enabled and, when it is, the docker webhook URL."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if not docker_enabled:
        cd_settings['CI_CD_URL'] = ''
        return cd_settings
    for profile in list_publish_profiles(cmd, resource_group_name, name, slot):
        if profile['publishMethod'] == 'MSDeploy':
            # embed the MSDeploy credentials into the webhook URL
            scm_url = profile['publishUrl'].replace(":443", "")
            cd_settings['CI_CD_URL'] = ('https://' + profile['userName'] + ':' +
                                        profile['userPWD'] + '@' + scm_url + '/docker/hook')
            break
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the app's URL in the default browser; optionally tail the log stream."""
    url = _get_url(cmd, resource_group_name, name, slot)
    open_page_in_browser(url)
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Return the browsable URL of the app, using https when any hostname has SSL."""
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    host = site.enabled_host_names[0]  # picks the custom domain URL incase a domain is assigned
    has_ssl = any(h.ssl_state != SslState.disabled for h in site.host_name_ssl_states)
    scheme = 'https' if has_ssl else 'http'
    return scheme + '://' + host
# TODO: expose new blob suport
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure the app's diagnostic logs.

    Each log category (application, http/server, detailed errors, failed
    request tracing) is updated only when its corresponding argument is
    supplied; others are left as None so the service keeps them unchanged.
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location
    application_logs = None
    if application_logging is not None:
        if not application_logging:
            level = 'Off'
        elif level is None:
            level = 'Error'
        fs_log = FileSystemApplicationLogsConfig(level=level)
        application_logs = ApplicationLogsConfig(file_system=fs_log)
    http_logs = None
    # web-server and docker-container logging share the same http-logs config
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be impelemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Show the app's current diagnostic-logs configuration."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable or disable auto-swap on a slot (target defaults to 'production')."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(cmd, resource_group_name, webapp):
    """List deployment slots, annotating each with its App Service plan name."""
    client = web_client_factory(cmd.cli_ctx)
    slots = list(client.web_apps.list_slots(resource_group_name, webapp))
    for item in slots:
        # trim 'site/slot' down to the bare slot name
        item.name = item.name.rsplit('/', 1)[-1]
        item.app_service_plan = parse_resource_id(item.server_farm_id)['name']
        del item.server_farm_id
    return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, action='swap'):
    """Swap, preview-swap, or reset a deployment slot against *target_slot*
    (default 'production')."""
    client = web_client_factory(cmd.cli_ctx)
    if action == 'swap':
        return client.web_apps.swap_slot_slot(resource_group_name, webapp,
                                              slot, target_slot or 'production', True)
    if action == 'preview':
        # apply the source slot's settings to the target without swapping content
        if target_slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name,
                                                                   webapp, slot, True)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
                                                             slot, target_slot, True)
    # remaining action is 'reset': we will reset both source slot and target slot
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete a deployment slot of a web app."""
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    web_client_factory(cmd.cli_ctx).web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Configure ramp-up (routed traffic) rules from 'slot=percentage' pairs."""
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # default host name is '<site>.<domain>'; slot hosts look like '<site>-<slot>.<domain>'
    host_parts = site.default_host_name.split('.', 1)
    rules = []
    for pair in distribution:
        slot, percentage = pair.split('=')
        rules.append(RampUpRule(action_host_name=host_parts[0] + "-" + slot + '.' + host_parts[1],
                                reroute_percentage=float(percentage),
                                name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Return the current ramp-up (routed traffic) rules of a web app."""
    return get_site_configs(cmd, resource_group_name, name).experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all ramp-up rules by applying an empty distribution."""
    set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append *allowed_origins* to the app's CORS allowed-origins list."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    updated = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration',
                                      slot, configs)
    return updated.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove *allowed_origins* from the CORS list; an empty list clears all origins."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if allowed_origins:
            remaining = [o for o in (configs.cors.allowed_origins or []) if o not in allowed_origins]
        else:
            remaining = []
        configs.cors.allowed_origins = remaining
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration',
                                          slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Return the CORS settings of a web app (or slot)."""
    return get_site_configs(cmd, resource_group_name, name, slot).cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Stream live application logs to stdout until interrupted (Ctrl+C)."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url = '{}/{}'.format(streaming_url, provider.lstrip('/'))
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    # stream on a daemon thread so Ctrl+C in the main thread can stop the command
    worker = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    worker.daemon = True
    worker.start()
    while True:
        time.sleep(100)  # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the zipped historical logs of a web app to *log_file*."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    dump_url = '{}/dump'.format(scm_url.rstrip('/'))
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(dump_url, user_name, password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Return the (username, password) publishing credentials for the app/slot."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name, 'list_publishing_credentials', slot)
    creds = poller.result()
    return creds.publishing_user_name, creds.publishing_password
def _get_log(url, user_name, password, log_file=None):
    """Fetch logs from the Kudu *url* using basic auth.

    When *log_file* is given the response body is downloaded to that file;
    otherwise it is streamed to stdout chunk by chunk.

    :raises CLIError: when the server does not answer with HTTP 200.
    """
    import certifi
    import urllib3
    try:
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    if r.status != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            url, r.status, r.reason))
    try:
        if log_file:  # download logs
            with open(log_file, 'wb') as f:
                while True:
                    data = r.read(1024)
                    if not data:
                        break
                    f.write(data)
        else:  # streaming
            std_encoding = sys.stdout.encoding
            for chunk in r.stream():
                if chunk:
                    # Extra encode() and decode for stdout which does not support 'utf-8'
                    print(chunk.decode(encoding='utf-8', errors='replace')
                          .encode(std_encoding, errors='replace')
                          .decode(std_encoding, errors='replace'), end='')  # each line of log has CRLF.
    finally:
        # release the connection back to the pool even when reading/streaming fails
        # (previously it leaked on any exception raised above)
        r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file):
    """Upload a .pfx certificate and register it under a deterministic name
    derived from its thumbprint, hosting environment, location and resource group."""
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get')
    # read the raw pfx bytes inside a context manager so the handle is not leaked
    # (the previous code opened the file and never closed it)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Decrypts the .pfx file and returns its SHA1 thumbprint without colons. '''
    # read within a context manager so the file handle is not leaked
    # (the previous code called open(...).read() and never closed the file)
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List all certificates in a resource group."""
    return web_client_factory(cmd.cli_ctx).certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the first certificate in the resource group matching the thumbprint."""
    client = web_client_factory(cmd.cli_ctx)
    matching = (c for c in client.certificates.list_by_resource_group(resource_group_name)
                if c.thumbprint == certificate_thumbprint)
    cert = next(matching, None)
    if cert is None:
        raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
    return client.certificates.delete(resource_group_name, cert.name)
def _update_host_name_ssl_state(cli_ctx, resource_group_name, webapp_name, location,
                                host_name, ssl_state, thumbprint, slot=None):
    """Patch a single host name's SSL binding state on the site (or slot)."""
    ssl_binding = HostNameSslState(name=host_name,
                                   ssl_state=ssl_state,
                                   thumbprint=thumbprint,
                                   to_update=True)
    site_patch = Site(host_name_ssl_states=[ssl_binding], location=location)
    return _generic_site_operation(cli_ctx, resource_group_name, webapp_name, 'create_or_update',
                                   slot, site_patch)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Set the SSL state for every host name covered by the certificate with
    the given thumbprint (wildcard names are matched against the app's host names)."""
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(name))
    # certificates live in the plan's resource group, which may differ from the app's
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    for cert in client.certificates.list_by_resource_group(cert_resource_group_name):
        if cert.thumbprint != certificate_thumbprint:
            continue
        # single non-wildcard host name: bind it directly
        if len(cert.host_names) == 1 and not cert.host_names[0].startswith('*'):
            return _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
                                               cert.host_names[0], ssl_type,
                                               certificate_thumbprint, slot)
        # otherwise match the cert's (possibly wildcard) names against the app's host names
        query_result = list_hostnames(cmd, resource_group_name, name, slot)
        hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
        for matched_host in _match_host_names_from_cert(cert.host_names, hostnames_in_webapp):
            _update_host_name_ssl_state(cmd.cli_ctx, resource_group_name, name, webapp.location,
                                        matched_host, ssl_type, certificate_thumbprint, slot)
        return show_webapp(cmd, resource_group_name, name, slot)
    raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind an uploaded certificate to the app, SNI or IP based."""
    state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Disable the SSL binding for the certificate with the given thumbprint."""
    return _update_ssl_binding(cmd, resource_group_name, name,
                               certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# help class handles runtime stack in format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
    """Lazily loads the available runtime stacks from ARM and resolves a
    display name such as 'node|6.1' or 'php|5.5' to its stack entry."""
    def __init__(self, client, linux=False):
        self._client = client  # web site management client used to query available stacks
        self._linux = linux  # load Linux stacks when True, Windows stacks otherwise
        self._stacks = []  # cache, populated on first use by _load_stacks()
    def resolve(self, display_name):
        """Return the stack whose displayName matches *display_name*
        (case-insensitive), or None when there is no match."""
        self._load_stacks()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)
    @property
    def stacks(self):
        # all loaded stacks; triggers the lazy load on first access
        self._load_stacks()
        return self._stacks
    @staticmethod
    def update_site_config(stack, site_config):
        """Apply the stack's config values as attributes on *site_config*."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config
    @staticmethod
    def update_site_appsettings(stack, site_config):
        """Apply the stack's config values as app settings (used for node stacks)."""
        if site_config.app_settings is None:
            site_config.app_settings = []
        site_config.app_settings += [NameValuePair(name=k, value=v) for k, v in stack['configs'].items()]
        return site_config
    def _load_stacks(self):
        """Fetch and cache available stacks from ARM; no-op when already cached."""
        if self._stacks:
            return
        os_type = ('Linux' if self._linux else 'Windows')
        # pull the raw JSON payload instead of the deserialized model
        raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
        bytes_value = raw_stacks._get_next().content  # pylint: disable=protected-access
        json_value = bytes_value.decode('utf8')
        json_stacks = json.loads(json_value)
        stacks = json_stacks['value']
        result = []
        if self._linux:
            # Linux stacks: expose one entry per major version, preferring the
            # default minor version's runtime string when available
            for properties in [(s['properties']) for s in stacks]:
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': (default_minor['runtimeVersion']
                                        if default_minor else major['runtimeVersion'])
                    })
        else:  # Windows stacks
            # maps a stack name to the site-config attribute / app-setting it drives
            config_mappings = {
                'node': 'WEBSITE_NODE_DEFAULT_VERSION',
                'python': 'python_version',
                'php': 'php_version',
                'aspnet': 'net_framework_version'
            }
            # get all stack version except 'java'
            for stack in stacks:
                if stack['name'] not in config_mappings:
                    continue
                name, properties = stack['name'], stack['properties']
                for major in properties['majorVersions']:
                    default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                         None)
                    result.append({
                        'displayName': name + '|' + major['displayVersion'],
                        'configs': {
                            config_mappings[name]: (default_minor['runtimeVersion']
                                                    if default_minor else major['runtimeVersion'])
                        }
                    })
            # deal with java, which pairs with java container version
            java_stack = next((s for s in stacks if s['name'] == 'java'))
            java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
            for java_version in java_stack['properties']['majorVersions']:
                for fx in java_container_stack['properties']['frameworks']:
                    for fx_version in fx['majorVersions']:
                        result.append({
                            'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                                  fx['display'],
                                                                  fx_version['displayVersion']),
                            'configs': {
                                'java_version': java_version['runtimeVersion'],
                                'java_container': fx['name'],
                                'java_container_version': fx_version['runtimeVersion']
                            }
                        })
        for r in result:
            # node settings are applied via app settings; everything else via site config
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
    """Return the instrumentation key of an Application Insights component."""
    ai_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = ai_client.components.get(resource_group, name)
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku,
                                        number_of_workers=None, max_burst=None, location=None, tags=None):
    """Create (or update) an app service plan suitable for hosting function apps."""
    client = web_client_factory(cmd.cli_ctx)
    sku = _normalize_sku(sku)
    tier = get_sku_name(sku)
    if max_burst is not None:
        # --max-burst only applies to Elastic Premium plans
        if tier.lower() != "elasticpremium":
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-elastic-worker-count',
                                                       number_of_workers, min_val=0, max_val=20)
    if location is None:
        location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    sku_description = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
    plan_definition = AppServicePlan(location=location, tags=tags, sku=sku_description,
                                     reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                                     hyper_v=None, name=name)
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_definition)
def is_plan_consumption(plan_info):
    """Return True when *plan_info* is an app service plan on the Dynamic (consumption) tier."""
    if not isinstance(plan_info, AppServicePlan):
        return False
    if not isinstance(plan_info.sku, SkuDescription):
        return False
    return plan_info.sku.tier.lower() == 'dynamic'
def is_plan_elastic_premium(plan_info):
    """Return True when *plan_info* is an Elastic Premium app service plan.

    The tier comparison is case-insensitive for consistency with
    is_plan_consumption and the tier check in
    create_functionapp_app_service_plan (which lowercases before comparing).
    """
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            return plan_info.sku.tier.lower() == 'elasticpremium'
    return False
def validate_and_convert_to_int(flag, val):
    """Convert *val* to int, raising a usage CLIError when it is not numeric."""
    try:
        converted = int(val)
    except ValueError:
        raise CLIError("Usage error: {} is expected to have an int value.".format(flag))
    return converted
def validate_range_of_int_flag(flag_name, value, min_val, max_val):
    """Validate that *value* parses as an int within [min_val, max_val] and return it."""
    value = validate_and_convert_to_int(flag_name, value)
    if not min_val <= value <= max_val:
        raise CLIError("Usage error: {} is expected to be between {} and {} (inclusive)".format(flag_name, min_val,
                                                                                                max_val))
    return value
def create_function(cmd, resource_group_name, name, storage_account, plan=None,
                    os_type=None, runtime=None, consumption_plan_location=None,
                    app_insights=None, app_insights_key=None, disable_app_insights=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    deployment_container_image_name=None, tags=None):
    """Create a function app on a consumption plan (--consumption-plan-location)
    or an existing app service plan (--plan), wiring up the storage account,
    runtime settings, deployment source and (optionally) Application Insights.
    Exactly one of *plan* / *consumption_plan_location* must be supplied.
    """
    # pylint: disable=too-many-statements, too-many-branches
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config, tags=tags)
    client = web_client_factory(cmd.cli_ctx)
    plan_info = None
    if consumption_plan_location:
        # validate the requested region against the list of consumption regions
        locations = list_consumption_locations(cmd)
        location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = os_type and os_type.lower() == 'linux'
    else:  # apps with SKU based plan
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        # 'reserved' marks a Linux plan
        is_linux = plan_info.reserved
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise CLIError(
            "usage error: --runtime RUNTIME required for linux functions apps without custom image.")
    if runtime:
        if is_linux and runtime not in LINUX_RUNTIMES:
            raise CLIError("usage error: Currently supported runtimes (--runtime) in linux function apps are: {}."
                           .format(', '.join(LINUX_RUNTIMES)))
        if not is_linux and runtime not in WINDOWS_RUNTIMES:
            raise CLIError("usage error: Currently supported runtimes (--runtime) in windows function apps are: {}."
                           .format(', '.join(WINDOWS_RUNTIMES)))
        site_config.app_settings.append(NameValuePair(name='FUNCTIONS_WORKER_RUNTIME', value=runtime))
    # validates the storage account and builds its connection string
    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        if consumption_plan_location:
            site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
        else:
            site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                # custom container: point the app at the image and disable app-service storage
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            else:
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
                if runtime.lower() not in RUNTIME_TO_IMAGE:
                    raise CLIError("An appropriate linux image for runtime:'{}' was not found".format(runtime))
                site_config.linux_fx_version = _format_fx_version(RUNTIME_TO_IMAGE[runtime.lower()])
    else:
        functionapp_def.kind = 'functionapp'
        site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION', value='~2'))
    # adding appsetting to site to make it a function
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    site_config.app_settings.append(NameValuePair(name='WEBSITE_NODE_DEFAULT_VERSION', value='10.14.1'))
    # If plan is not consumption or elastic premium, we need to set always on
    if consumption_plan_location is None and not is_plan_elastic_premium(plan_info):
        site_config.always_on = True
    # If plan is elastic premium or windows consumption, we need these app settings
    is_windows_consumption = consumption_plan_location is not None and not is_linux
    if is_plan_elastic_premium(plan_info) or is_windows_consumption:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=name.lower()))
    # App Insights precedence: explicit key > existing component > auto-create
    create_app_insights = False
    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))
    elif not disable_app_insights:
        create_app_insights = True
    poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully"
                       "created but is not active until content is published using"
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)
    if create_app_insights:
        try:
            try_create_application_insights(cmd, functionapp)
        except Exception:  # pylint: disable=broad-except
            logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
                           'Please use the Azure Portal to create and configure the Application Insights, if needed.')
    return functionapp
def try_create_application_insights(cmd, functionapp):
    """Best-effort creation of an Application Insights component for *functionapp*.

    On success the instrumentation key is written into the app settings;
    on failure a warning is logged and the function returns without raising.
    """
    creation_failed_warn = ('Unable to create the Application Insights for the Function App. '
                            'Please use the Azure Portal to manually create and configure the Application Insights, '
                            'if needed.')
    ai_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
    component_spec = {
        "name": functionapp.name,
        "location": functionapp.location,
        "kind": "web",
        "properties": {
            "Application_Type": "web"
        }
    }
    appinsights = ai_client.components.create_or_update(functionapp.resource_group, functionapp.name,
                                                        component_spec)
    if appinsights is None or appinsights.instrumentation_key is None:
        logger.warning(creation_failed_warn)
        return
    # We make this success message a warning so it does not interfere with regular JSON output in stdout
    logger.warning('Application Insights "%s" was created for this Function App. '
                   'You can visit https://portal.azure.com/#resource%s/overview to view your '
                   'Application Insights component', appinsights.name, appinsights.id)
    update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                        ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Configure the app's deployment source: link a remote git repository
    (best effort; failures are logged, not raised) and/or enable local git,
    attaching the local git URL to *webapp* as 'deploymentLocalGitUrl'."""
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            # translate to a CLI-friendly error for the log message instead of raising
            ex = ex_handler_factory(no_throw=True)(ex)
            logger.warning("Link to git repository failed due to error '%s'", ex)
    if deployment_local_git:
        local_git_info = enable_local_git(cmd, resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
        setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate the storage account (required endpoints + allowed SKU) and
    build its connection string.

    *storage_account* may be a plain account name (assumed to live in
    *resource_group_name*) or a full ARM resource id.

    :raises CLIError: when any required endpoint is missing or the SKU is unsupported.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        sa_resource_group = parse_resource_id(storage_account)['resource_group']
        storage_account = parse_resource_id(storage_account)['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    # collect every validation failure instead of overwriting with the last one
    # (previously only the last missing endpoint was reported)
    error_messages = []
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name
    allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            error_messages.append("Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e))  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        error_messages.append('Storage type {} is not allowed'.format(sku))
    if error_messages:
        raise CLIError(''.join(error_messages))
    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member
    endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations(cmd):
    """List regions that support consumption (Dynamic SKU) function apps,
    as lowercased names with spaces removed."""
    regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    return [{'name': region.name.lower().replace(' ', '')} for region in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
    """List regions where the given SKU (optionally with Linux workers) is available."""
    full_sku = get_sku_name(sku)
    return web_client_factory(cmd.cli_ctx).list_geo_regions(full_sku, linux_workers_enabled)
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
    """Poll the Kudu deployment status endpoint until success, failure or timeout.

    Each trial waits ~2 seconds, so *timeout* (seconds) maps to timeout // 2
    trials; status 3 means failure and status 4 means success.
    """
    import requests
    total_trials = (int(timeout) // 2) if timeout else 450
    num_trials = 0
    res_dict = {}  # defined even when total_trials is 0, so the final check cannot NameError
    while num_trials < total_trials:
        # a single 2s pause per trial keeps the trial count consistent with the
        # requested timeout (a second sleep after the request would double it)
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization)
        res_dict = response.json()
        num_trials = num_trials + 1
        if res_dict.get('status', 0) == 3:
            _configure_default_logging(cmd, rg_name, name)
            raise CLIError("""Zip deployment failed. {}. Please run the command az webapp log tail
                           -n {} -g {}""".format(res_dict, name, rg_name))
        if res_dict.get('status', 0) == 4:
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        _configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command, however, the deployment operation
                       is still on-going. Navigate to your scm site to check the deployment status""")
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List the continuous webjobs of a web app (or slot)."""
    operation = 'list_continuous_web_jobs'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous webjob and return its current state.

    Fixes the slot branch, which previously invoked the non-slot
    start_continuous_web_job API with a slot argument; it now uses the
    _slot variant, consistent with stop_continuous_webjob and
    remove_continuous_webjob.
    """
    client = web_client_factory(cmd.cli_ctx)
    if slot:
        client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
    return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous webjob and return its current state."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
        return apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
    return apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous webjob from a web app (or slot)."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List the triggered webjobs of a web app (or slot)."""
    operation = 'list_triggered_web_jobs'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Run a triggered webjob and return its current state."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
        return apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    apps.run_triggered_web_job(resource_group_name, name, webjob_name)
    return apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered webjob from a web app (or slot)."""
    apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
    """List the hybrid connections of a (Windows) web app, pruned to the useful fields."""
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    if linux_webapp.reserved:  # 'reserved' flags a Linux app
        return logger.warning("hybrid connections not supported on a linux app.")
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
    else:
        listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
    # reformat each hybrid connection, pruning unnecessary fields
    pruned = []
    for raw in listed_vals.additional_properties["value"]:
        props = raw["properties"]
        pruned.append({
            "id": raw["id"],
            "location": raw["location"],
            "name": raw["name"],
            "properties": {
                "hostname": props["hostname"],
                "port": props["port"],
                "relayArmUri": props["relayArmUri"],
                "relayName": props["relayName"],
                "serviceBusNamespace": props["serviceBusNamespace"],
                "serviceBusSuffix": props["serviceBusSuffix"]
            },
            # the resource group is the 5th segment of the ARM id
            "resourceGroup": raw["id"].split("/")[4],
            "type": raw["type"]
        })
    return pruned
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
    """Add a Service Bus relay hybrid connection to a (Windows) web app.

    Resolves the relay namespace, ensures a 'defaultSender' authorization rule
    exists, then registers the hybrid connection on the app (or slot).
    Returns a pruned dict describing the new connection.

    Fixes a leftover debug print of the relay's user metadata, which polluted
    stdout and corrupted the CLI's JSON output.
    """
    linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
    is_linux = linux_webapp.reserved
    if is_linux:
        return logger.warning("hybrid connections not supported on a linux app.")
    web_client = web_client_factory(cmd.cli_ctx)
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # find the ARM id of the relay namespace by name
    hy_co_id = ''
    for n in namespace_client.list():
        if n.name == namespace:
            hy_co_id = n.id
    # extract the namespace's resource group from the ARM id segments
    i = 0
    hy_co_resource_group = ''
    hy_co_split = hy_co_id.split("/")
    for z in hy_co_split:
        if z == "resourceGroups":
            hy_co_resource_group = hy_co_split[i + 1]
        i = i + 1
    # calling the relay API to get information about the hybrid connection
    hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name == "defaultSender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_info = hy_co.id
    # the relay stores its endpoint in user_metadata as a list of {key, value} dicts
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    id_parameters = hy_co_info.split("/")
    # populate object with information from the hybrid connection, and set it
    # on webapp
    hc = HybridConnection(service_bus_namespace=id_parameters[8],
                          relay_name=hybrid_connection,
                          relay_arm_uri=hy_co_info,
                          hostname=hostname,
                          port=port,
                          send_key_name="defaultSender",
                          send_key_value=hy_co_keys.primary_key,
                          service_bus_suffix=".servicebus.windows.net")
    if slot is None:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
                                                                           hybrid_connection, hc)
    else:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
                                                                                hybrid_connection, hc, slot)
    # reformats hybrid connection, to prune unnecessary fields
    resourceGroup = return_hc.id.split("/")
    mod_hc = {
        "hostname": return_hc.hostname,
        "id": return_hc.id,
        "location": return_hc.additional_properties["location"],
        "name": return_hc.name,
        "port": return_hc.port,
        "relayArmUri": return_hc.relay_arm_uri,
        "resourceGroup": resourceGroup[4],
        "serviceBusNamespace": return_hc.service_bus_namespace,
        "serviceBusSuffix": return_hc.service_bus_suffix
    }
    return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
    """Set the send key that apps on an App Service plan use for a hybrid connection.

    Resolves the relay resource group from the plan's hybrid connection ARM URI,
    ensures a "defaultSender" authorization rule exists on the relay, then pushes
    the selected key (primary or secondary) to every app using the connection.

    :param cmd: CLI command context.
    :param plan: name of the App Service plan.
    :param resource_group_name: resource group of the plan.
    :param namespace: Service Bus relay namespace.
    :param hybrid_connection: hybrid connection name.
    :param key_type: "primary" or "secondary" (case-insensitive).
    :return: the list of web apps using the hybrid connection, or None on bad key_type.
    """
    web_client = web_client_factory(cmd.cli_ctx)
    # extract the hybrid connection resource group from the relay ARM URI
    asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
                                                                   namespace, hybrid_connection)
    arm_uri = asp_hy_co.relay_arm_uri
    split_uri = arm_uri.split("resourceGroups/")
    resource_group_strings = split_uri[1].split('/')
    relay_resource_group = resource_group_strings[0]
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # calling the relay function to obtain information about the hc in question
    hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for rule in hy_co_rules:
        if rule.name == "defaultSender":
            for right in rule.rights:
                if right == right.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    # BUGFIX: initialize to '' (was 0) to match the sibling hybrid-connection
    # helpers in this module; an int here would raise AttributeError on
    # .split() below if no "endpoint" entry is present in the metadata.
    hy_co_hostname = ''
    for entry in hy_co_metadata:
        if entry["key"] == "endpoint":
            hy_co_hostname = entry["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    key = "empty"
    if key_type.lower() == "primary":
        key = hy_co_keys.primary_key
    elif key_type.lower() == "secondary":
        key = hy_co_keys.secondary_key
    # ensures input is correct
    if key == "empty":
        logger.warning("Key type is invalid - must be primary or secondary")
        return
    apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
                                                                           hybrid_connection)
    # changes the key for every app that uses that hybrid connection; the
    # payload is identical for every app, so build it once outside the loop
    hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
                          relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
                          send_key_value=key)
    for app in apps:
        app_info = ast.literal_eval(app)
        app_name = app_info["name"]
        # resource group is the 5th segment of the ARM id
        app_resource_group = app_info["id"].split("/")[4]
        web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
                                                     hybrid_connection, hc)
    return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
    """List the virtual networks integrated with an App Service plan."""
    client = web_client_factory(cmd.cli_ctx)
    return client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
    """Remove a hybrid connection from a webapp (or slot); Windows apps only."""
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    # hybrid connections are not available for Linux apps
    if webapp.reserved:
        return logger.warning("hybrid connections not supported on a linux app.")
    web_client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        return web_client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace,
                                                            hybrid_connection)
    return web_client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
                                                             hybrid_connection, slot)
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
    """List vnet integrations of a webapp (or slot), pruned to the useful fields."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        vnet_connections = list(client.web_apps.list_vnet_connections(resource_group_name, name))
    else:
        vnet_connections = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
    pruned_entries = []
    for conn in vnet_connections:
        # strip the GUID prefix (everything up to the first '_') from the name
        full_name = conn.name
        if '_' in full_name:
            short_name = full_name[full_name.index('_') + 1:]
        else:
            short_name = full_name
        # rebuild the id with the shortened name as its last segment
        conn_id = conn.id
        short_id = conn_id[:conn_id.rindex('/')] + '/' + short_name
        pruned_entries.append({"certThumbprint": conn.cert_thumbprint,
                               "id": short_id,
                               "location": conn.additional_properties["location"],
                               "name": short_name,
                               # resource group is the 5th segment of the ARM id
                               "resourceGroup": conn_id.split('/')[4],
                               "routes": conn.routes,
                               "type": conn.type,
                               "vnetResourceId": conn.vnet_resource_id})
    return pruned_entries
def add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None):
    """Integrate a webapp (or slot) with a subnet, delegating the subnet to
    Microsoft.Web/serverFarms first if it is not already delegated."""
    client = web_client_factory(cmd.cli_ctx)
    vnet_client = network_client_factory(cmd.cli_ctx)
    # resolve the vnet ARM id by name (last match wins, as before)
    vnet_id = ''
    for candidate in vnet_client.virtual_networks.list_all():
        if candidate.name == vnet:
            vnet_id = candidate.id
    # parse the ARM uri to extract the vnet resource group
    vnet_id_strings = vnet_id.split('/')
    vnet_resource_group = ''
    for idx, segment in enumerate(vnet_id_strings):
        if segment == "resourceGroups":
            vnet_resource_group = vnet_id_strings[idx + 1]
    if slot is None:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection(resource_group_name, name)
    else:
        swift_connection_info = client.web_apps.get_swift_virtual_network_connection_slot(resource_group_name,
                                                                                          name, slot)
    # check to see if the connection would be supported
    if swift_connection_info.swift_supported is not True:
        return logger.warning("""Your app must be in an Azure App Service deployment that is
              capable of scaling up to Premium v2\nLearn more:
              https://go.microsoft.com/fwlink/?linkid=2060115&clcid=0x409""")
    subnet_obj = vnet_client.subnets.get(vnet_resource_group, vnet, subnet)
    # the subnet must carry a Microsoft.Web/serverFarms delegation before use
    already_delegated = any(d.service_name == "Microsoft.Web/serverFarms"
                            for d in subnet_obj.delegations)
    if not already_delegated:
        vnet_client.subnets.create_or_update(vnet_resource_group, vnet, subnet,
                                             subnet_parameters=Subnet(name="subnet",
                                                                      address_prefix=subnet_obj.address_prefix,
                                                                      delegations=[Delegation(name="delegation",
                                                                                              service_name="Microsoft" +
                                                                                              ".Web/serverFarms")]))
    # re-fetch the subnet to get its resource id
    subnet_resource_id = vnet_client.subnets.get(vnet_resource_group, vnet, subnet).id
    swift_vnet = SwiftVirtualNetwork(subnet_resource_id=subnet_resource_id,
                                     swift_supported=True)
    if slot is None:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection(resource_group_name, name,
                                                                                        swift_vnet)
    else:
        return_vnet = client.web_apps.create_or_update_swift_virtual_network_connection_slot(resource_group_name, name,
                                                                                             swift_vnet, slot)
    # prune the response down to the interesting fields
    return {
        "id": return_vnet.id,
        "location": return_vnet.additional_properties["location"],
        "name": return_vnet.name,
        # resource group is the 5th segment of the ARM id
        "resourceGroup": return_vnet.id.split('/')[4],
        "subnetResourceId": return_vnet.subnet_resource_id
    }
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
    """Disconnect a webapp (or slot) from its integrated virtual network."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        return client.web_apps.delete_swift_virtual_network(resource_group_name, name)
    return client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Return the run history of a triggered webjob on a webapp (or slot)."""
    client = web_client_factory(cmd.cli_ctx)
    # note: truthiness check (not `is None`) preserved from the original
    if not slot:
        return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
    return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
def webapp_up(cmd, name, resource_group_name=None, plan=None, # pylint: disable=too-many-statements, too-many-branches
              location=None, sku=None, dryrun=False, logs=False, launch_browser=False):
    """Create (if needed) and deploy the current directory's contents as a webapp.

    End-to-end flow: detect language/runtime from the source dir, pick or create
    a resource group and App Service plan, create or update the app, then zip
    and deploy the sources. Finally, remembers the choices as CLI defaults.

    :param cmd: CLI command context.
    :param name: webapp name.
    :param resource_group_name: resource group; a default name derived from the
        user, OS and location is generated when omitted.
    :param plan: App Service plan; auto-selected/created when omitted.
    :param location: Azure location; resolved via set_location when omitted.
    :param sku: plan SKU; falls back to the language's default (or F1).
    :param dryrun: if True, return the planned configuration without creating anything.
    :param logs: if True, enable logging and return the streaming log.
    :param launch_browser: if True, open the deployed app in the default browser.
    :return: a dict describing the created/updated app (or a log stream when logs=True).
    """
    import os
    from azure.cli.core._profile import Profile
    client = web_client_factory(cmd.cli_ctx)
    # the code to deploy is expected to be the current directory the command is running from
    src_dir = os.getcwd()
    # user prefix is used to build default resource-group / plan names
    user = Profile().get_current_account_user()
    user = user.split('@', 1)[0]
    if len(user.split('#', 1)) > 1: # on cloudShell user is in format live.com#user@domain.com
        user = user.split('#', 1)[1]
    logger.info("UserPrefix to use '%s'", user)
    # if dir is empty, show a message in dry run
    do_deployment = not os.listdir(src_dir) == []
    _create_new_rg = True
    _create_new_asp = True
    _create_new_app = True
    _set_build_app_setting = False
    # determine the details for app to be created from src contents
    lang_details = get_lang_from_content(src_dir)
    # we support E2E create and deploy for selected stacks, any other stack, set defaults for os & runtime
    # and skip deployment
    language = lang_details.get('language')
    if not language:
        # unknown stack: create the app with defaults but skip deployment
        # (note: is_skip_build is not set on this path; it is only read below
        # when do_deployment is True, so no NameError can occur)
        do_deployment = False
        sku = sku or 'F1'
        os_val = OS_DEFAULT
        detected_version = '-'
        runtime_version = '-'
    else:
        # update SKU to user set value
        if sku is None:
            sku = lang_details.get("default_sku")
        else:
            logger.info("Found sku argument, skipping use default sku")
            sku = sku  # no-op kept for clarity of the branch
        is_skip_build = language.lower() == STATIC_RUNTIME_NAME
        # node and python apps are created on Linux; everything else uses the default OS
        os_val = "Linux" if language.lower() == NODE_RUNTIME_NAME \
            or language.lower() == PYTHON_RUNTIME_NAME else OS_DEFAULT
        # detect the version
        data = get_runtime_version_details(lang_details.get('file_loc'), language)
        version_used_create = data.get('to_create')
        detected_version = data.get('detected')
        runtime_version = "{}|{}".format(language, version_used_create) if \
            version_used_create != "-" else version_used_create
    full_sku = get_sku_name(sku)
    location = set_location(cmd, sku, location)
    loc_name = location.replace(" ", "").lower()
    is_linux = os_val == 'Linux'
    if resource_group_name is None:
        logger.info('Using default ResourceGroup value')
        rg_name = "{}_rg_{}_{}".format(user, os_val, loc_name)
    else:
        logger.info("Found user input for ResourceGroup %s", resource_group_name)
        rg_name = resource_group_name
    if plan is None:
        logger.info('Using default appserviceplan value')
        # default plan names end in a numeric suffix (_0, _1, ...)
        asp = "{}_asp_{}_{}_0".format(user, os_val, loc_name)
        _asp_generic = asp[:-len(asp.split("_")[4])] # used to determine if a new ASP needs to be created
    else:
        asp = plan
        _asp_generic = asp
    _create_new_rg = should_create_new_rg(cmd, rg_name, is_linux)
    logger.info("Should create new RG %s", _create_new_rg)
    # escape backslashes so the path survives json round-tripping below
    src_path = "{}".format(src_dir.replace("\\", "\\\\"))
    rg_str = "{}".format(rg_name)
    dry_run_str = r""" {
            "name" : "%s",
            "appserviceplan" : "%s",
            "resourcegroup" : "%s",
            "sku": "%s",
            "os": "%s",
            "location" : "%s",
            "src_path" : "%s",
            "version_detected": "%s",
            "runtime_version": "%s"
            }
            """ % (name, asp, rg_str, full_sku, os_val, location, src_path,
                   detected_version, runtime_version)
    create_json = json.loads(dry_run_str)
    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json
    # create RG if the RG doesn't already exist
    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        create_resource_group(cmd, rg_name, location)
        logger.warning("Resource group creation complete")
        _create_new_asp = True
    else:
        logger.warning("Resource group '%s' already exists.", rg_name)
        # get all asp in the RG
        logger.warning("Verifying if the plan with the same sku exists or should create a new plan")
        data = (list(filter(lambda x: _asp_generic in x.name,
                            client.app_service_plans.list_by_resource_group(rg_name))))
        data_sorted = (sorted(data, key=lambda x: x.name))
        num_asps = len(data)
        # check if any of these matches the SKU & location to be used
        # and get FirstOrDefault
        selected_asp = next((a for a in data if isinstance(a.sku, SkuDescription) and
                             a.sku.tier.lower() == full_sku.lower() and
                             (a.location.replace(" ", "").lower() == location.lower() or a.location == location)), None)
        if selected_asp is not None:
            asp = selected_asp.name
            _create_new_asp = False
        elif selected_asp is None and num_asps > 0:
            # from the sorted data pick the last one & check if a new ASP needs to be created
            # based on SKU or not
            _plan_info = data_sorted[num_asps - 1]
            if plan is None:
                # bump the numeric suffix of the newest default-named plan
                _asp_num = int(_plan_info.name.split('_')[4]) + 1
                asp = "{}_asp_{}_{}_{}".format(user, os_val, loc_name, _asp_num)
            else:
                asp = plan
    # create new ASP if an existing one cannot be used
    if _create_new_asp:
        logger.warning("Creating App service plan '%s' ...", asp)
        create_app_service_plan(cmd, rg_name, asp, is_linux, None, sku, 1 if is_linux else None, location)
        logger.warning("App service plan creation complete")
        create_json['appserviceplan'] = asp
        _create_new_app = True
        _show_too_many_apps_warn = False
    else:
        logger.warning("App service plan '%s' already exists.", asp)
        _show_too_many_apps_warn = get_num_apps_in_asp(cmd, rg_name, asp) > 5
        _create_new_app = should_create_new_app(cmd, rg_name, name)
    # create the app
    if _create_new_app:
        logger.warning("Creating app '%s' ...", name)
        create_webapp(cmd, rg_name, name, asp, runtime_version if is_linux else None, tags={"cli": 'webapp_up'})
        logger.warning("Webapp creation complete")
        create_json['name'] = name
        _set_build_app_setting = True
        # Update appSettings for netcore apps
        if language == 'dotnetcore':
            update_app_settings(cmd, rg_name, name, ['ANCM_ADDITIONAL_ERROR_PAGE_LINK=' +
                                                     'https://{}.scm.azurewebsites.net/detectors'.format(name)])
        # Configure default logging
        _configure_default_logging(cmd, rg_name, name)
        if _show_too_many_apps_warn:
            logger.warning("There are sites that have been deployed to the same hosting "
                           "VM of this region, to prevent performance impact please "
                           "delete existing site(s) or switch to a different default resource group "
                           "using 'az configure' command")
    else:
        logger.warning("App '%s' already exists", name)
        # for an existing app check if the runtime version needs to be updated
        # Get site config to check the runtime version
        site_config = client.web_apps.get_configuration(rg_name, name)
        if os_val == 'Linux' and site_config.linux_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.linux_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
        elif os_val == 'Windows' and site_config.windows_fx_version != runtime_version:
            logger.warning('Updating runtime version from %s to %s',
                           site_config.windows_fx_version, runtime_version)
            update_site_configs(cmd, rg_name, name, windows_fx_version=runtime_version)
        create_json['runtime_version'] = runtime_version
    if do_deployment and not is_skip_build:
        _set_build_app_setting = True
        # app settings causes an app recycle so we avoid if not needed
        application_settings = client.web_apps.list_application_settings(rg_name, name)
        _app_settings = application_settings.properties
        for key, value in _app_settings.items():
            if key.upper() == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
                is_skip_build = value.upper() == "FALSE"
                # if the value is already set just honor it
                _set_build_app_setting = False
                break
    # update create_json to include the app_url
    if _set_build_app_setting:
        # setting to build after deployment
        logger.warning("Updating app settings to enable build after deployment")
        update_app_settings(cmd, rg_name, name, ["SCM_DO_BUILD_DURING_DEPLOYMENT=true"])
        # wait for all the settings to completed
        time.sleep(30)
    if do_deployment:
        logger.warning("Creating zip with contents of dir %s ...", src_dir)
        # zip contents & deploy
        zip_file_path = zip_contents_from_dir(src_dir, language)
        logger.warning("Preparing to deploy %s contents to app.", '' if is_skip_build else 'and build')
        enable_zip_deploy(cmd, rg_name, name, zip_file_path)
        # Remove the file after deployment, handling exception if user removed the file manually
        try:
            os.remove(zip_file_path)
        except OSError:
            pass
    logger.warning("All done.")
    # persist the chosen values as CLI defaults for subsequent commands
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
        cmd.cli_ctx.config.set_value('defaults', 'sku', full_sku)
        cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', asp)
        cmd.cli_ctx.config.set_value('defaults', 'location', location)
        cmd.cli_ctx.config.set_value('defaults', 'web', name)
    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'app_url': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    return create_json
def _ping_scm_site(cmd, resource_group, name):
    """Wake up Kudu (the SCM site) by issuing one authenticated GET against it.

    Works around slow first-connection behaviour on Linux apps by warming
    the SCM site before the tunnel is opened.
    """
    import requests
    import urllib3
    from azure.cli.core.util import should_disable_connection_verify
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    basic_auth_header = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
    requests.get(scm_url + '/api/settings', headers=basic_auth_header,
                 verify=not should_disable_connection_verify())
def is_webapp_up(tunnel_server):
    """Report whether the webapp answers through the given tunnel server."""
    return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None):
    """Build a TunnelServer to the app's SCM site and wait until it is reachable.

    Only Linux apps are supported; raises CLIError for Windows plans.
    """
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if not webapp.reserved:
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    publish_user = next(p['userName'] for p in profiles)
    publish_password = next(p['userPWD'] for p in profiles)
    if port is None:
        logger.info('No port defined, creating on random free port')
        port = 0  # Will auto-select a free port from 1024-65535
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', port, scm_url, publish_user, publish_password)
    # warm up kudu, then block until the webapp responds through the tunnel
    _ping_scm_site(cmd, resource_group_name, name)
    _wait_for_webapp(tunnel_server)
    return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
    """Open a local TCP tunnel to the app container and keep it alive.

    Starts the tunnel on a daemon thread, prints connection details
    (SSH credentials when remote debugging is off), then blocks either
    for `timeout` seconds or until the tunnel thread exits / Ctrl+C.

    :param port: local port; auto-selected when None.
    :param timeout: seconds to keep the tunnel open; runs until interrupted when None.
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        # SSH into the container uses fixed credentials exposed by App Service
        ssh_user_name = 'root'
        ssh_user_password = 'Docker!'
        logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
    logger.warning('Ctrl + C to close')
    if timeout:
        time.sleep(int(timeout))
    else:
        # BUGFIX: Thread.isAlive() was deprecated and removed in Python 3.9;
        # the supported spelling is is_alive().
        while t.is_alive():
            time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None):
    """Open a tunnel to the app container and attach an interactive SSH session.

    Both the tunnel and the SSH session run on daemon threads; the call blocks
    for `timeout` seconds, or until either thread exits when timeout is None.
    """
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot)
    t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    t.daemon = True
    t.start()
    # fixed container credentials exposed by App Service
    ssh_user_name = 'root'
    ssh_user_password = 'Docker!'
    s = threading.Thread(target=_start_ssh_session,
                         args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
    s.daemon = True
    s.start()
    if timeout:
        time.sleep(int(timeout))
    else:
        # BUGFIX: Thread.isAlive() was deprecated and removed in Python 3.9;
        # the supported spelling is is_alive().
        while s.is_alive() and t.is_alive():
            time.sleep(5)
def _wait_for_webapp(tunnel_server):
    """Poll the tunnel server once a second until the webapp answers.

    Warns once on the first failed attempt and raises CLIError after the
    61st unsuccessful check.
    """
    attempt = 0
    while not is_webapp_up(tunnel_server):
        if attempt == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempt == 60:
            raise CLIError("Timeout Error, Unable to establish a connection")
        attempt += 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    """Connect over SSH (retrying up to ~60 times) and open an interactive shell.

    Shows the message of the day, then replaces the remote shell with a login
    shell; the connection is always closed afterwards.
    """
    attempt = 0
    while True:
        try:
            connection = Connection(host=hostname,
                                    port=port,
                                    user=username,
                                    # connect_timeout=60*10,
                                    connect_kwargs={"password": password})
            break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if attempt == 0:
                logger.warning('Connection is not ready yet, please wait')
            if attempt == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            attempt += 1
            logger.warning('.')
            time.sleep(1)
    try:
        connection.run('cat /etc/motd', pty=True)
        connection.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        connection.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None):  # pylint: disable=too-many-statements
    """Open an interactive SSH session into a Linux webapp's container.

    Refuses to run when remote debugging is enabled or on a Windows client.
    """
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        raise CLIError('remote debugging is enabled, please disable')
    import platform
    if platform.system() == "Windows":
        raise CLIError('webapp ssh is only supported on linux and mac')
    create_tunnel_and_session(cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout)
def create_devops_pipeline(
        cmd,
        functionapp_name=None,
        organization_name=None,
        project_name=None,
        repository_name=None,
        overwrite_yaml=None,
        allow_force_push=None,
        github_pat=None,
        github_repository=None
):
    """Interactively create an Azure DevOps build pipeline for a function app."""
    from .azure_devops_build_interactive import AzureDevopsBuildInteractive
    interactive_builder = AzureDevopsBuildInteractive(cmd, logger, functionapp_name,
                                                      organization_name, project_name, repository_name,
                                                      overwrite_yaml, allow_force_push,
                                                      github_pat, github_repository)
    return interactive_builder.interactive_azure_devops_build()
def _configure_default_logging(cmd, rg_name, name):
    """Enable filesystem application / web-server / container logging for the app."""
    logger.warning("Configuring default logging for the app, if not already enabled")
    return config_diagnostics(cmd, rg_name, name,
                              application_logging=True,
                              web_server_logging='filesystem',
                              docker_container_logging='true')
|
import_wikidata.py
|
"""
Import a wikidata file into a KGTK file
TODO: references
TODO: qualifiers-order
TODO: incorporate calendar into the KGTK data model.
TODO: Incorporate geographic precision into the KGTK data model.
TODO: Incorporate URLs into the KGTK data model.
TODO: Node type needs to be optional in the edge file.
See:
https://www.mediawiki.org/wiki/Wikibase/DataModel/JSON
https://www.wikidata.org/wiki/Special:ListDatatypes
https://www.wikidata.org/wiki/Help:Data_type
"""
from argparse import Namespace
import typing
from kgtk.cli_argparse import KGTKArgumentParser, KGTKFiles
def parser():
    """Describe this KGTK subcommand for the CLI registry."""
    return {'help': 'Import an wikidata file into KGTK file'}
def add_arguments_extended(parser: KGTKArgumentParser, parsed_shared_args: Namespace):
"""
Parse arguments
Args:
parser (argparse.ArgumentParser)
"""
from kgtk.utils.argparsehelpers import optional_bool
from kgtk.io.kgtkreader import KgtkReaderOptions
from kgtk.io.kgtkwriter import KgtkWriter
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
_expert: bool = parsed_shared_args._expert
parser.add_input_file(positional=True, who='input path file (may be .bz2)')
parser.add_argument(
'--procs',
action="store",
type=int,
dest="procs",
default=2,
help='number of processes to run in parallel, default %(default)d')
parser.add_argument(
'--max-size-per-mapper-queue',
action="store",
type=int,
dest="max_size_per_mapper_queue",
default=4,
help='max depth of server queues, default %(default)d')
parser.add_argument(
'--mapper-batch-size',
action="store",
type=int,
dest="mapper_batch_size",
default=5,
help='How many statements to queue in a batch to a worker. (default=%(default)d)')
parser.add_argument(
"--single-mapper-queue",
nargs='?',
type=optional_bool,
dest="single_mapper_queue",
const=True,
default=False,
metavar="True/False",
help="If true, use a single queue for worker tasks. If false, each worker has its own task queue. (default=%(default)s).",
)
parser.add_argument(
"--collect-results",
nargs='?',
type=optional_bool,
dest="collect_results",
const=True,
default=False,
metavar="True/False",
help="If true, collect the results before writing to disk. If false, write results to disk, then concatenate. (default=%(default)s).",
)
parser.add_argument(
"--collect-seperately",
nargs='?',
type=optional_bool,
dest="collect_seperately",
const=True,
default=False,
metavar="True/False",
help="If true, collect the node, edge, and qualifier results using seperate processes. If false, collect the results with a single process. (default=%(default)s).",
)
parser.add_argument(
'--collector-batch-size',
action="store",
type=int,
dest="collector_batch_size",
default=5,
help='How many statements to queue in a batch to the collector. (default=%(default)d)')
parser.add_argument(
"--use-shm",
nargs='?',
type=optional_bool,
dest="use_shm",
const=True,
default=False,
metavar="True/False",
help="If true, use ShmQueue. (default=%(default)s).")
parser.add_argument(
'--collector-queue-per-proc-size',
action="store",
type=int,
dest="collector_queue_per_proc_size",
default=2,
help='collector queue depth per proc, default %(default)d')
parser.add_argument(
"--node", '--node-file',
action="store",
type=str,
dest="node_file",
default=None,
help='path to output node file')
parser.add_argument(
"--edge", '--edge-file', '--detailed-edge-file',
action="store",
type=str,
dest="detailed_edge_file",
default=None,
help='path to output edge file with detailed data')
parser.add_argument(
'--minimal-edge-file',
action="store",
type=str,
dest="minimal_edge_file",
default=None,
help='path to output edge file with minimal data')
parser.add_argument(
"--qual", '--qual-file', '--detailed-qual-file',
action="store",
type=str,
dest="detailed_qual_file",
default=None,
help='path to output qualifier file with full data')
parser.add_argument(
'--minimal-qual-file',
action="store",
type=str,
dest="minimal_qual_file",
default=None,
help='path to output qualifier file with minimal data')
# Optionally write only the ID column to the node file.
parser.add_argument(
'--node-file-id-only',
nargs='?',
type=optional_bool,
dest="node_id_only",
const=True,
default=False,
metavar="True/False",
help='Option to write only the node ID in the node file. (default=%(default)s)')
# The remaining files are KGTK edge files that split out
# special properties, removing them from the edge file.
parser.add_argument(
'--split-alias-file',
action="store",
type=str,
dest="split_alias_file",
default=None,
help='path to output split alias file')
parser.add_argument(
'--split-en-alias-file',
action="store",
type=str,
dest="split_en_alias_file",
default=None,
help='path to output split English alias file')
parser.add_argument(
'--split-datatype-file',
action="store",
type=str,
dest="split_datatype_file",
default=None,
help='path to output split datatype file')
parser.add_argument(
'--split-description-file',
action="store",
type=str,
dest="split_description_file",
default=None,
help='path to output splitdescription file')
parser.add_argument(
'--split-en-description-file',
action="store",
type=str,
dest="split_en_description_file",
default=None,
help='path to output split English description file')
parser.add_argument(
'--split-label-file',
action="store",
type=str,
dest="split_label_file",
default=None,
help='path to output split label file')
parser.add_argument(
'--split-en-label-file',
action="store",
type=str,
dest="split_en_label_file",
default=None,
help='path to output split English label file')
parser.add_argument(
'--split-sitelink-file',
action="store",
type=str,
dest="split_sitelink_file",
default=None,
help='path to output split sitelink file')
parser.add_argument(
'--split-en-sitelink-file',
action="store",
type=str,
dest="split_en_sitelink_file",
default=None,
help='path to output split English sitelink file')
parser.add_argument(
'--split-type-file', '--split-entity-type-file',
action="store",
type=str,
dest="split_type_file",
default=None,
help='path to output split entry type file')
parser.add_argument(
'--split-property-edge-file',
action="store",
type=str,
dest="split_property_edge_file",
default=None,
help='path to output split property edge file')
parser.add_argument(
'--split-property-qual-file',
action="store",
type=str,
dest="split_property_qual_file",
default=None,
help='path to output split property qualifier file')
# TODO: Create a seperate file for the sitelinks.
parser.add_argument(
"--limit",
action="store",
type=int,
dest="limit",
default=None,
help='number of lines of input file to run on, default runs on all')
parser.add_argument(
"--lang",
action="store",
type=str,
dest="lang",
default="en",
help='languages to extract, comma separated, default en')
parser.add_argument(
"--source",
action="store",
type=str,
dest="source",
default="wikidata",
help='wikidata version number, default: wikidata')
parser.add_argument(
"--deprecated",
action="store_true",
dest="deprecated",
help='option to include deprecated statements, not included by default')
parser.add_argument(
"--explode-values",
nargs='?',
type=optional_bool,
dest="explode_values",
const=True,
default=True,
metavar="True/False",
help="If true, create columns with exploded value information. (default=%(default)s).",
)
parser.add_argument(
"--use-python-cat",
nargs='?',
type=optional_bool,
dest="use_python_cat",
const=True,
default=False,
metavar="True/False",
help="If true, use portable code to combine file fragments. (default=%(default)s).",
)
parser.add_argument(
"--keep-temp-files",
nargs='?',
type=optional_bool,
dest="keep_temp_files",
const=True,
default=False,
metavar="True/False",
help="If true, keep temporary files (for debugging). (default=%(default)s).",
)
parser.add_argument(
"--skip-processing",
nargs='?',
type=optional_bool,
dest="skip_processing",
const=True,
default=False,
metavar="True/False",
help="If true, skip processing the input file (for debugging). (default=%(default)s).",
)
parser.add_argument(
"--skip-merging",
nargs='?',
type=optional_bool,
dest="skip_merging",
const=True,
default=False,
metavar="True/False",
help="If true, skip merging temporary files (for debugging). (default=%(default)s).",
)
parser.add_argument(
"--interleave",
nargs='?',
type=optional_bool,
dest="interleave",
const=True,
default=False,
metavar="True/False",
help="If true, output the edges and qualifiers in a single file (the edge file). (default=%(default)s).",
)
parser.add_argument(
"--entry-type-edges",
nargs='?',
type=optional_bool,
dest="entry_type_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for the entry type field. (default=%(default)s).",
)
parser.add_argument(
"--alias-edges",
nargs='?',
type=optional_bool,
dest="alias_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for aliases. (default=%(default)s).",
)
parser.add_argument(
"--datatype-edges",
nargs='?',
type=optional_bool,
dest="datatype_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for property datatypes. (default=%(default)s).",
)
parser.add_argument(
"--description-edges",
nargs='?',
type=optional_bool,
dest="descr_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for descriptions. (default=%(default)s).",
)
parser.add_argument(
"--label-edges",
nargs='?',
type=optional_bool,
dest="label_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for labels. (default=%(default)s).",
)
parser.add_argument(
"--sitelink-edges",
nargs='?',
type=optional_bool,
dest="sitelink_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for sitelinks. (default=%(default)s).",
)
parser.add_argument(
"--sitelink-verbose-edges",
nargs='?',
type=optional_bool,
dest="sitelink_verbose_edges",
const=True,
default=False,
metavar="True/False",
help="If true, create edge records for sitelink details (lang, site, badges). (default=%(default)s).",
)
parser.add_argument(
"--sitelink-verbose-qualifiers",
nargs='?',
type=optional_bool,
dest="sitelink_verbose_qualifiers",
const=True,
default=False,
metavar="True/False",
help="If true, create qualifier records for sitelink details (lang, site, badges). (default=%(default)s).",
)
parser.add_argument(
"--parse-aliases",
nargs='?',
type=optional_bool,
dest="parse_aliases",
const=True,
default=True,
metavar="True/False",
help="If true, parse aliases. (default=%(default)s).",
)
parser.add_argument(
"--parse-descriptions",
nargs='?',
type=optional_bool,
dest="parse_descr",
const=True,
default=True,
metavar="True/False",
help="If true, parse descriptions. (default=%(default)s).",
)
parser.add_argument(
"--parse-labels",
nargs='?',
type=optional_bool,
dest="parse_labels",
const=True,
default=True,
metavar="True/False",
help="If true, parse labels. (default=%(default)s).",
)
parser.add_argument(
"--parse-sitelinks",
nargs='?',
type=optional_bool,
dest="parse_sitelinks",
const=True,
default=True,
metavar="True/False",
help="If true, parse sitelinks. (default=%(default)s).",
)
parser.add_argument(
"--parse-claims",
nargs='?',
type=optional_bool,
dest="parse_claims",
const=True,
default=True,
metavar="True/False",
help="If true, parse claims. (default=%(default)s).",
)
parser.add_argument(
"--fail-if-missing",
nargs='?',
type=optional_bool,
dest="fail_if_missing",
const=True,
default=True,
metavar="True/False",
help="If true, fail if expected data is missing. (default=%(default)s).",
)
parser.add_argument(
"--all-languages",
nargs='?',
type=optional_bool,
dest="all_languages",
const=True,
default=False,
metavar="True/False",
help="If true, override --lang and import aliases, dscriptions, and labels in all languages. (default=%(default)s).",
)
parser.add_argument(
"--warn-if-missing",
nargs='?',
type=optional_bool,
dest="warn_if_missing",
const=True,
default=True,
metavar="True/False",
help="If true, print a warning message if expected data is missing. (default=%(default)s).",
)
parser.add_argument(
'--progress-interval',
action="store",
type=int,
dest="progress_interval",
default=500000,
help='How often to report progress. (default=%(default)d)')
parser.add_argument(
"--use-kgtkwriter",
nargs='?',
type=optional_bool,
dest="use_kgtkwriter",
const=True,
default=True,
metavar="True/False",
help="If true, use KgtkWriter instead of csv.writer. (default=%(default)s).")
parser.add_argument(
"--use-mgzip-for-input",
nargs='?',
type=optional_bool,
dest="use_mgzip_for_input",
const=True,
default=False,
metavar="True/False",
help="If true, use the multithreaded gzip package, mgzip, for input. (default=%(default)s).")
parser.add_argument(
"--use-mgzip-for-output",
nargs='?',
type=optional_bool,
dest="use_mgzip_for_output",
const=True,
default=False,
metavar="True/False",
help="If true, use the multithreaded gzip package, mgzip, for output. (default=%(default)s).")
parser.add_argument(
"--mgzip-threads-for-input",
type=int,
default=KgtkReaderOptions.MGZIP_THREAD_COUNT_DEFAULT,
dest="mgzip_threads_for_input",
help="The number of threads per mgzip input streama. (default=%(default)s).")
parser.add_argument(
"--mgzip-threads-for-output",
type=int,
default=KgtkWriter.MGZIP_THREAD_COUNT_DEFAULT,
dest="mgzip_threads_for_output",
help="The number of threads per mgzip output streama. (default=%(default)s).")
parser.add_argument(
'--value-hash-width',
action="store",
type=int,
dest="value_hash_width",
default=8,
help='How many characters should be used in a value hash? (default=%(default)d)')
parser.add_argument(
'--claim-id-hash-width',
action="store",
type=int,
dest="claim_id_hash_width",
default=0,
help='How many characters should be used to hash the claim ID? 0 means do not hash the claim ID. (default=%(default)d)')
parser.add_argument(
"--clean",
nargs='?',
type=optional_bool,
dest="clean_input_values",
const=True,
default=False,
metavar="True/False",
help="If true, clean the input values before writing it. (default=%(default)s).")
parser.add_argument(
"--clean-verbose",
nargs='?',
type=optional_bool,
dest="clean_verbose",
const=True,
default=False,
metavar="True/False",
help="If true, give verbose feedback when cleaning input values. (default=%(default)s).")
parser.add_argument(
'--invalid-edge-file',
action="store",
type=str,
dest="invalid_edge_file",
default=None,
help='path to output edges with invalid input values')
parser.add_argument(
'--invalid-qual-file',
action="store",
type=str,
dest="invalid_qual_file",
default=None,
help='path to output qual edges with invalid input values')
parser.add_argument(
"--skip-validation",
nargs='?',
type=optional_bool,
dest="skip_validation",
const=True,
default=False,
metavar="True/False",
help="If true, skip output record validation. (default=%(default)s).",
)
KgtkValueOptions.add_arguments(parser, expert=_expert)
def custom_progress()->bool:
    """Tell the CLI framework that this command drives its own progress monitor."""
    return True
def run(input_file: KGTKFiles,
procs: int,
max_size_per_mapper_queue: int,
node_file: typing.Optional[str],
detailed_edge_file: typing.Optional[str],
minimal_edge_file: typing.Optional[str],
detailed_qual_file: typing.Optional[str],
minimal_qual_file: typing.Optional[str],
invalid_edge_file: typing.Optional[str],
invalid_qual_file: typing.Optional[str],
node_id_only: bool,
split_alias_file: typing.Optional[str],
split_en_alias_file: typing.Optional[str],
split_datatype_file: typing.Optional[str],
split_description_file: typing.Optional[str],
split_en_description_file: typing.Optional[str],
split_label_file: typing.Optional[str],
split_en_label_file: typing.Optional[str],
split_sitelink_file: typing.Optional[str],
split_en_sitelink_file: typing.Optional[str],
split_type_file: typing.Optional[str],
split_property_edge_file: typing.Optional[str],
split_property_qual_file: typing.Optional[str],
limit: int,
lang: str,
source: str,
deprecated: bool,
explode_values: bool,
use_python_cat: bool,
keep_temp_files: bool,
skip_processing: bool,
skip_merging: bool,
interleave: bool,
entry_type_edges: bool,
alias_edges: bool,
datatype_edges: bool,
descr_edges: bool,
label_edges: bool,
sitelink_edges: bool,
sitelink_verbose_edges: bool,
sitelink_verbose_qualifiers: bool,
parse_aliases: bool,
parse_descr: bool,
parse_labels: bool,
parse_sitelinks: bool,
parse_claims: bool,
fail_if_missing: bool,
all_languages: bool,
warn_if_missing: bool,
collect_results: bool,
collect_seperately: bool,
collector_queue_per_proc_size: int,
progress_interval: int,
use_shm: bool,
mapper_batch_size: int,
collector_batch_size: int,
single_mapper_queue: bool,
use_kgtkwriter: bool,
use_mgzip_for_input: bool,
use_mgzip_for_output: bool,
mgzip_threads_for_input: int,
mgzip_threads_for_output: int,
value_hash_width: int,
claim_id_hash_width: int,
clean_input_values: bool,
clean_verbose: bool,
skip_validation: bool,
**kwargs # Whatever KgtkValueOptions wants.
):
# import modules locally
import bz2
import simplejson as json
import csv
import hashlib
import io
import multiprocessing as mp
import os
from pathlib import Path
import pyrallel
import sys
import time
from kgtk.kgtkformat import KgtkFormat
from kgtk.cli_argparse import KGTKArgumentParser
from kgtk.cli_entry import progress_startup
from kgtk.exceptions import KGTKException
from kgtk.utils.cats import platform_cat
from kgtk.value.kgtkvalue import KgtkValue
from kgtk.value.kgtkvalueoptions import KgtkValueOptions
value_options: KgtkValueOptions = KgtkValueOptions.from_dict(kwargs)
languages=lang.split(',')
ADDL_SITELINK_LABEL: str = "addl_wikipedia_sitelink"
ALIAS_LABEL: str = "alias"
DATATYPE_LABEL: str = "datatype"
DESCRIPTION_LABEL: str = "description"
LABEL_LABEL: str = "label"
SITELINK_LABEL: str = "wikipedia_sitelink"
SITELINK_BADGE_LABEL: str = "sitelink-badge"
SITELINK_LANGUAGE_LABEL: str = "sitelink-language"
SITELINK_SITE_LABEL: str = "sitelink-site"
SITELINK_TITLE_LABEL: str = "sitelink-title"
TYPE_LABEL: str = "type"
SNAKTYPE_NOVALUE: str = "novalue"
SNAKTYPE_SOMEVALUE: str = "somevalue"
SNAKTYPE_VALUE: str = "value"
NOVALUE_VALUE: str = "novalue"
SOMEVALUE_VALUE: str = "somevalue"
CLAIM_TYPE_STATEMENT: str = "statement"
MAINSNAK_DATATYPE: str = "datatype"
MAINSNAK_DATAVALUE: str = "datavalue"
MAINSNAK_SNAKTYPE: str = "snaktype"
DATATYPE_WIKIBASE_PREFIX: str = "wikibase"
DATATYPE_QUANTITY: str = "quantity"
DATATYPE_GLOBECOORDINATE: str = "globe-coordinate"
DATATYPE_TIME: str = "time"
DATATYPE_MONOLINGUALTEXT: str = "monolingualtext"
collector_q: typing.Optional[pyrallel.ShmQueue] = None
node_collector_q: typing.Optional[pyrallel.ShmQueue] = None
edge_collector_q: typing.Optional[pyrallel.ShmQueue] = None
qual_collector_q: typing.Optional[pyrallel.ShmQueue] = None
invalid_edge_collector_q: typing.Optional[pyrallel.ShmQueue] = None
invalid_qual_collector_q: typing.Optional[pyrallel.ShmQueue] = None
description_collector_q: typing.Optional[pyrallel.ShmQueue] = None
sitelink_collector_q: typing.Optional[pyrallel.ShmQueue] = None
class MyMapper(pyrallel.Mapper):
def enter(self):
    """Per-worker start-up hook.

    Initializes per-worker counters, opens one TSV output file per
    requested output stream (suffixed with the worker index) when
    results are written directly rather than collected, and sets up
    the batching lists used when a collector process is in play.
    """
    print("Starting worker process {} (pid {}).".format(self._idx, os.getpid()), file=sys.stderr, flush=True)
    self.first = True
    self.cnt = 0
    self.write_mode = 'w'

    def open_tsv_writer(path_prefix):
        # Open this worker's slice of an output stream and wrap it in a
        # KGTK-style tab-separated csv.writer (no quoting).
        out_f = open(path_prefix + '_{}'.format(self._idx), self.write_mode, newline='')
        out_wr = csv.writer(out_f,
                            quoting=csv.QUOTE_NONE,
                            delimiter="\t",
                            escapechar="\n",
                            quotechar='',
                            lineterminator=csv_line_terminator)
        return out_f, out_wr

    # The *_f attribute is always set (None when unused); the matching
    # *_wr attribute exists only when the file was actually opened.
    self.node_f = None
    if node_file and not collect_results:
        self.node_f, self.node_wr = open_tsv_writer(node_file)

    self.edge_f = None
    if detailed_edge_file and not collect_results:
        self.edge_f, self.edge_wr = open_tsv_writer(detailed_edge_file)

    self.qual_f = None
    if detailed_qual_file and not collect_results:
        self.qual_f, self.qual_wr = open_tsv_writer(detailed_qual_file)

    self.invalid_edge_f = None
    if invalid_edge_file and not collect_results:
        self.invalid_edge_f, self.invalid_edge_wr = open_tsv_writer(invalid_edge_file)

    self.invalid_qual_f = None
    if invalid_qual_file and not collect_results:
        self.invalid_qual_f, self.invalid_qual_wr = open_tsv_writer(invalid_qual_file)

    if collect_results and collector_batch_size > 1:
        # Batch rows locally before shipping them to the collector queue(s).
        self.collector_batch_cnt = 0
        self.collector_nrows_batch = []
        self.collector_erows_batch = []
        self.collector_qrows_batch = []
        self.collector_invalid_erows_batch = []
        self.collector_invalid_qrows_batch = []
        self.collector_description_erows_batch = []
        self.collector_sitelink_erows_batch = []

    # Truthy when any per-entity row output is wanted at all (note: keeps
    # the original truthy-value semantics, not a forced bool).
    self.process_row_data = (node_file
                             or entry_type_edges
                             or label_edges
                             or alias_edges
                             or descr_edges)
def exit(self, *args, **kwargs):
    """Per-worker shutdown hook.

    When results are collected centrally, flush any remaining batched
    rows to the collector queue(s); otherwise close the per-worker
    output files opened in enter().
    """
    print("Exiting worker process {} (pid {}).".format(self._idx, os.getpid()), file=sys.stderr, flush=True)
    if collect_results:
        if collector_batch_size > 1:
            # NOTE(review): the flush condition below does not test the
            # description/sitelink batches, so a final batch holding only
            # description or sitelink rows would apparently not be flushed
            # -- confirm whether that can occur in practice.
            if len(self.collector_nrows_batch) > 0 or \
               len(self.collector_erows_batch) > 0 or \
               len(self.collector_qrows_batch) > 0 or \
               len(self.collector_invalid_erows_batch) > 0 or \
               len(self.collector_invalid_qrows_batch) > 0:
                if collect_seperately:
                    # One dedicated queue per row type.  Every message is the
                    # same 7-tuple shape: ("rows", nrows, erows, qrows,
                    # invalid_erows, invalid_qrows, extra); each batch rides
                    # in its own positional slot.
                    if len(self.collector_nrows_batch) > 0:
                        node_collector_q.put(("rows", self.collector_nrows_batch, [], [], [], [], None))
                    if len(self.collector_erows_batch) > 0:
                        edge_collector_q.put(("rows", [], self.collector_erows_batch, [], [], [], None))
                    if len(self.collector_qrows_batch) > 0:
                        qual_collector_q.put(("rows", [], [], self.collector_qrows_batch, [], [], None))
                    if len(self.collector_invalid_erows_batch) > 0:
                        invalid_edge_collector_q.put(("rows", [], [], [], self.collector_invalid_erows_batch, [], None))
                    if len(self.collector_invalid_qrows_batch) > 0:
                        invalid_qual_collector_q.put(("rows", [], [], [], [], self.collector_invalid_qrows_batch, None))
                    # Description and sitelink rows are edge rows, so they
                    # travel in the erows slot of their dedicated queues.
                    if len(self.collector_description_erows_batch) > 0:
                        description_collector_q.put(("rows", [], self.collector_description_erows_batch, [], [], [], None))
                    if len(self.collector_sitelink_erows_batch) > 0:
                        sitelink_collector_q.put(("rows", [], self.collector_sitelink_erows_batch, [], [], [], None))
                else:
                    # Single shared collector queue: ship all batches in one message.
                    collector_q.put(("rows",
                                     self.collector_nrows_batch,
                                     self.collector_erows_batch,
                                     self.collector_qrows_batch,
                                     self.collector_invalid_erows_batch,
                                     self.collector_invalid_qrows_batch,
                                     None))
    else:
        # Direct-output mode: close whichever per-worker files were opened.
        if self.node_f is not None:
            self.node_f.close()
        if self.edge_f is not None:
            self.edge_f.close()
        if self.qual_f is not None:
            self.qual_f.close()
        if self.invalid_edge_f is not None:
            self.invalid_edge_f.close()
        if self.invalid_qual_f is not None:
            self.invalid_qual_f.close()
def erows_append(self, erows, edge_id, node1, label, node2,
rank="",
magnitude="",
unit="",
date="",
item="",
lower="",
upper="",
latitude="",
longitude="",
wikidatatype="",
claim_id="",
claim_type="",
val_type="",
entity_type="",
datahash="",
precision="",
calendar="",
entrylang="",
invalid_erows=None,
)->bool:
if len(claim_type) > 0 and claim_type != "statement":
raise ValueError("Unexpected claim type %s" % claim_type)
values_are_valid: bool = True
if clean_input_values:
error_buffer: io.StringIO = io.StringIO()
kv: KgtkValue
kv = KgtkValue(edge_id, options=value_options, error_file=error_buffer, verbose=clean_verbose)
values_are_valid &= kv.is_valid()
if kv.repaired:
edge_id = kv.value
kv = KgtkValue(node1, options=value_options, error_file=error_buffer, verbose=clean_verbose)
values_are_valid &= kv.is_valid()
if kv.repaired:
node1 = kv.value
kv = KgtkValue(label, options=value_options, error_file=error_buffer, verbose=clean_verbose)
values_are_valid &= kv.is_valid()
if kv.repaired:
label = kv.value
kv = KgtkValue(node2, options=value_options, error_file=error_buffer, verbose=clean_verbose)
values_are_valid &= kv.is_valid()
if kv.repaired:
node2 = kv.value
if not values_are_valid and invalid_erows is not None:
erows = invalid_erows
if not values_are_valid and clean_verbose:
print("Value validation error in edge %s: %s" % ("|".join([repr(edge_id), repr(node1), repr(label), repr(node2)]),
error_buffer.getvalue().rstrip()),
file=sys.stderr, flush=True)
error_buffer.close()
if explode_values:
erows.append([edge_id,
node1,
label,
node2,
rank,
magnitude,
unit,
date,
item,
lower,
upper,
latitude,
longitude,
precision,
calendar,
entity_type,
wikidatatype,
entrylang,
]
)
else:
erows.append([edge_id,
node1,
label,
node2,
rank,
wikidatatype,
claim_id,
# claim_type,
val_type,
entity_type,
datahash,
precision,
calendar,
entrylang,
]
)
return values_are_valid
def qrows_append(self, qrows, edge_id, node1, label, node2,
                 magnitude="",
                 unit="",
                 date="",
                 item="",
                 lower="",
                 upper="",
                 latitude="",
                 longitude="",
                 wikidatatype="",
                 val_type="",
                 entity_type="",
                 datahash="",
                 precision="",
                 calendar="",
                 invalid_qrows=None,
                 erows=None,
                 invalid_erows=None,
                 )->bool:
    """Append one qualifier record to ``qrows`` (or to ``invalid_qrows``
    when value cleaning is enabled and a value fails validation).

    When the closed-over ``interleave`` flag is set, the qualifier is
    additionally emitted into the edge stream via erows_append().
    Returns True when all cleaned values validated (or cleaning is off).
    """
    values_are_valid: bool = True
    if clean_input_values:
        error_buffer: io.StringIO = io.StringIO()
        kv: KgtkValue
        # Validate (and possibly repair) each of the four core fields,
        # accumulating overall validity.
        kv = KgtkValue(edge_id, options=value_options, error_file=error_buffer, verbose=clean_verbose)
        values_are_valid &= kv.is_valid()
        if kv.repaired:
            edge_id = kv.value
        kv = KgtkValue(node1, options=value_options, error_file=error_buffer, verbose=clean_verbose)
        values_are_valid &= kv.is_valid()
        if kv.repaired:
            node1 = kv.value
        kv = KgtkValue(label, options=value_options, error_file=error_buffer, verbose=clean_verbose)
        values_are_valid &= kv.is_valid()
        if kv.repaired:
            label = kv.value
        kv = KgtkValue(node2, options=value_options, error_file=error_buffer, verbose=clean_verbose)
        values_are_valid &= kv.is_valid()
        if kv.repaired:
            node2 = kv.value
        if not values_are_valid and invalid_qrows is not None:
            # Divert the record to the invalid-qualifier output stream.
            qrows = invalid_qrows
        if not values_are_valid and clean_verbose:
            print("Value validation error in qual %s: %s" % ("|".join([repr(edge_id), repr(node1), repr(label), repr(node2)]),
                                                             error_buffer.getvalue().rstrip()),
                  file=sys.stderr, flush=True)
        error_buffer.close()
    if minimal_qual_file is not None or detailed_qual_file is not None:
        # Exploded rows carry the decomposed value fields; unexploded
        # rows carry qualifier metadata instead.
        if explode_values:
            qrows.append([edge_id,
                          node1,
                          label,
                          node2,
                          magnitude,
                          unit,
                          date,
                          item,
                          lower,
                          upper,
                          latitude,
                          longitude,
                          precision,
                          calendar,
                          entity_type,
                          wikidatatype,
                          ])
        else:
            qrows.append([edge_id,
                          node1,
                          label,
                          node2,
                          wikidatatype,
                          val_type,
                          entity_type,
                          datahash,
                          precision,
                          calendar,
                          ])
    if interleave:
        # Mirror the qualifier into the edge stream (claim_type defaults
        # to "", so erows_append's statement check passes).
        self.erows_append(erows,
                          edge_id=edge_id,
                          node1=node1,
                          label=label,
                          node2=node2,
                          magnitude=magnitude,
                          unit=unit,
                          date=date,
                          item=item,
                          lower=lower,
                          upper=upper,
                          latitude=latitude,
                          longitude=longitude,
                          wikidatatype=wikidatatype,
                          entity_type=entity_type,
                          datahash=datahash,
                          precision=precision,
                          calendar=calendar,
                          invalid_erows=invalid_erows)
    return values_are_valid
# def process(self,line,node_file,edge_file,qual_file,languages,source):
def process(self, line):
if progress_interval > 0 and self.cnt % progress_interval == 0 and self.cnt>0:
print("{} lines processed by processor {}".format(self.cnt,self._idx), file=sys.stderr, flush=True)
self.cnt+=1
# csv_line_terminator = "\n" if os.name == 'posix' else "\r\n"
nrows=[]
erows=[]
qrows=[]
invalid_erows = [] if invalid_edge_file is not None else None
invalid_qrows = [] if invalid_qual_file is not None else None
description_erows = []
sitelink_erows = []
# These maps avoid avoid ID collisions due to hash collision or
# repeated values in the input data. We assume that a top-level
# property (obj["id"]) will not occur in multiple input lines.
alias_id_collision_map: typing.MutableMapping[str, int] = dict()
edge_id_collision_map: typing.MutableMapping[str, int] = dict()
qual_id_collision_map: typing.MutableMapping[str, int] = dict()
sitelink_id_collision_map: typing.MutableMapping[str, int] = dict()
clean_line = line.strip()
if clean_line.endswith(b","):
clean_line = clean_line[:-1]
if len(clean_line) > 1:
obj = json.loads(clean_line)
entry_type = obj["type"]
keep: bool = False
if entry_type == "item" or entry_type == "property":
keep = True
elif warn_if_missing:
print("Unknown object type {}.".format(entry_type), file=sys.stderr, flush=True)
if self.process_row_data and keep:
row = []
qnode = obj["id"]
row.append(qnode)
if parse_labels:
labels = obj.get("labels")
if labels is None:
if fail_if_missing:
raise KGTKException("Qnode %s is missing its labels" % qnode)
elif warn_if_missing:
print("Object id {} has no labels.".format(qnode), file=sys.stderr, flush=True)
label_list=[]
if labels:
if all_languages:
label_languages = labels.keys()
else:
label_languages = languages
for lang in label_languages:
lang_label = labels.get(lang, None)
if lang_label:
# We needn't worry about duplicate label entries if this check passes.
if lang_label['language'] != lang:
print("*** Conflicting language key %s for the %s label for %s" % (repr(lang_label['language']), repr(lang), qnode),
file=sys.stderr, flush=True)
# lang_label['value']=lang_label['value'].replace('|','\\|')
# label_list.append('\'' + lang_label['value'].replace("'","\\'") + '\'' + "@" + lang)
value = KgtkFormat.stringify(lang_label['value'], language=lang)
label_list.append(value)
if label_edges:
langid: str = qnode + '-' + LABEL_LABEL + '-' + lang
self.erows_append(erows,
edge_id=langid,
node1=qnode,
label=LABEL_LABEL,
node2=value,
entrylang=lang,
invalid_erows=invalid_erows)
if not node_id_only:
if len(label_list)>0:
row.append("|".join(label_list))
else:
row.append("")
if not node_id_only:
row.append(entry_type)
if entry_type_edges:
typeid: str = qnode + '-' + TYPE_LABEL + '-' + entry_type
self.erows_append(erows,
edge_id=typeid,
node1=qnode,
label=TYPE_LABEL,
node2=entry_type,
invalid_erows=invalid_erows)
if parse_descr:
descriptions = obj.get("descriptions")
if descriptions is None:
if fail_if_missing:
raise KGTKException("Qnode %s is missing its descriptions" % qnode)
elif warn_if_missing:
print("Object id {} has no descriptions.".format(qnode), file=sys.stderr, flush=True)
descr_list=[]
if descriptions:
if all_languages:
desc_languages = descriptions.keys()
else:
desc_languages = languages
for lang in desc_languages:
lang_descr = descriptions.get(lang, None)
if lang_descr:
# We needn't worry about duplicate description entries if this check passes.
if lang_descr['language'] != lang:
print("*** Conflicting language key %s for the %s description for %s" % (repr(lang_descr['language']), repr(lang), qnode),
file=sys.stderr, flush=True)
# lang_descr['value']=lang_descr['value'].replace('|','\\|')
# descr_list.append('\'' + lang_descr['value'].replace("'","\\'") + '\'' + "@" + lang)
value = KgtkFormat.stringify(lang_descr['value'], language=lang)
descr_list.append(value)
if descr_edges:
descrid: str = qnode + '-' + DESCRIPTION_LABEL + '-' + lang
self.erows_append(description_erows if collect_seperately else erows,
edge_id=descrid,
node1=qnode,
label=DESCRIPTION_LABEL,
node2=value,
entrylang=lang,
invalid_erows=invalid_erows)
if not node_id_only:
if len(descr_list)>0:
row.append("|".join(descr_list))
else:
row.append("")
if parse_aliases:
aliases = obj.get("aliases")
if aliases is None:
if fail_if_missing:
raise KGTKException("Qnode %s is missing its aliases" % qnode)
elif warn_if_missing:
print("Object id {} has no aliasees.".format(qnode), file=sys.stderr, flush=True)
alias_list = []
if aliases:
if all_languages:
alias_languages = aliases.keys()
else:
alias_languages = languages
for lang in alias_languages:
lang_aliases = aliases.get(lang, None)
if lang_aliases:
for item in lang_aliases:
# item['value']=item['value'].replace('|','\\|')
# alias_list.append('\'' + item['value'].replace("'","\\'") + '\'' + "@" + lang)
value = KgtkFormat.stringify(item['value'], language=lang)
alias_list.append(value)
if alias_edges:
# Hash the value to save space and avoid syntactic difficulties.
# Take a subset of the hash value to save space.
alias_value_hash: str = hashlib.sha256(value.encode('utf-8')).hexdigest()[:value_hash_width]
aliasid = qnode + '-' + ALIAS_LABEL + "-" + lang + '-' + alias_value_hash
alias_seq_no: int # In case of hash collision
if aliasid in alias_id_collision_map:
alias_seq_no = alias_id_collision_map[aliasid]
print("\n*** Alias collision #%d detected for %s (%s)" % (alias_seq_no, aliasid, value), file=sys.stderr, flush=True)
else:
alias_seq_no = 0
alias_id_collision_map[aliasid] = alias_seq_no + 1
aliasid += '-' + str(alias_seq_no)
self.erows_append(erows,
edge_id=aliasid,
node1=qnode,
label=ALIAS_LABEL,
node2=value,
entrylang=lang,
invalid_erows=invalid_erows)
if not node_id_only:
if len(alias_list)>0:
row.append("|".join(alias_list))
else:
row.append("")
datatype = obj.get("datatype", "")
if not node_id_only:
row.append(datatype)
if len(datatype) > 0 and datatype_edges:
datatypeid: str = qnode + '-' + "datatype"
# We expect the datatype to be a valid KGTK symbol, so
# there's no need to stringify it.
self.erows_append(erows,
edge_id=datatypeid,
node1=qnode,
label=DATATYPE_LABEL,
node2=datatype,
invalid_erows=invalid_erows)
#row.append(source)
if node_file:
nrows.append(row)
if parse_claims and "claims" not in obj:
if fail_if_missing:
raise KGTKException("Qnode %s is missing its claims" % obj.get("id", "<UNKNOWN>"))
elif warn_if_missing:
print("Object id {} is missing its claims.".format(obj.get("id", "<UNKNOWN>")), file=sys.stderr, flush=True)
if parse_claims and "claims" in obj:
claims = obj["claims"]
if keep:
qnode = obj.get("id", "")
if len(qnode) == 0:
if fail_if_missing:
raise KGTKException("A claim is missing its Qnode id.")
elif warn_if_missing:
print("A claim is missing its Qnode id", file=sys.stderr, flush=True)
qnode = "UNKNOWN" # This will cause trouble down the line.
for prop, claim_property in claims.items():
for cp in claim_property:
if (deprecated or cp['rank'] != 'deprecated'):
mainsnak = cp['mainsnak']
snaktype = mainsnak.get(MAINSNAK_SNAKTYPE)
rank=cp['rank']
claim_id = cp['id']
claim_type = cp['type']
if claim_type != CLAIM_TYPE_STATEMENT:
print("Unknown claim type %s, ignoring claim_property for (%s, %s)." % (repr(claim_type), repr(qnode), repr(prop)),
file=sys.stderr, flush=True)
continue
if snaktype is None:
print("Mainsnak without snaktype, ignoring claim_property for (%s, %s)." % (repr(qnode), repr(prop)),
file=sys.stderr, flush=True)
continue
if snaktype == SNAKTYPE_VALUE:
datavalue = mainsnak[MAINSNAK_DATAVALUE]
val = datavalue.get('value')
val_type = datavalue.get("type", "")
if val is not None:
if val_type in ("string", "wikibase-unmapped-entityid"):
if not isinstance(val, str):
print("Value type is %s but the value is not a string, ignoring claim_property for (%s, %s)." % (repr(val_type), repr(qnode), repr(prop)),
file=sys.stderr, flush=True)
continue
elif not isinstance(val, dict):
print("Value type %s is not a known string type and value is not a dict, ignoring claim_property for (%s, %s)." % (repr(val_type), repr(qnode), repr(prop)),
file=sys.stderr, flush=True)
continue
elif snaktype == SNAKTYPE_SOMEVALUE:
val = None
val_type = SOMEVALUE_VALUE
elif snaktype == SNAKTYPE_NOVALUE:
val = None
val_type = NOVALUE_VALUE
else:
print("Unknown snaktype %s, ignoring claim_property for (%s, %s)." % (repr(snaktype), repr(qnode), repr(prop)),
file=sys.stderr, flush=True)
continue
typ = mainsnak.get(MAINSNAK_DATATYPE)
if typ is None:
print("Mainsnak without datatype, ignoring claim_property for (%s, %s)" % (repr(qnode), repr(prop)),
file=sys.stderr, flush=True)
continue
# if typ != val_type:
# print("typ %s != val_type %s" % (typ, val_type), file=sys.stderr, flush=True)
value = ''
mag = ''
unit = ''
date=''
item=''
lower = ''
upper = ''
precision = ''
calendar = ''
lat = ''
long = ''
enttype = ''
if val is None:
value = val_type
elif typ.startswith(DATATYPE_WIKIBASE_PREFIX):
if isinstance(val, dict):
enttype = val.get('entity-type')
value = val.get('id', '')
else:
value = val
# TODO: Can we find something less ad-hoc to do here?
if typ == "wikibase-lexeme":
enttype = "lexeme"
else:
enttype = "unknown"
# Older Wikidata dumps do not have an 'id' here.
if len(value) == 0:
if isinstance(val, dict) and 'numeric-id' in val:
numeric_id = str(val['numeric-id'])
else:
raise ValueError("No numeric ID for datatype %s, entity type %s, in (%s, %s)." % (repr(typ), repr(enttype), repr(qnode), repr(prop)))
if enttype == "item":
value = 'Q' + numeric_id
elif enttype == "property":
value = 'P' + numeric_id
elif enttype == "lexeme":
value = 'L' + numeric_id
else:
raise ValueError('Unknown entity type %s for datatype %s in (%s, %s).' % (repr(enttype), repr(typ), repr(qnode), repr(prop)))
item=value
elif typ == DATATYPE_QUANTITY:
# Strip whitespace from the numeric fields. Some older Wikidata dumps
# (20150805-20160502) sometimes have trailing newlines in these fields.
# Convert actual numbers to strings before attempting to strip leading
# and trailing whitespace.
value = str(val['amount']).strip()
mag = value
if val.get(
'upperBound',
None) or val.get(
'lowerBound',
None):
lower = str(val.get('lowerBound', '')).strip()
upper = str(val.get('upperBound', '')).strip()
value += '[' + lower + \
',' + upper + ']'
# TODO: Don't lose the single-character unit code. At a minimum, verify that it is the value "1".
if len(val.get('unit')) > 1:
unit = val.get(
'unit').split('/')[-1]
if unit not in ["undefined"]:
# TODO: don't lose track of "undefined" units.
value += unit
elif typ == DATATYPE_GLOBECOORDINATE:
# Strip potential leading and trailing whitespace.
lat = str(val['latitude']).strip()
long = str(val['longitude']).strip()
precision = str(val.get('precision', ''))
value = '@' + lat + '/' + long
# TODO: what about "globe"?
elif typ == DATATYPE_TIME:
if val['time'][0]=='-':
pre="^-"
else:
pre="^"
# TODO: Maybe strip leading and traiming whitespace here?
date = pre + val['time'][1:]
# Cautiously strip leading and trailing whitespace from precision?
precision = str(val['precision']).strip()
calendar = val.get('calendarmodel', '').split('/')[-1]
value = date + '/' + precision
elif typ == DATATYPE_MONOLINGUALTEXT:
# value = '\'' + \
# val['text'].replace("'","\\'").replace("|", "\\|") + '\'' + '@' + val['language']
value = KgtkFormat.stringify(val['text'], language=val['language'])
else:
# value = '\"' + val.replace('"','\\"').replace("|", "\\|") + '\"'
value = KgtkFormat.stringify(val)
if minimal_edge_file is not None or detailed_edge_file is not None:
prop_value_hash: str
if value.startswith(('P', 'Q')):
prop_value_hash = value
else:
prop_value_hash = hashlib.sha256(value.encode('utf-8')).hexdigest()[:value_hash_width]
edgeid: str = qnode + '-' + prop + '-' + prop_value_hash + '-'
if claim_id_hash_width == 0:
edgeid += claim_id.lower()
else:
edgeid += hashlib.sha256(claim_id.lower().encode('utf-8')).hexdigest()[:claim_id_hash_width]
prop_seq_no: int # In case of hash collision
if edgeid in edge_id_collision_map:
prop_seq_no = edge_id_collision_map[edgeid]
print("\n*** Edge collision #%d detected for %s (%s)" % (prop_seq_no, edgeid, value), file=sys.stderr, flush=True)
else:
prop_seq_no = 0
edge_id_collision_map[edgeid] = prop_seq_no + 1
edgeid += '-' + str(prop_seq_no)
self.erows_append(erows,
edge_id=edgeid,
node1=qnode,
label=prop,
node2=value,
rank=rank,
magnitude=mag,
unit=unit,
date=date,
item=item,
lower=lower,
upper=upper,
latitude=lat,
longitude=long,
wikidatatype=typ,
claim_id=claim_id,
claim_type=claim_type,
val_type=val_type,
entity_type=enttype,
precision=precision,
calendar=calendar,
invalid_erows=invalid_erows)
if minimal_qual_file is not None or detailed_qual_file is not None or interleave:
if cp.get('qualifiers', None):
quals = cp['qualifiers']
for qual_prop, qual_claim_property in quals.items():
for qcp in qual_claim_property:
snaktype = qcp[MAINSNAK_SNAKTYPE]
if snaktype == SNAKTYPE_VALUE:
datavalue = qcp[MAINSNAK_DATAVALUE]
val = datavalue.get('value')
val_type = datavalue.get("type", "")
elif snaktype == SNAKTYPE_SOMEVALUE:
val = None
val_type = SOMEVALUE_VALUE
elif snaktype == SNAKTYPE_NOVALUE:
val = None
val_type = NOVALUE_VALUE
else:
raise ValueError("Unknown qualifier snaktype %s" % repr(snaktype))
if True:
value = ''
mag = ''
unit = ''
date= ''
item=''
lower = ''
upper = ''
precision = ''
calendar = ''
lat = ''
long = ''
enttype = ''
datahash = '"' + qcp['hash'] + '"'
typ = qcp.get(MAINSNAK_DATATYPE)
if typ is None:
if fail_if_missing:
raise KGTKException("Found qualifier %s without a datatype for (%s, %s)" % (repr(qual_prop), repr(qnode), repr(prop)))
elif warn_if_missing:
if val_type == SOMEVALUE_VALUE:
print("Somevalue qualifier %s without a datatype for (%s, %s)" % (repr(qual_prop), repr(qnode), repr(prop)), file=sys.stderr, flush=True)
elif val_type == NOVALUE_VALUE:
print("Novalue qualifier %s without a datatype for (%s, %s)" % (repr(qual_prop), repr(qnode), repr(prop)), file=sys.stderr, flush=True)
else:
print("Found qualifier %s without a datatype for (%s, %s)" % (repr(qual_prop), repr(qnode), repr(prop)), file=sys.stderr, flush=True)
continue
if val is None:
value = val_type
elif typ.startswith(DATATYPE_WIKIBASE_PREFIX):
if isinstance(val, dict):
enttype = val.get('entity-type')
value = val.get('id', '')
else:
value = val
if typ == "wikibase-lexeme":
enttype = "lexeme"
else:
enttype = "unknown"
# Older Wikidata dumps do not have an 'id' here.
if len(value) == 0:
if isinstance(val, dict) and 'numeric-id' in val:
numeric_id = str(val['numeric-id'])
else:
raise ValueError("No numeric ID for datatype %s, entity type %s, in (%s, %s)." % (repr(typ), repr(enttype), repr(qnode), repr(prop)))
if enttype == "item":
value = 'Q' + numeric_id
elif enttype == "property":
value = 'P' + numeric_id
elif enttype == "lexeme":
value = 'L' + numeric_id
else:
raise ValueError('Unknown entity type %s for datatype %s in (%s, %s).' % (repr(enttype), repr(typ), repr(qnode), repr(prop)))
item=value
elif typ == DATATYPE_QUANTITY:
value = val['amount']
mag = val['amount']
if val.get(
'upperBound',
None) or val.get(
'lowerBound',
None):
lower = val.get(
'lowerBound', '')
upper = val.get(
'upperBound', '')
value += '[' + lower + \
',' + upper + ']'
if len(
val.get('unit')) > 1:
unit = val.get(
'unit').split('/')[-1]
value += unit
elif typ == DATATYPE_GLOBECOORDINATE:
lat = str(
val['latitude'])
long = str(
val['longitude'])
precision = str(val.get(
'precision', ''))
value = '@' + lat + '/' + long
elif typ == DATATYPE_TIME:
if val['time'][0]=='-':
pre="^-"
else:
pre="^"
date = pre + \
val['time'][1:]
precision = str(
val['precision'])
calendar = val.get(
'calendarmodel', '').split('/')[-1]
value = pre + \
val['time'][1:] + '/' + str(val['precision'])
elif typ == DATATYPE_MONOLINGUALTEXT:
# value = '\'' + \
# val['text'].replace("'","\\'") + '\'' + '@' + val['language']
value = KgtkFormat.stringify(val['text'], language=val['language'])
else:
# value = '\"' + val.replace('"','\\"') + '\"'
value = KgtkFormat.stringify(val)
qual_value_hash: str
if value.startswith(('P', 'Q')):
qual_value_hash = value
else:
qual_value_hash = hashlib.sha256(value.encode('utf-8')).hexdigest()[:value_hash_width]
qualid: str = edgeid + '-' + qual_prop + '-' + qual_value_hash
qual_seq_no: int # In case of hash collision
if qualid in qual_id_collision_map:
qual_seq_no = qual_id_collision_map[qualid]
print("\n*** Qualifier collision #%d detected for %s (%s)" % (qual_seq_no, qualid, value), file=sys.stderr, flush=True)
else:
qual_seq_no = 0
qual_id_collision_map[qualid] = qual_seq_no + 1
qualid += '-' + str(qual_seq_no)
self.qrows_append(qrows=qrows,
edge_id=qualid,
node1=edgeid,
label=qual_prop,
node2=value,
magnitude=mag,
unit=unit,
date=date,
item=item,
lower=lower,
upper=upper,
latitude=lat,
longitude=long,
wikidatatype=typ,
entity_type=enttype,
datahash=datahash,
precision=precision,
calendar=calendar,
invalid_qrows=invalid_qrows,
erows=erows,
invalid_erows=invalid_erows)
if parse_sitelinks:
sitelinks=obj.get('sitelinks',None)
else:
sitelinks = None
if sitelinks:
for link in sitelinks:
# TODO: If the title might contain vertical bar, more work is needed
# to make the sitetitle safe for KGTK.
if link.endswith('wiki') and link not in ('commonswiki', 'simplewiki'):
linklabel = SITELINK_LABEL
sitetitle='_'.join(sitelinks[link]['title'].split())
# The following leads to ambuiguity if there are both
# "afwiki" and "afwikibooks".
#
# TODO: Need to research the sitelink structure more fully.
sitelang=link.split('wiki')[0].replace('_','-')
sitelink='http://'+sitelang+'.wikipedia.org/wiki/'+sitetitle
else:
linklabel = ADDL_SITELINK_LABEL
sitetitle='_'.join(sitelinks[link]['title'].split())
if "wiki" in link:
# TODO: needs more work here.
sitelang=link.split("wiki")[0]
if sitelang in ("commons", "simple"):
sitelang = "en" # TODO: Need to retain the distinction we lose here.
else:
sitelang=""
sitehost=link+'.org' # TODO: Needs more work here
sitelink = 'http://'+sitehost+'/wiki/'+sitetitle
if sitelink is not None:
serows = sitelink_erows if collect_seperately else erows
sitelink_value_hash: str = hashlib.sha256(sitelink.encode('utf-8')).hexdigest()[:value_hash_width]
sitelinkid: str = qnode + '-' + linklabel + '-' + sitelink_value_hash
sitelink_seq_no: int = 0
if sitelinkid in sitelink_id_collision_map:
sitelink_seq_no = sitelink_id_collision_map[sitelinkid]
print("\n*** Sitelink collision #%d detected for %s (%s)" % (sitelink_seq_no, sitelinkid, sitelink), file=sys.stderr, flush=True)
else:
sitelink_seq_no = 0
sitelink_id_collision_map[sitelinkid] = sitelink_seq_no + 1
sitelinkid += '-' + str(sitelink_seq_no)
if sitelink_edges:
self.erows_append(serows,
edge_id=sitelinkid,
node1=qnode,
label=linklabel,
node2=sitelink,
entrylang=sitelang,
invalid_erows=invalid_erows)
if sitelink_verbose_edges:
if len(sitelang) > 0:
self.erows_append(serows,
edge_id=sitelinkid + '-language-0',
node1=sitelinkid,
label=SITELINK_LANGUAGE_LABEL,
node2=sitelang,
entrylang=sitelang,
invalid_erows=invalid_erows)
self.erows_append(serows,
edge_id=sitelinkid + '-site-0',
node1=sitelinkid,
label=SITELINK_SITE_LABEL,
node2=link,
entrylang=sitelang,
invalid_erows=invalid_erows)
self.erows_append(serows,
edge_id=sitelinkid + '-title-0',
node1=sitelinkid,
label=SITELINK_TITLE_LABEL,
node2=KgtkFormat.stringify(sitelinks[link]['title']),
entrylang=sitelang, invalid_erows=invalid_erows)
for badge in sitelinks[link]['badges']:
badgeid = sitelinkid + '-badge-' + badge
self.erows_append(serows,
edge_id=badgeid,
node1=sitelinkid,
label=SITELINK_BADGE_LABEL,
node2=badge,
entrylang=sitelang,
invalid_erows=invalid_erows)
if sitelink_verbose_qualifiers:
if len(sitelang) > 0:
self.qrows_append(qrows,
edge_id=sitelinkid + '-language-0',
node1=sitelinkid,
label=SITELINK_LANGUAGE_LABEL,
node2=sitelang,
invalid_qrows=invalid_qrows,
erows=erows,
invalid_erows=invalid_erows)
self.qrows_append(qrows,
edge_id=sitelinkid + '-site-0',
node1=sitelinkid,
label=SITELINK_SITE_LABEL,
node2=link,
invalid_qrows=invalid_qrows,
erows=erows,
invalid_erows=invalid_erows)
self.qrows_append(qrows,
edge_id=sitelinkid + '-title-0',
node1=sitelinkid,
label=SITELINK_TITLE_LABEL,
node2=KgtkFormat.stringify(sitelinks[link]['title']),
invalid_qrows=invalid_qrows,
erows=erows,
invalid_erows=invalid_erows)
for badge in sitelinks[link]['badges']:
badgeid = sitelinkid + '-badge-' + badge
self.qrows_append(qrows,
edge_id=badgeid,
node1=sielinkid,
label=SITELINK_BADGE_LABEL,
node2=badge,
invalid_qrows=invalid_qrows,
erows=erows,
invalid_erows=invalid_erows)
if len(nrows) > 0 or \
len(erows) > 0 or \
len(qrows) > 0 or \
len(invalid_erows) > 0 or \
len(invalid_qrows) > 0 or \
len(description_erows) > 0 or \
len(sitelink_erows) > 0:
if collect_results:
if collector_batch_size == 1:
if collect_seperately:
if len(nrows) > 0 and node_collector_q is not None:
node_collector_q.put(("rows", nrows, [], [], [], [], None))
if len(erows) > 0 and edge_collector_q is not None:
edge_collector_q.put(("rows", [], erows, [], [], [], None))
if len(qrows) > 0 and qual_collector_q is not None:
qual_collector_q.put(("rows", [], [], qrows, [], [], None))
if invalid_erows is not None and len(invalid_erows) > 0 and invalid_edge_collector_q is not None:
invalid_edge_collector_q.put(("rows", [], [], [], invalid_erows, [], None))
if invalid_qrows is not None and len(invalid_qrows) > 0 and invalid_qual_collector_q is not None:
invalid_qual_collector_q.put(("rows", [], [], [], [], invalid_qrows, None))
if len(description_erows) > 0 and description_collector_q is not None:
description_collector_q.put(("rows", [], description_erows, [], [], [], None))
if len(sitelink_erows) > 0 and sitelink_collector_q is not None:
sitelink_collector_q.put(("rows", [], sitelink_erows, [], [], [], None))
elif collector_q is not None:
collector_q.put(("rows", nrows, erows, qrows, invalid_erows, invalid_qrows, None))
else:
self.collector_nrows_batch.extend(nrows)
self.collector_erows_batch.extend(erows)
self.collector_qrows_batch.extend(qrows)
if invalid_erows is not None:
self.collector_invalid_erows_batch.extend(invalid_erows)
if invalid_qrows is not None:
self.collector_invalid_qrows_batch.extend(invalid_qrows)
if collect_seperately:
self.collector_description_erows_batch.extend(description_erows)
self.collector_sitelink_erows_batch.extend(sitelink_erows)
self.collector_batch_cnt += 1
if self.collector_batch_cnt >= collector_batch_size:
if collect_seperately:
if len(self.collector_nrows_batch) > 0 and node_collector_q is not None:
node_collector_q.put(("rows", self.collector_nrows_batch, [], [], [], [], None))
if len(self.collector_erows_batch) > 0 and edge_collector_q is not None:
edge_collector_q.put(("rows", [], self.collector_erows_batch, [], [], [], None))
if len(self.collector_qrows_batch) > 0 and qual_collector_q is not None:
qual_collector_q.put(("rows", [], [], self.collector_qrows_batch, [], [], None))
if len(self.collector_invalid_erows_batch) > 0 and invalid_edge_collector_q is not None:
invalid_edge_collector_q.put(("rows", [], [], [], self.collector_invalid_erows_batch, [], None))
if len(self.collector_invalid_qrows_batch) > 0 and invalid_qual_collector_q is not None:
invalid_qual_collector_q.put(("rows", [], [], [], [], self.collector_invalid_qrows_batch, None))
if len(self.collector_description_erows_batch) > 0 and description_collector_q is not None:
description_collector_q.put(("rows", [], self.collector_description_erows_batch, [], [], [], None))
self.collector_description_erows_batch.clear()
if len(self.collector_sitelink_erows_batch) > 0 and sitelink_collector_q is not None:
sitelink_collector_q.put(("rows", [], self.collector_sitelink_erows_batch, [], [], [], None))
self.collector_sitelink_erows_batch.clear()
elif collector_q is not None:
collector_q.put(("rows",
self.collector_nrows_batch,
self.collector_erows_batch,
self.collector_qrows_batch,
self.collector_invalid_erows_batch,
self.collector_invalid_qrows_batch,
None))
self.collector_nrows_batch.clear()
self.collector_erows_batch.clear()
self.collector_qrows_batch.clear()
self.collector_invalid_erows_batch.clear()
self.collector_invalid_qrows_batch.clear()
self.collector_batch_cnt = 0
else:
if node_file:
for row in nrows:
self.node_wr.writerow(row)
if detailed_edge_file:
for row in erows:
if skip_validation or validate(row, "detailed edge uncollected"):
self.edge_wr.writerow(row)
if detailed_qual_file:
for row in qrows:
if skip_validation or validate(row, "detailed qual uncollected"):
self.qual_wr.writerow(row)
if invalid_edge_file:
for row in invalid_erows:
self.invalid_edge_wr.writerow(row)
if invalid_qual_file:
for row in invalid_qrows:
self.invalid_qual_wr.writerow(row)
class MyCollector:
    """Collects rows from worker processes and writes them to the output files.

    One collector instance runs per output role (node/edge/qual/...), in its
    own process, draining a shared queue of (action, rows..., header) tuples.
    Output files are opened lazily when the corresponding "*_header" action
    arrives.  Writing goes through KgtkWriter when `use_kgtkwriter` is set,
    otherwise through csv.writer.  Many names referenced here (use_kgtkwriter,
    node_file, validate, skip_validation, the *_LABEL constants, ...) are
    closure variables from the enclosing function.
    """

    def __init__(self):
        # Prepare to use the collector.
        # For each output role we keep: the raw file handle (None when
        # KgtkWriter manages the file), the writer object, and a row counter.
        self.node_f: typing.Optional[typing.TextIO] = None
        self.node_wr = None
        self.nrows: int = 0

        self.minimal_edge_f: typing.Optional[typing.TextIO] = None
        self.minimal_edge_wr = None
        self.detailed_edge_f: typing.Optional[typing.TextIO] = None
        self.detailed_edge_wr = None
        self.erows: int = 0

        self.minimal_qual_f: typing.Optional[typing.TextIO] = None
        self.minimal_qual_wr = None
        self.detailed_qual_f: typing.Optional[typing.TextIO] = None
        self.detailed_qual_wr = None
        self.qrows: int = 0

        self.invalid_edge_f: typing.Optional[typing.TextIO] = None
        self.invalid_edge_wr = None
        self.invalid_erows: int = 0

        self.invalid_qual_f: typing.Optional[typing.TextIO] = None
        self.invalid_qual_wr = None
        self.invalid_qrows: int = 0

        self.split_alias_f: typing.Optional[typing.TextIO] = None
        self.split_alias_wr = None
        self.n_alias_rows: int = 0

        self.split_en_alias_f: typing.Optional[typing.TextIO] = None
        self.split_en_alias_wr = None
        self.n_en_alias_rows: int = 0

        self.split_datatype_f: typing.Optional[typing.TextIO] = None
        self.split_datatype_wr = None
        self.n_datatype_rows: int = 0

        self.split_description_f: typing.Optional[typing.TextIO] = None
        self.split_description_wr = None
        self.n_description_rows: int = 0

        self.split_en_description_f: typing.Optional[typing.TextIO] = None
        self.split_en_description_wr = None
        self.n_en_description_rows: int = 0

        self.split_label_f: typing.Optional[typing.TextIO] = None
        self.split_label_wr = None
        self.n_label_rows: int = 0

        self.split_en_label_f: typing.Optional[typing.TextIO] = None
        self.split_en_label_wr = None
        self.n_en_label_rows: int = 0

        self.split_sitelink_f: typing.Optional[typing.TextIO] = None
        self.split_sitelink_wr = None
        self.n_sitelink_rows: int = 0

        self.split_en_sitelink_f: typing.Optional[typing.TextIO] = None
        self.split_en_sitelink_wr = None
        self.n_en_sitelink_rows: int = 0

        self.split_type_f: typing.Optional[typing.TextIO] = None
        self.split_type_wr = None
        self.n_type_rows: int = 0

        self.split_property_edge_f: typing.Optional[typing.TextIO] = None
        self.split_property_edge_wr = None
        self.n_property_edge_rows: int = 0

        self.split_property_qual_f: typing.Optional[typing.TextIO] = None
        self.split_property_qual_wr = None
        self.n_property_qual_rows: int = 0

        # True once any split output file has been opened; switches `collect`
        # into label-dispatch mode for edge rows.
        self.process_split_files: bool = False
        self.setup_split_dispatcher()

        self.cnt: int = 0  # number of collect() calls, for progress reporting
        self.started: bool = False

    def run(self,
            collector_q,
            who: str):
        """Main loop of the collector process: drain `collector_q` until shutdown.

        Each queue entry is (action, nrows, erows, qrows, invalid_erows,
        invalid_qrows, header).  "rows" delivers data; "*_header" actions open
        the corresponding output file; "shutdown" closes everything and exits.
        """
        print("The %s collector is starting (pid %d)." % (who, os.getpid()), file=sys.stderr, flush=True)

        while True:
            action, nrows, erows, qrows, invalid_erows, invalid_qrows, header = collector_q.get()
            # print("Collector action %s." % action, file=sys.stderr, flush=True)
            if action == "rows":
                self.collect(nrows, erows, qrows, invalid_erows, invalid_qrows, who)

            elif action == "node_header":
                self.open_node_file(header, who)

            elif action == "minimal_edge_header":
                self.open_minimal_edge_file(header, who)
                self.process_split_files = True

            elif action == "detailed_edge_header":
                self.open_detailed_edge_file(header, who)

            elif action == "minimal_qual_header":
                self.open_minimal_qual_file(header, who)

            elif action == "detailed_qual_header":
                self.open_detailed_qual_file(header, who)

            elif action == "invalid_edge_header":
                self.open_invalid_edge_file(header, who)

            elif action == "invalid_qual_header":
                self.open_invalid_qual_file(header, who)

            elif action == "split_alias_header":
                self.open_split_alias_file(header, who)
                self.process_split_files = True

            elif action == "split_en_alias_header":
                self.open_split_en_alias_file(header, who)
                self.process_split_files = True

            elif action == "split_datatype_header":
                self.open_split_datatype_file(header, who)
                self.process_split_files = True

            elif action == "split_description_header":
                self.open_split_description_file(header, who)
                self.process_split_files = True

            elif action == "split_en_description_header":
                self.open_split_en_description_file(header, who)
                self.process_split_files = True

            elif action == "split_label_header":
                self.open_split_label_file(header, who)
                self.process_split_files = True

            elif action == "split_en_label_header":
                self.open_split_en_label_file(header, who)
                self.process_split_files = True

            elif action == "split_sitelink_header":
                self.open_split_sitelink_file(header, who)
                self.process_split_files = True

            elif action == "split_en_sitelink_header":
                self.open_split_en_sitelink_file(header, who)
                self.process_split_files = True

            elif action == "split_type_header":
                self.open_split_type_file(header, who)
                self.process_split_files = True

            elif action == "split_property_edge_header":
                self.open_split_property_edge_file(header, who)
                self.process_split_files = True

            elif action == "split_property_qual_header":
                self.open_split_property_qual_file(header, who)

            elif action == "shutdown":
                self.shutdown(who)
                break

    def _open_file(self, the_file: typing.Optional[str], header: typing.List[str], file_type: str, who: str):
        """Open one output file and return (file handle or None, writer).

        With KgtkWriter the file handle is managed internally, so None is
        returned for it; with csv.writer the raw handle is returned so that
        shutdown() can close it.
        """
        if the_file is None or len(the_file) == 0:
            raise ValueError("%s header without a %s file in the %s collector." % (file_type, file_type, who))

        f: typing.Optional[typing.TextIO]
        wr: typing.Any
        if use_kgtkwriter:
            from kgtk.io.kgtkwriter import KgtkWriter
            print("Opening the %s file in the %s collector with KgtkWriter: %s" % (file_type, who, the_file), file=sys.stderr, flush=True)
            wr = KgtkWriter.open(header, Path(the_file), who=who + " collector", use_mgzip=use_mgzip_for_output, mgzip_threads=mgzip_threads_for_output)
            return None, wr

        else:
            print("Opening the %s file in the %s collector with csv.writer." % (file_type, who), file=sys.stderr, flush=True)
            csv_line_terminator = "\n" if os.name == 'posix' else "\r\n"
            f = open(the_file, "w", newline='')
            wr = csv.writer(
                f,
                quoting=csv.QUOTE_NONE,
                delimiter="\t",
                escapechar="\n",
                quotechar='',
                lineterminator=csv_line_terminator)
            wr.writerow(header)
            return f, wr

    def open_node_file(self, header: typing.List[str], who: str):
        self.node_f, self.node_wr = self._open_file(node_file, header, "node", who)

    def open_minimal_edge_file(self, header: typing.List[str], who: str):
        self.minimal_edge_f, self.minimal_edge_wr = self._open_file(minimal_edge_file, header, "minimal edge", who)

    def open_detailed_edge_file(self, header: typing.List[str], who: str):
        self.detailed_edge_f, self.detailed_edge_wr = self._open_file(detailed_edge_file, header, "detailed edge", who)

    def open_minimal_qual_file(self, header: typing.List[str], who: str):
        self.minimal_qual_f, self.minimal_qual_wr = self._open_file(minimal_qual_file, header, "minimal qual", who)

    def open_detailed_qual_file(self, header: typing.List[str], who: str):
        self.detailed_qual_f, self.detailed_qual_wr = self._open_file(detailed_qual_file, header, "qual", who)

    def open_invalid_edge_file(self, header: typing.List[str], who: str):
        self.invalid_edge_f, self.invalid_edge_wr = self._open_file(invalid_edge_file, header, "invalid edge", who)

    def open_invalid_qual_file(self, header: typing.List[str], who: str):
        self.invalid_qual_f, self.invalid_qual_wr = self._open_file(invalid_qual_file, header, "qual", who)

    def open_split_alias_file(self, header: typing.List[str], who: str):
        self.split_alias_f, self.split_alias_wr = self._open_file(split_alias_file, header, ALIAS_LABEL, who)

    def open_split_en_alias_file(self, header: typing.List[str], who: str):
        self.split_en_alias_f, self.split_en_alias_wr = self._open_file(split_en_alias_file, header, "English " + ALIAS_LABEL, who)

    def open_split_datatype_file(self, header: typing.List[str], who: str):
        self.split_datatype_f, self.split_datatype_wr = self._open_file(split_datatype_file, header, DATATYPE_LABEL, who)

    def open_split_description_file(self, header: typing.List[str], who: str):
        self.split_description_f, self.split_description_wr = self._open_file(split_description_file, header, DESCRIPTION_LABEL, who)

    def open_split_en_description_file(self, header: typing.List[str], who: str):
        self.split_en_description_f, self.split_en_description_wr = self._open_file(split_en_description_file, header, "English " + DESCRIPTION_LABEL, who)

    def open_split_label_file(self, header: typing.List[str], who: str):
        self.split_label_f, self.split_label_wr = self._open_file(split_label_file, header, LABEL_LABEL, who)

    def open_split_en_label_file(self, header: typing.List[str], who: str):
        self.split_en_label_f, self.split_en_label_wr = self._open_file(split_en_label_file, header, "English " + LABEL_LABEL, who)

    def open_split_sitelink_file(self, header: typing.List[str], who: str):
        self.split_sitelink_f, self.split_sitelink_wr = self._open_file(split_sitelink_file, header, SITELINK_LABEL, who)

    def open_split_en_sitelink_file(self, header: typing.List[str], who: str):
        self.split_en_sitelink_f, self.split_en_sitelink_wr = self._open_file(split_en_sitelink_file, header, "English " + SITELINK_LABEL, who)

    def open_split_type_file(self, header: typing.List[str], who: str):
        self.split_type_f, self.split_type_wr = self._open_file(split_type_file, header, TYPE_LABEL, who)

    def open_split_property_edge_file(self, header: typing.List[str], who: str):
        self.split_property_edge_f, self.split_property_edge_wr = self._open_file(split_property_edge_file, header, "property edge", who)

    def open_split_property_qual_file(self, header: typing.List[str], who: str):
        self.split_property_qual_f, self.split_property_qual_wr = self._open_file(split_property_qual_file, header, "property qual", who)

    def shutdown(self, who: str):
        """Close every output writer/file that was opened."""
        print("Exiting the %s collector (pid %d)." % (who, os.getpid()), file=sys.stderr, flush=True)

        if use_kgtkwriter:
            if self.node_wr is not None:
                self.node_wr.close()

            if self.minimal_edge_wr is not None:
                self.minimal_edge_wr.close()

            if self.detailed_edge_wr is not None:
                self.detailed_edge_wr.close()

            if self.invalid_edge_wr is not None:
                self.invalid_edge_wr.close()

            if self.minimal_qual_wr is not None:
                self.minimal_qual_wr.close()

            if self.detailed_qual_wr is not None:
                self.detailed_qual_wr.close()

            if self.invalid_qual_wr is not None:
                self.invalid_qual_wr.close()

            if self.split_alias_wr is not None:
                self.split_alias_wr.close()

            if self.split_en_alias_wr is not None:
                self.split_en_alias_wr.close()

            if self.split_datatype_wr is not None:
                self.split_datatype_wr.close()

            if self.split_description_wr is not None:
                self.split_description_wr.close()

            if self.split_en_description_wr is not None:
                self.split_en_description_wr.close()

            if self.split_label_wr is not None:
                self.split_label_wr.close()

            if self.split_en_label_wr is not None:
                self.split_en_label_wr.close()

            if self.split_sitelink_wr is not None:
                self.split_sitelink_wr.close()

            if self.split_en_sitelink_wr is not None:
                self.split_en_sitelink_wr.close()

            if self.split_type_wr is not None:
                self.split_type_wr.close()

            if self.split_property_edge_wr is not None:
                self.split_property_edge_wr.close()

            # Bug fix: this block previously closed split_property_edge_wr a
            # second time, leaving split_property_qual_wr unclosed.
            if self.split_property_qual_wr is not None:
                self.split_property_qual_wr.close()

        else:
            if self.node_f is not None:
                self.node_f.close()

            if self.minimal_edge_f is not None:
                self.minimal_edge_f.close()

            if self.detailed_edge_f is not None:
                self.detailed_edge_f.close()

            if self.minimal_qual_f is not None:
                self.minimal_qual_f.close()

            if self.detailed_qual_f is not None:
                self.detailed_qual_f.close()

            if self.invalid_edge_f is not None:
                self.invalid_edge_f.close()

            if self.invalid_qual_f is not None:
                self.invalid_qual_f.close()

            if self.split_alias_f is not None:
                self.split_alias_f.close()

            if self.split_en_alias_f is not None:
                self.split_en_alias_f.close()

            if self.split_datatype_f is not None:
                self.split_datatype_f.close()

            if self.split_description_f is not None:
                self.split_description_f.close()

            if self.split_en_description_f is not None:
                self.split_en_description_f.close()

            if self.split_label_f is not None:
                self.split_label_f.close()

            if self.split_en_label_f is not None:
                self.split_en_label_f.close()

            if self.split_sitelink_f is not None:
                self.split_sitelink_f.close()

            if self.split_en_sitelink_f is not None:
                self.split_en_sitelink_f.close()

            if self.split_type_f is not None:
                self.split_type_f.close()

            if self.split_property_edge_f is not None:
                self.split_property_edge_f.close()

            if self.split_property_qual_f is not None:
                self.split_property_qual_f.close()

        print("The %s collector has closed its output files." % who, file=sys.stderr, flush=True)

    def collect(self,
                nrows: typing.List[typing.List[str]],
                erows: typing.List[typing.List[str]],
                qrows: typing.List[typing.List[str]],
                invalid_erows: typing.List[typing.List[str]],
                invalid_qrows: typing.List[typing.List[str]],
                who: str):
        """Route one batch of rows to the appropriate open output files.

        Edge rows may be dispatched to split files by label (see
        setup_split_dispatcher); property edges/quals (node1/id starting with
        "P") go to the split property files when those are open.
        """
        self.nrows += len(nrows)
        self.erows += len(erows)
        self.qrows += len(qrows)
        self.invalid_erows += len(invalid_erows)
        self.invalid_qrows += len(invalid_qrows)

        self.cnt += 1
        if progress_interval > 0 and self.cnt % progress_interval == 0:
            print("The {} collector called {} times: {} nrows, {} erows, {} qrows, {} invalid erows, {} invalid qrows".format(who,
                                                                                                                              self.cnt,
                                                                                                                              self.nrows,
                                                                                                                              self.erows,
                                                                                                                              self.qrows,
                                                                                                                              self.invalid_erows,
                                                                                                                              self.invalid_qrows),
                  file=sys.stderr, flush=True)
        row: typing.List[str]
        if len(nrows) > 0:
            if self.node_wr is None:
                raise ValueError("Unexpected node rows in the %s collector." % who)

            if use_kgtkwriter:
                for row in nrows:
                    self.node_wr.write(row)
            else:
                self.node_wr.writerows(nrows)

        if len(erows) > 0:
            if use_kgtkwriter:
                if not self.process_split_files:
                    if self.detailed_edge_wr is None:
                        raise ValueError("Unexpected edge rows in the %s collector." % who)
                    for row in erows:
                        if skip_validation or validate(row, "unsplit detailed edge"):
                            self.detailed_edge_wr.write(row)
                else:
                    for row in erows:
                        split: bool = False
                        label: str = row[2] # Hack: knows the structure of the row.
                        method: typing.Optional[typing.Callable[[typing.List[str]], bool]] = self.split_dispatcher.get(label)
                        if method is not None:
                            split = method(row)
                        if not split:
                            if self.minimal_edge_wr is None and self.detailed_edge_wr is None and self.split_property_edge_wr is None:
                                raise ValueError("Unexpected %s edge rows in the %s collector." % (label, who))

                            if self.split_property_edge_wr is not None and row[1].startswith("P"): # Hack: knows the structure of the row.
                                # For now, split property files are minimal.
                                if skip_validation or validate(row, "split property edge"):
                                    self.split_property_edge_wr.write((row[0], row[1], row[2], row[3], row[4], row[5])) # Hack: knows the structure of the row.
                            elif self.minimal_edge_wr is not None:
                                if skip_validation or validate(row, "minimal edge"):
                                    self.minimal_edge_wr.write((row[0], row[1], row[2], row[3], row[4], row[5])) # Hack: knows the structure of the row.

                            # The detailed edge file gets the row as well.
                            if self.detailed_edge_wr is not None:
                                if skip_validation or validate(row, "split detailed edge"):
                                    self.detailed_edge_wr.write(row)

            else:
                if self.minimal_edge_wr is None:
                    raise ValueError("Unexpected edge rows in the %s collector." % who)
                if skip_validation:
                    self.minimal_edge_wr.writerows(erows)
                else:
                    for row in erows:
                        if validate(row, "minimal edge csv"):
                            # Bug fix: csv.writer has no .write method; use writerow.
                            self.minimal_edge_wr.writerow(row)

        if len(qrows) > 0:
            if use_kgtkwriter:
                if self.minimal_qual_wr is None and self.detailed_qual_wr is None:
                    raise ValueError("Unexpected qual rows in the %s collector." % who)

                for row in qrows:
                    if self.split_property_qual_wr is not None and row[0].startswith("P"): # Hack: knows the structure of the row.
                        if skip_validation or validate(row, "split property qual"):
                            self.split_property_qual_wr.write((row[0], row[1], row[2], row[3], row[4])) # Hack: knows the structure of the row.
                    elif self.minimal_qual_wr is not None:
                        if skip_validation or validate(row, "minimal qual"):
                            self.minimal_qual_wr.write((row[0], row[1], row[2], row[3], row[4])) # Hack: knows the structure of the row.

                    # The detailed qual file gets the row as well.
                    if self.detailed_qual_wr is not None:
                        if skip_validation or validate(row, "detailed qual"):
                            self.detailed_qual_wr.write(row)

            else:
                if self.detailed_qual_wr is None:
                    raise ValueError("Unexpected qual rows in the %s collector." % who)
                if skip_validation:
                    self.detailed_qual_wr.writerows(qrows)
                else:
                    for row in qrows:
                        if validate(row, "detailed qual csv"):
                            # Bug fix: csv.writer has no .write method; use writerow.
                            self.detailed_qual_wr.writerow(row)

        if len(invalid_erows) > 0:
            # print("Writing invalid erows", file=sys.stderr, flush=True) # ***
            if self.invalid_edge_wr is None:
                raise ValueError("Unexpected invalid edge rows in the %s collector." % who)

            if use_kgtkwriter:
                for row in invalid_erows:
                    if minimal_edge_file is not None: # messy
                        self.invalid_edge_wr.write((row[0], row[1], row[2], row[3], row[4], row[5])) # Hack: knows the structure of the row.
                    else:
                        self.invalid_edge_wr.write(row)
            else:
                self.invalid_edge_wr.writerows(invalid_erows)

        if len(invalid_qrows) > 0:
            if self.invalid_qual_wr is None:
                raise ValueError("Unexpected invalid qual rows in the %s collector." % who)

            if use_kgtkwriter:
                for row in invalid_qrows:
                    if minimal_qual_file is not None: # messy
                        self.invalid_qual_wr.write((row[0], row[1], row[2], row[3], row[4])) # Hack: knows the structure of the row.
                    else:
                        self.invalid_qual_wr.write(row)
            else:
                self.invalid_qual_wr.writerows(invalid_qrows)

    def setup_split_dispatcher(self):
        """Build the label -> handler map used by collect() for split files."""
        self.split_dispatcher: typing.MutableMapping[str, typing.Callable[[typing.List[str]], bool]] = dict()
        self.split_dispatcher[ADDL_SITELINK_LABEL] = self.split_sitelink
        self.split_dispatcher[ALIAS_LABEL] = self.split_alias
        self.split_dispatcher[DATATYPE_LABEL] = self.split_datatype
        self.split_dispatcher[DESCRIPTION_LABEL] = self.split_description
        self.split_dispatcher[LABEL_LABEL] = self.split_label
        self.split_dispatcher[SITELINK_LABEL] = self.split_sitelink
        self.split_dispatcher[SITELINK_BADGE_LABEL] = self.split_sitelink
        self.split_dispatcher[SITELINK_LANGUAGE_LABEL] = self.split_sitelink
        self.split_dispatcher[SITELINK_SITE_LABEL] = self.split_sitelink
        self.split_dispatcher[SITELINK_TITLE_LABEL] = self.split_sitelink
        self.split_dispatcher[TYPE_LABEL] = self.split_type

    def split_alias(self, row: typing.List[str])->bool:
        """Write an alias row to the split alias file(s); True if written."""
        split: bool = False
        lang: str = row[-1] # Hack: knows the structure of the row.

        if self.split_alias_wr is not None:
            self.split_alias_wr.write((row[0], row[1], row[2], row[3], lang)) # Hack: knows the structure of the row.
            split = True

        if self.split_en_alias_wr is not None and lang == "en":
            self.split_en_alias_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
            split = True

        return split

    def split_datatype(self, row: typing.List[str])->bool:
        """Write a datatype row to the split datatype file; True if written."""
        split: bool = False

        if self.split_datatype_wr is not None:
            self.split_datatype_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
            split = True

        return split

    def split_description(self, row: typing.List[str])->bool:
        """Write a description row to the split description file(s); True if written."""
        split: bool = False
        lang: str = row[-1] # Hack: knows the structure of the row.

        if self.split_description_wr is not None:
            self.split_description_wr.write((row[0], row[1], row[2], row[3], lang)) # Hack: knows the structure of the row.
            split = True

        if self.split_en_description_wr is not None and lang == "en":
            self.split_en_description_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
            split = True

        return split

    def split_label(self, row: typing.List[str])->bool:
        """Write a label row to the split label file(s); True if written."""
        split: bool = False
        lang: str = row[-1] # Hack: knows the structure of the row.

        if self.split_label_wr is not None:
            self.split_label_wr.write((row[0], row[1], row[2], row[3], lang)) # Hack: knows the structure of the row.
            split = True

        if self.split_en_label_wr is not None and lang == "en":
            self.split_en_label_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
            split = True

        return split

    def split_sitelink(self, row: typing.List[str])->bool:
        """Write a sitelink row to the split sitelink file(s); True if written."""
        split: bool = False
        lang: str = row[-1] # Hack: knows the structure of the row.

        if self.split_sitelink_wr is not None:
            self.split_sitelink_wr.write((row[0], row[1], row[2], row[3], lang)) # Hack: knows the structure of the row.
            split = True

        if self.split_en_sitelink_wr is not None and lang == "en":
            self.split_en_sitelink_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
            split = True

        return split

    def split_type(self, row: typing.List[str])->bool:
        """Write an entity-type row to the split type file; True if written."""
        split: bool = False

        if self.split_type_wr is not None:
            self.split_type_wr.write((row[0], row[1], row[2], row[3])) # Hack: knows the structure of the row.
            split = True

        return split
try:
UPDATE_VERSION: str = "2021-02-24T21:11:49.602037+00:00#sgB3FM8zpy/0bbx1RwyRawYnB1spAUBS+FVVQBL8DtJVxXE8mYCTTLr2lHJqbKVe5fBPp+k5iQjTDmJ6GRVf8Q=="
print("kgtk import-wikidata version: %s" % UPDATE_VERSION, file=sys.stderr, flush=True)
print("Starting main process (pid %d)." % os.getpid(), file=sys.stderr, flush=True)
inp_path = KGTKArgumentParser.get_input_file(input_file)
csv_line_terminator = "\n" if os.name == 'posix' else "\r\n"
start=time.time()
if not skip_processing:
from gzip import GzipFile
print("Processing.", file=sys.stderr, flush=True)
# Open the input file first to make it easier to monitor with "pv".
input_f: typing.Union[GzipFile, typing.IO[typing.Any]]
if str(inp_path) == "-":
print('Processing wikidata from standard input', file=sys.stderr, flush=True)
# It is not well documented, but this is how you read binary data
# from stdin in Python 3.
#
# TODO: Add decompression.
input_f = sys.stdin.buffer
else:
print('Processing wikidata file %s' % str(inp_path), file=sys.stderr, flush=True)
input_f = open(inp_path, mode='rb')
progress_startup(fd=input_f.fileno()) # Start the custom progress monitor.
if str(inp_path).endswith(".bz2"):
print('Decompressing (bz2)', file=sys.stderr, flush=True)
# TODO: Optionally use a system decompression program.
input_f = bz2.open(input_f)
elif str(inp_path).endswith(".gz"):
# TODO: Optionally use a system decompression program.
if use_mgzip_for_input:
import mgzip
print('Decompressing (mgzip)', file=sys.stderr, flush=True)
input_f = mgzip.open(input_f, thread=mgzip_threads_for_input)
else:
import gzip
print('Decompressing (gzip)', file=sys.stderr, flush=True)
input_f = gzip.open(input_f)
collector_p = None
node_collector_p = None
edge_collector_p = None
qual_collector_p = None
invalid_edge_collector_p = None
invalid_qual_collector_p = None
description_collector_p = None
sitelink_collector_p = None
if collect_results:
print("Creating the collector queue.", file=sys.stderr, flush=True)
# collector_q = pyrallel.ShmQueue()
collector_q_maxsize = procs*collector_queue_per_proc_size
if collect_seperately:
if node_file is not None:
node_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector node queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the node_collector.", file=sys.stderr, flush=True)
node_collector: MyCollector = MyCollector()
print("Creating the node collector process.", file=sys.stderr, flush=True)
node_collector_p = mp.Process(target=node_collector.run, args=(node_collector_q, "node"))
print("Starting the node collector process.", file=sys.stderr, flush=True)
node_collector_p.start()
print("Started the node collector process.", file=sys.stderr, flush=True)
if minimal_edge_file is not None or detailed_edge_file is not None:
edge_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector edge queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the edge_collector.", file=sys.stderr, flush=True)
edge_collector: MyCollector = MyCollector()
print("Creating the edge collector process.", file=sys.stderr, flush=True)
edge_collector_p = mp.Process(target=edge_collector.run, args=(edge_collector_q, "edge"))
print("Starting the edge collector process.", file=sys.stderr, flush=True)
edge_collector_p.start()
print("Started the edge collector process.", file=sys.stderr, flush=True)
if minimal_qual_file is not None or detailed_qual_file is not None:
qual_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector qual queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the qual_collector.", file=sys.stderr, flush=True)
qual_collector: MyCollector = MyCollector()
print("Creating the qual collector process.", file=sys.stderr, flush=True)
qual_collector_p = mp.Process(target=qual_collector.run, args=(qual_collector_q, "qual"))
print("Starting the qual collector process.", file=sys.stderr, flush=True)
qual_collector_p.start()
print("Started the qual collector process.", file=sys.stderr, flush=True)
if invalid_edge_file is not None:
invalid_edge_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector invalid edge queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the invalid_edge_collector.", file=sys.stderr, flush=True)
invalid_edge_collector: MyCollector = MyCollector()
print("Creating the invalid edge collector process.", file=sys.stderr, flush=True)
invalid_edge_collector_p = mp.Process(target=invalid_edge_collector.run, args=(invalid_edge_collector_q, "invalid edge"))
print("Starting the invalid edge collector process.", file=sys.stderr, flush=True)
invalid_edge_collector_p.start()
print("Started the invalid edge collector process.", file=sys.stderr, flush=True)
if invalid_qual_file is not None:
invalid_qual_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector invalid qual queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the invalid_qual_collector.", file=sys.stderr, flush=True)
invalid_qual_collector: MyCollector = MyCollector()
print("Creating the invalid qual collector process.", file=sys.stderr, flush=True)
invalid_qual_collector_p = mp.Process(target=invalid_qual_collector.run, args=(invalid_qual_collector_q, "invalid qual"))
print("Starting the invalid qual collector process.", file=sys.stderr, flush=True)
invalid_qual_collector_p.start()
print("Started the invalid qual collector process.", file=sys.stderr, flush=True)
if split_description_file is not None:
description_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector description queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the description collector.", file=sys.stderr, flush=True)
description_collector: MyCollector = MyCollector()
print("Creating the description collector process.", file=sys.stderr, flush=True)
description_collector_p = mp.Process(target=description_collector.run, args=(description_collector_q, "description"))
print("Starting the description collector process.", file=sys.stderr, flush=True)
description_collector_p.start()
print("Started the description collector process.", file=sys.stderr, flush=True)
if split_sitelink_file is not None:
sitelink_collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The collector sitelink queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the sitelink collector.", file=sys.stderr, flush=True)
sitelink_collector: MyCollector = MyCollector()
print("Creating the sitelink collector process.", file=sys.stderr, flush=True)
sitelink_collector_p = mp.Process(target=sitelink_collector.run, args=(sitelink_collector_q, "sitelink"))
print("Starting the sitelink collector process.", file=sys.stderr, flush=True)
sitelink_collector_p.start()
print("Started the sitelink collector process.", file=sys.stderr, flush=True)
else:
collector_q = pyrallel.ShmQueue(maxsize=collector_q_maxsize)
print("The common collector queue has been created (maxsize=%d)." % collector_q_maxsize, file=sys.stderr, flush=True)
print("Creating the common collector.", file=sys.stderr, flush=True)
collector: MyCollector = MyCollector()
print("Creating the common collector process.", file=sys.stderr, flush=True)
collector_p = mp.Process(target=collector.run, args=(collector_q, "common"))
print("Starting the common collector process.", file=sys.stderr, flush=True)
collector_p.start()
print("Started the common collector process.", file=sys.stderr, flush=True)
if node_file:
if node_id_only:
node_file_header = ['id']
else:
node_file_header = ['id','label','type','description','alias','datatype']
ncq = collector_q if collector_q is not None else node_collector_q
if ncq is not None:
print("Sending the node header to the collector.", file=sys.stderr, flush=True)
ncq.put(("node_header", None, None, None, None, None, node_file_header))
print("Sent the node header to the collector.", file=sys.stderr, flush=True)
else:
with open(node_file+'_header', 'w', newline='') as myfile:
wr = csv.writer(
myfile,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
wr.writerow(node_file_header)
if explode_values:
edge_file_header = ['id','node1','label','node2','rank','node2;magnitude','node2;unit','node2;date','node2;item','node2;lower','node2;upper',
'node2;latitude','node2;longitude','node2;precision','node2;calendar','node2;entity-type','node2;wikidatatype', 'lang']
else:
edge_file_header = ['id','node1','label','node2',
'rank', 'node2;wikidatatype',
'claim_id', 'val_type', 'entity_type', 'datahash', 'precision', 'calendar', 'lang']
ecq = collector_q if collector_q is not None else edge_collector_q
if detailed_edge_file:
if ecq is not None:
print("Sending the detailed edge header to the collector.", file=sys.stderr, flush=True)
ecq.put(("detailed_edge_header", None, None, None, None, None, edge_file_header))
print("Sent the detailed edge header to the collector.", file=sys.stderr, flush=True)
else:
with open(detailed_edge_file+'_header', 'w', newline='') as myfile:
wr = csv.writer(
myfile,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
wr.writerow(edge_file_header)
if minimal_edge_file and ecq is not None:
print("Sending the minimal edge file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("minimal_edge_header", None, None, None, None, None, edge_file_header[0:6]))
print("Sent the minimal edge file header to the collector.", file=sys.stderr, flush=True)
if split_alias_file and ecq is not None:
alias_file_header = ['id', 'node1', 'label', 'node2', 'lang']
print("Sending the alias file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_alias_header", None, None, None, None, None, alias_file_header))
print("Sent the alias file header to the collector.", file=sys.stderr, flush=True)
if split_en_alias_file and ecq is not None:
en_alias_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the English alias file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_en_alias_header", None, None, None, None, None, en_alias_file_header))
print("Sent the English alias file header to the collector.", file=sys.stderr, flush=True)
if split_datatype_file and ecq is not None:
datatype_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the datatype file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_datatype_header", None, None, None, None, None, datatype_file_header))
print("Sent the datatype file header to the collector.", file=sys.stderr, flush=True)
dcq = collector_q if collector_q is not None else description_collector_q
if split_description_file and dcq is not None:
description_file_header = ['id', 'node1', 'label', 'node2', 'lang']
print("Sending the description file header to the collector.", file=sys.stderr, flush=True)
dcq.put(("split_description_header", None, None, None, None, None, description_file_header))
print("Sent the description file header to the collector.", file=sys.stderr, flush=True)
if split_en_description_file and dcq is not None:
en_description_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the English description file header to the collector.", file=sys.stderr, flush=True)
dcq.put(("split_en_description_header", None, None, None, None, None, en_description_file_header))
print("Sent the English description file header to the collector.", file=sys.stderr, flush=True)
if split_label_file and ecq is not None:
label_file_header = ['id', 'node1', 'label', 'node2', 'lang']
print("Sending the label file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_label_header", None, None, None, None, None, label_file_header))
print("Sent the label file header to the collector.", file=sys.stderr, flush=True)
if split_en_label_file and ecq is not None:
en_label_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the English label file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_en_label_header", None, None, None, None, None, en_label_file_header))
print("Sent the English label file header to the collector.", file=sys.stderr, flush=True)
scq = collector_q if collector_q is not None else sitelink_collector_q
if split_sitelink_file and scq is not None:
sitelink_file_header = ['id', 'node1', 'label', 'node2', 'lang']
print("Sending the sitelink file header to the collector.", file=sys.stderr, flush=True)
scq.put(("split_sitelink_header", None, None, None, None, None, sitelink_file_header))
print("Sent the sitelink file header to the collector.", file=sys.stderr, flush=True)
if split_en_sitelink_file and scq is not None:
en_sitelink_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the English sitelink file header to the collector.", file=sys.stderr, flush=True)
scq.put(("split_en_sitelink_header", None, None, None, None, None, en_sitelink_file_header))
print("Sent the English sitelink file header to the collector.", file=sys.stderr, flush=True)
if split_type_file and ecq is not None:
type_file_header = ['id', 'node1', 'label', 'node2']
print("Sending the entry type file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_type_header", None, None, None, None, None, type_file_header))
print("Sent the entry type file header to the collector.", file=sys.stderr, flush=True)
if split_property_edge_file and ecq is not None:
print("Sending the property edge file header to the collector.", file=sys.stderr, flush=True)
ecq.put(("split_property_edge_header", None, None, None, None, None, edge_file_header[0:6]))
print("Sent the property edge file header to the collector.", file=sys.stderr, flush=True)
if invalid_edge_file and invalid_edge_collector_q is not None:
if detailed_edge_file:
print("Sending the detailed invalid edge header to the collector.", file=sys.stderr, flush=True)
invalid_edge_collector_q.put(("invalid_edge_header", None, None, None, None, None, edge_file_header))
print("Sent the detailed invalid edge header to the collector.", file=sys.stderr, flush=True)
elif minimal_edge_file:
print("Sending the minimal invalid edge header to the collector.", file=sys.stderr, flush=True)
invalid_edge_collector_q.put(("invalid_edge_header", None, None, None, None, None, edge_file_header[0:6]))
print("Sent the minimal invalid edge header to the collector.", file=sys.stderr, flush=True)
if minimal_qual_file is not None or detailed_qual_file is not None or split_property_qual_file is not None:
qual_file_header = edge_file_header.copy()
if "rank" in qual_file_header:
qual_file_header.remove('rank')
if "claim_type" in qual_file_header:
qual_file_header.remove('claim_type')
if "claim_id" in qual_file_header:
qual_file_header.remove('claim_id')
if "lang" in qual_file_header:
qual_file_header.remove('lang')
qcq = collector_q if collector_q is not None else qual_collector_q
if detailed_qual_file is not None:
if qcq is not None:
print("Sending the detailed qual file header to the collector.", file=sys.stderr, flush=True)
qcq.put(("detailed_qual_header", None, None, None, None, None, qual_file_header))
print("Sent the detailed qual file header to the collector.", file=sys.stderr, flush=True)
else:
with open(detailed_qual_file+'_header', 'w', newline='') as myfile:
wr = csv.writer(
myfile,
quoting=csv.QUOTE_NONE,
delimiter="\t",
escapechar="\n",
quotechar='',
lineterminator=csv_line_terminator)
wr.writerow(qual_file_header)
if minimal_qual_file is not None and qcq is not None:
print("Sending the minimal qual file header to the collector.", file=sys.stderr, flush=True)
qcq.put(("minimal_qual_header", None, None, None, None, None, qual_file_header[0:5]))
print("Sent the minimal qual file header to the collector.", file=sys.stderr, flush=True)
if split_property_qual_file and qcq is not None:
print("Sending the property qual file header to the collector.", file=sys.stderr, flush=True)
qcq.put(("split_property_qual_header", None, None, None, None, None, qual_file_header[0:5]))
print("Sent the property qual file header to the collector.", file=sys.stderr, flush=True)
if invalid_qual_file and invalid_qual_collector_q is not None:
if detailed_qual_file:
print("Sending the detailed invalid qual header to the collector.", file=sys.stderr, flush=True)
invalid_qual_collector_q.put(("invalid_qual_header", None, None, None, None, None, qual_file_header))
print("Sent the detailed invalid qual header to the collector.", file=sys.stderr, flush=True)
elif minimal_qual_file:
print("Sending the minimal invalid qual header to the collector.", file=sys.stderr, flush=True)
invalid_qual_collector_q.put(("invalid_qual_header", None, None, None, None, None, qual_file_header[0:5]))
print("Sent the minimal invalid qual header to the collector.", file=sys.stderr, flush=True)
print('Creating parallel processor for {}'.format(str(inp_path)), file=sys.stderr, flush=True)
if use_shm or single_mapper_queue:
pp = pyrallel.ParallelProcessor(procs, MyMapper,enable_process_id=True, max_size_per_mapper_queue=max_size_per_mapper_queue,
use_shm=use_shm, enable_collector_queues=False, batch_size=mapper_batch_size,
single_mapper_queue=single_mapper_queue)
else:
pp = pyrallel.ParallelProcessor(procs, MyMapper,enable_process_id=True, max_size_per_mapper_queue=max_size_per_mapper_queue,
batch_size=mapper_batch_size)
print('Start parallel processing', file=sys.stderr, flush=True)
pp.start()
for cnt, line in enumerate(input_f):
if limit and cnt >= limit:
break
# pp.add_task(line,node_file,edge_file,qual_file,languages,source)
pp.add_task(line)
print('Done processing {}'.format(str(inp_path)), file=sys.stderr, flush=True)
input_f.close()
print('Telling the workers to shut down.', file=sys.stderr, flush=True)
pp.task_done()
print('Waiting for the workers to shut down.', file=sys.stderr, flush=True)
pp.join()
print('Worker shut down is complete.', file=sys.stderr, flush=True)
if collector_q is not None:
print('Telling the collector to shut down.', file=sys.stderr, flush=True)
collector_q.put(("shutdown", None, None, None, None, None, None))
if collector_p is not None:
print('Waiting for the collector to shut down.', file=sys.stderr, flush=True)
collector_p.join()
print('Collector shut down is complete.', file=sys.stderr, flush=True)
if collector_q is not None:
collector_q.close()
if node_collector_q is not None:
print('Telling the node collector to shut down.', file=sys.stderr, flush=True)
node_collector_q.put(("shutdown", None, None, None, None, None, None))
if node_collector_p is not None:
print('Waiting for the node collector to shut down.', file=sys.stderr, flush=True)
node_collector_p.join()
print('Node collector shut down is complete.', file=sys.stderr, flush=True)
if node_collector_q is not None:
node_collector_q.close()
if edge_collector_q is not None:
print('Telling the edge collector to shut down.', file=sys.stderr, flush=True)
edge_collector_q.put(("shutdown", None, None, None, None, None, None))
if edge_collector_p is not None:
print('Waiting for the edge collector to shut down.', file=sys.stderr, flush=True)
edge_collector_p.join()
print('Edge collector shut down is complete.', file=sys.stderr, flush=True)
if edge_collector_q is not None:
edge_collector_q.close()
if qual_collector_q is not None:
print('Telling the qual collector to shut down.', file=sys.stderr, flush=True)
qual_collector_q.put(("shutdown", None, None, None, None, None, None))
if qual_collector_p is not None:
print('Waiting for the qual collector to shut down.', file=sys.stderr, flush=True)
qual_collector_p.join()
print('Qual collector shut down is complete.', file=sys.stderr, flush=True)
if qual_collector_q is not None:
qual_collector_q.close()
if invalid_edge_collector_q is not None:
print('Telling the invalid edge collector to shut down.', file=sys.stderr, flush=True)
invalid_edge_collector_q.put(("shutdown", None, None, None, None, None, None))
if invalid_edge_collector_p is not None:
print('Waiting for the invalid edge collector to shut down.', file=sys.stderr, flush=True)
invalid_edge_collector_p.join()
print('Invalid edge collector shut down is complete.', file=sys.stderr, flush=True)
if invalid_edge_collector_q is not None:
invalid_edge_collector_q.close()
if invalid_qual_collector_q is not None:
print('Telling the invalid qual collector to shut down.', file=sys.stderr, flush=True)
invalid_qual_collector_q.put(("shutdown", None, None, None, None, None, None))
if invalid_qual_collector_p is not None:
print('Waiting for the invalid qual collector to shut down.', file=sys.stderr, flush=True)
invalid_qual_collector_p.join()
print('Invalid qual collector shut down is complete.', file=sys.stderr, flush=True)
if invalid_qual_collector_q is not None:
invalid_qual_collector_q.close()
if description_collector_q is not None:
print('Telling the description collector to shut down.', file=sys.stderr, flush=True)
description_collector_q.put(("shutdown", None, None, None, None, None, None))
if description_collector_p is not None:
print('Waiting for the description collector to shut down.', file=sys.stderr, flush=True)
description_collector_p.join()
print('Description collector shut down is complete.', file=sys.stderr, flush=True)
if description_collector_q is not None:
description_collector_q.close()
if sitelink_collector_q is not None:
print('Telling the sitelink collector to shut down.', file=sys.stderr, flush=True)
sitelink_collector_q.put(("shutdown", None, None, None, None, None, None))
if sitelink_collector_p is not None:
print('Waiting for the sitelink collector to shut down.', file=sys.stderr, flush=True)
sitelink_collector_p.join()
print('Sitelink collector shut down is complete.', file=sys.stderr, flush=True)
if sitelink_collector_q is not None:
sitelink_collector_q.close()
if not skip_merging and not collect_results:
# We've finished processing the input data, possibly using multiple
# server processes. We need to assemble the final output file(s) with
# the header first, then the fragments produced by parallel
# processing.
#
# If we assume that we are on Linux, then os.sendfile(...)
# should provide the simplest, highest-performing solution.
if node_file:
print('Combining the node file fragments', file=sys.stderr, flush=True)
node_file_fragments=[node_file+'_header']
for n in range(procs):
node_file_fragments.append(node_file+'_'+str(n))
platform_cat(node_file_fragments, node_file, remove=not keep_temp_files, use_python_cat=use_python_cat, verbose=True)
if detailed_edge_file:
print('Combining the edge file fragments', file=sys.stderr, flush=True)
edge_file_fragments=[detailed_edge_file+'_header']
for n in range(procs):
edge_file_fragments.append(detailed_edge_file+'_'+str(n))
platform_cat(edge_file_fragments, detailed_edge_file, remove=not keep_temp_files, use_python_cat=use_python_cat, verbose=True)
if detailed_qual_file:
print('Combining the qualifier file fragments', file=sys.stderr, flush=True)
qual_file_fragments=[detailed_qual_file+'_header']
for n in range(procs):
qual_file_fragments.append(detailed_qual_file+'_'+str(n))
platform_cat(qual_file_fragments, detailed_qual_file, remove=not keep_temp_files, use_python_cat=use_python_cat, verbose=True)
print('import complete', file=sys.stderr, flush=True)
end=time.time()
print('time taken : {}s'.format(end-start), file=sys.stderr, flush=True)
except Exception as e:
raise KGTKException(str(e))
def validate(row: typing.List[str], who: str)->bool:
    """Ensure that output edge rows meet minimal validation criteria.

    An edge row is valid when it carries at least the four core fields
    (id, node1, label, node2) and none of them is empty.  Problems are
    reported to stderr and False is returned.
    """
    import sys

    # Guard: the four core fields must all be present.
    if len(row) < 4:
        print("%s row too short: %s" % (who, repr(row)), file=sys.stderr, flush=True)
        return False

    # Guard: none of the four core fields may be the empty string.
    if any(len(field) == 0 for field in row[:4]):
        print("Invalid %s row: (%s, %s, %s, %s)" % (who, repr(row[0]), repr(row[1]), repr(row[2]), repr(row[3])), file=sys.stderr, flush=True)
        return False

    return True
|
execution.py
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import Queue
import StringIO
import logging
import multiprocessing
import traceback
from threading import RLock, Thread, Event
from xml.sax import SAXParseException
import gc
from _bsddb import DBNotFoundError
from datetime import datetime as dt
import requests
from agora.client.namespaces import AGORA
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import wait
from rdflib import ConjunctiveGraph, RDF, URIRef
# Shared thread pool used by plan execution to fan out work concurrently.
pool = ThreadPoolExecutor(max_workers=20)
__author__ = 'Fernando Serena'
# Module-level logger for the agora client.
log = logging.getLogger('agora.client')
# Accept header values for each RDF serialization this client can request.
_accept_mimes = {'turtle': 'text/turtle', 'xml': 'application/rdf+xml'}
class StopException(Exception):
    """Raised to abort plan execution once the stop event has been set."""
def chunks(l, n):
    """Yield consecutive slices of l, each at most n items long.

    A falsy n (0, None, ...) yields nothing at all.
    """
    if not n:
        return
    # NOTE: xrange keeps this Python-2 compatible, matching the rest of
    # this module (Queue, StringIO imports).
    for start in xrange(0, len(l), n):
        yield l[start:start + n]
def __extend_uri(prefixes, short):
    """
    Extend a prefixed uri with the help of a specific dictionary of prefixes.
    :param prefixes: Dictionary mapping prefix -> namespace uri
    :param short: Prefixed uri (e.g. 'rdf:type') to be extended
    :return: The expanded uri, or `short` unchanged when no prefix matches
    """
    for prefix in prefixes:
        # Match the full 'prefix:' qualifier, not just a leading substring.
        # Otherwise a prefix such as 'rd' would match 'rdf:type', short-circuit
        # the loop, and return the uri unexpanded.  Replace only the first
        # occurrence so a qualifier-like substring later in the uri is kept.
        if short.startswith(prefix + ':'):
            return short.replace(prefix + ':', prefixes[prefix], 1)
    return short
class PlanExecutor(object):
def __init__(self, plan):
    """Initialize the executor state for a given search plan graph.

    :param plan: rdflib graph describing the Agora search plan to execute
    """
    self.__plan_graph = plan
    self.__fragment = set([])          # collected (tp, triple) results
    self.__uri_cache = {}              # uris already dereferenced
    self.__node_spaces = {}            # plan node -> set of search spaces
    self.__node_patterns = {}          # plan node -> set of triple patterns
    self.__spaces = None               # all search spaces found in the plan
    self.__patterns = {}               # triple pattern -> extracted properties
    self.__subjects_to_ignore = {}     # search space -> filtered-out subjects
    self.__resource_queue = {}
    self.__resource_lock = RLock()     # guards fragment/queue mutation across threads
    self.__completed = False
    self.__last_success_format = None  # last serialization that parsed successfully
    self.__last_iteration_ts = dt.now()
    # Request a search plan on initialization and extract patterns and spaces
    self.__extract_patterns_and_spaces()
def __extract_patterns_and_spaces(self):
    """
    Analyses the search plan graph in order to build the required data structures from patterns
    and spaces.
    :return:
    """

    def __decorate_nodes(nodes, space):
        """
        Performs a backward search from a list of pattern nodes and assigns a set of search spaces
        to all encountered nodes.
        :param nodes: List of pattern nodes that belongs to a search space
        :param space: List of search space id
        :return:
        """
        for n in nodes:
            if n not in self.__node_spaces:
                self.__node_spaces[n] = set([])
            self.__node_spaces[n].add(space)
            # Walk AGORA.next edges backwards and tag every predecessor too.
            pred_nodes = self.__plan_graph.subjects(AGORA.next, n)
            __decorate_nodes(pred_nodes, space)

    # Extract all search spaces in the plan and build a dictionary of subjects-to-ignore per each of them.
    # Ignored subjects are those that won't be dereferenced due to an explicit graph pattern (object) filter,
    # e.g. ?s doap:name "jenkins" -> All ?s that don't match the filter will be ignored.
    self.__spaces = set(self.__plan_graph.subjects(RDF.type, AGORA.SearchSpace))
    self.__subjects_to_ignore = dict([(sp, set([])) for sp in self.__spaces])

    patterns = list(self.__plan_graph.subjects(RDF.type, AGORA.TriplePattern))
    for tp in patterns:
        # A triple pattern belongs to a UNIQUE search space
        space = list(self.__plan_graph.subjects(AGORA.definedBy, tp)).pop()
        self.__patterns[tp] = {'space': space}

        # Depending on the format of each triple pattern (either '?s a Concept' or '?s prop O'),
        # it is required to extract different properties.
        tp_pred = list(self.__plan_graph.objects(tp, predicate=AGORA.predicate)).pop()
        if tp_pred == RDF.type:  # ?s a Concept
            self.__patterns[tp]['type'] = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()
            # checkType is optional in the plan; default to True when absent.
            try:
                check_type = list(self.__plan_graph.objects(tp, predicate=AGORA.checkType)).pop().toPython()
            except IndexError:
                check_type = True
            self.__patterns[tp]['check'] = check_type
        else:  # ?s prop O
            self.__patterns[tp]['property'] = tp_pred
            tp_obj = list(self.__plan_graph.objects(tp, predicate=AGORA.object)).pop()
            if (tp_obj, RDF.type, AGORA.Literal) in self.__plan_graph:  # In case O is a Literal
                self.__patterns[tp]['filter_object'] = list(self.__plan_graph.objects(tp_obj, AGORA.value)).pop()
            elif isinstance(tp_obj, URIRef):
                self.__patterns[tp]['filter_object'] = tp_obj
            # A uri subject also acts as a filter for the pattern.
            tp_sub = list(self.__plan_graph.objects(tp, predicate=AGORA.subject)).pop()
            if isinstance(tp_sub, URIRef):
                self.__patterns[tp]['filter_subject'] = tp_sub

        # Get all pattern nodes (those that have a byPattern properties) of the search plan and search backwards
        # in order to set the scope of each search space.
        nodes = list(self.__plan_graph.subjects(AGORA.byPattern, tp))
        for n in nodes:
            if n not in self.__node_patterns:
                self.__node_patterns[n] = set([])
            self.__node_patterns[n].add(tp)
        __decorate_nodes(nodes, space)
def get_fragment(self, **kwargs):
    """Collect the whole fragment into a single graph.

    Drains the fragment generator and returns a ConjunctiveGraph that
    contains every collected triple, bound to the plan's namespaces.
    """
    gen, namespaces, plan = self.get_fragment_generator(**kwargs)
    graph = ConjunctiveGraph()
    for prefix, uri in namespaces:
        graph.bind(prefix, uri)
    for _, subject, predicate, obj in gen:
        graph.add((subject, predicate, obj))
    return graph
def get_fragment_generator(self, on_load=None, on_seeds=None, on_plink=None, on_link=None, on_type=None,
on_type_validation=None, on_tree=None, workers=None, stop_event=None, queue_wait=None,
queue_size=100, provider=None, lazy=True):
"""
Create a fragment generator that executes the search plan.
:param on_load: Function to be called just after a new URI is dereferenced
:param on_seeds: Function to be called just after a seed of a tree is identified
:param on_plink: Function to be called when a pattern link is reached
:param on_link: Function to be called when following a property that is not of a pattern
:param on_type: Function to be called when search for a type triple
:param on_type_validation: Function to be called just after a type is validated
:param on_tree: Function to be called just before a tree is going to be explored
:param provider:
:param queue_size:
:param workers:
:param stop_event:
:param queue_wait:
:param lazy:
:return:
"""
if workers is None:
workers = multiprocessing.cpu_count()
fragment_queue = Queue.Queue(maxsize=queue_size)
workers_queue = Queue.Queue(maxsize=workers)
if stop_event is None:
stop_event = Event()
def __create_graph():
if provider is None:
return ConjunctiveGraph()
else:
return provider.create(conjunctive=True)
def __release_graph(g):
if provider is not None:
provider.release(g)
else:
g.remove((None, None, None))
g.close()
def __open_graph(gid, loader, format):
if provider is None:
content, headers = loader(gid, format)
if not isinstance(content, bool):
g = ConjunctiveGraph()
g.parse(source=content, format=format)
return g
return content
else:
return provider.create(gid=gid, loader=loader, format=format)
def __get_content(uri, format):
try:
# log.debug('[Dereference][START] {}'.format(uri))
response = requests.get(uri, headers={'Accept': _accept_mimes[format]}, timeout=30)
except requests.Timeout:
log.debug('[Dereference][TIMEOUT][GET] {}'.format(uri))
return True
except UnicodeEncodeError:
log.debug('[Dereference][ERROR][ENCODE] {}'.format(uri))
return True
except Exception:
log.debug('[Dereference][ERROR][GET] {}'.format(uri))
return True
if response.status_code == 200:
try:
return StringIO.StringIO(response.content), response.headers
except SyntaxError:
traceback.print_exc()
log.error('[Dereference][ERROR][PARSE] {}'.format(uri))
return False
except ValueError:
traceback.print_exc()
log.debug('[Dereference][ERROR][VAL] {}'.format(uri))
return False
except DBNotFoundError:
# Ignore this exception... it is raised due to a stupid problem with prefixes
return True
except SAXParseException:
traceback.print_exc()
log.error('[Dereference][ERROR][SAX] {}'.format(uri))
return False
except Exception:
traceback.print_exc()
log.error('[Dereference][ERROR] {}'.format(uri))
return True
def __dereference_uri(tg, uri):
if not isinstance(uri, URIRef):
return
uri = uri.encode('utf-8')
def treat_resource_content(parse_format):
g = __open_graph(uri, loader=__get_content, format=parse_format)
if isinstance(g, bool):
return g
try:
tg.get_context(uri).__iadd__(g)
return True
finally:
if g is not None:
__release_graph(g)
"""
Load in a tree graph the set of triples contained in uri, trying to not deference the same uri
more than once in the context of a search plan execution
:param tg: The graph to be loaded with all the triples obtained from uri
:param uri: A resource uri to be dereferenced
:return:
"""
loaded = False
for fmt in sorted(_accept_mimes.keys(), key=lambda x: x != self.__last_success_format):
loaded = treat_resource_content(fmt)
if loaded:
self.__last_success_format = fmt
break
if loaded and on_load is not None:
triples = list(tg.get_context(uri).triples((None, None, None)))
on_load(uri, triples)
def __process_link_seed(seed, tree_graph, link, next_seeds):
    """Dereference *seed* and collect the objects of *link* as new seeds.

    :param seed: Subject URI to expand.
    :param tree_graph: Graph accumulating dereferenced triples.
    :param link: Predicate whose objects become the next seeds.
    :param next_seeds: Mutable set updated in place with the found objects.
    """
    __check_stop()
    try:
        __dereference_uri(tree_graph, seed)
        seed_pattern_objects = tree_graph.objects(subject=seed, predicate=link)
        next_seeds.update(seed_pattern_objects)
    except Exception as e:
        # Expansion is best-effort: log and keep whatever seeds we already have.
        traceback.print_exc()
        log.warning(e.message)
def __process_pattern_link_seed(seed, tree_graph, pattern_link):
    """Dereference *seed* (best effort) and return the objects of *pattern_link*.

    Dereference failures are swallowed on purpose: the tree graph may already
    contain triples for this seed from a previous visit.
    """
    __check_stop()
    try:
        __dereference_uri(tree_graph, seed)
    except:
        pass
    return tree_graph.objects(subject=seed, predicate=pattern_link)
def __check_stop():
    """Abort fragment collection if the stop event has been set.

    Clears the in-memory fragment, releases every graph held in the resource
    queue together with all cached plan structures, then raises StopException
    so that callers unwind immediately.
    """
    if stop_event.isSet():
        with self.__resource_lock:
            self.__fragment.clear()
            for tg in self.__resource_queue.keys():
                try:
                    tg.remove((None, None, None))
                    tg.store.close()
                except KeyError:
                    # Store may already have been closed/removed elsewhere.
                    pass
                tg.close()
                __release_graph(tg)
            # Drop every cached plan structure so nothing survives the abort.
            self.__plan_graph = None
            self.__uri_cache = None
            self.__node_spaces = None
            self.__node_patterns = None
            self.__spaces = None
            self.__patterns = None
            self.__subjects_to_ignore.clear()
            self.__resource_queue.clear()
        gc.collect()
        raise StopException()
def __put_triple_in_queue(quad):
    """Enqueue *quad* for the consumer generator.

    If the consumer has not pulled anything for more than 100 seconds the
    collection is considered abandoned and all workers are asked to stop.
    """
    if (dt.now() - self.__last_iteration_ts).total_seconds() > 100:
        log.info('Aborted fragment collection!')
        stop_event.set()
    fragment_queue.put(quad, timeout=queue_wait)
def __follow_node(node, tree_graph, seed_space, seed):
    """
    Recursively search for relevant triples following the current node and all its successors
    :param node: Tree node to be followed
    :param tree_graph:
    :param seed_space:
    :param seed: Collected seed for the current node
    :return:
    """
    def node_has_filter(x):
        """
        Check if a node is a pattern node and has an object filter
        """
        p_node = list(self.__plan_graph.objects(subject=x, predicate=AGORA.byPattern))
        try:
            p_node = p_node.pop()
            return 'filter_object' in self.__patterns[p_node] or 'filter_subject' in self.__patterns[p_node]
        except IndexError:
            # Not a pattern node at all.
            return False

    try:
        # Get the sorted list of current node's successors
        # (filtered pattern nodes first so filters are discovered early).
        nxt = sorted(list(self.__plan_graph.objects(node, AGORA.next)),
                     key=lambda x: node_has_filter(x), reverse=True)
        # Per each successor...
        for n in nxt:
            if seed_space in self.__node_spaces[n]:
                node_patterns = self.__node_patterns.get(n, [])
                # In case the node is not a leaf, 'onProperty' tells which is the next link to follow
                try:
                    link = list(self.__plan_graph.objects(subject=n, predicate=AGORA.onProperty)).pop()
                except IndexError:
                    link = None
                filter_next_seeds = set([])
                next_seeds = set([])
                # If the current node is a pattern node, it must search for triples to yield
                for pattern in node_patterns:
                    pattern_space = self.__patterns[pattern].get('space', None)
                    if pattern_space != seed_space or seed in self.__subjects_to_ignore[pattern_space]:
                        continue
                    subject_filter = self.__patterns[pattern].get('filter_subject', None)
                    if subject_filter is not None and seed != subject_filter:
                        # Seed does not satisfy the subject filter: prune it.
                        self.__subjects_to_ignore[pattern_space].add(seed)
                        continue
                    pattern_link = self.__patterns[pattern].get('property', None)
                    # If pattern is of type '?s prop O'...
                    if pattern_link is not None:
                        if (seed, pattern_link) not in self.__fragment:
                            obj_filter = self.__patterns[pattern].get('filter_object', None)
                            if on_plink is not None:
                                on_plink(pattern_link, [seed], pattern_space)
                            seed_was_filtered = True
                            try:
                                for seed_object in list(
                                        __process_pattern_link_seed(seed, tree_graph, pattern_link)):
                                    __check_stop()
                                    quad = (pattern, seed, pattern_link, seed_object)
                                    # Only yield objects that pass the (optional) object filter.
                                    if obj_filter is None or u''.join(seed_object).encode(
                                            'utf-8') == u''.join(obj_filter.toPython()).encode('utf-8'):
                                        self.__fragment.add((seed, pattern_link))
                                        __put_triple_in_queue(quad)
                                        seed_was_filtered = False
                                        if isinstance(obj_filter, URIRef):
                                            filter_next_seeds.add(obj_filter)
                                if obj_filter is not None and seed_was_filtered:
                                    # No object matched the filter: ignore this seed from now on.
                                    self.__subjects_to_ignore[pattern_space].add(seed)
                            except AttributeError as e:
                                log.warning('Trying to find {} objects of {}: {}'.format(link, seed, e.message))
                    # If pattern is of type '?s a Concept'...
                    obj_type = self.__patterns[pattern].get('type', None)
                    if obj_type is not None:
                        check_type = self.__patterns[pattern].get('check', False)
                        if on_type is not None:
                            on_type(obj_type, [seed], pattern_space)
                        __dereference_uri(tree_graph, seed)
                        try:
                            seed_objects = list(tree_graph.objects(subject=seed, predicate=link))
                            for seed_object in seed_objects:
                                type_triple = (pattern, seed_object, RDF.type, obj_type)
                                # In some cases, it is necessary to verify the type of the seed
                                if (seed_object, obj_type) not in self.__fragment:
                                    if check_type:
                                        __dereference_uri(tree_graph, seed_object)
                                        types = list(
                                            tree_graph.objects(subject=seed_object, predicate=RDF.type))
                                        if obj_type in types:
                                            self.__fragment.add((seed_object, obj_type))
                                            __put_triple_in_queue(type_triple)
                                        else:
                                            self.__subjects_to_ignore[pattern_space].add(seed_object)
                                    else:
                                        self.__fragment.add((seed_object, obj_type))
                                        __put_triple_in_queue(type_triple)
                        except AttributeError as e:
                            log.warning('Trying to find {} objects of {}: {}'.format(link, seed, e.message))
                # If the current node is not a leaf... go on finding seeds for the successors
                if link is not None and seed not in self.__subjects_to_ignore[seed_space]:
                    if on_link is not None:
                        on_link(link, [seed], seed_space)
                    __process_link_seed(seed, tree_graph, link, next_seeds)
                if filter_next_seeds:
                    next_seeds = set.intersection(next_seeds, filter_next_seeds)
                # Fan the next seeds out over the worker pool in chunks.
                # NOTE(review): 'workers / 2' relies on Python 2 integer division.
                chs = list(chunks(list(next_seeds), min(len(next_seeds), max(1, workers / 2))))
                next_seeds.clear()
                try:
                    while True:
                        __check_stop()
                        chunk = chs.pop()
                        threads = []
                        for s in chunk:
                            try:
                                workers_queue.put_nowait(s)
                                future = pool.submit(__follow_node, n, tree_graph, seed_space, s)
                                threads.append(future)
                            except Queue.Full:
                                # If all threads are busy...I'll do it myself
                                __follow_node(n, tree_graph, seed_space, s)
                            except Queue.Empty:
                                pass
                        wait(threads)
                        # Release one worker slot per finished future.
                        [(workers_queue.get_nowait(), workers_queue.task_done()) for _ in threads]
                except (IndexError, KeyError):
                    # chs exhausted (pop from empty list) -> all chunks processed.
                    pass
    except Queue.Full:
        # Fragment queue saturated: ask every worker to stop.
        stop_event.set()
    except Exception as e:
        traceback.print_exc()
        log.error(e.message)
        return
def get_fragment_triples():
    """
    Iterate over all search trees and yield relevant triples
    :return:
    """
    def execute_plan():
        # Runs in a background thread: explores every search tree and feeds
        # fragment_queue, which the generator loop below consumes.
        for tree in trees:
            if on_tree is not None:
                on_tree(tree)
            # Prepare an dedicated graph for the current tree and a set of type triples (?s a Concept)
            # to be evaluated retrospectively
            tree_graph = __create_graph()
            try:
                self.__resource_queue[tree_graph] = []
                # Get all seeds of the current tree
                seeds = list(self.__plan_graph.objects(tree, AGORA.hasSeed))
                if on_seeds is not None:
                    on_seeds(seeds)
                # Check if the tree root is a pattern node and in that case, adds a type triple to the
                # respective set
                root_pattern = list(self.__plan_graph.objects(tree, AGORA.byPattern))
                if len(root_pattern):
                    pattern_node = list(
                        self.__plan_graph.objects(subject=tree, predicate=AGORA.byPattern)).pop()
                    seed_type = self.__patterns[pattern_node].get('type', None)
                    [type_triples.add((pattern_node, sd, seed_type)) for sd in seeds]
                # Get the children of the root node and follow them recursively
                nxt = list(self.__plan_graph.objects(tree, AGORA.next))
                if len(nxt):
                    # Prepare the list of seeds to start the exploration with, taking into account all
                    # search spaces that were defined
                    s_seeds = set(seeds)
                    for sp in self.__spaces:
                        for seed in s_seeds:
                            __follow_node(tree, tree_graph, sp, seed)
            finally:
                __release_graph(tree_graph)
            # Lazy mode: stop after the first tree that produced data.
            if lazy and found_data and len(self.__spaces) == 1:
                break
        self.__completed = True

    def get_tree_length(x):
        """
        Return the value of the Agora length property in the given tree node
        :param x:
        :return:
        """
        length = list(self.__plan_graph.objects(subject=x, predicate=AGORA.length)).pop()
        return length

    # Get all search trees contained in the search plan and sort them by length. A shorter tree is going
    # to be explored first.
    trees = self.__plan_graph.subjects(RDF.type, AGORA.SearchTree)
    trees = sorted(trees, key=lambda x: get_tree_length(x))
    type_triples = set([])
    thread = Thread(target=execute_plan)
    thread.daemon = True
    thread.start()
    found_data = False
    # NOTE(review): fragment_queue.not_empty is a threading.Condition object and
    # is therefore always truthy, so the loop terminates only via the break
    # below — confirm whether `not fragment_queue.empty()` was intended.
    while not self.__completed or fragment_queue.not_empty:
        try:
            (t, s, p, o) = fragment_queue.get(timeout=1)
            found_data = True
            fragment_queue.task_done()
            if p == RDF.type:
                # Type triples are validated after the exploration finishes.
                type_triples.add((t, s, o))
            else:
                yield (t, s, p, o)
        except Queue.Empty:
            if self.__completed:
                break
        self.__last_iteration_ts = dt.now()
    thread.join()
    # All type triples that are of subjects to ignore won't be returned (this has to be done this way
    # because of generators nature)
    all_ignores = {}
    if self.__subjects_to_ignore.values():
        all_ignores = set.intersection(*self.__subjects_to_ignore.values())
    valid_type_triples = [(t, s, o) for (t, s, o) in type_triples if s not in all_ignores]
    for (t, s, o) in valid_type_triples:
        if on_type_validation is not None:
            on_type_validation((t, s, RDF.type, o))
        yield (t, s, RDF.type, o)
return get_fragment_triples(), self.__plan_graph.namespaces(), self.__plan_graph
|
dynamixel_serial_proxy.py
|
# -*- coding: utf-8 -*-
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2010-2011, Antons Rebguns.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of University of Arizona nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = 'Antons Rebguns'
__copyright__ = 'Copyright (c) 2010-2011 Antons Rebguns'
__credits__ = 'Cody Jorgensen, Cara Slutter'
__license__ = 'BSD'
__maintainer__ = 'Cong Liu'
__email__ = 'liucong.cdhaw@gmail.com'
import math
import sys
import errno
from collections import deque
from threading import Thread
from collections import defaultdict
import roslib
roslib.load_manifest('cute_xqtor_driver')
import rospy
import dynamixel_io
from cute_xqtor_driver.dynamixel_const import *
from diagnostic_msgs.msg import DiagnosticArray
from diagnostic_msgs.msg import DiagnosticStatus
from diagnostic_msgs.msg import KeyValue
from dynamixel_msgs.msg import MotorState
from dynamixel_msgs.msg import MotorStateList
class SerialProxy():
    """Owns the serial connection to a Dynamixel bus.

    Discovers motors on connect, publishes their feedback on
    ``motor_states/<port_namespace>`` at ``update_rate`` Hz and emits ROS
    diagnostics at ``diagnostics_rate`` Hz from background threads.
    Written for Python 2 / rospy.
    """
    def __init__(self,
                 port_name='/dev/ttyUSB0',
                 port_namespace='ttyUSB0',
                 baud_rate='1000000',
                 min_motor_id=1,
                 max_motor_id=25,
                 update_rate=5,
                 diagnostics_rate=1,
                 error_level_temp=75,
                 warn_level_temp=70,
                 readback_echo=False):
        self.port_name = port_name
        self.port_namespace = port_namespace
        self.baud_rate = baud_rate
        self.min_motor_id = min_motor_id
        self.max_motor_id = max_motor_id
        self.update_rate = update_rate
        self.diagnostics_rate = diagnostics_rate
        # Temperature thresholds (deg C) for diagnostics ERROR/WARN levels.
        self.error_level_temp = error_level_temp
        self.warn_level_temp = warn_level_temp
        self.readback_echo = readback_echo
        # Measured publish rate; starts at the desired rate until computed.
        self.actual_rate = update_rate
        self.error_counts = {'non_fatal': 0, 'checksum': 0, 'dropped': 0}
        self.current_state = MotorStateList()
        self.num_ping_retries = 5
        self.motor_states_pub = rospy.Publisher('motor_states/%s' % self.port_namespace, MotorStateList, queue_size=1)
        self.diagnostics_pub = rospy.Publisher('/diagnostics', DiagnosticArray, queue_size=1)

    def connect(self):
        """Open the serial port, discover motors and start worker threads."""
        try:
            self.dxl_io = dynamixel_io.DynamixelIO(self.port_name, self.baud_rate, self.readback_echo)
            self.__find_motors()
        except dynamixel_io.SerialOpenError, e:
            rospy.logfatal(e.message)
            sys.exit(1)
        self.running = True
        if self.update_rate > 0: Thread(target=self.__update_motor_states).start()
        if self.diagnostics_rate > 0: Thread(target=self.__publish_diagnostic_information).start()

    def disconnect(self):
        """Signal the worker threads to terminate on their next loop check."""
        self.running = False

    def __fill_motor_parameters(self, motor_id, model_number):
        """
        Stores some extra information about each motor on the parameter server.
        Some of these paramters are used in joint controller implementation.
        """
        angles = self.dxl_io.get_angle_limits(motor_id)
        voltage = self.dxl_io.get_voltage(motor_id)
        voltages = self.dxl_io.get_voltage_limits(motor_id)
        rospy.set_param('dynamixel/%s/%d/model_number' %(self.port_namespace, motor_id), model_number)
        rospy.set_param('dynamixel/%s/%d/model_name' %(self.port_namespace, motor_id), DXL_MODEL_TO_PARAMS[model_number]['name'])
        rospy.set_param('dynamixel/%s/%d/min_angle' %(self.port_namespace, motor_id), angles['min'])
        rospy.set_param('dynamixel/%s/%d/max_angle' %(self.port_namespace, motor_id), angles['max'])
        # Torque/velocity capabilities scale with the currently measured voltage.
        torque_per_volt = DXL_MODEL_TO_PARAMS[model_number]['torque_per_volt']
        rospy.set_param('dynamixel/%s/%d/torque_per_volt' %(self.port_namespace, motor_id), torque_per_volt)
        rospy.set_param('dynamixel/%s/%d/max_torque' %(self.port_namespace, motor_id), torque_per_volt * voltage)
        velocity_per_volt = DXL_MODEL_TO_PARAMS[model_number]['velocity_per_volt']
        rpm_per_tick = DXL_MODEL_TO_PARAMS[model_number]['rpm_per_tick']
        rospy.set_param('dynamixel/%s/%d/velocity_per_volt' %(self.port_namespace, motor_id), velocity_per_volt)
        rospy.set_param('dynamixel/%s/%d/max_velocity' %(self.port_namespace, motor_id), velocity_per_volt * voltage)
        rospy.set_param('dynamixel/%s/%d/radians_second_per_encoder_tick' %(self.port_namespace, motor_id), rpm_per_tick * RPM_TO_RADSEC)
        # Conversion factors between encoder ticks and degrees/radians.
        encoder_resolution = DXL_MODEL_TO_PARAMS[model_number]['encoder_resolution']
        range_degrees = DXL_MODEL_TO_PARAMS[model_number]['range_degrees']
        range_radians = math.radians(range_degrees)
        rospy.set_param('dynamixel/%s/%d/encoder_resolution' %(self.port_namespace, motor_id), encoder_resolution)
        rospy.set_param('dynamixel/%s/%d/range_degrees' %(self.port_namespace, motor_id), range_degrees)
        rospy.set_param('dynamixel/%s/%d/range_radians' %(self.port_namespace, motor_id), range_radians)
        rospy.set_param('dynamixel/%s/%d/encoder_ticks_per_degree' %(self.port_namespace, motor_id), encoder_resolution / range_degrees)
        rospy.set_param('dynamixel/%s/%d/encoder_ticks_per_radian' %(self.port_namespace, motor_id), encoder_resolution / range_radians)
        rospy.set_param('dynamixel/%s/%d/degrees_per_encoder_tick' %(self.port_namespace, motor_id), range_degrees / encoder_resolution)
        rospy.set_param('dynamixel/%s/%d/radians_per_encoder_tick' %(self.port_namespace, motor_id), range_radians / encoder_resolution)
        # keep some parameters around for diagnostics
        self.motor_static_info[motor_id] = {}
        self.motor_static_info[motor_id]['model'] = DXL_MODEL_TO_PARAMS[model_number]['name']
        self.motor_static_info[motor_id]['firmware'] = self.dxl_io.get_firmware_version(motor_id)
        self.motor_static_info[motor_id]['delay'] = self.dxl_io.get_return_delay_time(motor_id)
        self.motor_static_info[motor_id]['min_angle'] = angles['min']
        self.motor_static_info[motor_id]['max_angle'] = angles['max']
        self.motor_static_info[motor_id]['min_voltage'] = voltages['min']
        self.motor_static_info[motor_id]['max_voltage'] = voltages['max']

    def __find_motors(self):
        """Ping the configured ID range and register every motor that answers.

        Exits the process if no motor responds; motors whose attributes cannot
        be read after all retries are dropped from the list.
        """
        rospy.loginfo('%s: Pinging motor IDs %d through %d...' % (self.port_namespace, self.min_motor_id, self.max_motor_id))
        self.motors = []
        self.motor_static_info = {}
        for motor_id in range(self.min_motor_id, self.max_motor_id + 1):
            for trial in range(self.num_ping_retries):
                try:
                    result = self.dxl_io.ping(motor_id)
                except Exception as ex:
                    rospy.logerr('Exception thrown while pinging motor %d - %s' % (motor_id, ex))
                    continue
                if result:
                    self.motors.append(motor_id)
                    break
        if not self.motors:
            rospy.logfatal('%s: No motors found.' % self.port_namespace)
            sys.exit(1)
        counts = defaultdict(int)
        to_delete_if_error = []
        for motor_id in self.motors:
            for trial in range(self.num_ping_retries):
                try:
                    model_number = self.dxl_io.get_model_number(motor_id)
                    self.__fill_motor_parameters(motor_id, model_number)
                except Exception as ex:
                    rospy.logerr('Exception thrown while getting attributes for motor %d - %s' % (motor_id, ex))
                    # Give up on this motor only after the last retry failed.
                    if trial == self.num_ping_retries - 1: to_delete_if_error.append(motor_id)
                    continue
                counts[model_number] += 1
                break
        for motor_id in to_delete_if_error:
            self.motors.remove(motor_id)
        rospy.set_param('dynamixel/%s/connected_ids' % self.port_namespace, self.motors)
        # Build a human-readable summary, e.g. "2 MX-28 [1, 2], 1 AX-12 [3]".
        status_str = '%s: Found %d motors - ' % (self.port_namespace, len(self.motors))
        for model_number,count in counts.items():
            if count:
                model_name = DXL_MODEL_TO_PARAMS[model_number]['name']
                status_str += '%d %s [' % (count, model_name)
                for motor_id in self.motors:
                    if self.motor_static_info[motor_id]['model'] == model_name:
                        status_str += '%d, ' % motor_id
                status_str = status_str[:-2] + '], '
        rospy.loginfo('%s, initialization complete.' % status_str[:-2])

    def __update_motor_states(self):
        """Worker thread: poll feedback from each motor and publish it.

        Also keeps a moving average of the achieved loop rate over the last
        ``num_events`` iterations for diagnostics.
        """
        num_events = 50
        rates = deque([float(self.update_rate)]*num_events, maxlen=num_events)
        last_time = rospy.Time.now()
        rate = rospy.Rate(self.update_rate)
        while not rospy.is_shutdown() and self.running:
            # get current state of all motors and publish to motor_states topic
            motor_states = []
            for motor_id in self.motors:
                try:
                    state = self.dxl_io.get_feedback(motor_id)
                    if state:
                        motor_states.append(MotorState(**state))
                        if dynamixel_io.exception: raise dynamixel_io.exception
                except dynamixel_io.FatalErrorCodeError, fece:
                    rospy.logerr(fece)
                except dynamixel_io.NonfatalErrorCodeError, nfece:
                    self.error_counts['non_fatal'] += 1
                    rospy.logdebug(nfece)
                except dynamixel_io.ChecksumError, cse:
                    self.error_counts['checksum'] += 1
                    rospy.logdebug(cse)
                except dynamixel_io.DroppedPacketError, dpe:
                    self.error_counts['dropped'] += 1
                    rospy.logdebug(dpe.message)
                except OSError, ose:
                    # EAGAIN is transient; anything else is fatal for the node.
                    if ose.errno != errno.EAGAIN:
                        rospy.logfatal(errno.errorcode[ose.errno])
                        rospy.signal_shutdown(errno.errorcode[ose.errno])
            if motor_states:
                msl = MotorStateList()
                msl.motor_states = motor_states
                self.motor_states_pub.publish(msl)
                self.current_state = msl
                # calculate actual update rate
                current_time = rospy.Time.now()
                rates.append(1.0 / (current_time - last_time).to_sec())
                self.actual_rate = round(sum(rates)/num_events, 2)
                last_time = current_time
            rate.sleep()

    def __publish_diagnostic_information(self):
        """Worker thread: publish bus- and per-motor diagnostics."""
        diag_msg = DiagnosticArray()
        rate = rospy.Rate(self.diagnostics_rate)
        while not rospy.is_shutdown() and self.running:
            diag_msg.status = []
            diag_msg.header.stamp = rospy.Time.now()
            # Bus-level status: configuration, achieved rate and error counters.
            status = DiagnosticStatus()
            status.name = 'Dynamixel Serial Bus (%s)' % self.port_namespace
            status.hardware_id = 'Dynamixel Serial Bus on port %s' % self.port_name
            status.values.append(KeyValue('Baud Rate', str(self.baud_rate)))
            status.values.append(KeyValue('Min Motor ID', str(self.min_motor_id)))
            status.values.append(KeyValue('Max Motor ID', str(self.max_motor_id)))
            status.values.append(KeyValue('Desired Update Rate', str(self.update_rate)))
            status.values.append(KeyValue('Actual Update Rate', str(self.actual_rate)))
            status.values.append(KeyValue('# Non Fatal Errors', str(self.error_counts['non_fatal'])))
            status.values.append(KeyValue('# Checksum Errors', str(self.error_counts['checksum'])))
            status.values.append(KeyValue('# Dropped Packet Errors', str(self.error_counts['dropped'])))
            status.level = DiagnosticStatus.OK
            status.message = 'OK'
            # Warn when we fall more than 5 Hz below the desired rate.
            if self.actual_rate - self.update_rate < -5:
                status.level = DiagnosticStatus.WARN
                status.message = 'Actual update rate is lower than desired'
            diag_msg.status.append(status)
            # Per-motor status derived from the most recent feedback message.
            for motor_state in self.current_state.motor_states:
                mid = motor_state.id
                status = DiagnosticStatus()
                status.name = 'Robotis Dynamixel Motor %d on port %s' % (mid, self.port_namespace)
                status.hardware_id = 'DXL-%d@%s' % (motor_state.id, self.port_namespace)
                status.values.append(KeyValue('Model Name', str(self.motor_static_info[mid]['model'])))
                status.values.append(KeyValue('Firmware Version', str(self.motor_static_info[mid]['firmware'])))
                status.values.append(KeyValue('Return Delay Time', str(self.motor_static_info[mid]['delay'])))
                status.values.append(KeyValue('Minimum Voltage', str(self.motor_static_info[mid]['min_voltage'])))
                status.values.append(KeyValue('Maximum Voltage', str(self.motor_static_info[mid]['max_voltage'])))
                status.values.append(KeyValue('Minimum Position (CW)', str(self.motor_static_info[mid]['min_angle'])))
                status.values.append(KeyValue('Maximum Position (CCW)', str(self.motor_static_info[mid]['max_angle'])))
                status.values.append(KeyValue('Goal', str(motor_state.goal)))
                status.values.append(KeyValue('Position', str(motor_state.position)))
                status.values.append(KeyValue('Error', str(motor_state.error)))
                status.values.append(KeyValue('Velocity', str(motor_state.speed)))
                status.values.append(KeyValue('Load', str(motor_state.load)))
                status.values.append(KeyValue('Voltage', str(motor_state.voltage)))
                status.values.append(KeyValue('Temperature', str(motor_state.temperature)))
                status.values.append(KeyValue('Moving', str(motor_state.moving)))
                if motor_state.temperature >= self.error_level_temp:
                    status.level = DiagnosticStatus.ERROR
                    status.message = 'OVERHEATING'
                elif motor_state.temperature >= self.warn_level_temp:
                    status.level = DiagnosticStatus.WARN
                    status.message = 'VERY HOT'
                else:
                    status.level = DiagnosticStatus.OK
                    status.message = 'OK'
                diag_msg.status.append(status)
            self.diagnostics_pub.publish(diag_msg)
            rate.sleep()
if __name__ == '__main__':
    try:
        serial_proxy = SerialProxy()
        serial_proxy.connect()
        # Block until the node shuts down, then release the serial port.
        rospy.spin()
        serial_proxy.disconnect()
    except rospy.ROSInterruptException: pass
|
train_abstractive.py
|
#!/usr/bin/env python
"""
Main training workflow
"""
from __future__ import division
import argparse
import glob
import os
import random
import signal
import time
import torch
from pytorch_transformers import BertTokenizer
import distributed
from models import data_loader, model_builder
from models.data_loader import load_dataset
from models.loss import abs_loss
from models.model_builder import AbsSummarizer
from models.predictor import build_predictor
from models.trainer import build_trainer
from others.logging import logger, init_logger
# Hyper-parameter names restored from a checkpoint's saved options before
# evaluation/decoding, so the rebuilt model matches the trained architecture.
model_flags = ['hidden_size', 'ff_size', 'heads', 'emb_size', 'enc_layers', 'enc_hidden_size', 'enc_ff_size',
               'dec_layers', 'dec_hidden_size', 'dec_ff_size', 'encoder', 'ff_actv', 'use_interval']
def str2bool(v):
    """Parse a command-line string into a boolean (for argparse ``type=``).

    Accepts the usual case-insensitive spellings of true/false and raises
    ``argparse.ArgumentTypeError`` for anything else.
    """
    normalized = v.lower()
    if normalized in ('yes', 'true', 't', 'y', '1'):
        return True
    if normalized in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def train_abs_multi(args):
    """ Spawns 1 process per GPU """
    init_logger()
    nb_gpu = args.world_size
    mp = torch.multiprocessing.get_context('spawn')
    # Create a thread to listen for errors in the child processes.
    error_queue = mp.SimpleQueue()
    error_handler = ErrorHandler(error_queue)
    # Train with multiprocessing.
    procs = []
    for i in range(nb_gpu):
        device_id = i
        procs.append(mp.Process(target=run, args=(args,
                                                  device_id, error_queue,), daemon=True))
        procs[i].start()
        logger.info(" Starting process pid: %d " % procs[i].pid)
        # Register the child so ErrorHandler can SIGINT it on failure.
        error_handler.add_child(procs[i].pid)
    for p in procs:
        p.join()
def run(args, device_id, error_queue):
    """ run process """
    # gpu_ranks may arrive as strings from the CLI; normalise to ints.
    setattr(args, 'gpu_ranks', [int(i) for i in args.gpu_ranks])
    try:
        gpu_rank = distributed.multi_init(device_id, args.world_size, args.gpu_ranks)
        print('gpu_rank %d' % gpu_rank)
        if gpu_rank != args.gpu_ranks[device_id]:
            raise AssertionError("An error occurred in \
                  Distributed initialization")
        train_abs_single(args, device_id)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((args.gpu_ranks[device_id], traceback.format_exc()))
class ErrorHandler(object):
    """A class that listens for exceptions in children processes and propagates
    the tracebacks to the parent process."""

    def __init__(self, error_queue):
        """ init error handler """
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        # Daemon thread that blocks on the queue waiting for a child traceback.
        self.error_thread = threading.Thread(
            target=self.error_listener, daemon=True)
        self.error_thread.start()
        # SIGUSR1 is sent by error_listener to interrupt the main thread.
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """ error handler """
        self.children_pids.append(pid)

    def error_listener(self):
        """ error listener """
        (rank, original_trace) = self.error_queue.get()
        # Re-queue the traceback so signal_handler can retrieve it again.
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        """ signal handler """
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = """\n\n-- Tracebacks above this line can probably
                 be ignored --\n\n"""
        msg += original_trace
        raise Exception(msg)
def validate_abs(args, device_id):
timestep = 0
if (args.test_all):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
xent_lst = []
for i, cp in enumerate(cp_files):
step = int(cp.split('.')[-2].split('_')[-1])
if (args.test_start_from != -1 and step < args.test_start_from):
xent_lst.append((1e6, cp))
continue
xent = validate(args, device_id, cp, step)
xent_lst.append((xent, cp))
max_step = xent_lst.index(min(xent_lst))
if (i - max_step > 10):
break
xent_lst = sorted(xent_lst, key=lambda x: x[0])[:5]
logger.info('PPL %s' % str(xent_lst))
for xent, cp in xent_lst:
step = int(cp.split('.')[-2].split('_')[-1])
test_abs(args, device_id, cp, step)
else:
while (True):
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (not os.path.getsize(cp) > 0):
time.sleep(60)
continue
if (time_of_cp > timestep):
timestep = time_of_cp
step = int(cp.split('.')[-2].split('_')[-1])
validate(args, device_id, cp, step)
test_abs(args, device_id, cp, step)
cp_files = sorted(glob.glob(os.path.join(args.model_path, 'model_step_*.pt')))
cp_files.sort(key=os.path.getmtime)
if (cp_files):
cp = cp_files[-1]
time_of_cp = os.path.getmtime(cp)
if (time_of_cp > timestep):
continue
else:
time.sleep(300)
def validate(args, device_id, pt, step):
    """Compute validation cross-entropy for one checkpoint and return it."""
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    # Restore architecture hyper-parameters saved with the checkpoint.
    for k in opt.keys():
        if (k in model_flags):
            setattr(args, k, opt[k])
    print(args)
    model = AbsSummarizer(args, device, checkpoint)
    model.eval()
    valid_iter = data_loader.Dataloader(args, load_dataset(args, 'valid', shuffle=False),
                                        args.batch_size, device,
                                        shuffle=False, is_test=False)
    # tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
    # Generation control symbols mapped onto unused BERT vocabulary slots.
    symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
               'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
    valid_loss = abs_loss(model.generator, symbols, model.vocab_size, train=False, device=device)
    trainer = build_trainer(args, device_id, model, None, valid_loss)
    stats = trainer.validate(valid_iter, step)
    return stats.xent()
def test_abs(args, device_id, pt, step):
    """Decode the test set with the given checkpoint (beam search predictor)."""
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    if (pt != ''):
        test_from = pt
    else:
        test_from = args.test_from
    logger.info('Loading checkpoint from %s' % test_from)
    checkpoint = torch.load(test_from, map_location=lambda storage, loc: storage)
    opt = vars(checkpoint['opt'])
    # Restore architecture hyper-parameters saved with the checkpoint.
    for k in opt.keys():
        if (k in model_flags):
            setattr(args, k, opt[k])
    print(args)
    model = AbsSummarizer(args, device, checkpoint)
    model.eval()
    test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
                                       args.test_batch_size, device,
                                       shuffle=False, is_test=True)
    # tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
    # Generation control symbols mapped onto unused BERT vocabulary slots.
    symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
               'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
    predictor = build_predictor(args, tokenizer, symbols, model, logger)
    predictor.translate(test_iter, step)
def test_text_abs(args, device_id, pt, step):
    """Decode the test set with the given checkpoint.

    This entry point was a byte-for-byte duplicate of ``test_abs``; it now
    delegates to it so the checkpoint-loading/decoding logic is maintained in
    one place. Kept as a separate name for backward compatibility with callers
    selecting text-mode testing.

    :param args: Parsed command-line arguments (mutated with checkpoint flags).
    :param device_id: GPU device id, or -1 for CPU.
    :param pt: Checkpoint path; falls back to ``args.test_from`` when empty.
    :param step: Training step used to tag the decoder output files.
    """
    return test_abs(args, device_id, pt, step)
def baseline(args, cal_lead=False, cal_oracle=False):
    """Evaluate an extractive baseline (LEAD or ORACLE) on the test set.

    :param cal_lead: Score the lead-sentences baseline.
    :param cal_oracle: Score the extractive oracle upper bound.
    """
    test_iter = data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False),
                                       args.batch_size, 'cpu',
                                       shuffle=False, is_test=True)
    # No model is needed: the trainer only scores precomputed selections.
    trainer = build_trainer(args, '-1', None, None, None)
    #
    if (cal_lead):
        trainer.test(test_iter, 0, cal_lead=True)
    elif (cal_oracle):
        trainer.test(test_iter, 0, cal_oracle=True)
def train_abs(args, device_id):
    """Dispatch abstractive training to the single- or multi-GPU entry point."""
    single_process = args.world_size <= 1
    if single_process:
        train_abs_single(args, device_id)
    else:
        train_abs_multi(args)
def train_abs_single(args, device_id):
    """Train the abstractive summarizer on a single device.

    Optionally resumes from ``args.train_from`` and/or initialises BERT
    weights from an extractive checkpoint (``args.load_from_extractive``).
    """
    init_logger(args.log_file)
    logger.info(str(args))
    device = "cpu" if args.visible_gpus == '-1' else "cuda"
    logger.info('Device ID %d' % device_id)
    logger.info('Device %s' % device)
    # Seed everything for reproducibility.
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True
    if device_id >= 0:
        torch.cuda.set_device(device_id)
        torch.cuda.manual_seed(args.seed)
    if args.train_from != '':
        logger.info('Loading checkpoint from %s' % args.train_from)
        checkpoint = torch.load(args.train_from,
                                map_location=lambda storage, loc: storage)
        opt = vars(checkpoint['opt'])
        # Restore architecture hyper-parameters saved with the checkpoint.
        for k in opt.keys():
            if (k in model_flags):
                setattr(args, k, opt[k])
    else:
        checkpoint = None
    if (args.load_from_extractive != ''):
        logger.info('Loading bert from extractive model %s' % args.load_from_extractive)
        bert_from_extractive = torch.load(args.load_from_extractive, map_location=lambda storage, loc: storage)
        bert_from_extractive = bert_from_extractive['model']
    else:
        bert_from_extractive = None
    # Re-seed after checkpoint loading so data order stays deterministic.
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    torch.backends.cudnn.deterministic = True

    def train_iter_fct():
        # Debug mode trains on the (small) test split without shuffling.
        if args.is_debugging:
            # print("YES it is debugging")
            return data_loader.Dataloader(args, load_dataset(args, 'test', shuffle=False), args.batch_size, device,
                                          shuffle=False, is_test=False)
        else:
            return data_loader.Dataloader(args, load_dataset(args, 'train', shuffle=True), args.batch_size, device,
                                          shuffle=True, is_test=False)

    model = AbsSummarizer(args, device, checkpoint, bert_from_extractive)
    # Optionally use separate optimizers (learning rates) for BERT and decoder.
    if (args.sep_optim):
        optim_bert = model_builder.build_optim_bert(args, model, checkpoint)
        optim_dec = model_builder.build_optim_dec(args, model, checkpoint)
        optim = [optim_bert, optim_dec]
    else:
        optim = [model_builder.build_optim(args, model, checkpoint)]
    logger.info(model)
    # tokenizer = BertTokenizer.from_pretrained('/home/ybai/projects/PreSumm/PreSumm/temp/', do_lower_case=True, cache_dir=args.temp_dir)
    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True, cache_dir=args.temp_dir)
    # Generation control symbols mapped onto unused BERT vocabulary slots.
    symbols = {'BOS': tokenizer.vocab['[unused0]'], 'EOS': tokenizer.vocab['[unused1]'],
               'PAD': tokenizer.vocab['[PAD]'], 'EOQ': tokenizer.vocab['[unused2]']}
    train_loss = abs_loss(model.generator, symbols, model.vocab_size, device, train=True,
                          label_smoothing=args.label_smoothing)
    trainer = build_trainer(args, device_id, model, optim, train_loss)
    trainer.train(train_iter_fct, args.train_steps)
|
main_backup.py
|
from flask import Flask, render_template, request, Response, send_from_directory, redirect, url_for, jsonify
from flask_sslify import SSLify
import os
from PIL import Image
import json
import base64
import cv2
import numpy as np
#from src import classifier
import web_face_recognition
import time
import subprocess as sp
import requests
import threading
from threading import Thread
app = Flask(__name__)
#sslify = SSLify(app)
# Destination directory for aligned enrolment images (one subdir per person).
save_path = str('/work/MachineLearning/my_dataset/train_aligned/')
# True while an enrolment/retrain job is in progress.
running = False
# Handle of the external retrain subprocess, if any (see commented job code).
extProc = None
# Pending enrolment jobs (Object instances); NOTE: shadows the stdlib `queue` module name.
queue = []
# added to put object in JSON
class Object(object):
    """Minimal record describing one queued enrolment job, JSON-serializable."""

    def __init__(self):
        # Default label shown when listing the queue; callers overwrite it
        # with the enrolled person's id.
        self.name = "Queue list"

    def toJSON(self):
        """Serialize all instance attributes to a JSON string."""
        return json.dumps(vars(self))
#for CORS
# @app.before_first_request
# def activate_job():
# def check_queue():
# global running
# global extProc
# global queue
# while True:
# if queue and extProc is not None:
# if sp.Popen.poll(extProc) is not None:
# queue.pop(0)
# print('start new task : ' + str(queue[0].name))
# extProc = sp.Popen('/work/MachineLearning/facenet_m360/retrain.sh', shell=True)
# else:
# print('still running old taks : ' + str(queue[0].name))
# else:
# running = False
# extProc = None
# print('no task')
# time.sleep(3)
# thread = threading.Thread(target=check_queue)
# thread.start()
#@app.before_request
#def before_request():
# if not request.url.startswith('http://'):
# url = request.url.replace('http://', 'https://', 1)
# code = 301
# return redirect(url, code=code)
@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ('Access-Control-Allow-Headers', 'Content-Type,Authorization'),
        ('Access-Control-Allow-Methods', 'GET,POST'),
    )
    for header, value in cors_headers:
        response.headers.add(header, value)
    return response
@app.route('/')
def index():
    """Liveness check: respond with the server's current working directory."""
    cwd = os.getcwd()
    return Response(cwd)
@app.route('/app')
def remote():
    """Serve the enrolment UI from a fixed absolute template path."""
    return Response(open('/work/MachineLearning/facenet_m360/contributed/templates/index.html').read(), mimetype="text/html")
@app.route('/recognition')
def start():
    """Serve the live-recognition UI from a fixed absolute template path."""
    return Response(open('/work/MachineLearning/facenet_m360/contributed/templates/recognition.html').read(), mimetype="text/html")
@app.route('/enrol', methods=['POST'])
def enrol():
    """Enrol a person: save posted base64 images and queue a retrain job.

    Expects JSON {'id': <person id>, 'data': [<base64 jpeg>, ...]}. Images
    are decoded, converted BGR->RGB and saved under save_path/<id>/, then
    the job is appended to the module-level queue and (if idle) enrolment
    is kicked off via web_face_recognition.enrol.
    """
    global queue
    global running
    global extProc
    try:
        if request.method == 'POST':
            print('POST /enrol success!')
            image_file = json.loads(request.data)
            name = str(image_file['id'])
            person_dir = save_path + name
            if not os.path.exists(person_dir):
                os.mkdir(person_dir)
            count = 0
            for images in image_file['data']:
                filename = name + str(count)
                img = base64.b64decode(images)
                # Fix: np.fromstring on binary data is deprecated; frombuffer
                # is the documented drop-in replacement for this use.
                img_array = np.frombuffer(img, np.uint8)
                imgdata = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
                # OpenCV decodes BGR; reverse the channel axis to get RGB.
                rgb_frame = imgdata[:, :, ::-1]
                img = Image.fromarray(rgb_frame, "RGB")
                if img is not None:
                    img.save(os.path.join(person_dir + '/' + filename + ".jpg"))
                    count += 1
            person = Object()
            person.name = name
            queue.append(person)
            if running is False:
                running = True
                web_face_recognition.enrol(incremental=True)
            return redirect(url_for('getStatus', name=name))
    except Exception as e:
        print('POST /enrol error : %s' % e)
        # Fix: returning the exception object itself is not a valid Flask
        # response (it raises a TypeError); return its message with a 500.
        return str(e), 500
@app.route('/getStatus')
def getStatus():
    """Return the current enrolment queue as a JSON array of job dicts."""
    try:
        global running
        global extProc
        global queue
        # NOTE(review): the 'name' query parameter is read but never used.
        req = str(request.args.get('name'))
        return json.dumps([ob.__dict__ for ob in queue])
    except Exception as e:
        print('Enrolling failed : %s' % e)
        # NOTE(review): returning the exception object is not a valid Flask
        # response and itself raises a TypeError — should be str(e), 500.
        return e
@app.route('/recognition_result', methods=['POST'])
def face_recognition():
    """Decode a posted base64 image and run face recognition on it.

    Expects JSON {'data': <base64 jpeg>}; returns whatever
    web_face_recognition.recognize produces for the decoded frame.
    """
    try:
        if request.method == 'POST':
            start = time.time()
            print('POST /recognition_result success!')
            # web_face_recognition.debug()
            image_file = json.loads(request.data)
            img = base64.b64decode(image_file['data'])
            # Fix: np.fromstring on binary data is deprecated; frombuffer
            # is the documented drop-in replacement for this use.
            img_array = np.frombuffer(img, np.uint8)
            imgdata = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
            boxes = web_face_recognition.recognize(imgdata)
            end = time.time()
            print(end - start)  # crude per-request latency log
            return boxes
    except Exception as e:
        print('Recognition failed : %s' % e)
        # Fix: returning the exception object is not a valid Flask response
        # (it raises a TypeError); return its message with a 500 instead.
        return str(e), 500
@app.route('/loading.gif')
def loading():
    """Serve the loading spinner GIF from the templates directory."""
    return send_from_directory(os.path.join(app.root_path, 'templates'),
                               'loading.gif', mimetype='image/gif')
@app.route('/favicon.ico')
def favicon():
    """Serve the site favicon from the static directory."""
    return send_from_directory(os.path.join(app.root_path, 'static'),
                               'favicon.ico', mimetype='image/vnd.microsoft.icon')
# def start_runner():
# def start_loop():
# not_started = True
# while not_started:
# print('In start loop')
# try:
# r = requests.get('http://localhost:8081/app')
# #r = requests.get('https://ml.deekie.com/enrol/app')
# if r.status_code == 200:
# print('Server started, quiting start_loop')
# not_started = False
# print(r.status_code)
# except:
# print('Server not yet started')
# time.sleep(2)
# print('Started runner')
# thread = threading.Thread(target=start_loop)
# thread.start()
if __name__ == '__main__':
    #start_runner()
    # Threaded so long-running recognition requests don't block other clients.
    app.run(host="localhost", port=8081, threaded=True)
    #app.run(host='0.0.0.0', port=8081, threaded=True, debug=True, ssl_context="adhoc")
|
degrunk-data.py
|
from code import compile_command
import re
from sys import maxsize
from binance.client import Client
import time
import csv
import os
import fire
import random
import queue
# SECURITY(review): live Binance API key/secret pairs are hard-coded here and
# are now burned into version control — they should be revoked and loaded from
# environment variables or a secrets store instead. Eight clients are created
# so the threaded downloader can spread requests across separate rate limits.
client1 = Client('igEARWI7LNtjhzHa3zrNAMtLlLtUjnNb3VFHSHCf5Nlnga4h3vAzthAQKe8wLYlC', 'BM8EVK6TI5kHKQ7sORXpkwHet8mtq8alhOV5JJQ25kAIunKL7YkGgfc80inJad0I')
client2 = Client('FCbtPk3mQj2IqpFbvR5rgPdXgZL8O3s4634zP5thOb0ob6MuiG7sxsvdzVy3MSe2','aljR5fx3pHWSEc6JRkN0YMNlNk28rdM5CBE1XCzUvi8MQy4qoG5q7T5QAD3V9E1w')
client3 = Client('yNt4nLpNc4sg4l7ZwFf3uqBbRq2YidMzIrrmAqWNjQMGCcPvTt66CXMl4S7LGyqO','EWx8Fh5VjQrE6PGA9ywkIiAuOs0VX9Hk22dsBgLFdV4EqkE765ov5GxFDCpdbbr0')
client4 = Client('zS74mTu25foQTQP2ttuH6gRN2cfbSnBzuIDMlktZRVvCQvHpq4G3CvQeHEszlSjH','MFxQEWT28n7UQZ3JK6XsXcsWsRp0l8dYPvrZbQOxN9jK5ez11gqnXDt4aCaRJUFO')
client5 = Client('xVHPQDvR2mvDITO9pRi2yFxhxmv1AyqS8cCxJepbH74Kt6XeB9Zn5lTmgbauSg1d','sGx5MBrbi2iMpZKTvwurEuHA1OYPqSwT9DeJjC8ppkmWSokTDjZsbRjmq58nBG1c')
client6 = Client('J9HNtB1mXWiqwaOLyxOQTB6yiy6Vg7ZfLOXdTdYPofc2hI8XDBcuc7yeIv02EtUx','P1aQFvKuFyGOFgnufWKr61o0lPWuQjt1ZwzDDzZ1RsxMuUFDUiqx5uqI4JlT6sPJ')
client7 = Client('ZQCSfbLRqUQifBmltgLf30Lm9gHSiRovZVyAhvsxi7nKA3TEC9ehsnsl1sdkqSct','iIZOWSTGfTsdA1krWEz2sGU0pRjyGVzFUinMkq6eGwgIf45dorp6xCuxIHhXFQdt')
client8 = Client('DocIMuZHP2x0TxprspUUX0eJSDwXsDkhKLvSa0TF6bKH69otCpSLXYqnKPLQsdzv','j8KqFMMET2QzobtX82pe7VhjuA2eWz9ucc088RHgzKLD16QpHFUAVm4QZ4naIDWr')
from pycoingecko import CoinGeckoAPI
import json
from binance.client import Client
import binance.enums
from datetime import date
import dateparser
import pytz
from datetime import datetime
import threading
from threading import Lock
# Module-level lock so prints from worker threads never interleave.
s_print_lock = Lock()

def s_print(*args, **kwargs):
    """Thread-safe print: serializes all output through s_print_lock."""
    with s_print_lock:
        print(*args, **kwargs)
def date_to_milliseconds(date_str):
    """Convert a UTC date string to epoch milliseconds.

    If using offset strings add "UTC" to the date string, e.g. "now UTC",
    "11 hours ago UTC". See the dateparser docs for accepted formats:
    http://dateparser.readthedocs.io/en/latest/

    :param date_str: date in readable format, i.e. "January 01, 2018",
        "11 hours ago UTC", "now UTC"
    :type date_str: str
    """
    parsed = dateparser.parse(date_str)
    # Treat naive results as UTC so the subtraction below is well defined.
    if parsed.tzinfo is None or parsed.tzinfo.utcoffset(parsed) is None:
        parsed = parsed.replace(tzinfo=pytz.utc)
    utc_epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
    delta = parsed - utc_epoch
    return int(delta.total_seconds() * 1000.0)
def date_to_milliseconds1(date_str):
    """Convert a UTC date string to epoch milliseconds.

    Kept for backward compatibility: this was a line-for-line duplicate of
    date_to_milliseconds, so it now simply delegates to it.

    :param date_str: date in readable format, i.e. "January 01, 2018",
        "11 hours ago UTC", "now UTC"
    :type date_str: str
    """
    return date_to_milliseconds(date_str)
def make_dirs():
    """Create the data-download directory tree next to this script.

    Fix: the original used os.mkdir, which raised FileExistsError when any
    directory already existed (e.g. on a rerun); os.makedirs with
    exist_ok=True is idempotent and also creates missing parents.
    """
    base = os.path.dirname(os.path.realpath(__file__))
    subdirs = (
        'data-download',
        'data-download/day-unprocessed',
        'data-download/1min-unprocessed',
        'data-download/day-futures',
        'data-download/1min-futures',
        'data-download/day-processed',
        'data-download/1min-processed',
    )
    for sub in subdirs:
        os.makedirs(os.path.join(base, sub), exist_ok=True)
def get_exchange_info():
    """Fetch Binance exchange metadata (symbol list etc.) via client8."""
    dd = client8.get_exchange_info()
    return dd
def newest(path):
    """Return the most recently created file path inside *path*.

    Bug fix: the original evaluated max(paths, key=os.path.getctime) but
    discarded the result and always returned None. Also returns None for an
    empty directory instead of raising ValueError from max().
    """
    files = os.listdir(path)
    paths = [os.path.join(path, basename) for basename in files]
    if not paths:
        return None
    return max(paths, key=os.path.getctime)
def checkprog(pth):
    """Return the symbols already downloaded into *pth*.

    A symbol is the filename up to the first dot (e.g. 'BTCUSDT.csv' ->
    'BTCUSDT'). Fix: the original called os.listdir twice in a row for no
    reason; the manual append loop is now a comprehension.
    """
    return [fname.split('.')[0] for fname in os.listdir(pth)]
def getstate(outlist, exchangeinfo, tokennames=('USDT', 'BUSD')):
    """Return symbols quoted in *tokennames* that still need downloading.

    :param outlist: symbols already downloaded (skipped)
    :param exchangeinfo: Binance exchange-info dict with a 'symbols' list
    :param tokennames: quote assets to accept; generalized from the
        original hard-coded pair to any iterable (and the mutable-list
        default replaced by a tuple)
    Leveraged BEAR/BULL tokens are always excluded, as before.
    """
    quotes = tuple(tokennames)
    currlist = []
    for ab in exchangeinfo['symbols']:
        sym = ab['symbol']
        if ab['quoteAsset'] not in quotes:
            continue
        if sym in outlist or "BEAR" in sym or "BULL" in sym:
            continue
        currlist.append(sym)
    print("number of tokens:", len(currlist))
    return currlist
def getcgdata(start, end, token, aa, cg):
    """Fetch CoinGecko market chart data (caps/volumes/prices) for *token*.

    :param start/end: human-readable date strings
    :param token: Binance symbol such as 'BTCUSDT' or 'XYZBUSD'
    :param aa: cg.get_coins_list() result used to map symbol -> coin id
    :param cg: CoinGeckoAPI client
    Returns the market-chart dict, or None when every lookup strategy fails.
    The nested try/except chain deliberately falls back from the mapped
    USDT id, to the mapped BUSD id, to raw lowercase symbol guesses, and
    finally retries once after a 60 s back-off on HTTP 429 (rate limit).
    """
    # Window shifted forward a day (two at the end) in unix seconds.
    ccdcdc = date_to_milliseconds(start)//1000 + 86400
    ccdcdd = date_to_milliseconds(end)//1000 + 86400 + 86400
    ccdccc = date_to_milliseconds(end)//1000
    tokid = {}
    tokidbsd = {}
    # Base-asset symbols (lowercased) after stripping the quote asset.
    b = token.split('.')[0].split('USDT')[0].lower()
    bt = token.split('.')[0].split('BUSD')[0].lower()
    for tokn in aa:
        if tokn['symbol'] == b:
            tokid[token] = tokn['id']
    for tokn in aa:
        if tokn['symbol'] == bt:
            tokidbsd[token] = tokn['id']
    try:
        try:
            cval = cg.get_coin_market_chart_range_by_id(id=tokid[token], vs_currency="usd", from_timestamp=ccdcdc, to_timestamp=ccdcdd)
        except:
            cval = cg.get_coin_market_chart_range_by_id(id=tokidbsd[token], vs_currency="usd", from_timestamp=ccdcdc, to_timestamp=ccdcdd)
    except:
        try:
            cval = cg.get_coin_market_chart_range_by_id(id=token.split('USDT')[0].lower(), vs_currency="usd", from_timestamp=ccdcdc, to_timestamp=ccdcdd)
        except:
            try:
                cval = cg.get_coin_market_chart_range_by_id(id=token.split('BUSD')[0].lower(), vs_currency="usd", from_timestamp=ccdcdc, to_timestamp=ccdcdd)
            except Exception as e:
                try:
                    # 429 = rate limited; wait a minute and retry both guesses.
                    if e.response.status_code == 429:
                        #s_print('err 429', token, 'waiting for 60s')
                        time.sleep(60)
                        try:
                            cval = cg.get_coin_market_chart_range_by_id(id=token.split('USDT')[0].lower(), vs_currency="usd", from_timestamp=ccdcdc, to_timestamp=ccdcdd)
                        except:
                            cval = cg.get_coin_market_chart_range_by_id(id=token.split('BUSD')[0].lower(), vs_currency="usd", from_timestamp=ccdcdc, to_timestamp=ccdcdd)
                except:
                    #s_print('skipping', token)
                    return
    #s_print('grabbed', token)
    return cval
def download(start, end, interval, currlist, pth, type, withcgdata=True, withaggtrades=False, aggtradelimit=0):
    """Sequentially download klines for every symbol and write one CSV each.

    :param start/end: human-readable date strings
    :param interval: Binance kline interval constant
    :param currlist: symbols to fetch
    :param pth: output directory for per-symbol CSV files
    :param type: 'day' or '1min' (NOTE: shadows the builtin `type`)
    :param withcgdata: merge CoinGecko market-cap/volume/price columns
    :param withaggtrades: also fetch aggregate trades per candle
    :param aggtradelimit: unused
    NOTE(review): when withcgdata is False, `cval` is still read below and
    `cnct`/`templist` in the else branch are never initialised — that path
    raises NameError/UnboundLocalError. TODO confirm before relying on it.
    """
    from binance.enums import HistoricalKlinesType
    if withcgdata == True:
        cg = CoinGeckoAPI()
        aa = cg.get_coins_list()
    for token in currlist:
        if withcgdata == True:
            # CoinGecko wants unix-second timestamps.
            ccdcdc = date_to_milliseconds(start)//1000
            ccdcdd = date_to_milliseconds(end)//1000
            ccdccc = date_to_milliseconds(end)//1000
            tokid = {}
            # Map the Binance symbol to a CoinGecko coin id.
            b = token.split('.')[0].split('USDT')[0].lower()
            for tokn in aa:
                if tokn['symbol'] == b:
                    tokid[token] = tokn['id']
            try:
                cval = cg.get_coin_market_chart_range_by_id(id=tokid[token], vs_currency="usd", from_timestamp=ccdcdc, to_timestamp=ccdcdd)
            except:
                try:
                    # Fall back to guessing the id from the raw symbol.
                    cval = cg.get_coin_market_chart_range_by_id(id=token.split('USDT')[0].lower(), vs_currency="usd", from_timestamp=ccdcdc, to_timestamp=ccdcdd)
                except Exception as e:
                    try:
                        # 429 = CoinGecko rate limit; back off one minute.
                        if e.response.status_code == 429:
                            time.sleep(60)
                            cval = cg.get_coin_market_chart_range_by_id(id=token.split('USDT')[0].lower(), vs_currency="usd", from_timestamp=ccdcdc, to_timestamp=ccdcdd)
                    except:
                        continue
        klines = client8.get_historical_klines(token, interval, start, end, klines_type= HistoricalKlinesType.SPOT)
        if not klines == []:
            # Drop the final (still open) candle.
            klines.pop(-1)
        else:
            continue
        # d1/d2/d3: daily market cap, total volume and price from CoinGecko,
        # zero-padded to the kline count when CoinGecko returned nothing.
        if len(cval['market_caps']) != 0:
            d1 = [item[1] for item in cval['market_caps']]
        else:
            d1 = []
            d1.extend([0] * len(klines))
        if len(cval['total_volumes']) != 0:
            d2 = [item[1] for item in cval['total_volumes']]
        else:
            d2 = []
            d2.extend([0] * len(klines))
        if len(cval['prices']) != 0:
            d3 = [item[1] for item in cval['prices']]
        else:
            d3 = []
            d3.extend([0] * len(klines))
        if withcgdata == True:
            with open(os.path.join(pth,'{}.csv'.format(token)), 'w', newline='') as f:
                writerc = csv.writer(f)
                if withaggtrades == False:
                    dic = ['date', 'open', 'high','low', 'close','volume','symbol','QAV','numberoftrades','takerbuyBAV','takerbuyQAV','market_cap', 'total_volume','price_from_coingecko', 'factor']
                    writerc.writerow(dic)
                gct = 0
                it = 0
                prevdate = 0
                testvar = len(klines)//60
                testvar2 = (len(klines)+ 60)//60
                # cnct indexes into the daily CoinGecko series; mnx delays its
                # advance when that series is shorter than the kline span.
                if type == '1min':
                    if len(d1) > (len(klines)+ 60)//60:
                        cnct = len(d1) - ((len(klines)//60) + 1)
                        mnx = 0
                    elif len(d1) == (len(klines) + 60)//60:
                        cnct = 0
                        mnx = 0
                    elif len(d1) == len(klines)//60:
                        cnct = 0
                        mnx = 0
                    else:
                        cnct = 0
                        mnx = len(klines)//60 - len(d1)
                if type == 'day':
                    if len(d1) > (len(klines) + 1):
                        cnct = len(d1) - ((len(klines)) + 1)
                        mnx = 0
                    elif len(d1) == (len(klines) + 1):
                        cnct = 0
                        mnx = 0
                    elif len(d1) == len(klines):
                        cnct = 0
                        mnx = 1
                    else:
                        cnct = 0
                        mnx = (len(klines) + 1) - len(d1)
                templist = []
                for a in klines:
                    if withaggtrades == True:
                        olis = []
                        if type == 'day':
                            agg_trades = client8.aggregate_trade_iter(symbol=token, start_str=a[0], endingm=a[6], day=True)
                        if type == '1min':
                            agg_trades = client8.aggregate_trade_iter(symbol=token, start_str=a[0], endingm=a[6], day=False)
                        dic = ['date', 'open', 'high','low', 'close','volume','symbol','QAV','numberoftrades','takerbuyBAV','takerbuyQAV','market_cap', 'total_volume','price_from_coingecko', 'factor']
                        for enumr, qqq in enumerate(range(1000)):
                            dic.append('p_{}'.format(enumr))
                            dic.append('q_{}'.format(enumr))
                            dic.append('m_{}'.format(enumr))
                            dic.append('M_{}'.format(enumr))
                        writerc.writerow(dic)
                        for bb in agg_trades:
                            for bbb in bb:
                                if bbb == []:
                                    continue
                                for bbbbb in bbb:
                                    olis.append(bbbbb)
                        # Keep the 1000 largest trades by quantity.
                        fnlis = sorted(olis, key = lambda i: i['q'], reverse=True)[0:1000]
                    if type == 'day':
                        if datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d") != prevdate:
                            cnct = cnct + 1
                            prevdate = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d")
                            if mnx != None:
                                if mnx != 0:
                                    mnx = mnx -1
                                    cnct = cnct -1
                        dd = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%Y-%m-%d")
                        b = [dd, a[1], a[2], a[3], a[4], a[5],token,a[7],a[8],a[9],a[10],d1[cnct],d2[cnct],d3[cnct],1]
                        if withaggtrades == True:
                            for enumr, qqq in enumerate(range(1000)):
                                try:
                                    b.append(fnlis[enumr]['p'])
                                    b.append(fnlis[enumr]['q'])
                                    b.append(fnlis[enumr]['m'])
                                    b.append(fnlis[enumr]['M'])
                                except:
                                    # Pad rows with fewer than 1000 trades.
                                    b.append(0)
                                    b.append(0)
                                    b.append(0)
                                    b.append(0)
                        templist.append(b)
                        gct = gct + 1
                        if gct == 10000:
                            it = it + 1
                            gct= 0
                            #print('{} Iteration #: '.format(token), it)
                            writerc.writerows(templist)
                            templist = []
                        writerc.writerows(templist)
                        templist = []
                    if type == '1min':
                        dte = int(datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%H"))
                        if dte != prevdate:
                            cnct = cnct + 1
                            prevdate = int(datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%H"))
                            if mnx != None:
                                if mnx != 0:
                                    mnx = mnx -1
                                    cnct = cnct -1
                        dd = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%Y-%m-%d %H:%M:%S")
                        b = [dd, a[1], a[2], a[3], a[4], a[5],token,a[7],a[8],a[9],a[10],d1[cnct],d2[cnct],d3[cnct],1]
                        if withaggtrades == True:
                            for enumr, qqq in enumerate(range(1000)):
                                try:
                                    b.append(fnlis[enumr]['p'])
                                    b.append(fnlis[enumr]['q'])
                                    b.append(fnlis[enumr]['m'])
                                    b.append(fnlis[enumr]['M'])
                                except:
                                    b.append(0)
                                    b.append(0)
                                    b.append(0)
                                    b.append(0)
                        templist.append(b)
                        gct = gct + 1
                        if gct == 10000:
                            it = it + 1
                            gct= 0
                            #print('{} Iteration #: '.format(token), it)
                            writerc.writerows(templist)
                            templist = []
                        writerc.writerows(templist)
                        templist = []
        else:
            with open(os.path.join(pth,'{}.csv'.format(token)), 'w', newline='') as f:
                writerc = csv.writer(f)
                dic = ['date', 'open', 'high','low', 'close','volume','symbol','QAV','numberoftrades','takerbuyBAV','takerbuyQAV','factor']
                writerc.writerow(dic)
                gct = 0
                it = 0
                prevdate = 0
                # NOTE(review): `cnct` and `templist` are read below but never
                # initialised on this branch — UnboundLocalError if reached.
                for a in klines:
                    if datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d") != prevdate:
                        cnct = cnct + 1
                        prevdate = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d")
                    if type == 'day':
                        dd = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%Y-%m-%d")
                    if type == '1min':
                        dd = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%Y-%m-%d %H:%M:%S")
                    b = [dd, a[1], a[2], a[3], a[4], a[5],token,a[7],a[8],a[9],a[10],1]
                    writerc.writerow(b)
                    gct = gct + 1
                    if gct == 1000:
                        it = it + 1
                        gct= 0
                        #print('{} Iteration #: '.format(token), it)
                        writerc.writerows(templist)
def download1(start, end, interval, q, pth, type, client, pq, aa, withcgdata=True, withaggtrades=False, aggtradelimit=0):
    """Worker variant of download(): fetch one symbol and write its CSV.

    :param q: for '1min' a queue of symbols (one is popped); for 'day' the
        symbol itself
    :param client: the Binance client this worker should use
    :param pq: for '1min' a queue of worker 'order' slots, returned to the
        queue on failure so another worker can retry; for 'day' the order
        value itself
    :param aa: cg.get_coins_list() result for symbol->id mapping
    NOTE(review): relies on the module-level global `cg` being set by
    startdownload_day/startdownload_1min before this is called — TODO
    confirm. As in download(), withcgdata=False still reads `cval` and the
    else branch reads an uninitialised `cnct`/never-flushed rows.
    NOTE(review): `klines_type=...,barorder=order` is not a standard
    python-binance get_historical_klines argument — presumably a patched
    client; verify against the client in use.
    """
    from binance.enums import HistoricalKlinesType
    if type == '1min':
        token = q.get()
    if type == 'day':
        token = q
    if token == None:
        return
    if type == '1min':
        order = pq.get()
    if type == 'day':
        order = pq
    #print(order)
    if withcgdata == True:
        cval = getcgdata(start, end, token, aa,cg)
        if cval == None:
            # Give the order slot back so the symbol can be retried.
            if type == '1min':
                pq.put(order)
                return
            if type == 'day':
                return
    try:
        klines = client.get_historical_klines(token, interval, start, end, klines_type= HistoricalKlinesType.SPOT,barorder=order)
    except:
        try:
            # Randomized back-off before one retry.
            time.sleep(random.randint(15,180))
            klines = client.get_historical_klines(token, interval, start, end, klines_type= HistoricalKlinesType.SPOT,barorder=order)
        except:
            pq.put(order)
            return
    if type == '1min':
        if not klines == []:
            # Drop the final (still open) candle.
            klines.pop(-1)
        else:
            pq.put(order)
            return
    # d1/d2/d3: daily market cap, total volume and price from CoinGecko,
    # zero-padded when CoinGecko returned nothing.
    if len(cval['market_caps']) != 0:
        d1 = [item[1] for item in cval['market_caps']]
    else:
        d1 = []
        d1.extend([0] * (len(klines)+1))
    if len(cval['total_volumes']) != 0:
        d2 = [item[1] for item in cval['total_volumes']]
    else:
        d2 = []
        d2.extend([0] * (len(klines)+1))
    if len(cval['prices']) != 0:
        d3 = [item[1] for item in cval['prices']]
    else:
        d3 = []
        d3.extend([0] * (len(klines)+1))
    if withcgdata == True:
        with open(os.path.join(pth,'{}.csv'.format(token)), 'w', newline='') as f:
            writerc = csv.writer(f)
            if withaggtrades == False:
                dic = ['date', 'open', 'high','low', 'close','volume','symbol','QAV','numberoftrades','takerbuyBAV','takerbuyQAV','market_cap', 'total_volume','price_from_coingecko', 'factor']
                writerc.writerow(dic)
            gct = 0
            it = 0
            if klines == []:
                return
            prevdate = datetime.fromtimestamp(klines[0][0]/1000.0,tz=pytz.utc).strftime("%d")
            testvar = len(d1) - 2
            testvar2 = (len(klines))//60
            cctf = testvar2/testvar
            # cnct indexes into the daily CoinGecko series; mnx delays its
            # advance when that series is shorter than the kline span.
            if type == '1min':
                if len(d1) > (len(klines)//60)//24:
                    cnct = (len(d1)) - (len(klines)//60)//24
                    cnct1 = (len(d1)) - ((len(klines)//60)//24 + 1)
                    mnx = 0
                elif (len(klines)//60)//24 >= len(d1):
                    cnct = 0
                    mnx = (((len(klines))//60)//24 + 2) - len(d1)
            if type == 'day':
                if len(d1) > len(klines):
                    cnct = (len(d1)) - len(klines)
                    mnx = 0
                elif len(klines) >= len(d1):
                    cnct = 0
                    mnx = (len(klines) + 1) - len(d1)
            templist = []
            for enm, a in enumerate(klines):
                if withaggtrades == True:
                    olis = []
                    if type == 'day':
                        agg_trades = client.aggregate_trade_iter(symbol=token, start_str=a[0], endingm=a[6], day=True)
                    if type == '1min':
                        agg_trades = client.aggregate_trade_iter(symbol=token, start_str=a[0], endingm=a[6], day=False)
                    dic = ['date', 'open', 'high','low', 'close','volume','symbol','QAV','numberoftrades','takerbuyBAV','takerbuyQAV','market_cap', 'total_volume','price_from_coingecko', 'factor']
                    for enumr, qqq in enumerate(range(1000)):
                        dic.append('p_{}'.format(enumr))
                        dic.append('q_{}'.format(enumr))
                        dic.append('m_{}'.format(enumr))
                        dic.append('M_{}'.format(enumr))
                    writerc.writerow(dic)
                    for bb in agg_trades:
                        for bbb in bb:
                            if bbb == []:
                                continue
                            for bbbbb in bbb:
                                olis.append(bbbbb)
                    # Keep the 1000 largest trades by quantity.
                    fnlis = sorted(olis, key = lambda i: i['q'], reverse=True)[0:1000]
                if type == 'day':
                    if datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d") != prevdate:
                        cnct = cnct + 1
                        prevdate = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d")
                        if mnx != None:
                            if mnx != 0:
                                mnx = mnx -1
                                cnct = cnct -1
                    dd = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%Y-%m-%d")
                    # Walk cnct_temp downwards until the CG index is in range;
                    # abandon the file once the index bottoms out at zero.
                    cnct_temp = cnct
                    for t in range(cnct+1):
                        try:
                            b = [dd, a[1], a[2], a[3], a[4], a[5],token,a[7],a[8],a[9],a[10],d1[cnct_temp],d2[cnct_temp],d3[cnct_temp],1]
                            break
                        except:
                            cnct_temp = cnct_temp -1
                            if cnct_temp == 0:
                                return
                            continue
                    if withaggtrades == True:
                        for enumr, qqq in enumerate(range(1000)):
                            try:
                                b.append(fnlis[enumr]['p'])
                                b.append(fnlis[enumr]['q'])
                                b.append(fnlis[enumr]['m'])
                                b.append(fnlis[enumr]['M'])
                            except:
                                # Pad rows with fewer than 1000 trades.
                                b.append(0)
                                b.append(0)
                                b.append(0)
                                b.append(0)
                    templist.append(b)
                    gct = gct + 1
                    if gct == 10000:
                        it = it + 1
                        gct= 0
                        #print('{} Iteration #: '.format(token), it)
                        writerc.writerows(templist)
                        templist = []
                if type == '1min':
                    dte = int(datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d"))
                    if dte != prevdate:
                        cnct = cnct + 1
                        prevdate = int(datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d"))
                        if mnx != None:
                            if mnx != 0:
                                mnx = mnx -1
                                cnct = cnct -1
                    dd = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%Y-%m-%d %H:%M:%S")
                    # Same downward-walking index recovery as the 'day' branch.
                    cnct_temp = cnct
                    for t in range(cnct):
                        try:
                            b = [dd, a[1], a[2], a[3], a[4], a[5],token,a[7],a[8],a[9],a[10],d1[cnct_temp],d2[cnct_temp],d3[cnct_temp],1]
                            break
                        except:
                            cnct_temp = cnct_temp -1
                            if cnct_temp == 0:
                                return
                            continue
                    if withaggtrades == True:
                        for enumr, qqq in enumerate(range(1000)):
                            try:
                                b.append(fnlis[enumr]['p'])
                                b.append(fnlis[enumr]['q'])
                                b.append(fnlis[enumr]['m'])
                                b.append(fnlis[enumr]['M'])
                            except:
                                b.append(0)
                                b.append(0)
                                b.append(0)
                                b.append(0)
                    try:
                        templist.append(b)
                    except:
                        return
                    gct = gct + 1
                    if gct == 10000:
                        it = it + 1
                        gct= 0
                        #print('{} Iteration #: '.format(token), it)
                        writerc.writerows(templist)
                        templist = []
                    writerc.writerows(templist)
                    templist = []
    else:
        with open(os.path.join(pth,'{}.csv'.format(token)), 'w', newline='') as f:
            writerc = csv.writer(f)
            dic = ['date', 'open', 'high','low', 'close','volume','symbol','QAV','numberoftrades','takerbuyBAV','takerbuyQAV','factor']
            writerc.writerow(dic)
            gct = 0
            it = 0
            prevdate = 0
            templist = []
            for a in klines:
                if datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d") != prevdate:
                    cnct = cnct + 1
                    prevdate = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%d")
                if type == 'day':
                    dd = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%Y-%m-%d")
                if type == '1min':
                    dd = datetime.fromtimestamp(a[0]/1000.0,tz=pytz.utc).strftime("%Y-%m-%d %H:%M:%S")
                b = [dd, a[1], a[2], a[3], a[4], a[5],token,a[7],a[8],a[9],a[10],1]
                writerc.writerow(b)
                gct = gct + 1
                if gct == 1000:
                    it = it + 1
                    gct= 0
                    #print('{} Iteration #: '.format(token), it)
                    writerc.writerows(templist)
def takehalf(v, items):
    """Return the first (v == 1) or second (v == 2) half of *items*.

    Fix: the second parameter was named `list`, shadowing the builtin;
    renamed to `items` (all in-file callers pass it positionally).
    Any other v returns None, as before.
    """
    middle_index = len(items) // 2
    if v == 1:
        return items[:middle_index]
    if v == 2:
        return items[middle_index:]
    return None
def splitlist(x, items):
    """Split *items* into chunks of size len(items)//x + 1 (at most x chunks).

    Fix: the second parameter was named `list`, shadowing the builtin, and
    the chunking was done through a needless inner lambda; both cleaned up.
    """
    size = len(items) // x + 1
    return [items[i:i + size] for i in range(0, len(items), size)]
def startdownload_day(withcgdata=True, withaggtrades=False, aggtradelimit=0, run=True, ver=None):
    """Download daily klines for every symbol not yet on disk.

    :param run: no-op unless True (guard for the fire CLI)
    :param ver: when 1 or 2, process only that half of the symbol list via
        the sequential download(); when None, fetch per-symbol via download1()
    Sets the module-level global `cg` used by getcgdata/download1.
    """
    if run == True:
        global cg
        cg = CoinGeckoAPI()
        try:
            aa = cg.get_coins_list()
        except:
            # Likely rate limited; wait two minutes and retry once.
            time.sleep(120)
            aa = cg.get_coins_list()
        dirs = os.path.dirname(os.path.realpath(__file__))
        if withcgdata == True:
            pth = os.path.join(dirs, 'data-download/day-unprocessed')
        else:
            pth = os.path.join(dirs, 'data-download/day-processed')
        exchg = get_exchange_info()
        # Only symbols whose CSV is not already in pth.
        currlist = getstate(checkprog(pth),exchg)
        q = queue.SimpleQueue()
        for item in currlist:
            q.put(item)
        pq = queue.SimpleQueue()
        for itm in range(4):
            pq.put(itm)
        start = "21 Dec, 2010"
        end = "4 Jan, 2022"
        interval = Client.KLINE_INTERVAL_1DAY
        if not ver == None:
            currlist = takehalf(ver, currlist)
            download(start,end,interval,currlist,pth,'day',withaggtrades=False,aggtradelimit=0)
        if ver == None:
            # Sequential per-symbol download (threaded version is disabled).
            for a in currlist:
                download1(start,end,interval,a,pth,'day',client1, 0, aa, withaggtrades=False,aggtradelimit=0)
def startdownload_1min(withcgdata=True, withaggtrades=False, aggtradelimit=0, run=False, ver=None):
    """Download 1-minute klines for every symbol not yet on disk.

    :param run: no-op unless True (guard for the fire CLI)
    :param ver: when 1 or 2, process only that half sequentially via
        download(); when None, spread the work across 8 threads, one
        Binance client each, staggered 10 s apart to avoid rate limits.
    Sets the module-level global `cg` used by getcgdata/download1.
    """
    global cg
    if run == True:
        cg = CoinGeckoAPI()
        try:
            aa = cg.get_coins_list()
        except:
            # Likely rate limited; wait two minutes and retry once.
            time.sleep(120)
            aa = cg.get_coins_list()
        dirs = os.path.dirname(os.path.realpath(__file__))
        if withcgdata == True:
            pth = os.path.join(dirs, 'data-download/1min-unprocessed')
        else:
            pth = os.path.join(dirs, 'data-download/1min-processed')
        exchg = get_exchange_info()
        currlist = getstate(checkprog(pth),exchg)
        start = "21 Dec, 2010"
        end = "5 Jan, 2022"
        # Work queue of symbols plus a queue of 8 worker "order" slots.
        q = queue.SimpleQueue()
        for item in currlist:
            q.put(item)
        pq = queue.SimpleQueue()
        for itm in range(8):
            pq.put(itm)
        interval = Client.KLINE_INTERVAL_1MINUTE
        if not ver == None:
            currlist = takehalf(ver, currlist)
            download(start,end,interval,currlist,pth,'1min',withaggtrades=False,aggtradelimit=0)
        if ver == None:
            # Each pass launches 8 workers that each pop one symbol.
            for a in (range((len(currlist)//8)+ 1)):
                t1 = threading.Thread(target=download1, args=(start,end,interval,q,pth,'1min',client1, pq,aa))
                t2 = threading.Thread(target=download1, args=(start,end,interval,q,pth,'1min',client2, pq,aa))
                t3 = threading.Thread(target=download1, args=(start,end,interval,q,pth,'1min',client3, pq,aa))
                t4 = threading.Thread(target=download1, args=(start,end,interval,q,pth,'1min',client4, pq,aa))
                t5 = threading.Thread(target=download1, args=(start,end,interval,q,pth,'1min',client5, pq,aa))
                t6 = threading.Thread(target=download1, args=(start,end,interval,q,pth,'1min',client6, pq,aa))
                t7 = threading.Thread(target=download1, args=(start,end,interval,q,pth,'1min',client7, pq,aa))
                t8 = threading.Thread(target=download1, args=(start,end,interval,q,pth,'1min',client8, pq,aa))
                t1.start()
                time.sleep(10)
                t2.start()
                time.sleep(10)
                t3.start()
                time.sleep(10)
                t4.start()
                time.sleep(10)
                t5.start()
                time.sleep(10)
                t6.start()
                time.sleep(10)
                t7.start()
                time.sleep(10)
                t8.start()
                t1.join()
                t2.join()
                t3.join()
                t4.join()
                t5.join()
                t6.join()
                t7.join()
                t8.join()
def startdownload_1min_nothread(name, withcgdata=True, withaggtrades=False, aggtradelimit=0, run=False, ver=None):
    """Single-symbol, single-threaded variant of startdownload_1min.

    NOTE(review): the ver==None path looks broken: download1 is called
    without its required `aa` argument (TypeError), it is given `[name]`
    where the '1min' path calls q.get() on the token source, and the
    8-way unpack of splitlist() fails whenever splitlist returns fewer
    than 8 chunks. TODO confirm before using this entry point.
    """
    if run == True:
        dirs = os.path.dirname(os.path.realpath(__file__))
        if withcgdata == True:
            pth = os.path.join(dirs, 'data-download/1min-unprocessed')
        else:
            pth = os.path.join(dirs, 'data-download/1min-processed')
        exchg = get_exchange_info()
        currlist = getstate(checkprog(pth),exchg)
        start = "5 Jan, 2021"
        end = "5 Jan, 2022"
        interval = Client.KLINE_INTERVAL_1MINUTE
        if not ver == None:
            currlist = takehalf(ver, currlist)
            download(start,end,interval,currlist,pth,'1min',withaggtrades=False,aggtradelimit=0)
        if ver == None:
            l1,l2,l3,l4,l5,l6,l7,l8 = splitlist(8,currlist)
            download1(start,end,interval,[name],pth,'1min',client8, 1,withaggtrades=False,aggtradelimit=0, )
def delete_incompletes(pth):
    """Remove every CSV in *pth* whose last row is not the expected final
    minute ('2022-01-04 23:59:00'), i.e. whose download was cut short."""

    def _first_columns(csv_path):
        # First column of every non-blank row, in file order.
        stamps = []
        with open(csv_path, "r", encoding="utf-8", errors="ignore") as handle:
            for row in csv.reader(handle, delimiter=','):
                if row:  # avoid blank lines
                    stamps.append(row[0])
        return stamps

    for fname in os.listdir(pth):
        full = os.path.join(pth, fname)
        # IndexError on a file with no rows, matching the original behavior.
        final_stamp = _first_columns(full)[-1]
        if final_stamp != '2022-01-04 23:59:00':
            print(fname, ":", final_stamp)
            os.remove(full)
def interval_to_milliseconds(interval):
    """Convert a Binance interval string to milliseconds.

    :param interval: Binance interval string 1m, 3m, 5m, 15m, 30m, 1h, 2h,
        4h, 6h, 8h, 12h, 1d, 3d, 1w
    :type interval: str
    :return: None if the unit is not one of m, h, d or w, or the count is
        not an integer; otherwise the interval length in milliseconds.
    """
    unit_seconds = {
        "m": 60,
        "h": 60 * 60,
        "d": 24 * 60 * 60,
        "w": 7 * 24 * 60 * 60,
    }
    unit = interval[-1]
    if unit not in unit_seconds:
        return None
    try:
        count = int(interval[:-1])
    except ValueError:
        return None
    return count * unit_seconds[unit] * 1000
# requires dateparser package
# NOTE(review): this runs the full daily download (network I/O) at import
# time; consider routing it through the commented-out fire CLI below instead.
startdownload_day(withcgdata=True, run=True)
# if __name__ == '__main__':
# #fire.Fire(startdownload_day, command=('withcgdata=True','run= True'))
# fire.Fire({"make_dirs": make_dirs, "download_day": startdownload_day,"download_1min": startdownload_1min, "del_incomp": delete_incompletes})
|
main.py
|
"""
Manual Captcha Harvester
Made by @CrepChef
"""
from utils import Logger
from flask import Flask, request, jsonify, render_template, redirect
import logging
import threading
from datetime import datetime
from time import sleep
import webbrowser
import json
# Harvested reCAPTCHA tokens, oldest first; shared between the Flask routes
# and the background expiry thread.
tokens = []
logger = Logger()
def manageTokens():
    """Background janitor: every 5 seconds, drop tokens whose expiry passed.

    BUG FIX: the original removed items from ``tokens`` while iterating over
    the same list, which makes Python skip the element after each removal;
    iterate over a snapshot instead so every expired token is seen.
    """
    while True:
        now = datetime.now().timestamp()
        for token in tokens[:]:  # snapshot: safe removal during iteration
            if token['expiry'] < now:
                tokens.remove(token)
                logger.error("Token expired and deleted")
        sleep(5)
def sendToken():
    """Block until a token is available, then pop and return the oldest one.

    The original spun on ``while not tokens: pass``, pinning a CPU core;
    a short sleep keeps the wait cheap while adding at most ~50 ms latency.
    """
    while not tokens:
        sleep(0.05)  # yield instead of busy-waiting at 100% CPU
    token = tokens.pop(0)
    return token['token']
app = Flask(__name__)
# Silence werkzeug's per-request access logging; only errors are shown.
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
@app.route('/')
def index():
    # Serve the captcha page. `config` is loaded in the __main__ block
    # before app.run(), so it is available by the time requests arrive.
    return render_template('index.html', sitekey=config['sitekey'], domain=config['domain'])
@app.route('/api/submit', methods=['POST'])
def submit():
    """Receive a harvested reCAPTCHA token from the page and store it.

    Tokens get a ~115 s lifetime (reCAPTCHA tokens expire at 120 s; a small
    safety margin is kept) and are appended to the shared queue.
    """
    try:
        # BUG FIX: narrowed from a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit. A missing form field raises
        # werkzeug's BadRequestKeyError, a KeyError subclass.
        token = request.form['g-recaptcha-response']
        expiry = datetime.now().timestamp() + 115
        tokenDict = {
            'token': token,
            'expiry': expiry
        }
        tokens.append(tokenDict)
        logger.success("Token harvested and stored")
        return jsonify({
            'success': True,
            'error': None,
            'result': 'Token harvested and stored'
        })
    except KeyError:
        return jsonify({
            'success': False,
            'error': 'Undocumented error',
            'result': None
        })
@app.route('/api/count')
def api_count():
    """Report how many harvested tokens are currently queued."""
    payload = {
        'success': True,
        'error': None,
        'result': len(tokens),
    }
    return jsonify(payload)
@app.route('/api/token')
def api_fetch_token():
    """Pop and return the oldest harvested token, or an error when empty."""
    try:
        # BUG FIX: narrowed from a bare except; pop(0) on an empty list is
        # the only expected failure here and raises IndexError.
        token = tokens.pop(0)
        logger.status("Token requested and returned to user")
        return jsonify({
            'success': True,
            'error': None,
            # NOTE(review): key is 'results' here but 'result' everywhere
            # else -- kept as-is for compatibility with existing clients.
            'results': token['token']
        })
    except IndexError:
        logger.warn("Token requested but none available")
        return jsonify({
            'success': False,
            'error': 'Token requested but none available',
            'result': None
        })
if __name__ == '__main__':
    # Daemon thread so the expiry janitor cannot keep the process alive
    # after the Flask server stops (the original non-daemon thread did).
    threading.Thread(target=manageTokens, daemon=True).start()
    # The with-statement closes the file; the explicit close() was redundant.
    with open('config.json') as file:
        config = json.load(file)
    logger.log("*****************************************************")
    logger.log("Manual Captcha Harvester | CrepChef")
    logger.log("*****************************************************")
    logger.log("Server running at harvester.{}:5000".format(config['domain']))
    webbrowser.open('http://harvester.{}:5000/'.format(config['domain']))
    app.run()
|
test.py
|
import csv
import os
import subprocess
import threading
# Gather the packages to test.
PREFIX = './packages/node_modules/'
# CISCOSPARK = os.path.join(PREFIX, '@ciscospark')
WEBEX = os.path.join(PREFIX, '@webex')

# Service endpoints exported to the test subprocess when running against
# the production environment.
PROD_ENV_VARS = {
    # 'ACL_SERVICE_URL': 'https://acl-a.wbx2.com/acl/api/v1', ?
    'ATLAS_SERVICE_URL': 'https://atlas-a.wbx2.com/admin/api/v1',
    'CONVERSATION_SERVICE': 'https://conv-a.wbx2.com/conversation/api/v1',
    'ENCRYPTION_SERVICE_URL': 'https://encryption-a.wbx2.com',
    'IDBROKER_BASE_URL': 'https://idbroker.webex.com',
    'IDENTITY_BASE_URL': 'https://identity.webex.com',
    'WDM_SERVICE_URL': 'https://wdm-a.wbx2.com/wdm/api/v1',
    # Logging
    'ENABLE_VERBOSE_NETWORK_LOGGING': 'true'
}
# Service endpoints for the integration environment.
INT_ENV_VARS = {
    # Environments
    'ACL_SERVICE_URL': 'https://acl-intb.ciscospark.com/acl/api/v1',
    'ATLAS_SERVICE_URL': 'https://atlas-intb.ciscospark.com/admin/api/v1',
    'CONVERSATION_SERVICE': 'https://conversation-intb.ciscospark.com/conversation/api/v1',
    'ENCRYPTION_SERVICE_URL': 'https://encryption-intb.ciscospark.com/encryption/api/v1',
    # Do not use 'https://hydra-intb.ciscospark.com/v1' for Hydra. CI expects 'apialpha'.
    'HYDRA_SERVICE_URL': 'https://apialpha.ciscospark.com/v1/',
    'IDBROKER_BASE_URL': 'https://idbrokerbts.webex.com',
    'IDENTITY_BASE_URL': 'https://identitybts.webex.com',
    'WDM_SERVICE_URL': 'https://wdm-intb.ciscospark.com/wdm/api/v1',
    'WHISTLER_API_SERVICE_URL': 'https://whistler.onint.ciscospark.com/api/v1',
    # Logging
    'ENABLE_VERBOSE_NETWORK_LOGGING': 'true'
}
# Where the per-package exit codes are recorded.
OUTPUT_DIR = 'output'
OUTPUT_FILE_PATH = os.path.join(OUTPUT_DIR, 'test-comparison.csv')
# %s is substituted with the scoped package name.
TEST_COMMAND = 'npm test -- --packages %s'
# Packages intentionally excluded from the test run.
# BUG FIX: the first entry was missing a trailing comma, so Python
# concatenated the first two adjacent string literals into one bogus
# entry ('@webex/test-helper-server@webex/internal-plugin-calendar'),
# leaving neither real package in the skip list.
SKIP_PACKAGES = [
    '@webex/test-helper-server',  # no tests
    '@webex/internal-plugin-calendar',  # no tests
    '@webex/plugin-webhooks'  # no tests
]
def should_include_package(path_name, name):
    """True when *name* is a real package directory and not skip-listed."""
    candidate_dir = os.path.join(path_name, name)
    scoped_name = os.path.join(os.path.basename(path_name), name)
    return os.path.isdir(candidate_dir) and scoped_name not in SKIP_PACKAGES
def get_package_names(path_name):
    """List scoped package names (namespace/name) found under *path_name*."""
    namespace = path_name.replace(PREFIX, '')
    entries = os.listdir(path_name)
    return [os.path.join(namespace, entry)
            for entry in entries
            if should_include_package(path_name, entry)]
def run_subprocess(bash_command, env_vars):
    """Run *bash_command* with the current environment plus *env_vars*.

    Output is captured (and discarded); only the exit code is returned.
    """
    merged_env = dict(os.environ, **env_vars)
    proc = subprocess.Popen(bash_command.split(), stdout=subprocess.PIPE,
                            env=merged_env)
    proc.communicate()
    return proc.returncode
class bcolors:
    # ANSI escape sequences for colored/styled terminal output.
    HEADER = '\033[95m'     # bright magenta
    OKBLUE = '\033[94m'     # bright blue
    OKGREEN = '\033[92m'    # bright green
    WARNING = '\033[93m'    # bright yellow
    FAIL = '\033[91m'       # bright red
    ENDC = '\033[0m'        # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def print_result(return_code, prefix='Tests are a...'):
    """Print a colored pass/fail line for *return_code* (0 means success)."""
    passed = return_code == 0
    color = bcolors.OKGREEN if passed else bcolors.FAIL
    outcome = 'success.' if passed else 'failure.'
    print(color + prefix + outcome + bcolors.ENDC)
def run_test(package, environment):
    """Run the npm test suite for *package* against *environment*.

    :param package: scoped npm package name
    :param environment: 'integration' or 'production'
    :return: exit code of the test subprocess
    """
    # BUG FIX: the original used `environment is 'integration'`, which
    # compares string *identity*, not equality -- it only worked by CPython
    # interning accident and emits SyntaxWarning on modern Pythons.
    env_vars = INT_ENV_VARS if environment == 'integration' else PROD_ENV_VARS
    print(bcolors.OKBLUE + 'Testing `%s` on %s...' % (package, environment) + bcolors.ENDC)
    bash_command = TEST_COMMAND % package
    return_code = run_subprocess(bash_command, env_vars)
    print_result(return_code, prefix='Testing `%s` on %s...' % (package, environment))
    return return_code
def run_env_tests(package, writer, csv_file):
    """Test *package* on production then integration; record both exit codes."""
    codes = [run_test(package, env) for env in ('production', 'integration')]
    writer.writerow([package] + codes)
    csv_file.flush()
def run_tests_in_sequence(packages, writer, csv_file):
    """Run the per-package environment tests one package at a time."""
    for pkg in packages:
        run_env_tests(pkg, writer, csv_file)
def run_tests_in_parallel(packages, writer, csv_file):
    """Run the per-package environment tests concurrently, one thread each.

    NOTE(review): all threads share the single csv writer and file handle
    without a lock; interleaved writerow calls could garble rows -- confirm
    before preferring this over run_tests_in_sequence.
    """
    workers = [
        threading.Thread(target=run_env_tests, args=(pkg, writer, csv_file))
        for pkg in packages
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def main():
    """Discover @webex packages and record their test exit codes to a CSV."""
    webex_packages = get_package_names(WEBEX)
    packages = webex_packages
    print('Skipping %d packages: %s' % (len(SKIP_PACKAGES), ', '.join(SKIP_PACKAGES)))
    print('Testing %d packages...' % len(packages))
    try:
        os.mkdir(OUTPUT_DIR)
    except OSError:
        pass  # output directory already exists
    # BUG FIX: the file was opened in binary mode ('wb'), which makes
    # csv.writer raise TypeError on Python 3 (it writes str). Text mode
    # with newline='' is the documented way to open csv output files.
    with open(OUTPUT_FILE_PATH, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file, quoting=csv.QUOTE_MINIMAL)
        writer.writerow(['Package', 'Production exit code', 'Integration exit code'])
        run_tests_in_sequence(packages, writer, csv_file)
    print('Wrote output to: %s' % OUTPUT_FILE_PATH)
    print('Done.')

if __name__ == "__main__":
    main()
|
job_scheduling.py
|
# This file handles all jobs and job scheduling
# Each job has:
# * scheduler to set its rate
# * the job function itself
import logging
import time
import datetime
import threading
import schedule
import my_globals
import webcam
import remote_comm
#from remote_comm import register # importing this way to avoid circular includes
import sensors # TODO this may cause more timming issues
from cover import fsm, cover_schedule, check_cover_button
# multi thread support
def run_threaded(job_func):
    """Fire-and-forget: run *job_func* on a fresh background thread."""
    threading.Thread(target=job_func).start()
# status
def schedule_job_status():
    """(Re)schedule the periodic server-status upload at the configured rate."""
    interval = my_globals.settings["job_server_status_sec"]
    logging.info("Scheduling status job for every %d seconds" % interval)
    schedule.clear("status")
    schedule.every(interval).seconds.do(job_upload_status).tag("status")
# send status to server
def job_upload_status():
    # Thin wrapper so the scheduler has a named job to invoke.
    remote_comm.status_update()
# sensors
def schedule_job_sensors():
    """(Re)schedule the periodic sensor read/upload at the configured rate."""
    interval = my_globals.settings["job_server_sensors_sec"]
    logging.info("Scheduling sensors job for every %d seconds" % interval)
    schedule.clear("sensors")
    # Run threaded so a slow sensor read doesn't block the scheduler loop.
    schedule.every(interval).seconds.do(run_threaded, job_sensors).tag("sensors")
# Read environment sensors
def job_sensors():
    # Refresh the sensor readings, then push them to the server.
    logging.info("Getting Sensors")
    sensors.update()
    remote_comm.sensor_upload()
# webcam
def schedule_job_webcam():
    """(Re)schedule the periodic webcam capture at the configured rate."""
    interval = my_globals.settings["job_webcam_sec"]
    logging.info("Scheduling webcam job for every %d seconds" % interval)
    schedule.clear("webcam")
    # Threaded so a slow capture doesn't block the scheduler loop.
    schedule.every(interval).seconds.do(run_threaded, job_webcam).tag("webcam")
# take a picture
def job_webcam():
    """Capture a webcam picture, log how long it took, then upload it."""
    started_ms = int(round(time.time() * 1000))
    webcam.get_Picture()
    finished_ms = int(round(time.time() * 1000))
    logging.debug("timepic: %d" % (finished_ms - started_ms))
    remote_comm.pic_upload()
# cover monitor
def job_cover_monitor():
    # Advance the cover state machine one step.
    fsm()

def job_cover_schedule():
    # Apply the time-based open/close schedule for the cover.
    cover_schedule()

def job_cover_button():
    # Poll the physical cover button.
    check_cover_button()

# Save setting to file
def job_save_settings():
    # Persist current settings so they survive a restart.
    my_globals.save_settings()

# If I'm running you should see this periodically
def job_heartbeat():
    logging.info("I'm working. %s" % datetime.datetime.now())
# Schedule jobs that run at hard-coded rates (registered at import time).
schedule.every(60).seconds.do(job_heartbeat)
schedule.every(15).minutes.do(remote_comm.register) # periodic re-register device with webserver
schedule.every(2).seconds.do(job_cover_monitor)
schedule.every(0.2).seconds.do(job_cover_button)
schedule.every(10).seconds.do(job_cover_schedule)
schedule.every(2).minutes.do(job_save_settings)
# Reference: jobs that are dynamically scheduled (see schedule_job_* above)
# * job_status
# * job sensors
# * job webcam
|
latch.py
|
"""
Used for stressing Latch.get/put. Swap the number of producer/consumer threads
below to try both -- there are many conditions in the Latch code that require
testing of both.
"""
import logging
import random
import threading
import time
import mitogen.core
import mitogen.utils
mitogen.utils.log_to_file()
mitogen.core.IOLOG.setLevel(logging.DEBUG)
# Enable mitogen's verbose / very-verbose debug flags.
mitogen.core._v = True
mitogen.core._vv = True

# The shared latch under stress, plus global counters updated by workers.
l = mitogen.core.Latch()
consumed = 0
produced = 0
crash = 0
def cons():
    # Consumer: repeatedly take a value from the latch, then burn a little
    # time (sleep + spin loop) so producers and consumers interleave.
    # The bare except is deliberate in this stress script: any exception
    # escaping Latch.get (e.g. on close) is counted as a crash.
    global consumed, crash
    try:
        while 1:
            g = l.get()
            print('got=%s consumed=%s produced=%s crash=%s' % (g, consumed, produced, crash))
            consumed += 1
            time.sleep(g)
            for x in range(int(g * 1000)):
                pass
    except:
        crash += 1
def prod():
    """Producer: push small random delays into the latch forever."""
    global produced
    while True:
        l.put(random.random() / 10)
        produced += 1
        time.sleep(random.random() / 10)
# Start consumers and producers; swap the counts to stress the other side.
allc = [threading.Thread(target=cons) for x in range(64)]
allp = [threading.Thread(target=prod) for x in range(8)]
for th in allc + allp:
    # daemon attribute form: Thread.setDaemon() is deprecated. Daemon
    # threads die with the main thread when the user presses Enter.
    th.daemon = True
    th.start()
# Block until the user presses Enter. raw_input only exists on Python 2;
# fall back to input() on Python 3 (mitogen supports both).
try:
    _input = raw_input
except NameError:
    _input = input
_input()
exit()
|
kill_thread.py
|
import ctypes
import inspect
import threading
import time
__all__ = ["stop_thread"]
def _async_raise(tid, exctype):
    """raises the exception, performs cleanup if needed"""
    # PyThreadState_SetAsyncExc schedules *exctype* to be raised in the
    # thread with id *tid* the next time that thread executes bytecode.
    # NOTE(review): the C API takes an unsigned long id; c_long is the
    # classic recipe and works in practice -- confirm on your platform.
    tid = ctypes.c_long(tid)
    if not inspect.isclass(exctype):
        # The API requires an exception *class*, not an instance.
        exctype = type(exctype)
    res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
    if res == 0:
        # No thread with that id was found.
        raise ValueError("invalid thread id")
    elif res != 1:
        # """if it returns a number greater than one, you're in trouble,
        # and you should call it again with exc=NULL to revert the effect"""
        ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
        raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(thread):
    # Ask *thread* to exit by asynchronously raising SystemExit inside it.
    # The exception is only delivered when the thread runs Python bytecode,
    # so a thread blocked in a C call (e.g. sleep) exits on return.
    _async_raise(thread.ident, SystemExit)
def test():
    # Demo worker: print forever until killed via stop_thread().
    while True:
        print("-------")
        time.sleep(0.5)
if __name__ == "__main__":
t = threading.Thread(target=test)
t.start()
time.sleep(5.2)
print("main thread sleep finish")
stop_thread(t)
|
rate_limited_sender.py
|
#-------------------------------------------------------------------------------
# rate_limited_sender.py
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2021 homelith
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#-------------------------------------------------------------------------------
import os
import sys
import time
import signal
import serial
import threading
# Shared state between the reader/writer threads and the main thread.
handle_serial = None         # serial.Serial handle, set by open_serial()
thread_running = False       # cleared to ask both worker threads to stop
transfer_running = False     # set once start_word has been seen on RX
start_word = "writer_ready"  # RX marker: receiver is ready for data
end_word = "writer_end"      # RX marker: transfer finished, shut down
baud_rate = 9600
port_name = ""
file_name = ""
target_bps = 300             # payload rate limit, bits per second
def open_serial():
    """Open the global serial port handle at the configured baud rate.

    Prints usage help and exits the process when the port cannot be opened.
    """
    global handle_serial
    global baud_rate
    global port_name
    try:
        print("open '%s' at %u bps ..." % (port_name, baud_rate))
        handle_serial = serial.Serial(port=port_name, baudrate=baud_rate, timeout=0.1)
        print("done.")
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit are
        # not swallowed while opening the port.
        print("port '%s' not found." % port_name)
        print("usage : python3 rate_limited_sender.py {port_name (e.g. /dev/ttyACM0)} {filename to send} ( {target_bps (default: 300)} )")
        sys.exit(1)
def read_proc():
    """Reader thread: echo RX bytes and watch for the start/end keywords.

    Seeing start_word sets transfer_running (lets write_proc begin);
    seeing end_word clears thread_running (shuts both threads down).

    NOTE(review): the matcher resets to 0 on a mismatch without re-testing
    the current character against the first keyword character, so keywords
    preceded by a partial copy of their own prefix can be missed -- confirm
    whether that matters for these markers.
    """
    global thread_running
    global transfer_running
    global handle_serial
    start_match_count = 0  # chars of start_word matched so far
    end_match_count = 0    # chars of end_word matched so far
    while thread_running:
        if handle_serial.inWaiting() > 0 :
            c = handle_serial.read().decode('ascii')
            print(c, end="")
            # search start_word and start write process
            if start_word[start_match_count] == c :
                if start_match_count == (len(start_word) - 1) :
                    print("'%s' detected" % start_word)
                    start_match_count = 0
                    transfer_running = True
                else:
                    start_match_count += 1
            else:
                start_match_count = 0
            # search end_word and exit program
            if end_word[end_match_count] == c :
                if end_match_count == (len(end_word) - 1) :
                    print("'%s' detected" % end_word)
                    end_match_count = 0
                    thread_running = False
                else:
                    end_match_count += 1
            else:
                end_match_count = 0
        time.sleep(0.0001)
def write_proc() :
    """Writer thread: stream file_name over serial at roughly target_bps.

    One byte is written per rate interval; a remainder bucket carries over
    timing error between iterations so the long-run average hits the target.
    """
    global thread_running
    global transfer_running
    global handle_serial
    prev_tick = 0.0
    remainder_bucket = 0.0
    # Seconds per byte at the requested bit rate.
    target_interval = 1.0 / (target_bps / 8.0)
    bytes_sent = 0
    # open input file
    fp = open(file_name, "rb")
    while thread_running :
        curr_tick = time.time();
        # wait until start_word arrived at RX
        if transfer_running == False :
            # Park prev_tick ahead of now so no send burst happens the
            # instant the transfer starts.
            prev_tick = curr_tick + 1.0
            continue
        # rate limiting
        if (curr_tick - prev_tick + remainder_bucket) < target_interval :
            continue
        remainder_bucket += (curr_tick - prev_tick) - target_interval
        # Clamp the carried-over error to [0, target_interval].
        if remainder_bucket < 0.0 :
            remainder_bucket = 0.0
        if remainder_bucket >= target_interval:
            remainder_bucket = target_interval
        prev_tick = curr_tick
        # write 1 byte to serial
        c = fp.read(1)
        if len(c) != 0 :
            handle_serial.write(c)
            bytes_sent += 1
            if bytes_sent % 400 == 0 :
                print("sender : %u bytes sent" % bytes_sent)
        time.sleep(0.0001)
    fp.close()
if __name__ == "__main__" :
# evaluate cmd arguments
if len(sys.argv) <= 2 :
print("usage : python3 rate_limited_sender.py {port_name (e.g. /dev/ttyACM0)} {filename to send} ( {target_bps (default: 300)} )")
sys.exit(1)
port_name = sys.argv[1]
file_name = sys.argv[2]
if os.path.isfile(file_name) == False :
print("input file '%s' not found." % file_name)
print("usage : python3 rate_limited_sender.py {port_name (e.g. /dev/ttyACM0)} {filename to send} ( {target_bps (default: 300)} )")
sys.exit(1)
if len(sys.argv) >= 4 :
target_bps = int(sys.argv[2])
# init serial port
open_serial()
try :
# start read/write thread and wait
read_thread = threading.Thread(target=read_proc)
write_thread = threading.Thread(target=write_proc)
thread_running = True
read_thread.start()
write_thread.start()
while thread_running:
time.sleep(1)
finally :
print("waiting for thread join ... ")
thread_running = False
read_thread.join()
write_thread.join()
handle_serial.close()
print("done.")
sys.exit(0)
|
locators.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2015 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
import json
import logging
import os
import posixpath
import re
from io import BytesIO
try:
import threading
except ImportError: # pragma: no cover
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, build_opener,
HTTPRedirectHandler as BaseRedirectHandler, text_type,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata, MetadataInvalidError
from .util import (cached_property, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy, normalize_name)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)

# Matches URL fragments like "sha256=abcdef..." carrying an archive digest.
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
# Extracts the charset parameter from a Content-Type header value.
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile('text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'https://pypi.org/pypi'
def get_all_distribution_names(url=None):
    """
    Return all distribution names known by an index.
    :param url: The URL of the index. Defaults to the main PyPI index.
    :return: A list of all known distribution names.
    """
    index_url = DEFAULT_INDEX if url is None else url
    client = ServerProxy(index_url, timeout=3.0)
    try:
        return client.list_packages()
    finally:
        # ServerProxy exposes close through this call form.
        client('close')()
class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use first header.
        newurl = None
        for key in ('location', 'uri'):
            if key in headers:
                newurl = headers[key]
                break
        if newurl is None:  # pragma: no cover
            return
        urlparts = urlparse(newurl)
        if urlparts.scheme == '':
            # Scheme-less redirect target: resolve it against the request
            # URL and rewrite the header before delegating to the base class.
            newurl = urljoin(req.get_full_url(), newurl)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(key, newurl)
            else:
                headers[key] = newurl
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)

    # All redirect styles share the same fixed-up handling.
    http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
    """
    A base class for locators - things that locate distributions.
    """
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    binary_extensions = ('.egg', '.exe', '.whl')
    excluded_extensions = ('.pdf',)

    # A list of tags indicating which wheels you want to match. The default
    # value of None matches against the tags compatible with the running
    # Python. If you want to match other values, set wheel_tags on a locator
    # instance to a list of tuples (pyver, abi, arch) which you want to match.
    wheel_tags = None

    downloadable_extensions = source_extensions + ('.whl',)

    def __init__(self, scheme='default'):
        """
        Initialise an instance.
        :param scheme: Because locators look for most recent versions, they
                       need to know the version scheme to use. This specifies
                       the current PEP-recommended scheme - use ``'legacy'``
                       if you need to support existing distributions on PyPI.
        """
        self._cache = {}
        self.scheme = scheme
        # Because of bugs in some of the handlers on some of the platforms,
        # we use our own opener rather than just using urlopen.
        self.opener = build_opener(RedirectHandler())
        # If get_project() is called from locate(), the matcher instance
        # is set from the requirement passed to locate(). See issue #18 for
        # why this can be useful to know.
        self.matcher = None
        self.errors = queue.Queue()

    def get_errors(self):
        """
        Return any errors which have occurred.
        """
        result = []
        while not self.errors.empty():  # pragma: no cover
            try:
                e = self.errors.get(False)
                result.append(e)
            except self.errors.Empty:
                # NOTE(review): queue.Queue instances have no ``Empty``
                # attribute (queue.Empty is module-level), so this handler
                # would itself raise AttributeError if get(False) ever
                # raised; the empty() guard above prevents reaching it.
                continue
            self.errors.task_done()
        return result

    def clear_errors(self):
        """
        Clear any errors which may have been logged.
        """
        # Just get the errors and throw them away
        self.get_errors()

    def clear_cache(self):
        # Drop all cached per-project results.
        self._cache.clear()

    def _get_scheme(self):
        return self._scheme

    def _set_scheme(self, value):
        self._scheme = value

    scheme = property(_get_scheme, _set_scheme)

    def _get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This should be implemented in subclasses.

        If called from a locate() request, self.matcher will be set to a
        matcher for the requirement to satisfy, otherwise it will be None.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Please implement in the subclass')

    def get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.

        This calls _get_project to do all the work, and just implements a caching layer on top.
        """
        if self._cache is None:  # pragma: no cover
            result = self._get_project(name)
        elif name in self._cache:
            result = self._cache[name]
        else:
            self.clear_errors()
            result = self._get_project(name)
            self._cache[name] = result
        return result

    def score_url(self, url):
        """
        Give an url a score which can be used to choose preferred URLs
        for a given project release.
        """
        # Tuple comparison: https first, then PyPI host, downloadability,
        # wheel-ness, compatibility, and finally the basename itself.
        t = urlparse(url)
        basename = posixpath.basename(t.path)
        compatible = True
        is_wheel = basename.endswith('.whl')
        is_downloadable = basename.endswith(self.downloadable_extensions)
        if is_wheel:
            compatible = is_compatible(Wheel(basename), self.wheel_tags)
        return (t.scheme == 'https', 'pypi.org' in t.netloc,
                is_downloadable, is_wheel, compatible, basename)

    def prefer_url(self, url1, url2):
        """
        Choose one of two URLs where both are candidates for distribution
        archives for the same version of a distribution (for example,
        .tar.gz vs. zip).

        The current implementation favours https:// URLs over http://, archives
        from PyPI over those from other locations, wheel compatibility (if a
        wheel) and then the archive name.
        """
        result = url2
        if url1:
            s1 = self.score_url(url1)
            s2 = self.score_url(url2)
            if s1 > s2:
                result = url1
            if result != url2:
                logger.debug('Not replacing %r with %r', url1, url2)
            else:
                logger.debug('Replacing %r with %r', url1, url2)
        return result

    def split_filename(self, filename, project_name):
        """
        Attempt to split a filename in project name, version and Python version.
        """
        return split_filename(filename, project_name)

    def convert_url_to_download_info(self, url, project_name):
        """
        See if a URL is a candidate for a download URL for a project (the URL
        has typically been scraped from an HTML page).

        If it is, a dictionary is returned with keys "name", "version",
        "filename" and "url"; otherwise, None is returned.
        """
        def same_project(name1, name2):
            # Compare project names after PEP 503-style normalization.
            return normalize_name(name1) == normalize_name(name2)

        result = None
        scheme, netloc, path, params, query, frag = urlparse(url)
        if frag.lower().startswith('egg='):  # pragma: no cover
            logger.debug('%s: version hint in fragment: %r',
                         project_name, frag)
        m = HASHER_HASH.match(frag)
        if m:
            algo, digest = m.groups()
        else:
            algo, digest = None, None
        origpath = path
        if path and path[-1] == '/':  # pragma: no cover
            path = path[:-1]
        if path.endswith('.whl'):
            try:
                wheel = Wheel(path)
                if not is_compatible(wheel, self.wheel_tags):
                    logger.debug('Wheel not compatible: %s', path)
                else:
                    if project_name is None:
                        include = True
                    else:
                        include = same_project(wheel.name, project_name)
                    if include:
                        result = {
                            'name': wheel.name,
                            'version': wheel.version,
                            'filename': wheel.filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            'python-version': ', '.join(
                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
                        }
            except Exception as e:  # pragma: no cover
                logger.warning('invalid path for wheel: %s', path)
        elif not path.endswith(self.downloadable_extensions):  # pragma: no cover
            logger.debug('Not downloadable: %s', path)
        else:  # downloadable extension
            path = filename = posixpath.basename(path)
            for ext in self.downloadable_extensions:
                if path.endswith(ext):
                    path = path[:-len(ext)]
                    t = self.split_filename(path, project_name)
                    if not t:  # pragma: no cover
                        logger.debug('No match for project/version: %s', path)
                    else:
                        name, version, pyver = t
                        if not project_name or same_project(project_name, name):
                            result = {
                                'name': name,
                                'version': version,
                                'filename': filename,
                                'url': urlunparse((scheme, netloc, origpath,
                                                   params, query, '')),
                                #'packagetype': 'sdist',
                            }
                            if pyver:  # pragma: no cover
                                result['python-version'] = pyver
                    break
        if result and algo:
            result['%s_digest' % algo] = digest
        return result

    def _get_digest(self, info):
        """
        Get a digest from a dictionary by looking at a "digests" dictionary
        or keys of the form 'algo_digest'.

        Returns a 2-tuple (algo, digest) if found, else None. Currently
        looks only for SHA256, then MD5.
        """
        result = None
        if 'digests' in info:
            digests = info['digests']
            for algo in ('sha256', 'md5'):
                if algo in digests:
                    result = (algo, digests[algo])
                    break
        if not result:
            for algo in ('sha256', 'md5'):
                key = '%s_digest' % algo
                if key in info:
                    result = (algo, info[key])
                    break
        return result

    def _update_version_data(self, result, info):
        """
        Update a result dictionary (the final result from _get_project) with a
        dictionary for a specific version, which typically holds information
        gleaned from a filename or URL for an archive for the distribution.
        """
        name = info.pop('name')
        version = info.pop('version')
        if version in result:
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        if md.source_url != info['url']:
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist

    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.

        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions
                            to be located. Otherwise, pre-release versions
                            are not returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None:  # pragma: no cover
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if len(versions) > 2:  # urls and digests keys are present
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            for k in versions:
                if k in ('urls', 'digests'):
                    continue
                try:
                    if not matcher.match(k):
                        logger.debug('%s did not match %r', matcher, k)
                    else:
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        else:
                            logger.debug('skipping pre-release '
                                         'version %s of %s', k, matcher.name)
                except Exception:  # pragma: no cover
                    logger.warning('error matching %s with %r', matcher, k)
                    pass  # slist.append(k)
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                # Highest (most recent) matching version wins.
                version = slist[-1]
                result = versions[version]
        if result:
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if url in sd:  # pragma: no cover
                    d[url] = sd[url]
            result.digests = d
        self.matcher = None
        return result
class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.

        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        return set(self.client.list_packages())

    def _get_project(self, name):
        # Build the versions -> Distribution mapping, plus the special
        # 'urls' and 'digests' entries expected by Locator.locate().
        result = {'urls': {}, 'digests': {}}
        versions = self.client.package_releases(name, True)
        for v in versions:
            urls = self.client.release_urls(name, v)
            data = self.client.release_data(name, v)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = data['name']
            metadata.version = data['version']
            metadata.license = data.get('license')
            metadata.keywords = data.get('keywords', [])
            metadata.summary = data.get('summary')
            dist = Distribution(metadata)
            if urls:
                # Use the first URL as the canonical source for the release.
                info = urls[0]
                metadata.source_url = info['url']
                dist.digest = self._get_digest(info)
            dist.locator = self
            result[v] = dist
            for info in urls:
                url = info['url']
                digest = self._get_digest(info)
                result['urls'].setdefault(v, set()).add(url)
                result['digests'][url] = digest
        return result
class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in functionality
    and probably not worth using.
    """
    def __init__(self, url, **kwargs):
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        # Build the versions -> Distribution mapping, plus the special
        # 'urls' and 'digests' entries expected by Locator.locate().
        result = {'urls': {}, 'digests': {}}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            resp = self.opener.open(url)
            data = resp.read().decode()  # for now
            d = json.loads(data)
            md = Metadata(scheme=self.scheme)
            data = d['info']
            md.name = data['name']
            md.version = data['version']
            md.license = data.get('license')
            md.keywords = data.get('keywords', [])
            md.summary = data.get('summary')
            dist = Distribution(md)
            dist.locator = self
            urls = d['urls']
            result[md.version] = dist
            for info in d['urls']:
                url = info['url']
                dist.download_urls.add(url)
                dist.digests[url] = self._get_digest(info)
                result['urls'].setdefault(md.version, set()).add(url)
                result['digests'][url] = self._get_digest(info)
            # Now get other releases
            for version, infos in d['releases'].items():
                if version == md.version:
                    continue  # already done
                omd = Metadata(scheme=self.scheme)
                omd.name = md.name
                omd.version = version
                odist = Distribution(omd)
                odist.locator = self
                result[version] = odist
                for info in infos:
                    url = info['url']
                    odist.download_urls.add(url)
                    odist.digests[url] = self._get_digest(info)
                    result['urls'].setdefault(version, set()).add(url)
                    result['digests'][url] = self._get_digest(info)
#            for info in urls:
#                md.source_url = info['url']
#                dist.digest = self._get_digest(info)
#                dist.locator = self
#                for info in urls:
#                    url = info['url']
#                    result['urls'].setdefault(md.version, set()).add(url)
#                    result['digests'][url] = self._get_digest(info)
        except Exception as e:
            # Record the failure for get_errors() and return what we have.
            self.errors.put(text_type(e))
            logger.exception('JSON fetch failed: %s', e)
        return result
class Page(object):
    """
    This class represents a scraped HTML page.
    """
    # The following slightly hairy-looking regex just looks for the contents of
    # an anchor link, which has an attribute "href" either immediately preceded
    # or immediately followed by a "rel" attribute. The attribute values can be
    # declared with double quotes, single quotes or no quotes - which leads to
    # the length of the expression.
    _href = re.compile("""
(rel\\s*=\\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\\s\n]*))\\s+)?
href\\s*=\\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\\s\n]*))
(\\s+rel\\s*=\\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\\s\n]*)))?
""", re.I | re.S | re.X)
    # Picks up an optional <base href="..."> that rebases relative links.
    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)

    def __init__(self, data, url):
        """
        Initialise an instance with the Unicode page contents and the URL they
        came from.
        """
        self.data = data
        self.base_url = self.url = url
        m = self._base.search(self.data)
        if m:
            self.base_url = m.group(1)

    # Characters outside this set get percent-encoded by links below.
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)

    @cached_property
    def links(self):
        """
        Return the URLs of all the links on a page together with information
        about their "rel" attribute, for determining which ones to treat as
        downloads and which ones to queue for further scraping.
        """
        def clean(url):
            "Tidy up an URL."
            scheme, netloc, path, params, query, frag = urlparse(url)
            return urlunparse((scheme, netloc, quote(path),
                               params, query, frag))

        result = set()
        for match in self._href.finditer(self.data):
            d = match.groupdict('')
            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
                   d['rel4'] or d['rel5'] or d['rel6'])
            url = d['url1'] or d['url2'] or d['url3']
            url = urljoin(self.base_url, url)
            url = unescape(url)
            # NOTE(review): '%%%2x' space-pads (not zero-pads) ordinals
            # below 0x10, producing e.g. '% a' -- confirm intended.
            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
            result.add((url, rel))
        # We sort the result, hoping to bring the most recent versions
        # to the front
        result = sorted(result, key=lambda t: t[0], reverse=True)
        return result
class SimpleScrapingLocator(Locator):
    """
    A locator which scrapes HTML pages to locate downloads for a distribution.
    This runs multiple threads to do the I/O; performance is at least as good
    as pip's PackageFinder, which works in an analogous fashion.
    """

    # These are used to deal with various Content-Encoding schemes.
    # BUG FIX: the 'gzip' decoder previously referenced an undefined name 'd'
    # instead of its own parameter 'b', raising NameError for any
    # gzip-encoded response.
    decoders = {
        'deflate': zlib.decompress,
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
        'none': lambda b: b,
    }

    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """
        Initialise an instance.
        :param url: The root URL to use for scraping.
        :param timeout: The timeout, in seconds, to be applied to requests.
                        This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O,
                            This defaults to 10.
        :param kwargs: Passed to the superclass.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}
        self._seen = set()
        self._to_fetch = queue.Queue()
        self._bad_hosts = set()
        self.skip_externals = False
        self.num_workers = num_workers
        self._lock = threading.RLock()
        # See issue #45: we need to be resilient when the locator is used
        # in a thread, e.g. with concurrent.futures. We can't use self._lock
        # as it is for coordinating our internal threads - the ones created
        # in _prepare_threads.
        self._gplock = threading.RLock()
        self.platform_check = False  # See issue #112

    def _prepare_threads(self):
        """
        Threads are created only when get_project is called, and terminate
        before it returns. They are there primarily to parallelise I/O (i.e.
        fetching web pages).
        """
        self._threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self._fetch)
            # Use the daemon attribute; Thread.setDaemon() is deprecated.
            t.daemon = True
            t.start()
            self._threads.append(t)

    def _wait_threads(self):
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)    # sentinel
        for t in self._threads:
            t.join()
        self._threads = []

    def _get_project(self, name):
        # Queue the project's index page and let the worker threads follow
        # links; they accumulate findings into self.result under self._lock.
        result = {'urls': {}, 'digests': {}}
        with self._gplock:
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, '%s/' % quote(name))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                self._to_fetch.join()
            finally:
                self._wait_threads()
            del self.result
        return result

    platform_dependent = re.compile(r'\b(linux_(i\d86|x86_64|arm\w+)|'
                                    r'win(32|_amd64)|macosx_?\d+)\b', re.I)

    def _is_platform_dependent(self, url):
        """
        Does an URL refer to a platform-specific download?
        """
        return self.platform_dependent.search(url)

    def _process_download(self, url):
        """
        See if an URL is a suitable download for a project.
        If it is, register information in the result dictionary (for
        _get_project) about the specific version it's for.
        Note that the return value isn't actually used other than as a boolean
        value.
        """
        if self.platform_check and self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            with self._lock:    # needed because self.result is shared
                self._update_version_data(self.result, info)
        return info

    def _should_queue(self, link, referrer, rel):
        """
        Determine whether a link URL from a referring page and with a
        particular "rel" attribute should be queued for scraping.
        """
        scheme, netloc, path, _, _, _ = urlparse(link)
        if path.endswith(self.source_extensions + self.binary_extensions +
                         self.excluded_extensions):
            result = False
        elif self.skip_externals and not link.startswith(self.base_url):
            result = False
        elif not referrer.startswith(self.base_url):
            result = False
        elif rel not in ('homepage', 'download'):
            result = False
        elif scheme not in ('http', 'https', 'ftp'):
            result = False
        elif self._is_platform_dependent(link):
            result = False
        else:
            host = netloc.split(':', 1)[0]
            if host.lower() == 'localhost':
                result = False
            else:
                result = True
        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
                     referrer, result)
        return result

    def _fetch(self):
        """
        Get a URL to fetch from the work queue, get the HTML page, examine its
        links for download candidates and candidates for further scraping.
        This is a handy method to run in a thread.
        """
        while True:
            url = self._to_fetch.get()
            try:
                if url:
                    page = self.get_page(url)
                    if page is None:    # e.g. after an error
                        continue
                    for link, rel in page.links:
                        if link not in self._seen:
                            try:
                                self._seen.add(link)
                                if (not self._process_download(link) and
                                        self._should_queue(link, url, rel)):
                                    logger.debug('Queueing %s from %s', link, url)
                                    self._to_fetch.put(link)
                            except MetadataInvalidError:  # e.g. invalid versions
                                pass
            except Exception as e:  # pragma: no cover
                self.errors.put(text_type(e))
            finally:
                # always do this, to avoid hangs :-)
                self._to_fetch.task_done()
            if not url:
                # A falsy url is the sentinel from _wait_threads: quit.
                break

    def get_page(self, url):
        """
        Get the HTML for an URL, possibly from an in-memory cache.

        XXX TODO Note: this cache is never actually cleared. It's assumed that
        the data won't get stale over the lifetime of a locator instance (not
        necessarily true for the default_locator).
        """
        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
        scheme, netloc, path, _, _, _ = urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            url = urljoin(ensure_slash(url), 'index.html')
        if url in self._page_cache:
            result = self._page_cache[url]
            logger.debug('Returning %s from cache: %s', url, result)
        else:
            host = netloc.split(':', 1)[0]
            result = None
            if host in self._bad_hosts:
                logger.debug('Skipping %s due to bad host %s', url, host)
            else:
                req = Request(url, headers={'Accept-encoding': 'identity'})
                try:
                    logger.debug('Fetching %s', url)
                    resp = self.opener.open(req, timeout=self.timeout)
                    logger.debug('Fetched %s', url)
                    headers = resp.info()
                    content_type = headers.get('Content-Type', '')
                    if HTML_CONTENT_TYPE.match(content_type):
                        final_url = resp.geturl()
                        data = resp.read()
                        encoding = headers.get('Content-Encoding')
                        if encoding:
                            decoder = self.decoders[encoding]   # fail if not found
                            data = decoder(data)
                        encoding = 'utf-8'
                        m = CHARSET.search(content_type)
                        if m:
                            encoding = m.group(1)
                        try:
                            data = data.decode(encoding)
                        except UnicodeError:  # pragma: no cover
                            data = data.decode('latin-1')   # fallback
                        result = Page(data, final_url)
                        self._page_cache[final_url] = result
                except HTTPError as e:
                    if e.code != 404:
                        logger.exception('Fetch failed: %s: %s', url, e)
                except URLError as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                    with self._lock:
                        self._bad_hosts.add(host)
                except Exception as e:  # pragma: no cover
                    logger.exception('Fetch failed: %s: %s', url, e)
                finally:
                    self._page_cache[url] = result  # even if None (failure)
        return result

    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        page = self.get_page(self.base_url)
        if not page:
            raise DistlibException('Unable to get %s' % self.base_url)
        for match in self._distname_re.finditer(page.data):
            result.add(match.group(1))
        return result
class DirectoryLocator(Locator):
    """
    This class locates distributions in a directory tree.
    """

    def __init__(self, path, **kwargs):
        """
        Initialise an instance.
        :param path: The root of the directory tree to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * recursive - if True (the default), subdirectories are
                         recursed into. If False, only the top-level directory
                         is searched,
        """
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        path = os.path.abspath(path)
        if not os.path.isdir(path):  # pragma: no cover
            raise DistlibException('Not a directory: %r' % path)
        self.base_dir = path

    def should_include(self, filename, parent):
        """
        Should a filename be considered as a candidate for a distribution
        archive? As well as the filename, the directory which contains it
        is provided, though not used by the current implementation.
        """
        return filename.endswith(self.downloadable_extensions)

    def _candidate_urls(self):
        """
        Yield a file:// URL for every candidate archive under base_dir,
        honouring the ``recursive`` flag. Factored out so _get_project and
        get_distribution_names share one walk implementation.
        """
        for root, dirs, files in os.walk(self.base_dir):
            for fn in files:
                if self.should_include(fn, root):
                    fn = os.path.join(root, fn)
                    yield urlunparse(('file', '',
                                      pathname2url(os.path.abspath(fn)),
                                      '', '', ''))
            if not self.recursive:
                break

    def _get_project(self, name):
        # Collect version/URL/digest data for archives matching `name`.
        result = {'urls': {}, 'digests': {}}
        for url in self._candidate_urls():
            info = self.convert_url_to_download_info(url, name)
            if info:
                self._update_version_data(result, info)
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for url in self._candidate_urls():
            # Passing None as the project name accepts any distribution.
            info = self.convert_url_to_download_info(url, None)
            if info:
                result.add(info['name'])
        return result
class JSONLocator(Locator):
    """
    This locator uses special extended metadata (not available on PyPI) and is
    the basis of performant dependency resolution in distlib. Other locators
    require archive downloads before dependencies can be determined! As you
    might imagine, that can be slow.
    """

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        # Build the standard result mapping from the extended metadata feed.
        result = {'urls': {}, 'digests': {}}
        data = get_project_data(name)
        if not data:
            return result
        for info in data.get('files', []):
            # Only source distributions are considered.
            if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
                continue
            # The summary isn't stored in project metadata, as it would make
            # the data bigger for no benefit during dependency resolution.
            dist = make_dist(data['name'], info['version'],
                             summary=data.get('summary',
                                              'Placeholder for summary'),
                             scheme=self.scheme)
            md = dist.metadata
            md.source_url = info['url']
            # TODO SHA256 digest
            if info.get('digest'):
                dist.digest = ('md5', info['digest'])
            md.dependencies = info.get('requirements', {})
            dist.exports = info.get('exports', {})
            result[dist.version] = dist
            result['urls'].setdefault(dist.version, set()).add(info['url'])
        return result
class DistPathLocator(Locator):
    """
    This locator finds installed distributions in a path. It can be useful for
    adding to an :class:`AggregatingLocator`.
    """

    def __init__(self, distpath, **kwargs):
        """
        Initialise an instance.
        :param distpath: A :class:`DistributionPath` instance to search.
        """
        super(DistPathLocator, self).__init__(**kwargs)
        assert isinstance(distpath, DistributionPath)
        self.distpath = distpath

    def _get_project(self, name):
        # An installed distribution either exists or it doesn't; there is
        # only ever one version to report.
        dist = self.distpath.get_distribution(name)
        if dist is None:
            return {'urls': {}, 'digests': {}}
        return {
            dist.version: dist,
            'urls': {dist.version: set([dist.source_url])},
            'digests': {dist.version: set([None])},
        }
class AggregatingLocator(Locator):
    """
    This class allows you to chain and/or merge a list of locators.
    """

    def __init__(self, *locators, **kwargs):
        """
        Initialise an instance.
        :param locators: The list of locators to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * merge - if False (the default), the first successful
                         search from any of the locators is returned. If True,
                         the results from all locators are merged (this can be
                         slow).
        """
        self.merge = kwargs.pop('merge', False)
        self.locators = locators
        super(AggregatingLocator, self).__init__(**kwargs)

    def clear_cache(self):
        # Clear our own cache as well as every chained locator's.
        super(AggregatingLocator, self).clear_cache()
        for locator in self.locators:
            locator.clear_cache()

    def _set_scheme(self, value):
        # Setting the scheme propagates it to all chained locators so that
        # version matching stays consistent across them.
        self._scheme = value
        for locator in self.locators:
            locator.scheme = value

    # Reuse the base class getter, but with the propagating setter above.
    scheme = property(Locator.scheme.fget, _set_scheme)

    def _get_project(self, name):
        result = {}
        for locator in self.locators:
            d = locator.get_project(name)
            if d:
                if self.merge:
                    # Merge 'urls' and 'digests' by hand: dict.update would
                    # clobber per-version URL sets from earlier locators.
                    files = result.get('urls', {})
                    digests = result.get('digests', {})
                    # next line could overwrite result['urls'], result['digests']
                    result.update(d)
                    df = result.get('urls')
                    if files and df:
                        for k, v in files.items():
                            if k in df:
                                df[k] |= v
                            else:
                                df[k] = v
                    dd = result.get('digests')
                    if digests and dd:
                        dd.update(digests)
                else:
                    # See issue #18. If any dists are found and we're looking
                    # for specific constraints, we only return something if
                    # a match is found. For example, if a DirectoryLocator
                    # returns just foo (1.0) while we're looking for
                    # foo (>= 2.0), we'll pretend there was nothing there so
                    # that subsequent locators can be queried. Otherwise we
                    # would just return foo (1.0) which would then lead to a
                    # failure to find foo (>= 2.0), because other locators
                    # weren't searched. Note that this only matters when
                    # merge=False.
                    if self.matcher is None:
                        found = True
                    else:
                        found = False
                        for k in d:
                            if self.matcher.match(k):
                                found = True
                                break
                    if found:
                        result = d
                        break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for locator in self.locators:
            try:
                result |= locator.get_distribution_names()
            except NotImplementedError:
                # Some locators (e.g. JSONLocator) can't enumerate names.
                pass
        return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
default_locator = AggregatingLocator(
    JSONLocator(),
    SimpleScrapingLocator('https://pypi.org/simple/',
                          timeout=3.0),
    scheme='legacy')

# Module-level convenience: locate a distribution with the default locator.
locate = default_locator.locate

# Matches requirements of the form "name (== version)" or "name (version)",
# capturing the name and version parts.
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
    """
    Locate dependencies for distributions.
    """

    def __init__(self, locator=None):
        """
        Initialise an instance, using the specified locator
        to locate distributions.
        """
        self.locator = locator or default_locator
        self.scheme = get_scheme(self.locator.scheme)

    def add_distribution(self, dist):
        """
        Add a distribution to the finder. This will update internal information
        about who provides what.
        :param dist: The distribution to add.
        """
        logger.debug('adding distribution %s', dist)
        name = dist.key
        self.dists_by_name[name] = dist
        self.dists[(name, dist.version)] = dist
        # Record every "provides" entry so find_providers can resolve
        # requirements that are satisfied indirectly.
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            self.provided.setdefault(name, set()).add((version, dist))

    def remove_distribution(self, dist):
        """
        Remove a distribution from the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to remove.
        """
        logger.debug('removing distribution %s', dist)
        name = dist.key
        del self.dists_by_name[name]
        del self.dists[(name, dist.version)]
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
            s = self.provided[name]
            s.remove((version, dist))
            if not s:
                # Drop the key entirely once nothing provides this name.
                del self.provided[name]

    def get_matcher(self, reqt):
        """
        Get a version matcher for a requirement.
        :param reqt: The requirement
        :type reqt: str
        :return: A version matcher (an instance of
                 :class:`distlib.version.Matcher`).
        """
        try:
            matcher = self.scheme.matcher(reqt)
        except UnsupportedVersionError:  # pragma: no cover
            # XXX compat-mode if cannot read the version
            name = reqt.split()[0]
            matcher = self.scheme.matcher(name)
        return matcher

    def find_providers(self, reqt):
        """
        Find the distributions which can fulfill a requirement.

        :param reqt: The requirement.
        :type reqt: str
        :return: A set of distribution which can fulfill the requirement.
        """
        matcher = self.get_matcher(reqt)
        name = matcher.key   # case-insensitive
        result = set()
        provided = self.provided
        if name in provided:
            for version, provider in provided[name]:
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    match = False
                if match:
                    result.add(provider)
                    # Only the first matching provider is returned.
                    break
        return result

    def try_to_replace(self, provider, other, problems):
        """
        Attempt to replace one provider with another. This is typically used
        when resolving dependencies from multiple sources, e.g. A requires
        (B >= 1.0) while C requires (B >= 1.1).

        For successful replacement, ``provider`` must meet all the requirements
        which ``other`` fulfills.

        :param provider: The provider we are trying to replace with.
        :param other: The provider we're trying to replace.
        :param problems: If False is returned, this will contain what
                         problems prevented replacement. This is currently
                         a tuple of the literal string 'cantreplace',
                         ``provider``, ``other`` and the set of requirements
                         that ``provider`` couldn't fulfill.
        :return: True if we can replace ``other`` with ``provider``, else
                 False.
        """
        rlist = self.reqts[other]
        unmatched = set()
        # Check that provider satisfies every requirement currently pinned
        # to other.
        for s in rlist:
            matcher = self.get_matcher(s)
            if not matcher.match(provider.version):
                unmatched.add(s)
        if unmatched:
            # can't replace other with provider
            problems.add(('cantreplace', provider, other,
                          frozenset(unmatched)))
            result = False
        else:
            # can replace other with provider
            self.remove_distribution(other)
            del self.reqts[other]
            for s in rlist:
                self.reqts.setdefault(provider, set()).add(s)
            self.add_distribution(provider)
            result = True
        return result

    def find(self, requirement, meta_extras=None, prereleases=False):
        """
        Find a distribution and all distributions it depends on.

        :param requirement: The requirement specifying the distribution to
                            find, or a Distribution instance.
        :param meta_extras: A list of meta extras such as :test:, :build: and
                            so on.
        :param prereleases: If ``True``, allow pre-release versions to be
                            returned - otherwise, don't return prereleases
                            unless they're all that's available.

        Return a set of :class:`Distribution` instances and a set of
        problems.

        The distributions returned should be such that they have the
        :attr:`required` attribute set to ``True`` if they were
        from the ``requirement`` passed to ``find()``, and they have the
        :attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.

        The problems should be a tuple consisting of the string
        ``'unsatisfied'`` and the requirement which couldn't be satisfied
        by any distribution known to the locator.
        """
        # Reset per-call state: these are used by the helper methods above.
        self.provided = {}
        self.dists = {}
        self.dists_by_name = {}
        self.reqts = {}
        meta_extras = set(meta_extras or [])
        if ':*:' in meta_extras:
            meta_extras.remove(':*:')
            # :meta: and :run: are implicitly included
            meta_extras |= set([':test:', ':build:', ':dev:'])
        if isinstance(requirement, Distribution):
            dist = odist = requirement
            logger.debug('passed %s as requirement', odist)
        else:
            dist = odist = self.locator.locate(requirement,
                                               prereleases=prereleases)
            if dist is None:
                raise DistlibException('Unable to locate %r' % requirement)
            logger.debug('located %s', odist)
        dist.requested = True
        problems = set()
        # Breadth-style worklist of distributions still to be examined.
        todo = set([dist])
        install_dists = set([odist])
        while todo:
            dist = todo.pop()
            name = dist.key     # case-insensitive
            if name not in self.dists_by_name:
                self.add_distribution(dist)
            else:
                # import pdb; pdb.set_trace()
                other = self.dists_by_name[name]
                if other != dist:
                    self.try_to_replace(dist, other, problems)
            # ireqts: install-time; sreqts: build-time; ereqts: meta extras.
            ireqts = dist.run_requires | dist.meta_requires
            sreqts = dist.build_requires
            ereqts = set()
            if meta_extras and dist in install_dists:
                for key in ('test', 'build', 'dev'):
                    e = ':%s:' % key
                    if e in meta_extras:
                        ereqts |= getattr(dist, '%s_requires' % key)
            all_reqts = ireqts | sreqts | ereqts
            for r in all_reqts:
                providers = self.find_providers(r)
                if not providers:
                    logger.debug('No providers found for %r', r)
                    provider = self.locator.locate(r, prereleases=prereleases)
                    # If no provider is found and we didn't consider
                    # prereleases, consider them now.
                    if provider is None and not prereleases:
                        provider = self.locator.locate(r, prereleases=True)
                    if provider is None:
                        logger.debug('Cannot satisfy %r', r)
                        problems.add(('unsatisfied', r))
                    else:
                        n, v = provider.key, provider.version
                        if (n, v) not in self.dists:
                            todo.add(provider)
                        providers.add(provider)
                        if r in ireqts and dist in install_dists:
                            install_dists.add(provider)
                            logger.debug('Adding %s to install_dists',
                                         provider.name_and_version)
                for p in providers:
                    name = p.key
                    if name not in self.dists_by_name:
                        self.reqts.setdefault(p, set()).add(r)
                    else:
                        other = self.dists_by_name[name]
                        if other != p:
                            # see if other can be replaced by p
                            self.try_to_replace(p, other, problems)
        dists = set(self.dists.values())
        # Anything not needed at install time is a build-time-only dependency.
        for dist in dists:
            dist.build_time_dependency = dist not in install_dists
            if dist.build_time_dependency:
                logger.debug('%s is a build-time dependency only.',
                             dist.name_and_version)
        logger.debug('find done for %s', odist)
        return dists, problems
|
test_bucket.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import sys
import time
import threading
from nose.tools import assert_equals
from ...backends.bucket import BucketBackend
from ...messages import OutgoingMessage
from ...router import Router
def test_bucket_swallows_messages():
    """The bucket backend must capture both incoming and outgoing messages."""
    router = Router()
    router.add_backend("mock", "rapidsms.backends.bucket")
    worker = threading.Thread(target=router.start)
    # Thread.setDaemon() is deprecated; set the attribute instead.
    worker.daemon = True
    worker.start()
    # wait until the router has started, but don't hang forever if it fails.
    deadline = time.time() + 10
    while not router.running:
        assert time.time() < deadline, "router did not start within 10s"
        time.sleep(0.1)
    backend = router.backends["mock"]
    backend.receive("1234", "Mock Incoming Message")
    msg = object()
    backend.send(msg)
    # Incoming text is wrapped in a message object; outgoing is stored as-is.
    assert_equals(backend.bucket[0].text, "Mock Incoming Message")
    assert_equals(backend.bucket[1], msg)
    assert_equals(len(backend.bucket), 2)
    # wait until the router has stopped.
    router.stop()
    worker.join()
|
meshConvertP.py
|
import os
from multiprocessing import Process
def f(name):
    """Run gmshToFoam on busemann.msh inside case directory *name*.

    Skips anything that is not a directory, as well as the
    ``blank_foamcase`` template directory. Returns None.
    """
    if not (os.path.isdir(name) and name != "blank_foamcase"):
        return
    # NOTE: the original os.system('cd ~/OpenFOAM/tjc2017-7/run') was a no-op
    # (the cd only affected the short-lived subshell), so it was removed,
    # along with the redundant chdir back to the starting directory.
    old_dir = os.getcwd()
    os.chdir(name)
    try:
        os.system('gmshToFoam busemann.msh')
    finally:
        # Always restore the working directory, even if os.system raises.
        os.chdir(old_dir)
if __name__ == "__main__":
    # Convert every case directory in parallel, one process per entry.
    workers = []
    for name in os.listdir():
        p = Process(target=f, args=(name,))
        p.start()
        workers.append(p)
    # Wait for all conversions to finish before the script exits.
    for p in workers:
        p.join()
|
websocket_transport.py
|
# Copyright (C) 2018 DataArt
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from devicehive.transports.transport import Transport
from devicehive.transports.transport import TransportError
import websocket
import socket
import threading
import sys
import time
class WebsocketTransport(Transport):
    """Websocket transport class."""

    def __init__(self, data_format_class, data_format_options, handler_class,
                 handler_options):
        super(WebsocketTransport, self).__init__('websocket',
                                                 WebsocketTransportError,
                                                 data_format_class,
                                                 data_format_options,
                                                 handler_class, handler_options)
        self._websocket = websocket.WebSocket()
        self._connection_lock = threading.Lock()
        self._event_queue_sleep_time = None
        self._response_sleep_time = None
        self._pong_received = False
        self._event_queue = []
        self._responses = {}
        # Choose the frame opcode matching the configured data format.
        if self._text_data_type:
            self._data_opcode = websocket.ABNF.OPCODE_TEXT
        else:
            self._data_opcode = websocket.ABNF.OPCODE_BINARY

    def _websocket_call(self, websocket_method, *args, **kwargs):
        """Invoke a websocket method, translating library/socket errors into
        this transport's error type."""
        try:
            return websocket_method(*args, **kwargs)
        except (websocket.WebSocketException, socket.error) as websocket_error:
            raise self._error(websocket_error)

    def _connect(self, url, **options):
        """Open the connection and start the background event (and optional
        ping) threads."""
        timeout = options.pop('timeout', None)
        event_queue_sleep_time = options.pop('event_queue_sleep_time', 1e-6)
        response_sleep_time = options.pop('response_sleep_time', 1e-6)
        pong_timeout = options.pop('pong_timeout', None)
        self._websocket.timeout = timeout
        self._event_queue_sleep_time = event_queue_sleep_time
        self._response_sleep_time = response_sleep_time
        self._websocket_call(self._websocket.connect, url, **options)
        self._connected = True
        event_thread = threading.Thread(target=self._event)
        event_thread.name = '%s-transport-event' % self._name
        event_thread.daemon = True
        event_thread.start()
        if pong_timeout:
            ping_thread = threading.Thread(target=self._ping,
                                           args=(pong_timeout,))
            ping_thread.name = '%s-transport-ping' % self._name
            ping_thread.daemon = True
            ping_thread.start()
        self._handle_connect()

    def _event(self):
        # Background reader: routes frames with a request id to _responses
        # (consumed by _receive_response) and everything else to the event
        # queue (consumed by _receive).
        while self._connected:
            try:
                with self._connection_lock:
                    opcode, data = self._websocket_call(
                        self._websocket.recv_data, True)
                if opcode in (websocket.ABNF.OPCODE_TEXT,
                              websocket.ABNF.OPCODE_BINARY):
                    if opcode == websocket.ABNF.OPCODE_TEXT:
                        data = data.decode('utf-8')
                    event = self._decode(data)
                    request_id = event.get(self.REQUEST_ID_KEY)
                    if not request_id:
                        self._event_queue.append(event)
                        continue
                    self._responses[request_id] = event
                    continue
                if opcode == websocket.ABNF.OPCODE_PONG:
                    self._pong_received = True
                    continue
                if opcode == websocket.ABNF.OPCODE_CLOSE:
                    return
            except Exception:
                # BUG FIX: was a bare 'except:', which also swallowed
                # SystemExit/KeyboardInterrupt. Record the failure for the
                # main receive loop to observe.
                self._exception_info = sys.exc_info()

    def _ping(self, pong_timeout):
        # Periodically ping; if no pong arrives within pong_timeout the
        # connection is considered dead.
        while self._connected:
            try:
                self._websocket_call(self._websocket.ping)
            except self._error:
                self._connected = False
                return
            self._pong_received = False
            time.sleep(pong_timeout)
            if not self._pong_received:
                self._connected = False
                return

    def _receive(self):
        # Drain queued events until disconnected or the reader thread failed.
        while self._connected and not self._exception_info:
            if not self._event_queue:
                time.sleep(self._event_queue_sleep_time)
                continue
            for event in self._event_queue:
                self._handle_event(event)
                if not self._connected:
                    return
            self._event_queue = []

    def _disconnect(self):
        self._websocket_call(self._websocket.ping)
        with self._connection_lock:
            self._websocket_call(self._websocket.close)
        self._pong_received = False
        self._event_queue = []
        self._responses = {}
        self._handle_disconnect()

    def _send_request(self, request_id, action, request):
        request[self.REQUEST_ID_KEY] = request_id
        request[self.REQUEST_ACTION_KEY] = action
        self._websocket_call(self._websocket.send, self._encode(request),
                             opcode=self._data_opcode)

    def _receive_response(self, request_id, timeout):
        # Poll until the matching response shows up or the deadline passes.
        # (Equivalent to the previous 'time.time() - timeout < start_time'
        # condition, but readable.)
        deadline = time.time() + timeout
        while time.time() < deadline:
            response = self._responses.get(request_id)
            if response:
                del self._responses[request_id]
                return response
            time.sleep(self._response_sleep_time)
        raise self._error('Response timeout.')

    def send_request(self, request_id, action, request, **params):
        """Fire-and-forget request."""
        self._ensure_connected()
        self._send_request(request_id, action, request)

    def request(self, request_id, action, request, **params):
        """Send a request and wait for its response (default 30s timeout)."""
        self._ensure_connected()
        timeout = params.pop('timeout', 30)
        self._send_request(request_id, action, request)
        return self._receive_response(request_id, timeout)
# Raised by WebsocketTransport for websocket/socket-level failures
# (see WebsocketTransport._websocket_call above).
class WebsocketTransportError(TransportError):
    """Websocket transport error."""
|
run_trt.py
|
import tensorrt as trt
import numpy as np
import pycuda.driver as cuda
import pycuda.autoinit
import common
import os
import cv2
from PIL import Image
import time
from threading import Thread
# Global TensorRT logger; VERBOSE emits detailed build/runtime diagnostics.
TRT_LOGGER = trt.Logger(trt.Logger.VERBOSE)
def preprocess(img, input_resolution):
    """Convert an OpenCV frame to a normalized, batched CHW float32 array.

    :param img: input frame; assumed BGR channel order (OpenCV default) --
                the [..., ::-1] below flips it to RGB. TODO confirm callers.
    :param input_resolution: (width, height) tuple passed to cv2.resize.
    :return: C-contiguous float32 array of shape (1, 3, H, W).
    """
    # Flip BGR -> RGB, resize, and transpose HWC -> CHW.
    image = cv2.resize(img[..., ::-1], input_resolution).transpose(2, 0, 1).astype(np.float32)
    image /= 255.0
    # Per-channel normalization (these are the standard ImageNet statistics).
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
    mean = mean[:, np.newaxis, np.newaxis]
    std = std[:, np.newaxis, np.newaxis]
    image = (image - mean) / std
    # Add the batch dimension and force C-contiguous layout for TensorRT.
    image = np.expand_dims(image, axis=0)
    return np.array(image, dtype=np.float32, order='C')
def postprocess(pred, input_resolution):
    """Reshape the flat network output to the input resolution and scale it
    to a displayable uint8 depth map."""
    depth_map = pred.reshape(input_resolution)
    return normalize_depth(depth_map)
def normalize_depth(depth):
    """Min-max normalize a depth map to uint8 [0, 255] for display.

    BUG FIX: the previous `depth *= 1000.0` mutated the caller's array in
    place, and the x1000 scaling cancelled out mathematically in the
    subsequent min-max normalization, so it has been removed. Also guards
    against division by zero for a constant-valued map.

    :param depth: array-like of depth values.
    :return: uint8 array of the same shape, scaled to [0, 255].
    """
    depth = np.asarray(depth, dtype=np.float64)
    dmin = depth.min()
    dmax = depth.max()
    if dmax == dmin:
        # Constant map: previously this divided by zero (NaN output).
        return np.zeros(depth.shape, dtype=np.uint8)
    scaled = (depth - dmin) * (255.0 / (dmax - dmin))
    return scaled.astype(np.uint8)
class WebcamVideoStream:
    """From PyImageSearch
    Webcam reading with multi-threading
    """

    def __init__(self, src=0, name='WebcamVideoStream'):
        # Open the capture device and grab one frame immediately so read()
        # has something to return before the thread starts.
        self.stream = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.stream.read()
        self.name = name
        # Flag polled by the update thread to know when to exit.
        self.stopped = False

    def start(self):
        # Launch the background frame-grabbing thread; returns self so
        # callers can chain: vs = WebcamVideoStream().start().
        t = Thread(target=self.update, name=self.name, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # Continuously overwrite self.frame with the newest capture until
        # stop() is called.
        while True:
            if self.stopped:
                return
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        # Return the most recently grabbed frame (non-blocking).
        return self.frame

    def stop(self):
        # Signal the update thread to terminate.
        self.stopped = True
def build_engine(onnx_file_path):
    """
    Takes an ONNX file and creates a TensorRT engine to run inference with.
    The serialized engine is also cached next to the ONNX file with a .trt
    suffix. Returns the engine, or None when parsing or building fails.
    """
    with trt.Builder(TRT_LOGGER) as builder, builder.create_network(common.EXPLICIT_BATCH) as network, trt.OnnxParser(network, TRT_LOGGER) as parser:
        builder.max_workspace_size = 1 << 28  # 256MB
        builder.max_batch_size = 1
        # Parse the model file
        print(f"Loading ONNX file from path {onnx_file_path} ...")
        with open(onnx_file_path, 'rb') as model:
            print("Beginning ONNX file parsing")
            if not parser.parse(model.read()):
                print(f"ERROR: Failed to parse the ONNX file")
                for error in range(parser.num_errors):
                    print(parser.get_error(error))
                return None
        print(f"Completed parsing of ONNX file.")
        print(f"Building an engine form file {onnx_file_path}; this may take a while ...")
        engine = builder.build_cuda_engine(network)
        # BUG FIX: build_cuda_engine returns None on failure; previously this
        # crashed with AttributeError on engine.serialize().
        if engine is None:
            print("ERROR: Engine build failed.")
            return None
        print("Completed creating Engine")
        with open(onnx_file_path.replace('.onnx', '.trt'), 'wb') as f:
            f.write(engine.serialize())
        return engine
def get_engine(model_path: str):
    """
    Attempts to load a serialized engine if available, otherwise builds a new
    TensorRT engine from an ONNX file and returns it.

    Returns None when the path is missing or has an unsupported extension.
    """
    if not os.path.exists(model_path):
        print(f"FILE: {model_path} not found.")
        return None
    if model_path.endswith('trt'):
        print(f"Reading engine from file {model_path}")
        with open(model_path, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime:
            return runtime.deserialize_cuda_engine(f.read())
    elif model_path.endswith('onnx'):
        # BUG FIX: the built engine was previously discarded (no return),
        # so callers always received None for ONNX inputs.
        return build_engine(model_path)
    else:
        print("Invalid File: Only .onnx and .trt are supported.")
        return None
def main():
    """Run live depth estimation on webcam frames through a TensorRT engine,
    printing an FPS estimate once per second and quitting on the 'q' key."""
    model_path = 'weights/bts_nyu_320_mem.trt'
    input_image_path = 'images/NYU0937.jpg'  # kept for parity with the original script
    input_resolution = (320, 320)

    video = WebcamVideoStream().start()

    # FPS bookkeeping: count inference passes completed per one-second window.
    window_elapsed = 0
    window_frames = 0
    fps = "FPS: ??"

    with get_engine(model_path) as engine, engine.create_execution_context() as context:
        inputs, outputs, bindings, stream = common.allocate_buffers(engine)
        while True:
            tic = time.time()

            frame = video.read()
            blob = preprocess(frame, input_resolution)
            inputs[0].host = blob
            raw_depth = common.do_inference_v2(
                context, bindings, inputs, outputs, stream
            )[-1]
            vis = postprocess(raw_depth, input_resolution)

            # Only the capture->postprocess span counts toward the FPS window,
            # matching the original measurement (display time excluded).
            toc = time.time()
            window_elapsed += toc - tic
            window_frames += 1
            if window_elapsed > 1:
                window_elapsed -= 1
                fps = "FPS: " + str(window_frames)
                print(fps)
                window_frames = 0

            cv2.imshow('frame', vis)
            if cv2.waitKey(1) == ord('q'):
                break

    cv2.destroyAllWindows()
    video.stop()
# Entry point: launch the webcam depth-estimation demo when run as a script.
if __name__ == '__main__':
    main()
|
client.py
|
# SPDX-FileCopyrightText: 2021 Carnegie Mellon University
#
# SPDX-License-Identifier: Apache-2.0
import glob
import os
import signal
import sys
import threading
import time
import uuid
import yaml
from core.result_manager import ResultManager
from logzero import logger
class CvatClient(object):
    """Feeds unlabeled images to a ResultManager for annotation.

    Watches ``<root_dir>/<target_name>/unlabeled`` for ``*.jpg`` files, moves
    each one into the ``to_label`` folder, and hands it to the result manager
    tagged with the number of model updates observed in ``frcnn_models``.
    """

    def __init__(self, config):
        """Set up the folder layout, the result manager, and a SIGINT handler.

        :param config: mapping with at least ``root_dir``, ``target_name`` and
            ``cvat`` keys (``cvat`` is passed through to ResultManager).
        """
        self.config = config
        self.task_root = os.path.join(
            self.config["root_dir"], self.config["target_name"]
        )
        self.unlabeled_dir = os.path.join(self.task_root, "unlabeled")
        self.labeled_dir = os.path.join(self.task_root, "labeled")
        self.to_label_dir = os.path.join(self.task_root, "to_label")
        self.pseudo_anno_dir = os.path.join(self.task_root, "pseudo_anno")
        self.manual_anno_dir = os.path.join(self.task_root, "manual_anno")
        self.frcnn_model_dir = os.path.join(self.task_root, "frcnn_models")
        self._init_folders()
        self.result_manager = ResultManager(
            self.labeled_dir,
            self.pseudo_anno_dir,
            self.manual_anno_dir,
            self.config["cvat"],
        )
        # modelVersion counts model updates seen by this client;
        # lastVersion/curVersion hold the on-disk checkpoint version number
        # (renamed from the misspelled internal names lastVesion/curVesion).
        self.modelVersion = 0
        self.lastVersion = 0
        self.curVersion = 0
        # Ctrl-C triggers an orderly shutdown via stop().
        signal.signal(signal.SIGINT, self.stop)

    def _init_folders(self):
        """Create all working folders (no-op for folders that already exist)."""
        os.makedirs(self.unlabeled_dir, exist_ok=True)
        os.makedirs(self.labeled_dir, exist_ok=True)
        os.makedirs(self.to_label_dir, exist_ok=True)
        os.makedirs(self.pseudo_anno_dir, exist_ok=True)
        os.makedirs(self.manual_anno_dir, exist_ok=True)

    def _mv_to_label(self, src_name):
        """Move *src_name* into the to_label folder; return its new path."""
        img_name = os.path.basename(src_name)
        to_label_filename = os.path.join(self.to_label_dir, img_name)
        os.rename(src_name, to_label_filename)
        return to_label_filename

    def _load_yaml(self, yaml_dir):
        """Parse a YAML file; log and return None on a parse error."""
        with open(yaml_dir, "r") as stream:
            try:
                return yaml.safe_load(stream)
            except yaml.YAMLError as exc:
                logger.critical(exc)

    def _glob_unlabeled_folder(self):
        """Return the unlabeled *.jpg files, oldest-modified first."""
        file_list = glob.glob(os.path.join(self.unlabeled_dir, "*.jpg"))
        file_list.sort(key=os.path.getmtime)
        return file_list

    @staticmethod
    def _model_version(path):
        """Extract the integer N from a ``.../model_v_N.pth`` checkpoint path."""
        return int(os.path.basename(path).split("_")[-1][:-4])

    def start(self):
        """Spawn the background thread that feeds images to the result manager."""
        try:
            threading.Thread(target=self._result_thread, name="get-results").start()
        except Exception as e:
            self.stop()
            raise e

    def stop(self, *args):
        """Shut down: terminate the result manager, return queued images to the
        unlabeled folder, then hard-kill the process.

        Also used as the SIGINT handler, hence the ``*args`` signature.
        """
        logger.info("Stop called")
        self.result_manager.terminate()
        # Give the result manager time to wind down before touching its files.
        time.sleep(5)
        for img_name in glob.glob(os.path.join(self.to_label_dir, "*.jpg")):
            os.rename(
                img_name, os.path.join(self.unlabeled_dir, os.path.basename(img_name))
            )
        logger.info("Moved images in to_label folder back to unlabeled folder")
        # SIGKILL guarantees exit even with non-daemon threads still running.
        # NOTE(review): POSIX-only; this will not work on Windows.
        pid = os.getpid()
        os.kill(pid, signal.SIGKILL)

    def _result_thread(self):
        """Poll forever: track model-checkpoint updates and submit every
        unlabeled image to the result manager."""
        while True:
            model_files = glob.glob(
                os.path.join(self.frcnn_model_dir, "model_v_*.pth")
            )
            if len(model_files) > 0:
                # Bug fix: a lexicographic sort put model_v_10.pth before
                # model_v_9.pth; compare the parsed version numbers instead.
                self.curVersion = max(self._model_version(p) for p in model_files)
                if self.lastVersion < self.curVersion:
                    self.modelVersion += 1
                    self.lastVersion = self.curVersion
                    logger.info(
                        "Models have been updated to version {}!".format(
                            self.modelVersion
                        )
                    )
            file_list = self._glob_unlabeled_folder()
            if len(file_list) == 0:
                time.sleep(3)
                continue
            for filename in file_list:
                # NOTE(review): this peeks at a private Semaphore counter of
                # ResultManager to wait for capacity — confirm there is no
                # public API for this.
                if self.result_manager._tasks_lock._value == 0:
                    logger.info("_tasks_lock")
                    while self.result_manager._tasks_lock._value == 0:
                        # Short sleep instead of a hot busy-wait (was `pass`),
                        # which pegged a CPU core while blocked.
                        time.sleep(0.01)
                to_label_filename = self._mv_to_label(filename)
                self.result_manager.add((to_label_filename, self.modelVersion))
            if not self.result_manager.running:
                self.stop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.