| source (string, lengths 3-86) | python (string, lengths 75-1.04M) |
|---|---|
cut_words_v4.py
|
#coding=utf-8
import os
import sys
import json
sys.path.append("/home/minghaiyan/thirdparty/jieba-master")
import re
import time
import datetime
import pickle
import jieba
import jieba.analyse
jieba.load_userdict("user.dict")
from text_feature import TextFeature
from get_label_words import LabelWords
from multiprocessing import Process, Pool, Queue
#### input is other train material ####
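# Descriptive overview (inferred from the code below): this script reads
# tab-separated "idx<TAB>content" lines from all_cont.txt, segments each
# content field with jieba, drops stopwords, pure numbers and
# single-character tokens, writes the surviving tokens to chat_words.txt and
# the top-7 extracted keywords to chat_keywords.txt, and feeds every token
# list to a worker process that builds and pickles a TextFeature model
# (chat_text_f.dat).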
sw = dict()
for line in open("stopwords.txt", "r"):
sw[line.strip()] = 1
print "load stopwords: %d" % len(sw)
p_num = re.compile(r'^\d+$')
valid_chars = re.compile(ur'^[\u4e00-\u9fa5a-zA-Z0-9]+$')
# print " ".join(jieba.lcut("要依法利用社会集资来做"))
lw = LabelWords()
lw.Build("labelcfg.txt")
pickle.dump(lw, open("label_words.dat", "w"))
def BuildJob(q, fn, cs):
f = TextFeature()
cache = []
cnt = 0
while True:
r = q.get(True)
cnt += 1
if cnt % 10000 == 0:
print "build %s: %d" % (fn, cnt)
if r is None:
if len(cache) > 0:
f.Build(cache)
cache = []
if fn == 'chat_text_f.dat':
print "prune..."
f.Prune(3, 3)
pickle.dump(f, open(fn, "w"))
print "feature build %s succ" % fn
return
else:
cache.append(r)
if len(cache) >= cs:
f.Build(cache)
cache = []
q_norm = Queue(10*10000)
jobs = []
p = Process(target = BuildJob, args = (q_norm, "chat_text_f.dat", 100))
jobs.append(p)
p.start()
# word_vec =[]
fp = open("chat_words.txt","w")
fk = open("chat_keywords.txt","w")
cnt = 0
#for line in open(sys.argv[1], "r"):
for line in open("all_cont.txt", "r"):
cnt += 1
if cnt % 1000 == 0:
time_str = datetime.datetime.now().isoformat()
print "[%s]: %d" % (time_str, cnt)
# parts = json.dumps(line.strip())
# print parts
words = ""
keywords = ""
kw_vec = []
#print "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^"
# content = ''.join([x for x in line.strip() if x != " "])
text = line.strip()
idx = int(text.split('\t')[0])
# print idx
content = text.split('\t')[1]
# print content
#print "------------------------------------"
for t in jieba.cut(content):
if valid_chars.match(t) is None:
continue
t = t.lower()
tl = t.strip().encode('utf8')
if (not tl) or sw.has_key(tl):
continue
if p_num.match(tl):
continue
if len(tl.decode('utf8')) < 2:
continue
kw_vec.append(tl)
# print("word:%s, word len: %d" %(tl,len(tl.decode('utf8'))))
# word_vec.append(kw_vec)
q_norm.put(kw_vec)
# print kw_vec
words += " ".join(kw_vec)
if len(kw_vec) > 0 :
fp.write(str(idx).encode("utf8") + "\t" + words + "\n")
# print "words..."
# print words
topK = 7
tags = jieba.analyse.extract_tags(words, topK=topK)
#print ",".join(tags)
keywords += " ".join(tags)
if len(tags) > 0 :
fk.write(str(idx) + "\t" + keywords.encode("utf8") + "\n")
fp.close()
fk.close()
q_norm.put(None)
for job in jobs:
job.join()
print "dump text feature succ"
os._exit(0)
|
test_b2_command_line.py
|
#!/usr/bin/env python2
######################################################################
#
# File: test_b2_command_line.py
#
# Copyright 2019 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from __future__ import print_function
import hashlib
import json
import os.path
import platform
import random
import re
import shutil
import six
import subprocess
import sys
import tempfile
import threading
import unittest
from b2sdk.utils import fix_windows_path_limit
USAGE = """
This program tests the B2 command-line client.
Usages:
{command} <accountId> <applicationKey> [basic | sync_down | sync_up | sync_up_no_prefix |
keys | sync_long_path | download | account]
The optional last argument specifies which of the tests to run. If not
specified, all tests will run. Runs the b2 package in the current directory.
{command} test
Runs internal unit tests.
"""
def usage_and_exit():
print(USAGE.format(command=sys.argv[0]), file=sys.stderr)
sys.exit(1)
def error_and_exit(message):
print('ERROR:', message)
sys.exit(1)
def read_file(path):
with open(path, 'rb') as f:
return f.read()
def write_file(path, contents):
with open(path, 'wb') as f:
f.write(contents)
def file_mod_time_millis(path):
return int(os.path.getmtime(path) * 1000)
def set_file_mod_time_millis(path, time):
os.utime(path, (os.path.getatime(path), time / 1000))
def random_hex(length):
return ''.join(random.choice('0123456789abcdef') for i in six.moves.xrange(length))
class TempDir(object):
def __init__(self):
self.dirpath = None
def get_dir(self):
return self.dirpath
def __enter__(self):
self.dirpath = tempfile.mkdtemp()
return self.dirpath
def __exit__(self, exc_type, exc_val, exc_tb):
shutil.rmtree(fix_windows_path_limit(self.dirpath))
class StringReader(object):
def __init__(self):
self.string = None
def get_string(self):
return self.string
def read_from(self, f):
try:
self.string = f.read()
except Exception as e:
print(e)
self.string = str(e)
def remove_insecure_platform_warnings(text):
return os.linesep.join(
line for line in text.split(os.linesep)
if ('SNIMissingWarning' not in line) and ('InsecurePlatformWarning' not in line)
)
def run_command(path_to_script, args):
"""
:param args: the b2 subcommand and its arguments, e.g. ['list_buckets']
:return: (status, stdout, stderr)
"""
# We'll run the b2 command-line by running the b2 module from
# the current directory. Python 2.6 doesn't support using
# '-m' with a package, so we explicitly say to run the module
# b2.__main__
os.environ['PYTHONPATH'] = '.'
os.environ['PYTHONIOENCODING'] = 'utf-8'
command = ['python', '-m', 'b2.__main__']
command.extend(args)
print('Running:', ' '.join(command))
stdout = StringReader()
stderr = StringReader()
p = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=platform.system() != 'Windows'
)
p.stdin.close()
reader1 = threading.Thread(target=stdout.read_from, args=[p.stdout])
reader1.start()
reader2 = threading.Thread(target=stderr.read_from, args=[p.stderr])
reader2.start()
p.wait()
reader1.join()
reader2.join()
stdout_decoded = remove_insecure_platform_warnings(stdout.get_string().decode('utf-8'))
stderr_decoded = remove_insecure_platform_warnings(stderr.get_string().decode('utf-8'))
print_output(p.returncode, stdout_decoded, stderr_decoded)
return p.returncode, stdout_decoded, stderr_decoded
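# Note: stdout and stderr are drained by two separate reader threads above;
# with both pipes redirected, reading them sequentially could deadlock once
# either pipe buffer fills, so each stream gets its own reader.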
def print_text_indented(text):
"""
Prints text that may include weird characters, indented four spaces.
"""
for line in text.split(os.linesep):
print(' ', repr(line)[1:-1])
def print_json_indented(value):
"""
Converts the value to JSON, then prints it.
"""
print_text_indented(json.dumps(value, indent=4, sort_keys=True))
def print_output(status, stdout, stderr):
print(' status:', status)
if stdout != '':
print(' stdout:')
print_text_indented(stdout)
if stderr != '':
print(' stderr:')
print_text_indented(stderr)
print()
class CommandLine(object):
PROGRESS_BAR_PATTERN = re.compile(r'.*B/s]$', re.DOTALL)
EXPECTED_STDERR_PATTERNS = [
PROGRESS_BAR_PATTERN,
re.compile(r'^$') # empty line
]
def __init__(self, path_to_script):
self.path_to_script = path_to_script
def run_command(self, args):
"""
Runs the command with the given arguments, returns a tuple in form of
(succeeded, stdout)
"""
status, stdout, stderr = run_command(self.path_to_script, args)
return status == 0 and stderr == '', stdout
def should_succeed(self, args, expected_pattern=None):
"""
Runs the command-line with the given arguments. Raises an exception
if there was an error; otherwise, returns the stdout of the command
as a string.
"""
status, stdout, stderr = run_command(self.path_to_script, args)
if status != 0:
print('FAILED with status', status)
sys.exit(1)
if stderr != '':
failed = False
for line in (s.strip() for s in stderr.split(os.linesep)):
if not any(p.match(line) for p in self.EXPECTED_STDERR_PATTERNS):
print('Unexpected stderr line:', repr(line))
failed = True
if failed:
print('FAILED because of stderr')
print(stderr)
sys.exit(1)
if expected_pattern is not None:
if re.search(expected_pattern, stdout) is None:
print('STDOUT:')
print(stdout)
error_and_exit('did not match pattern: ' + expected_pattern)
return stdout
def should_succeed_json(self, args):
"""
Runs the command-line with the given arguments. Raises an exception
if there was an error; otherwise, treats the stdout as JSON and returns
the data in it.
"""
return json.loads(self.should_succeed(args))
def should_fail(self, args, expected_pattern):
"""
Runs the command-line with the given args, expecting the given pattern
to appear in stderr.
"""
status, stdout, stderr = run_command(self.path_to_script, args)
if status == 0:
print('ERROR: should have failed')
sys.exit(1)
if re.search(expected_pattern, stdout + stderr) is None:
print(expected_pattern)
print(stdout + stderr)
error_and_exit('did not match pattern: ' + expected_pattern)
def list_file_versions(self, bucket_name):
return self.should_succeed_json(['list_file_versions', bucket_name])['files']
class TestCommandLine(unittest.TestCase):
def test_stderr_patterns(self):
progress_bar_line = './b2: 0%| | 0.00/33.3K [00:00<?, ?B/s]\r./b2: 25%|\xe2\x96\x88\xe2\x96\x88\xe2\x96\x8d | 8.19K/33.3K [00:00<00:01, 21.7KB/s]\r./b2: 33.3KB [00:02, 12.1KB/s]'
self.assertIsNotNone(CommandLine.PROGRESS_BAR_PATTERN.match(progress_bar_line))
progress_bar_line = '\r./b2: 0%| | 0.00/33.3K [00:00<?, ?B/s]\r./b2: 25%|\xe2\x96\x88\xe2\x96\x88\xe2\x96\x8d | 8.19K/33.3K [00:00<00:01, 19.6KB/s]\r./b2: 33.3KB [00:02, 14.0KB/s]'
self.assertIsNotNone(CommandLine.PROGRESS_BAR_PATTERN.match(progress_bar_line))
def should_equal(expected, actual):
print(' expected:')
print_json_indented(expected)
print(' actual:')
print_json_indented(actual)
if expected != actual:
print(' ERROR')
sys.exit(1)
print()
def delete_files_in_bucket(b2_tool, bucket_name):
while True:
data = b2_tool.should_succeed_json(['list_file_versions', bucket_name])
files = data['files']
if len(files) == 0:
return
for file_info in files:
b2_tool.should_succeed(
['delete_file_version', file_info['fileName'], file_info['fileId']]
)
def clean_buckets(b2_tool, bucket_name_prefix):
"""
Removes all buckets whose names start with the given prefix, if any exist.
In doing so, exercises list_buckets.
"""
text = b2_tool.should_succeed(['list_buckets'])
buckets = {}
for line in text.split(os.linesep)[:-1]:
words = line.split()
if len(words) != 3:
error_and_exit('bad list_buckets line: ' + line)
(b_id, b_type, b_name) = words
buckets[b_name] = b_id
for bucket_name in buckets:
if bucket_name.startswith(bucket_name_prefix):
delete_files_in_bucket(b2_tool, bucket_name)
b2_tool.should_succeed(['delete_bucket', bucket_name])
def setup_envvar_test(envvar_name, envvar_value):
"""
Establish config for the environment variable test.
Copy the B2 credential file (~/.b2_account_info) to the path named by
envvar_value, rename the original as a backup, and set the envvar_name
environment variable to that new path.
"""
src = os.path.expanduser('~/.b2_account_info')
dst = os.path.expanduser(envvar_value)
shutil.copyfile(src, dst)
shutil.move(src, src + '.bkup')
os.environ[envvar_name] = envvar_value
def tearDown_envvar_test(envvar_name):
"""
Clean up after running the environment variable test.
Delete the new B2 credential file (the file named by the
envvar_name environment variable).
Rename the backup of the original credential file back to
the standard name (~/.b2_account_info).
Delete the environment variable.
"""
os.remove(os.environ.get(envvar_name))
fname = os.path.expanduser('~/.b2_account_info')
shutil.move(fname + '.bkup', fname)
if os.environ.get(envvar_name) is not None:
del os.environ[envvar_name]
def download_test(b2_tool, bucket_name):
file_to_upload = 'README.md'
uploaded_a = b2_tool.should_succeed_json(
['upload_file', '--noProgress', '--quiet', bucket_name, file_to_upload, 'a']
)
with TempDir() as dir_path:
p = lambda fname: os.path.join(dir_path, fname)
b2_tool.should_succeed(['download_file_by_name', '--noProgress', bucket_name, 'a', p('a')])
assert read_file(p('a')) == read_file(file_to_upload)
b2_tool.should_succeed(
['download_file_by_id', '--noProgress', uploaded_a['fileId'],
p('b')]
)
assert read_file(p('b')) == read_file(file_to_upload)
# there is just one file, so clean up after ourselves for faster execution
b2_tool.should_succeed(['delete_file_version', uploaded_a['fileName'], uploaded_a['fileId']])
b2_tool.should_succeed(['delete_bucket', bucket_name])
return True
def basic_test(b2_tool, bucket_name):
file_to_upload = 'README.md'
file_mod_time_str = str(file_mod_time_millis(file_to_upload))
hex_sha1 = hashlib.sha1(read_file(file_to_upload)).hexdigest()
b2_tool.should_succeed(
['upload_file', '--noProgress', '--quiet', bucket_name, file_to_upload, 'a']
)
b2_tool.should_succeed(['upload_file', '--noProgress', bucket_name, file_to_upload, 'a'])
b2_tool.should_succeed(['upload_file', '--noProgress', bucket_name, file_to_upload, 'b/1'])
b2_tool.should_succeed(['upload_file', '--noProgress', bucket_name, file_to_upload, 'b/2'])
b2_tool.should_succeed(
[
'upload_file', '--noProgress', '--sha1', hex_sha1, '--info', 'foo=bar=baz', '--info',
'color=blue', bucket_name, file_to_upload, 'c'
]
)
b2_tool.should_fail(
[
'upload_file', '--noProgress', '--sha1', hex_sha1, '--info', 'foo-bar', '--info',
'color=blue', bucket_name, file_to_upload, 'c'
], r'ERROR: Bad file info: foo-bar'
)
b2_tool.should_succeed(
[
'upload_file', '--noProgress', '--contentType', 'text/plain', bucket_name,
file_to_upload, 'd'
]
)
b2_tool.should_succeed(
['download_file_by_name', '--noProgress', bucket_name, 'b/1', os.devnull]
)
b2_tool.should_succeed(['hide_file', bucket_name, 'c'])
list_of_files = b2_tool.should_succeed_json(['list_file_names', bucket_name])
should_equal(['a', 'b/1', 'b/2', 'd'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(['list_file_names', bucket_name, 'b/2'])
should_equal(['b/2', 'd'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(['list_file_names', bucket_name, 'b', '2'])
should_equal(['b/1', 'b/2'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(['list_file_versions', bucket_name])
should_equal(
['a', 'a', 'b/1', 'b/2', 'c', 'c', 'd'], [f['fileName'] for f in list_of_files['files']]
)
should_equal(
['upload', 'upload', 'upload', 'upload', 'hide', 'upload', 'upload'],
[f['action'] for f in list_of_files['files']]
)
first_c_version = list_of_files['files'][4]
second_c_version = list_of_files['files'][5]
list_of_files = b2_tool.should_succeed_json(['list_file_versions', bucket_name, 'c'])
should_equal(['c', 'c', 'd'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(
['list_file_versions', bucket_name, 'c', second_c_version['fileId']]
)
should_equal(['c', 'd'], [f['fileName'] for f in list_of_files['files']])
list_of_files = b2_tool.should_succeed_json(
['list_file_versions', bucket_name, 'c', second_c_version['fileId'], '1']
)
should_equal(['c'], [f['fileName'] for f in list_of_files['files']])
b2_tool.should_succeed(['ls', bucket_name], '^a{0}b/{0}d{0}'.format(os.linesep))
b2_tool.should_succeed(
['ls', '--long', bucket_name],
'^4_z.*upload.*a{0}.*-.*b/{0}4_z.*upload.*d{0}'.format(os.linesep)
)
b2_tool.should_succeed(
['ls', '--versions', bucket_name], '^a{0}a{0}b/{0}c{0}c{0}d{0}'.format(os.linesep)
)
b2_tool.should_succeed(['ls', bucket_name, 'b'], '^b/1{0}b/2{0}'.format(os.linesep))
b2_tool.should_succeed(['ls', bucket_name, 'b/'], '^b/1{0}b/2{0}'.format(os.linesep))
file_info = b2_tool.should_succeed_json(['get_file_info', second_c_version['fileId']])
expected_info = {
'color': 'blue',
'foo': 'bar=baz',
'src_last_modified_millis': file_mod_time_str
}
should_equal(expected_info, file_info['fileInfo'])
b2_tool.should_succeed(['delete_file_version', 'c', first_c_version['fileId']])
b2_tool.should_succeed(['ls', bucket_name], '^a{0}b/{0}c{0}d{0}'.format(os.linesep))
b2_tool.should_succeed(['make_url', second_c_version['fileId']])
def key_restrictions_test(b2_tool, bucket_name):
second_bucket_name = 'test-b2-command-line-' + random_hex(8)
b2_tool.should_succeed(['create-bucket', second_bucket_name, 'allPublic'],)
key_one_name = 'clt-testKey-01' + random_hex(6)
created_key_stdout = b2_tool.should_succeed(
[
'create-key',
key_one_name,
'listFiles,listBuckets,readFiles,writeKeys',
]
)
key_one_id, key_one = created_key_stdout.split()
b2_tool.should_succeed(['authorize_account', key_one_id, key_one],)
b2_tool.should_succeed(['get-bucket', bucket_name],)
b2_tool.should_succeed(['get-bucket', second_bucket_name],)
key_two_name = 'clt-testKey-02' + random_hex(6)
created_key_two_stdout = b2_tool.should_succeed(
[
'create-key',
'--bucket',
bucket_name,
key_two_name,
'listFiles,listBuckets,readFiles',
]
)
key_two_id, key_two = created_key_two_stdout.split()
b2_tool.should_succeed(['authorize_account', key_two_id, key_two],)
b2_tool.should_succeed(['get-bucket', bucket_name],)
b2_tool.should_succeed(['list-file-names', bucket_name],)
failed_bucket_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name
b2_tool.should_fail(['get-bucket', second_bucket_name], failed_bucket_err)
failed_list_files_err = r'ERROR: Application key is restricted to bucket: ' + bucket_name
b2_tool.should_fail(['list-file-names', second_bucket_name], failed_list_files_err)
# reauthorize with more capabilities for clean up
b2_tool.should_succeed(['authorize_account', sys.argv[1], sys.argv[2]])
b2_tool.should_succeed(['delete-bucket', second_bucket_name])
b2_tool.should_succeed(['delete-key', key_one_id])
b2_tool.should_succeed(['delete-key', key_two_id])
def account_test(b2_tool, bucket_name):
# actually a high-level operations test - we run bucket tests here since this test doesn't use its own bucket
b2_tool.should_succeed(['delete_bucket', bucket_name])
new_bucket_name = bucket_name[:-8] + random_hex(
8
) # apparently server behaves erratically when we delete a bucket and recreate it right away
b2_tool.should_succeed(['create_bucket', new_bucket_name, 'allPrivate'])
b2_tool.should_succeed(['update_bucket', new_bucket_name, 'allPublic'])
new_creds = os.path.join(tempfile.gettempdir(), 'b2_account_info')
setup_envvar_test('B2_ACCOUNT_INFO', new_creds)
b2_tool.should_succeed(['clear_account'])
bad_application_key = sys.argv[2][:-8] + ''.join(reversed(sys.argv[2][-8:]))
b2_tool.should_fail(['authorize_account', sys.argv[1], bad_application_key], r'unauthorized')
b2_tool.should_succeed(['authorize_account', sys.argv[1], sys.argv[2]])
tearDown_envvar_test('B2_ACCOUNT_INFO')
def file_version_summary(list_of_files):
"""
Given the result of list_file_versions, returns a list
of all file versions, with "+" for upload and "-" for
hide, looking like this:
['+ photos/a.jpg', '- photos/b.jpg', '+ photos/c.jpg']
"""
return [('+ ' if (f['action'] == 'upload') else '- ') + f['fileName'] for f in list_of_files]
def find_file_id(list_of_files, file_name):
for file in list_of_files:
if file['fileName'] == file_name:
return file['fileId']
assert False, 'file not found: %s' % (file_name,)
def sync_up_test(b2_tool, bucket_name):
_sync_test_using_dir(b2_tool, bucket_name, 'sync')
def sync_test_no_prefix(b2_tool, bucket_name):
_sync_test_using_dir(b2_tool, bucket_name, '')
def _sync_test_using_dir(b2_tool, bucket_name, dir_):
sync_point_parts = [bucket_name]
if dir_:
sync_point_parts.append(dir_)
prefix = dir_ + '/'
else:
prefix = ''
b2_sync_point = 'b2:' + '/'.join(sync_point_parts)
with TempDir() as dir_path:
p = lambda fname: os.path.join(dir_path, fname)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal([], file_version_summary(file_versions))
write_file(p('a'), b'hello')
write_file(p('b'), b'hello')
write_file(p('c'), b'hello')
# simulate action (nothing should be uploaded)
b2_tool.should_succeed(['sync', '--noProgress', '--dryRun', dir_path, b2_sync_point])
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal([], file_version_summary(file_versions))
os.symlink('broken', p('d'))
# now upload
b2_tool.should_succeed(
['sync', '--noProgress', dir_path, b2_sync_point],
expected_pattern="d could not be accessed"
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'a',
'+ ' + prefix + 'b',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
c_id = find_file_id(file_versions, prefix + 'c')
file_info = b2_tool.should_succeed_json(['get_file_info', c_id])['fileInfo']
should_equal(file_mod_time_millis(p('c')), int(file_info['src_last_modified_millis']))
os.unlink(p('b'))
write_file(p('c'), b'hello world')
b2_tool.should_succeed(
['sync', '--noProgress', '--keepDays', '10', dir_path, b2_sync_point]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'a',
'- ' + prefix + 'b',
'+ ' + prefix + 'b',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
os.unlink(p('a'))
b2_tool.should_succeed(['sync', '--noProgress', '--delete', dir_path, b2_sync_point])
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal([
'+ ' + prefix + 'c',
], file_version_summary(file_versions))
#test --compareThreshold with file size
write_file(p('c'), b'hello world!')
#should not upload new version of c
b2_tool.should_succeed(
[
'sync', '--noProgress', '--keepDays', '10', '--compareVersions', 'size',
'--compareThreshold', '1', dir_path, b2_sync_point
]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal([
'+ ' + prefix + 'c',
], file_version_summary(file_versions))
#should upload new version of c
b2_tool.should_succeed(
[
'sync', '--noProgress', '--keepDays', '10', '--compareVersions', 'size', dir_path,
b2_sync_point
]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
set_file_mod_time_millis(p('c'), file_mod_time_millis(p('c')) + 2000)
#test --compareThreshold with modTime
#should not upload new version of c
b2_tool.should_succeed(
[
'sync', '--noProgress', '--keepDays', '10', '--compareVersions', 'modTime',
'--compareThreshold', '2000', dir_path, b2_sync_point
]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
#should upload new version of c
b2_tool.should_succeed(
[
'sync', '--noProgress', '--keepDays', '10', '--compareVersions', 'modTime',
dir_path, b2_sync_point
]
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
], file_version_summary(file_versions)
)
# confirm symlink is skipped
write_file(p('linktarget'), b'hello')
os.symlink('linktarget', p('alink'))
b2_tool.should_succeed(
['sync', '--noProgress', '--excludeAllSymlinks', dir_path, b2_sync_point],
)
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'linktarget',
],
file_version_summary(file_versions),
)
# confirm symlink target is uploaded (with symlink's name)
b2_tool.should_succeed(['sync', '--noProgress', dir_path, b2_sync_point])
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(
[
'+ ' + prefix + 'alink',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'c',
'+ ' + prefix + 'linktarget',
],
file_version_summary(file_versions),
)
def sync_down_test(b2_tool, bucket_name):
sync_down_helper(b2_tool, bucket_name, 'sync')
def sync_down_helper(b2_tool, bucket_name, folder_in_bucket):
file_to_upload = 'README.md'
b2_sync_point = 'b2:%s' % bucket_name
if folder_in_bucket:
b2_sync_point += '/' + folder_in_bucket
b2_file_prefix = folder_in_bucket + '/'
else:
b2_file_prefix = ''
with TempDir() as local_path:
# Sync from an empty "folder" as a source.
b2_tool.should_succeed(['sync', b2_sync_point, local_path])
should_equal([], sorted(os.listdir(local_path)))
# Put a couple files in B2, and sync them down
b2_tool.should_succeed(
['upload_file', '--noProgress', bucket_name, file_to_upload, b2_file_prefix + 'a']
)
b2_tool.should_succeed(
['upload_file', '--noProgress', bucket_name, file_to_upload, b2_file_prefix + 'b']
)
b2_tool.should_succeed(['sync', b2_sync_point, local_path])
should_equal(['a', 'b'], sorted(os.listdir(local_path)))
def sync_long_path_test(b2_tool, bucket_name):
"""
test sync with very long path (overcome windows 260 character limit)
"""
b2_sync_point = 'b2://' + bucket_name
long_path = '/'.join(
(
'extremely_long_path_which_exceeds_windows_unfortunate_260_character_path_limit',
'and_needs_special_prefixes_containing_backslashes_added_to_overcome_this_limitation',
'when_doing_so_beware_leaning_toothpick_syndrome_as_it_can_cause_frustration',
'see_also_xkcd_1638'
)
)
with TempDir() as dir_path:
local_long_path = os.path.normpath(os.path.join(dir_path, long_path))
fixed_local_long_path = fix_windows_path_limit(local_long_path)
os.makedirs(os.path.dirname(fixed_local_long_path))
write_file(fixed_local_long_path, b'asdf')
b2_tool.should_succeed(['sync', '--noProgress', '--delete', dir_path, b2_sync_point])
file_versions = b2_tool.list_file_versions(bucket_name)
should_equal(['+ ' + long_path], file_version_summary(file_versions))
def main():
if len(sys.argv) < 3:
usage_and_exit()
path_to_script = 'b2'
account_id = sys.argv[1]
application_key = sys.argv[2]
defer_cleanup = True
test_map = {
'account': account_test,
'basic': basic_test,
'keys': key_restrictions_test,
'sync_down': sync_down_test,
'sync_up': sync_up_test,
'sync_up_no_prefix': sync_test_no_prefix,
'sync_long_path': sync_long_path_test,
'download': download_test,
}
if len(sys.argv) >= 4:
tests_to_run = sys.argv[3:]
for test_name in tests_to_run:
if test_name not in test_map:
error_and_exit('unknown test: "%s"' % (test_name,))
else:
tests_to_run = sorted(six.iterkeys(test_map))
if os.environ.get('B2_ACCOUNT_INFO') is not None:
del os.environ['B2_ACCOUNT_INFO']
b2_tool = CommandLine(path_to_script)
global_dirty = False
# Run each of the tests in its own empty bucket
for test_name in tests_to_run:
print('#')
print('# Cleaning and making bucket for:', test_name)
print('#')
print()
b2_tool.should_succeed(['clear_account'])
b2_tool.should_succeed(['authorize_account', account_id, application_key])
bucket_name_prefix = 'test-b2-command-line-' + account_id
if not defer_cleanup:
clean_buckets(b2_tool, bucket_name_prefix)
bucket_name = bucket_name_prefix + '-' + random_hex(8)
success, _ = b2_tool.run_command(['create_bucket', bucket_name, 'allPublic'])
if not success:
clean_buckets(b2_tool, bucket_name_prefix)
b2_tool.should_succeed(['create_bucket', bucket_name, 'allPublic'])
print('#')
print('# Running test:', test_name)
print('#')
print()
test_fcn = test_map[test_name]
dirty = not test_fcn(b2_tool, bucket_name)
global_dirty = global_dirty or dirty
if global_dirty:
print('#' * 70)
print('#')
print('# The last test was run, cleaning up')
print('#')
print('#' * 70)
print()
clean_buckets(b2_tool, bucket_name_prefix)
print()
print("ALL OK")
if __name__ == '__main__':
if sys.argv[1:] == ['test']:
del sys.argv[1]
unittest.main()
else:
main()
|
core.py
|
# Copyright 2011-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
"""
Some of POX's core API and functionality is here, largely in the POXCore
class (an instance of which is available as pox.core.core).
This includes things like component rendezvous, logging, system status
(up and down events), etc.
"""
""" IMPORTA ARQUIVOS DJANGO PARA OBJETOS"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "madapp.settings")
from django.core.management import execute_from_command_line
from django.db.models import Count
from madapp import settings
from madapp.mad.models import *
"""TERMINA INPORTACAO DJANGO"""
#from __future__ import print_function
# Set up initial log state
import logging
import inspect
import time
#import os
_path = inspect.stack()[0][1]
_ext_path = _path[0:_path.rindex(os.sep)]
_ext_path = os.path.dirname(_ext_path) + os.sep
_path = os.path.dirname(_path) + os.sep
SQUELCH_TIME = 5
_squelch = ''
_squelchTime = 0
_squelchCount = 0
def getLogger (name=None, moreFrames=0):
"""
In general, you don't need to call this directly, and will use
core.getLogger() instead.
"""
if name is None:
s = inspect.stack()[1+moreFrames]
name = s[1]
if name.endswith('.py'):
name = name[0:-3]
elif name.endswith('.pyc'):
name = name[0:-4]
if name.startswith(_path):
name = name[len(_path):]
elif name.startswith(_ext_path):
name = name[len(_ext_path):]
name = name.replace('/', '.').replace('\\', '.') #FIXME: use os.path or whatever
# Remove double names ("topology.topology" -> "topology")
if name.find('.') != -1:
n = name.split('.')
if len(n) >= 2:
if n[-1] == n[-2]:
del n[-1]
name = '.'.join(n)
if name.startswith("ext."):
name = name.split("ext.",1)[1]
if name.endswith(".__init__"):
name = name.rsplit(".__init__",1)[0]
l = logging.getLogger(name)
g=globals()
if not hasattr(l, "print"):
def printmsg (*args, **kw):
#squelch = kw.get('squelch', True)
msg = ' '.join((str(s) for s in args))
s = inspect.stack()[1]
o = '['
if 'self' in s[0].f_locals:
o += s[0].f_locals['self'].__class__.__name__ + '.'
o += s[3] + ':' + str(s[2]) + '] '
o += msg
if o == _squelch:
if time.time() >= _squelchTime:
l.debug("[Previous message repeated %i more times]" % (g['_squelchCount']+1,))
g['_squelchCount'] = 0
g['_squelchTime'] = time.time() + SQUELCH_TIME
else:
g['_squelchCount'] += 1
else:
g['_squelch'] = o
if g['_squelchCount'] > 0:
l.debug("[Previous message repeated %i more times]" % (g['_squelchCount'],))
g['_squelchCount'] = 0
g['_squelchTime'] = time.time() + SQUELCH_TIME
l.debug(o)
setattr(l, "print", printmsg)
setattr(l, "msg", printmsg)
return l
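# In short: the printmsg/msg helper attached above prefixes each message with
# its [Class.method:line] call site and, when the same message repeats,
# suppresses the repeats for SQUELCH_TIME seconds, later logging how many
# times the previous message was repeated.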
# Working around something (don't remember what)
log = (lambda : getLogger())()
from pox.lib.revent import *
# Now use revent's exception hook to put exceptions in event handlers into
# the log...
def _revent_exception_hook (source, event, args, kw, exc_info):
try:
c = source
t = event
if hasattr(c, "__class__"): c = c.__class__.__name__
if isinstance(t, Event): t = t.__class__.__name__
elif issubclass(t, Event): t = t.__name__
except:
pass
log.exception("Exception while handling %s!%s...\n" % (c,t))
import pox.lib.revent.revent
pox.lib.revent.revent.handleEventException = _revent_exception_hook
class GoingUpEvent (Event):
""" Fired when system is going up. """
pass
class GoingDownEvent (Event):
""" Fired when system is going down. """
pass
class UpEvent (Event):
""" Fired when system is up. """
pass
class DownEvent (Event):
""" Fired when system is down. """
pass
class ComponentRegistered (Event):
"""
This is raised by core whenever a new component is registered.
By watching this, a component can monitor whether other components it
depends on are available.
"""
def __init__ (self, name, component):
Event.__init__(self)
self.name = name
self.component = component
import pox.lib.recoco as recoco
class POXCore (EventMixin):
"""
A nexus of the POX API.
pox.core.core is a reference to an instance of this class. This class
serves a number of functions.
An important one is that it can serve as a rendezvous point for
components. A component can register objects on core, and they can
then be accessed on the core object (e.g., if you register foo, then
there will then be a pox.core.core.foo). In many cases, this means you
won't need to import a module.
Another purpose to the central registration is that it decouples
functionality from a specific module. If myL2Switch and yourL2Switch
both register as "switch" and both provide the same API, then it doesn't
matter. Doing this with imports is a pain.
Additionally, a number of common API functions are available here.
"""
_eventMixin_events = set([
UpEvent,
DownEvent,
GoingUpEvent,
GoingDownEvent,
ComponentRegistered
])
def __init__ (self):
self.debug = False
self.running = True
self.starting_up = True
self.components = {'core':self}
import threading
self.quit_condition = threading.Condition()
self.version = (0,2,0)
self.version_name = "carp"
print(self.banner)
self.scheduler = recoco.Scheduler(daemon=True)
self._waiters = [] # List of waiting components
@property
def banner (self):
return "{0} / Copyright 2011-2013 James McCauley, et al.".format(
self.version_string)
@property
def version_string (self):
return "POX %s (%s)" % ('.'.join(map(str,self.version)),self.version_name)
def callDelayed (_self, _seconds, _func, *args, **kw):
"""
Calls the function at a later time.
This is just a wrapper around a recoco timer.
"""
t = recoco.Timer(_seconds, _func, args=args, kw=kw,
scheduler = _self.scheduler)
return t
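# For example (with hypothetical foo/bar), core.callDelayed(10, foo, bar)
# arranges for foo(bar) to be called roughly ten seconds later on the recoco
# scheduler.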
def callLater (_self, _func, *args, **kw):
# first arg is `_self` rather than `self` in case the user wants
# to specify self as a keyword argument
"""
Call the given function with the given arguments within the context
of the co-operative threading environment.
It actually calls it sooner rather than later. ;)
Much of POX is written without locks because it's all thread-safe
with respect to itself, as it's written using the recoco co-operative
threading library. If you have a real thread outside of the
co-operative thread context, you need to be careful about calling
things within it. This function provides a rather simple way that
works for most situations: you give it a callable (like a method)
and some arguments, and it will call that callable with those
arguments from within the co-operative threader, taking care of
synchronization for you.
"""
_self.scheduler.callLater(_func, *args, **kw)
def raiseLater (_self, _obj, *args, **kw):
# first arg is `_self` rather than `self` in case the user wants
# to specify self as a keyword argument
"""
This is similar to callLater(), but provides an easy way to raise a
revent event from outside the co-operative context.
Rather than foo.raiseEvent(BarEvent, baz, spam), you just do
core.raiseLater(foo, BarEvent, baz, spam).
"""
_self.scheduler.callLater(_obj.raiseEvent, *args, **kw)
def getLogger (self, *args, **kw):
"""
Returns a logger. Pass it the name you want if you'd like to specify
one (e.g., core.getLogger("foo")). If you don't specify a name, it
will make one up based on the module name it is called from.
"""
return getLogger(moreFrames=1,*args, **kw)
def quit (self):
"""
Shut down POX.
"""
import threading
if (self.starting_up or
threading.current_thread() is self.scheduler._thread):
t = threading.Thread(target=self._quit)
t.daemon = True
t.start()
else:
self._quit()
def _quit (self):
# Should probably do locking here
if not self.running:
return
if self.starting_up:
# Try again later
self.quit()
return
self.running = False
# -- gilnei -- update status: clear temporary flows, rules and switches
# (QuerySet.delete() performs the deletion itself and does not return a
# model instance, so there is nothing to save afterwards)
TemporaryFlows.objects.all().delete()
RuleTable.objects.all().delete()
Switches.objects.all().delete()
django.setup()
poxstats = UsageTable.objects.get(servername = 'POX')
poxstats.status = 0
poxstats.save()
log.info("Going down...")
import gc
gc.collect()
self.raiseEvent(GoingDownEvent())
self.callLater(self.scheduler.quit)
for i in range(50):
if self.scheduler._hasQuit: break
gc.collect()
time.sleep(.1)
if not self.scheduler._allDone:
log.warning("Scheduler didn't quit in time")
self.raiseEvent(DownEvent())
log.info("Down.")
#logging.shutdown()
self.quit_condition.acquire()
self.quit_condition.notifyAll()
self.quit_condition.release()
def _get_python_version (self):
try:
import platform
return "{impl} ({vers}/{build})".format(
impl=platform.python_implementation(),
vers=platform.python_version(),
build=platform.python_build()[1].replace("  "," "))
except:
return "Unknown Python"
def _get_platform_info (self):
try:
import platform
return platform.platform().split("\n")[0]
except:
return "Unknown Platform"
def goUp (self):
log.debug(self.version_string + " going up...")
# -- gilnei -- Atualiza Status
# poxstats = UsageTable.objects.get(servername = 'POX')
# poxstats.status = 1
# poxstats.save()
log.debug("Running on " + self._get_python_version())
log.debug("Platform is " + self._get_platform_info())
try:
import platform
vers = '.'.join(platform.python_version().split(".")[:2])
except:
vers = 'an unknown version'
if vers != "2.7":
l = logging.getLogger("version")
if not l.isEnabledFor(logging.WARNING):
l.setLevel(logging.WARNING)
l.warn("POX requires Python 2.7. You're running %s.", vers)
l.warn("If you run into problems, try using Python 2.7 or PyPy.")
self.starting_up = False
self.raiseEvent(GoingUpEvent())
self.raiseEvent(UpEvent())
self._waiter_notify()
if self.running:
log.info(self.version_string + " is up.")
def _waiter_notify (self):
if len(self._waiters):
waiting_for = set()
for entry in self._waiters:
_, name, components, _, _ = entry
components = [c for c in components if not self.hasComponent(c)]
waiting_for.update(components)
log.debug("%s still waiting for: %s"
% (name, " ".join(components)))
names = set([n for _,n,_,_,_ in self._waiters])
#log.info("%i things still waiting on %i components"
# % (names, waiting_for))
log.warn("Still waiting on %i component(s)" % (len(waiting_for),))
def hasComponent (self, name):
"""
Returns True if a component with the given name has been registered.
"""
return name in self.components
def registerNew (self, __componentClass, *args, **kw):
"""
Give it a class (and optional __init__ arguments), and it will
create an instance and register it using the class name. If the
instance has a _core_name property, it will use that instead.
It returns the new instance.
core.registerNew(FooClass, arg) is roughly equivalent to
core.register("FooClass", FooClass(arg)).
"""
name = __componentClass.__name__
obj = __componentClass(*args, **kw)
if hasattr(obj, '_core_name'):
# Default overridden
name = obj._core_name
self.register(name, obj)
return obj
def register (self, name, component=None):
"""
Makes the object "component" available as pox.core.core.name.
If only one argument is specified, the given argument is registered
using its class name as the name.
"""
#TODO: weak references?
if component is None:
component = name
name = component.__class__.__name__
if hasattr(component, '_core_name'):
# Default overridden
name = component._core_name
if name in self.components:
log.warn("Warning: Registered '%s' multipled times" % (name,))
self.components[name] = component
self.raiseEventNoErrors(ComponentRegistered, name, component)
self._try_waiters()
def call_when_ready (self, callback, components=[], name=None, args=(),
kw={}):
"""
Calls a callback when components are ready.
"""
if callback is None:
callback = lambda:None
callback.func_name = "<None>"
if isinstance(components, basestring):
components = [components]
elif isinstance(components, set):
components = list(components)
else:
try:
_ = components[0]
components = list(components)
except:
components = [components]
if name is None:
#TODO: Use inspect here instead
name = getattr(callback, 'func_name')
if name is None:
name = str(callback)
else:
name += "()"
if hasattr(callback, 'im_class'):
name = getattr(callback.im_class,'__name__', '') + '.' + name
if hasattr(callback, '__module__'):
# Is this a good idea? If not here, we should do it in the
# exception printing in try_waiter().
name += " in " + callback.__module__
entry = (callback, name, components, args, kw)
self._waiters.append(entry)
self._try_waiter(entry)
def _try_waiter (self, entry):
"""
Tries a waiting callback.
Calls the callback, removes from _waiters, and returns True if
all are satisfied.
"""
if entry not in self._waiters:
# Already handled
return
callback, name, components, args_, kw_ = entry
for c in components:
if not self.hasComponent(c):
return False
self._waiters.remove(entry)
try:
if callback is not None:
callback(*args_,**kw_)
except:
import traceback
msg = "Exception while trying to notify " + name
import inspect
try:
msg += " at " + inspect.getfile(callback)
msg += ":" + str(inspect.getsourcelines(callback)[1])
except:
pass
log.exception(msg)
return True
def _try_waiters (self):
"""
Tries to satisfy all component-waiting callbacks
"""
changed = True
while changed:
changed = False
for entry in list(self._waiters):
if self._try_waiter(entry):
changed = True
def listen_to_dependencies (self, sink, components=None, attrs=True,
short_attrs=False, listen_args={}):
"""
Look through *sink* for handlers named like _handle_component_event.
Use that to build a list of components, and append any components
explicitly specified by *components*.
listen_args is a dict of "component_name"={"arg_name":"arg_value",...},
allowing you to specify additional arguments to addListeners().
When all the referenced components are registered, do the following:
1) Set up all the event listeners
2) Call "_all_dependencies_met" on *sink* if it exists
3) If attrs=True, set attributes on *sink* for each component
(e.g., sink._openflow_ would be set to core.openflow)
For example, if topology is a dependency, a handler for topology's
SwitchJoin event would be defined like so:
def _handle_topology_SwitchJoin (self, ...):
*NOTE*: The semantics of this function changed somewhat in the
Summer 2012 milestone, though its intention remains the same.
"""
if components is None:
components = set()
elif isinstance(components, basestring):
components = set([components])
else:
components = set(components)
for c in dir(sink):
if not c.startswith("_handle_"): continue
if c.count("_") < 3: continue
c = '_'.join(c.split("_")[2:-1])
components.add(c)
if None in listen_args:
# This means add it to all...
args = listen_args.pop(None)
for k,v in args.iteritems():
for c in components:
if c not in listen_args:
listen_args[c] = {}
if k not in listen_args[c]:
listen_args[c][k] = v
if set(listen_args).difference(components):
log.error("Specified listen_args for missing component(s): %s" %
(" ".join(set(listen_args).difference(components)),))
def done (sink, components, attrs, short_attrs):
if attrs or short_attrs:
for c in components:
if short_attrs:
attrname = c
else:
attrname = '_%s_' % (c,)
setattr(sink, attrname, getattr(self, c))
for c in components:
if hasattr(getattr(self, c), "_eventMixin_events"):
kwargs = {"prefix":c}
kwargs.update(listen_args.get(c, {}))
getattr(self, c).addListeners(sink, **kwargs)
getattr(sink, "_all_dependencies_met", lambda : None)()
self.call_when_ready(done, components, name=sink.__class__.__name__,
args=(sink,components,attrs,short_attrs))
if not self.starting_up:
self._waiter_notify()
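# A minimal usage sketch (hypothetical component class, following the
# topology/SwitchJoin example from the docstring above):
#
#   class MyComponent (object):
#     def _handle_topology_SwitchJoin (self, event):
#       pass    # invoked for topology's SwitchJoin events
#     def _all_dependencies_met (self):
#       pass    # called once "topology" has registered
#
#   comp = MyComponent()
#   core.listen_to_dependencies(comp)
#
# The "topology" dependency is derived from the handler name, listeners are
# added once it registers, and (with attrs=True) comp._topology_ is set to
# core.topology.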
def __getattr__ (self, name):
if name not in self.components:
raise AttributeError("'%s' not registered" % (name,))
return self.components[name]
core = None
def initialize ():
global core
core = POXCore()
return core
# The below is a big hack to make tests and doc tools work.
# We should do something better.
def _maybe_initialize ():
import sys
if 'unittest' in sys.modules or 'nose' in sys.modules:
initialize()
return
import __main__
mod = getattr(__main__, '__file__', '')
if 'pydoc' in mod:
initialize()
return
_maybe_initialize()
|
pitmSsrRelay.py
|
#!/usr/bin/python
# pitmSsrRelay
import os
import json
import hashlib
import json
import struct
import socket
import syslog
import sys
import threading
import time
from pitmCfg import pitmCfg
from pitmLCDisplay import *
from gpiotools import gpiotools
class pitmSsrRelay:
def __init__(self):
self.logging=3 # 1 = syslog, 2 = stderr, 3 = syslog + stderr (repeated low-importance messages suppressed)
self.lastLog=["","","","","","","","","","",""]
if not os.path.exists("simulator"):
self.logging=3
syslog.openlog( facility=syslog.LOG_LOCAL7)
self.cfg = pitmCfg()
self.gpio = gpiotools()
self.lcdDisplay=pitmLCDisplay()
# our first job is to make sure all the relays are set to 1
# so that they stay N/O
# we do this in co-operation with the master
# controlling a transistor to ensure the relay
# opto-isolators don't have power until
# after we have done this
self.gpio.output('ssrZoneA',0)
self.gpio.output('ssrZoneB',0)
# self.gpio.output('tSsrFan',0)
self.mcastMembership=False
self.zoneTemp=-1
self.zoneTarget=-1
self.zoneTempTimestamp=-1
self.ssrZoneA=False
self.ssrZoneB=False
self.ssrPinA=False
self.ssrPinB=False
# Used to broadcast the gpio status
self._relayZoneA=False
self._relayZoneB=False
self._relayZoneUseA=False
self._relayZoneUseB=False
self._gpiossrA=False
self._gpiossrB=False
self.hltActive=False
self.boilActive=False
self.cycle=4
self.zoneAduty=0
self.zoneBduty=0
self.zoneAmeter=0
self.zoneBmeter=0
# used for zone toggling
self.useZoneA=True
self.zoneToggleCount=0
self.singleZone=True
self.ssrFanRequiredUntil=-1
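# Descriptive note on the control scheme below (inferred from the code):
# self.cycle is the SSR switching period in seconds, and zoneAduty/zoneBduty
# scale the on-time within each period in the zone SSR threads (a duty equal
# to self.cycle is treated as fully on). zoneToggleCount accumulates on-time
# and, once it exceeds ~33, the load is switched from one heating element to
# the other so that a single zone is not driven continuously.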
def __del__(self):
self.gpio.output('ssrZoneA',0)
self.gpio.output('ssrZoneB',0)
# self.gpio.output('tSsrFan',0)
def uncontrol(self):
self._log("Uncontrol Called")
self.gpio.output('ssrZoneA',0)
self.gpio.output('ssrZoneB',0)
# self.gpio.output('tSsrFan',0)
def _log(self,msg,importance=10):
if self.logging == 1:
if importance > 9:
syslog.syslog(syslog.LOG_DEBUG, msg)
elif self.logging == 2:
sys.stderr.write("%s\n" %(msg))
elif self.logging == 3:
if (importance > 9) or ( (("%s" %(time.time())).split(".")[0][-3:] == "000") or (not self.lastLog[importance] == msg)):
syslog.syslog(syslog.LOG_DEBUG, msg)
self.lastLog[importance]=msg
sys.stderr.write("%s\n" %(msg))
def _err(self,msg):
syslog.syslog(syslog.LOG_ERR, msg)
sys.stderr.write("%s\n" %(msg))
def submission(self):
self._log("Submitting to control of Controller")
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 4)
self.sock.bind(('', self.cfg.mcastPort))
mreq = struct.pack("4sl", socket.inet_aton(self.cfg.mcastGroup), socket.INADDR_ANY)
self.sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
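# Standard UDP multicast join: reuse the address, bind to the control port,
# then add membership for the configured group so that control messages from
# the master controller are received in the loop below.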
while True:
(data, addr) = self.sock.recvfrom(1200)
try:
cm = json.loads( data )
except:
self._log("Error unpickling input message\n%s" %(data))
return
checksum = cm['_checksum']
cm['_checksum'] =" "
ourChecksum = hashlib.sha1("%s%s" %(cm,self.cfg.checksum)).hexdigest()
# if time.time() > self.ssrFanRequiredUntil:
# self.gpio.output('tSsrFan',0)
if not cm['_mode'].count("hlt") and not cm['_mode'].count("boil"):
print "not hlt not boil zoneA = 0"
self.gpio.output('zoneA',0)
self.gpio.output('zoneB',0)
self.gpio.output('ssrZoneA',0)
self.gpio.output('ssrZoneB',0)
self.gpio.output('zoneAuse',0)
self.gpio.output('zoneBuse',0)
self._relayZoneA=False
self._relayZoneB=False
self._relayZoneUseA=False
self._relayZoneUseB=False
self._gpiossrA=False
self._gpiossrB=False
#
# HLT (mash water) heating processing
#
if self.hltActive and not cm['_mode'].count("hlt"):
self._log("HLT Reset")
if not os.path.exists("simulator"):
self.lcdDisplay.sendMessage("No target/zone temp A",3,importance=-8) # this resets the message
self.hltActive=False
# self.gpio.output('zoneA',0)
# self.gpio.output('zoneB',0)
# self.gpio.output('zoneAuse',0)
# self.gpio.output('zoneBuse',0)
if cm['_mode'].count("hlt") and not self.hltActive and not cm['_mode'] == "idle":
self._log("HLT Init")
self._log(" - setting zoneA power on to HLT mode")
self._log(" - setting zoneB power on to HLT mode")
self.gpio.output('zoneA',1)
self.gpio.output('zoneB',1)
self.gpio.output('zoneAuse',0)
self.gpio.output('zoneBuse',0)
self._relayZoneA=True
self._relayZoneB=True
self._relayZoneUseA=False
self._relayZoneUseB=False
self.hltActive=True
self.zoneAduty=1
self.zoneBduty=0
self.useZoneA=True
spargeOrHlt=False
if cm['_mode'].count("sparge") and not cm['_mode'] == "idle":
self.hltActive=True
(tMin,tMax,tTarget)=cm['sparge']
self.zoneTarget=tTarget
# print "Using Sparge Target",tTarget
spargeOrHlt=True
elif cm['_mode'].count("hlt") and not cm['_mode'] == "idle":
self.hltActive=True
(tMin,tMax,tTarget)=cm['hlt']
self.zoneTarget=tTarget
# print "Using HLT Target",tTarget
spargeOrHlt=True
if spargeOrHlt:
if self.zoneTemp == -1 or self.zoneTarget == -1:
self._log("no target temp - sparge/hlt")
if not os.path.exists("simulator"):
self.lcdDisplay.sendMessage("No target/zone temp A",3,importance=8)
time.sleep(3)
else:
if not os.path.exists("simulator"):
self.lcdDisplay.sendMessage("No target/zone temp A",3,importance=-8)
# if we are 95% of the temperature target then we will set this to 1
if self.zoneTemp < (self.zoneTarget * 0.95):
loadRequired=self.cycle
elif self.zoneTemp > self.zoneTarget:
loadRequired=0
else:
loadRequired=0.85
# load required
self._log("HLT: load required %s %.1f %.1f " %(loadRequired,self.zoneTemp,self.zoneTarget),importance=2)
# print "Load Required ",loadRequired,self.zoneTemp,self.zoneTarget,self.zoneToggleCount
# print " ZONE A",self.useZoneA,self.zoneAduty,self.ssrZoneA,self.zoneAmeter
# print " ZONE B", "-----",self.zoneBduty,self.ssrZoneB,self.zoneBmeter
if self.zoneToggleCount > 33:
self.zoneToggleCount=0
if self.useZoneA:
self._log("HLT: switching from A to B")
self.useZoneA=False
self.zoneAduty=0
self.zoneBduty=loadRequired
else:
self._log("HLT: switching from B to A")
self.useZoneA=True
self.zoneBduty=0
self.zoneAduty=loadRequired
if self.zoneTemp < self.zoneTarget:
if self.useZoneA:
self.ssrZoneA=True
self.zoneAduty=loadRequired
self.zoneBduty=0
# print "Line 221: ssrZoneA=True"
else:
self.ssrZoneB=True
self.zoneAduty=0
self.zoneBduty=loadRequired
# print "Line 224: ssrZoneB=True"
else:
self.ssrZoneA=False
self.ssrZoneB=False
# print "Line 227: ssrZoneA=False"
# print "Line 228: ssrZoneB=False"
#
# BOIL heating processing
#
if self.boilActive and not cm['_mode'].count("boil"):
self._log("BOIL Reset")
if not os.path.exists("simulator"):
self.lcdDisplay.sendMessage("No target/zone temp A",3,importance=-8) # this resets the message
self.boilActive=False
if cm['_mode'].count("boil") and not self.boilActive and not cm['_mode'] == "idle":
self._log("BOIL Init")
self._log(" - setting zoneA power on to BOIL mode")
self._log(" - setting zoneB power on to BOIL mode")
self.gpio.output('zoneA',1)
self.gpio.output('zoneB',1)
self.gpio.output('zoneAuse',1)
self.gpio.output('zoneBuse',1)
self._relayZoneA=True
self._relayZoneB=True
self._relayZoneUseA=True
self._relayZoneUseB=True
self.boilActive=True
self.zoneAduty=1
self.zoneBduty=0
self.useZoneA=True
self.useZoneB=True
if cm['_mode'].count("boil") and not cm['_mode'] == "idle":
self.boilActive=True
(tMin,tMax,tTarget)=cm['boil']
self.zoneTarget=tTarget
if self.zoneTemp == -1 or self.zoneTarget == -1:
self._log("boil: no target/temp")
if not os.path.exists("simulator"):
self.lcdDisplay.sendMessage("No target/zone temp A",3,importance=8)
time.sleep(3)
else:
if not os.path.exists("simulator"):
self.lcdDisplay.sendMessage("No target/zone temp A",3,importance=-8)
# run at full load until within 0.47 degrees of the target, then throttle back
if self.zoneTemp < (self.zoneTarget - 0.47):
loadRequired=self.cycle
elif self.zoneTemp > self.zoneTarget:
loadRequired=0
else:
loadRequired=0.85
# load required
self._log("BOIL: load required %s %.1f %.1f " %(loadRequired,self.zoneTemp,self.zoneTarget),importance=2)
# print "BOIL:Load Required ",loadRequired,self.zoneTemp,self.zoneTarget,self.zoneToggleCount
# print "BOIL: ZONE A",self.useZoneA,self.zoneAduty,self.ssrZoneA,self.zoneAmeter
# print "BOIL: ZONE B",self.zoneBduty,self.ssrZoneB,self.zoneBmeter
if (self.zoneTemp < self.zoneTarget * 0.90) or os.path.exists("ipc/use-both-boil-elements"):
# if we are less than 90% of the target then use both elements
self.useZoneA=True
self.useZoneB=True
self.singleZone=False
self.zoneAduty=loadRequired
self.zoneBduty=loadRequired
else:
self.singleZone=True
if self.singleZone:
if self.zoneToggleCount > 33:
if self.useZoneA:
self._log("BOIL: switching from A to B")
self.useZoneA=False
self.useZoneB=True
self.zoneAduty=0
self.zoneBduty=loadRequired
self.ssrZoneA=False
self.zoneToggleCount=0
else:
self._log("BOIL: switching from B to A")
self.useZoneB=False
self.useZoneA=True
self.zoneBduty=0
self.zoneAduty=loadRequired
self.ssrZoneB=False
self.zoneToggleCount=0
else:
if self.useZoneA:
self.zoneAduty=loadRequired
self.zoneBduty=0
else:
self.zoneAduty=0
self.zoneBduty=loadRequired
if self.zoneTemp < self.zoneTarget:
if self.useZoneA and self.useZoneB:
self.ssrZoneA=True
self.ssrZoneB=True
elif self.useZoneA:
self.ssrZoneA=True
self.ssrZoneB=False
else:
self.ssrZoneB=True
self.ssrZoneA=False
else:
self.ssrZoneA=False
self.ssrZoneB=False
def zoneTempThread(self):
self._log("Listening for temeprature from Zone A")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, 4)
sock.bind(('', self.cfg.mcastTemperaturePort))
mreq = struct.pack("4sl", socket.inet_aton(self.cfg.mcastGroup), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
while True:
(data, addr) = sock.recvfrom(1200)
try:
cm = json.loads( data )
except:
self._log("Error unickling input message\n%s" %(data))
return
checksum = cm['_checksum']
cm['_checksum'] =" "
ourChecksum = hashlib.sha1("%s%s" %(cm,self.cfg.checksum)).hexdigest()
if cm['currentResult'].has_key( self.cfg.hltProbe ) and self.hltActive:
if cm['currentResult'][self.cfg.hltProbe]['valid']:
self.zoneTemp = float( cm['currentResult'][self.cfg.hltProbe]['temperature'])
self.zoneTempTimestamp=time.time()
else:
self.lcdDisplay.sendMessage("Temp Result Error",2)
if cm['currentResult'].has_key( self.cfg.boilProbe ) and self.boilActive:
if cm['currentResult'][self.cfg.boilProbe]['valid']:
self.zoneTemp = float( cm['currentResult'][self.cfg.boilProbe]['temperature'])
self.zoneTempTimestamp=time.time()
else:
self.lcdDisplay.sendMessage("Temp Result Error",2)
def zoneBssrThread(self):
self._log("Zone B SSR Thread active")
while True:
if not self.hltActive and not self.boilActive:
self.gpio.output('ssrZoneB',0)
self._gpiossrB=False
self.ssrPinB=False
time.sleep(1)
if self.hltActive or self.boilActive:
if self.ssrZoneB:
if self.zoneBduty == 0:
print "\tB. DUTY = 0 (temp %.2f target %.2f)" %(self.zoneTemp,self.zoneTarget)
time.sleep(1)
else:
while self.singleZone and self.ssrPinA:
print "t\t... waiting for SSR A to stop firing"
time.sleep(0.2)
print "\tB. ON : ",time.time()," for ", self.zoneBduty*self.cycle," has been active for", self.zoneBmeter,"(",self.zoneToggleCount,")",self.zoneTarget,self.zoneTemp
self.gpio.output('ssrZoneB',1)
self._gpiossrB=True
self.ssrPinB=True
time.sleep(self.zoneBduty*self.cycle)
# self.ssrFanRequiredUntil=time.time()+30
# self.gpio.output('tSsrFan',1)
self.ssrZoneB=True
if self.zoneBduty == self.cycle:
print "\tB. duty time is set to 100pcnt"
else:
print "\tB. OFF: ",time.time()," for ", (self.cycle-(self.zoneBduty*self.cycle))
self.gpio.output('ssrZoneB',0)
self._gpiossrB=False
self.ssrPinB=False
if self.zoneBduty == self.cycle:
time.sleep(0)
else:
print "zone B duty/cycle ",self.zoneBduty,self.cycle
(self.cycle-(self.zoneBduty*self.cycle))
time.sleep(self.cycle-(self.zoneBduty*self.cycle))
self.zoneBmeter=self.zoneBmeter + (self.zoneBduty*self.cycle)
self.zoneToggleCount=self.zoneToggleCount+ (self.zoneBduty*self.cycle)
else:
print "\tB. SSR MASTER FLAG OFF (temp %.2f target %.2f)" %(self.zoneTemp,self.zoneTarget)
self.ssrPinB=False
self.gpio.output('ssrZoneB',0)
self._gpiossrB=False
time.sleep(1)
def zoneAssrThread(self):
self._log("Zone A SSR Thread active")
while True:
if not self.hltActive and not self.boilActive:
self.gpio.output('ssrZoneA',0)
self._gpiossrA=False
self.ssrPinA=False
time.sleep(1)
if self.hltActive or self.boilActive:
if self.ssrZoneA:
if self.zoneAduty == 0:
print "\tA. DUTY = 0 (temp %.2f target %.2f)" %(self.zoneTemp,self.zoneTarget)
time.sleep(1)
else:
while self.singleZone and self.ssrPinB:
print "t\t... waiting for SSR B to stop firing"
time.sleep(0.2)
print "\tA. ON : ",time.time()," for ", self.zoneAduty*self.cycle," has been active for", self.zoneAmeter,"(",self.zoneToggleCount,")",self.zoneTarget,self.zoneTemp
# self.ssrFanRequiredUntil=time.time()+30
# self.gpio.output('tSsrFan',1)
self.gpio.output('ssrZoneA',1)
self._gpiossrA=True
self.ssrPinA=True
if self.zoneAduty == self.cycle:
time.sleep(self.cycle)
else:
time.sleep(self.zoneAduty*self.cycle)
if self.zoneAduty == self.cycle:
print "\tA. duty time is set to 1"
else:
print "\tA. OFF: ",time.time()," for ", (self.cycle-(self.zoneAduty*self.cycle))
self.gpio.output('ssrZoneA',0)
self._gpiossrA=False
self.ssrPinA=False
if self.zoneAduty == self.cycle:
time.sleep(0)
else:
time.sleep(self.cycle-(self.zoneAduty*self.cycle))
print "zone A duty/cycle ",self.zoneAduty,self.cycle,(self.cycle-(self.zoneAduty*self.cycle))
self.zoneAmeter=self.zoneAmeter + (self.zoneAduty * self.cycle)
self.zoneToggleCount=self.zoneToggleCount+ (self.zoneAduty*self.cycle)
else:
print "\tA. SSR MASTER FLAG OFF (temp %.2f target %.2f)" %(self.zoneTemp,self.zoneTarget)
self.gpio.output('ssrZoneA',0)
self._gpiossrA=False
self._relayZoneUseA=False
self.ssrPinA=False
time.sleep(1)
def broadcastResult(self):
print "advertising our SSR capabiltiies"
sendSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sendSocket.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 3)
controlMessage={}
controlMessage['_operation'] = 'ssrrelay'
controlMessage['_checksum'] =" "
checksum = "%s%s" %(controlMessage,self.cfg.checksum)
controlMessage['_checksum'] = hashlib.sha1(self.cfg.checksum).hexdigest()
while 1:
controlMessage['relayZoneA'] = self._relayZoneA
controlMessage['relayZoneB'] = self._relayZoneB
controlMessage['relayZoneUseA'] = self._relayZoneUseA
controlMessage['relayZoneUseB'] = self._relayZoneUseB
controlMessage['gpioSsrA'] = self._gpiossrA
controlMessage['gpioSsrB'] = self._gpiossrB
if self._relayZoneA:
o=open("ipc/relayZoneA","w")
o.close()
else:
try:
os.unlink("ipc/relayZoneA")
except:
pass
if self._relayZoneB:
o=open("ipc/relayZoneB","w")
o.close()
else:
try:
os.unlink("ipc/relayZoneB")
except:
pass
if self._relayZoneUseA:
o=open("ipc/relayZoneUseA","w")
o.close()
else:
try:
os.unlink("ipc/relayZoneUseA")
except:
pass
if self._relayZoneUseB:
o=open("ipc/relayZoneUseB","w")
o.close()
else:
try:
os.unlink("ipc/relayZoneUseB")
except:
pass
# print "broadcastResult,",self._gpiossrA,self._gpiossrB
if self._gpiossrA:
o=open("ipc/gpioSsrA","w")
o.close()
else:
# print "trying to remove gpioSsrA file"
try:
os.unlink("ipc/gpioSsrA")
# print "scucess"
except:
pass
if self._gpiossrB:
o=open("ipc/gpioSsrB","w")
o.close()
else:
try:
os.unlink("ipc/gpioSsrB")
except:
pass
msg= json.dumps(controlMessage)
msg= "%s%s" %(msg," "*(1200-len(msg)))
sendSocket.sendto( msg ,(self.cfg.mcastGroup,self.cfg.mcastSsrRelayPort))
time.sleep(1)
if __name__ == '__main__':
try:
controller = pitmSsrRelay()
# get under the control of the flasher
broadcastResult = threading.Thread(target=controller.broadcastResult)
broadcastResult.daemon = True
broadcastResult.start()
# get under the control of the controller
controlThread = threading.Thread(target=controller.submission)
controlThread.daemon = True
controlThread.start()
# get temperature status from zone a
zoneTempThread = threading.Thread(target=controller.zoneTempThread)
zoneTempThread.daemon = True
zoneTempThread.start()
# # start a SSR thread
zoneAssrThread = threading.Thread(target=controller.zoneAssrThread)
zoneAssrThread.daemon = True
zoneAssrThread.start()
#
# # start a SSR thread
zoneBssrThread = threading.Thread(target=controller.zoneBssrThread)
zoneBssrThread.daemon = True
zoneBssrThread.start()
while 1:
time.sleep(1)
except KeyboardInterrupt:
controller.uncontrol()
pass
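# Illustrative sketch, not part of this program: one way a listener on the same
# multicast group could consume the status datagrams produced by broadcastResult()
# above. Each datagram is a JSON object right-padded with spaces to 1200 bytes,
# so the receiver strips the padding before decoding. The group/port arguments
# correspond to self.cfg.mcastGroup / self.cfg.mcastSsrRelayPort; the function
# name and field handling here are assumptions, kept commented out on purpose.
#
# import json, socket, struct
#
# def listen_for_ssr_status(group, port):
#     sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
#     sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#     sock.bind(('', port))
#     mreq = struct.pack("4sl", socket.inet_aton(group), socket.INADDR_ANY)
#     sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
#     while True:
#         data, addr = sock.recvfrom(1200)
#         status = json.loads(data.strip())
#         if status.get('_operation') == 'ssrrelay':
#             print status['gpioSsrA'], status['gpioSsrB']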
|
eyes.py
|
#!/usr/bin/python
# This is a hasty port of the Teensy eyes code to Python...all kludgey with
# an embarrassing number of globals in the frame() function and stuff.
# Needed to get SOMETHING working, can focus on improvements next.
import board
import busio
import serial
import adafruit_tsl2591
import argparse
import math
import pi3d
import random
import threading
import time
import RPi.GPIO as GPIO
from svg.path import Path, parse_path
from xml.dom.minidom import parse
from gfxutil import *
from snake_eyes_bonnet import SnakeEyesBonnet
from serial_input import Serial_input
# INPUT CONFIG for eye motion ----------------------------------------------
# ANALOG INPUTS REQUIRE SNAKE EYES BONNET
JOYSTICK_X_IN = -1 # Analog input for eye horiz pos (-1 = auto)
JOYSTICK_Y_IN = -1 # Analog input for eye vert position (")
PUPIL_IN = 1 # Analog input for pupil control (-1 = auto)
JOYSTICK_X_FLIP = False # If True, reverse stick X axis
JOYSTICK_Y_FLIP = False # If True, reverse stick Y axis
PUPIL_IN_FLIP = False # If True, reverse reading from PUPIL_IN
TRACKING = True # If True, eyelid tracks pupil
PUPIL_SMOOTH = 16 # If > 0, filter input from PUPIL_IN
PUPIL_MIN = 0.0 # Lower analog range from PUPIL_IN
PUPIL_MAX = 1.0 # Upper "
WINK_L_PIN = 22 # GPIO pin for LEFT eye wink button
BLINK_PIN = 23 # GPIO pin for blink button (BOTH eyes)
WINK_R_PIN = 24 # GPIO pin for RIGHT eye wink button
AUTOBLINK = True # If True, eyes blink autonomously
CRAZY_EYES = False # If True, each eye moves in different directions
lux = 0 # this is for the light sensor
OP_MODE = 0 # 0 = natural mode (eyes move autonomously), 1 = operation mode (eyes hold still and follow external commands)
v = 0.2 # pupil size
curXSet = 0.0 # stores the X position in operation mode; zero means centered
curYSet = 0.0 # stores the Y position in operation mode
earliest_sound_sensor = "O"
sound_sensor_one = 0
sound_sensor_two = 0
sound_sensor_three = 0
sound_sensor_four = 0
# GPIO initialization ------------------------------------------------------
GPIO.setmode(GPIO.BCM)
if WINK_L_PIN >= 0: GPIO.setup(WINK_L_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
if BLINK_PIN >= 0: GPIO.setup(BLINK_PIN , GPIO.IN, pull_up_down=GPIO.PUD_UP)
if WINK_R_PIN >= 0: GPIO.setup(WINK_R_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# set the button for the mode switch
GPIO.setup(17, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(27, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# ADC stuff ----------------------------------------------------------------
# ADC channels are read and stored in a separate thread to avoid slowdown
# from blocking operations. The animation loop can read at its leisure.
if JOYSTICK_X_IN >= 0 or JOYSTICK_Y_IN >= 0 or PUPIL_IN >= 0:
bonnet = SnakeEyesBonnet(daemon=True)
bonnet.setup_channel(JOYSTICK_X_IN, reverse=JOYSTICK_X_FLIP)
bonnet.setup_channel(JOYSTICK_Y_IN, reverse=JOYSTICK_Y_FLIP)
bonnet.setup_channel(PUPIL_IN, reverse=PUPIL_IN_FLIP)
bonnet.start()
# Load SVG file, extract paths & convert to point lists --------------------
dom = parse("graphics/eye2.svg")
vb = get_view_box(dom)
pupilMinPts = get_points(dom, "pupilMin" , 32, True , True )
pupilMaxPts = get_points(dom, "pupilMax" , 32, True , True )
irisPts = get_points(dom, "iris" , 32, True , True )
scleraFrontPts = get_points(dom, "scleraFront" , 0, False, False)
scleraBackPts = get_points(dom, "scleraBack" , 0, False, False)
upperLidClosedPts = get_points(dom, "upperLidClosed", 33, False, True )
upperLidOpenPts = get_points(dom, "upperLidOpen" , 33, False, True )
upperLidEdgePts = get_points(dom, "upperLidEdge" , 33, False, False)
lowerLidClosedPts = get_points(dom, "lowerLidClosed", 33, False, False)
lowerLidOpenPts = get_points(dom, "lowerLidOpen" , 33, False, False)
lowerLidEdgePts = get_points(dom, "lowerLidEdge" , 33, False, False)
# Set up display and initialize pi3d ---------------------------------------
DISPLAY = pi3d.Display.create(samples=4)
DISPLAY.set_background(0, 0, 0, 1) # r,g,b,alpha
# eyeRadius is the size, in pixels, at which the whole eye will be rendered
# onscreen. eyePosition, also pixels, is the offset (left or right) from
# the center point of the screen to the center of each eye. This geometry
# is explained more in-depth in fbx2.c.
eyePosition = DISPLAY.width / 4
eyeRadius = 128 # Default; use 240 for IPS screens
parser = argparse.ArgumentParser()
parser.add_argument("--radius", type=int)
args = parser.parse_args()
if args.radius:
eyeRadius = args.radius
# A 2D camera is used, mostly to allow for pixel-accurate eye placement,
# but also because perspective isn't really helpful or needed here, and
# also this allows eyelids to be handled somewhat easily as 2D planes.
# Line of sight is down Z axis, allowing conventional X/Y cartesian
# coords for 2D positions.
cam = pi3d.Camera(is_3d=False, at=(0,0,0), eye=(0,0,-1000))
shader = pi3d.Shader("uv_light")
light = pi3d.Light(lightpos=(0, -500, -500), lightamb=(0.2, 0.2, 0.2))
# Load texture maps --------------------------------------------------------
irisMap = pi3d.Texture("graphics/Dog1.jpg" , mipmap=False,
filter=pi3d.GL_LINEAR)
scleraMap = pi3d.Texture("graphics/sclera.png", mipmap=False,
filter=pi3d.GL_LINEAR, blend=True)
lidMap = pi3d.Texture("graphics/lid.png" , mipmap=False,
filter=pi3d.GL_LINEAR, blend=True)
# U/V map may be useful for debugging texture placement; not normally used
#uvMap = pi3d.Texture("graphics/uv.png" , mipmap=False,
# filter=pi3d.GL_LINEAR, blend=False, m_repeat=True)
# Initialize static geometry -----------------------------------------------
# Transform point lists to eye dimensions
scale_points(pupilMinPts , vb, eyeRadius)
scale_points(pupilMaxPts , vb, eyeRadius)
scale_points(irisPts , vb, eyeRadius)
scale_points(scleraFrontPts , vb, eyeRadius)
scale_points(scleraBackPts , vb, eyeRadius)
scale_points(upperLidClosedPts, vb, eyeRadius)
scale_points(upperLidOpenPts , vb, eyeRadius)
scale_points(upperLidEdgePts , vb, eyeRadius)
scale_points(lowerLidClosedPts, vb, eyeRadius)
scale_points(lowerLidOpenPts , vb, eyeRadius)
scale_points(lowerLidEdgePts , vb, eyeRadius)
# Regenerating flexible object geometry (such as eyelids during blinks, or
# iris during pupil dilation) is CPU intensive, can noticeably slow things
# down, especially on single-core boards. To reduce this load somewhat,
# determine a size change threshold below which regeneration will not occur;
# roughly equal to 1/4 pixel, since 4x4 area sampling is used.
# Determine change in pupil size to trigger iris geometry regen
irisRegenThreshold = 0.0
a = points_bounds(pupilMinPts) # Bounds of pupil at min size (in pixels)
b = points_bounds(pupilMaxPts) # " at max size
maxDist = max(abs(a[0] - b[0]), abs(a[1] - b[1]), # Determine distance of max
abs(a[2] - b[2]), abs(a[3] - b[3])) # variance around each edge
# maxDist is motion range in pixels as pupil scales between 0.0 and 1.0.
# 1.0 / maxDist is one pixel's worth of scale range. Need 1/4 that...
if maxDist > 0: irisRegenThreshold = 0.25 / maxDist
# Determine change in eyelid values needed to trigger geometry regen.
# This is done a little differently than the pupils...instead of bounds,
# the distance between the middle points of the open and closed eyelid
# paths is evaluated, then similar 1/4 pixel threshold is determined.
upperLidRegenThreshold = 0.0
lowerLidRegenThreshold = 0.0
p1 = upperLidOpenPts[len(upperLidOpenPts) // 2]
p2 = upperLidClosedPts[len(upperLidClosedPts) // 2]
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
d = dx * dx + dy * dy
if d > 0: upperLidRegenThreshold = 0.25 / math.sqrt(d)
p1 = lowerLidOpenPts[len(lowerLidOpenPts) // 2]
p2 = lowerLidClosedPts[len(lowerLidClosedPts) // 2]
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
d = dx * dx + dy * dy
if d > 0: lowerLidRegenThreshold = 0.25 / math.sqrt(d)
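# Illustrative arithmetic (assumed numbers): if the open- and closed-lid midpoints
# were 40 px apart, then d = 40*40 = 1600, sqrt(d) = 40, and the threshold would be
# 0.25 / 40 = 0.00625 -- lid weights must change by at least that much before the
# eyelid mesh is regenerated.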
# Generate initial iris meshes; vertex elements will get replaced on
# a per-frame basis in the main loop, this just sets up textures, etc.
rightIris = mesh_init((32, 4), (0, 0.5 / irisMap.iy), True, False)
rightIris.set_textures([irisMap])
rightIris.set_shader(shader)
# Left iris map U value is offset by 0.5; effectively a 180 degree
# rotation, so it's less obvious that the same texture is in use on both.
leftIris = mesh_init((32, 4), (0.5, 0.5 / irisMap.iy), True, False)
leftIris.set_textures([irisMap])
leftIris.set_shader(shader)
irisZ = zangle(irisPts, eyeRadius)[0] * 0.99 # Get iris Z depth, for later
# Eyelid meshes are likewise temporary; texture coordinates are
# assigned here but geometry is dynamically regenerated in main loop.
leftUpperEyelid = mesh_init((33, 5), (0, 0.5 / lidMap.iy), False, True)
leftUpperEyelid.set_textures([lidMap])
leftUpperEyelid.set_shader(shader)
leftLowerEyelid = mesh_init((33, 5), (0, 0.5 / lidMap.iy), False, True)
leftLowerEyelid.set_textures([lidMap])
leftLowerEyelid.set_shader(shader)
rightUpperEyelid = mesh_init((33, 5), (0, 0.5 / lidMap.iy), False, True)
rightUpperEyelid.set_textures([lidMap])
rightUpperEyelid.set_shader(shader)
rightLowerEyelid = mesh_init((33, 5), (0, 0.5 / lidMap.iy), False, True)
rightLowerEyelid.set_textures([lidMap])
rightLowerEyelid.set_shader(shader)
# Generate scleras for each eye...start with a 2D shape for lathing...
angle1 = zangle(scleraFrontPts, eyeRadius)[1] # Sclera front angle
angle2 = zangle(scleraBackPts , eyeRadius)[1] # " back angle
aRange = 180 - angle1 - angle2
pts = []
for i in range(24):
ca, sa = pi3d.Utility.from_polar((90 - angle1) - aRange * i / 23)
pts.append((ca * eyeRadius, sa * eyeRadius))
# Scleras are generated independently (object isn't re-used) so each
# may have a different image map (heterochromia, corneal scar, or the
# same image map can be offset on one so the repetition isn't obvious).
leftEye = pi3d.Lathe(path=pts, sides=64)
leftEye.set_textures([scleraMap])
leftEye.set_shader(shader)
re_axis(leftEye, 0)
rightEye = pi3d.Lathe(path=pts, sides=64)
rightEye.set_textures([scleraMap])
rightEye.set_shader(shader)
re_axis(rightEye, 0.5) # Image map offset = 180 degree rotation
# Init global stuff --------------------------------------------------------
mykeys = pi3d.Keyboard() # For capturing key presses
startX = random.uniform(-30.0, 30.0)
n = math.sqrt(900.0 - startX * startX)
startY = random.uniform(-n, n)
destX = startX
destY = startY
curX = startX
curY = startY
moveDuration = random.uniform(0.075, 0.175)
holdDuration = random.uniform(0.1, 1.1)
startTime = 0.0
isMoving = False
startXR = random.uniform(-30.0, 30.0)
n = math.sqrt(900.0 - startX * startX)
startYR = random.uniform(-n, n)
destXR = startXR
destYR = startYR
curXR = startXR
curYR = startYR
moveDurationR = random.uniform(0.075, 0.175)
holdDurationR = random.uniform(0.1, 1.1)
startTimeR = 0.0
isMovingR = False
frames = 0
beginningTime = time.time()
rightEye.positionX(-eyePosition)
rightIris.positionX(-eyePosition)
rightUpperEyelid.positionX(-eyePosition)
rightUpperEyelid.positionZ(-eyeRadius - 42)
rightLowerEyelid.positionX(-eyePosition)
rightLowerEyelid.positionZ(-eyeRadius - 42)
leftEye.positionX(eyePosition)
leftIris.positionX(eyePosition)
leftUpperEyelid.positionX(eyePosition)
leftUpperEyelid.positionZ(-eyeRadius - 42)
leftLowerEyelid.positionX(eyePosition)
leftLowerEyelid.positionZ(-eyeRadius - 42)
currentPupilScale = 0.5
prevPupilScale = -1.0 # Force regen on first frame
prevLeftUpperLidWeight = 0.5
prevLeftLowerLidWeight = 0.5
prevRightUpperLidWeight = 0.5
prevRightLowerLidWeight = 0.5
prevLeftUpperLidPts = points_interp(upperLidOpenPts, upperLidClosedPts, 0)
prevLeftLowerLidPts = points_interp(lowerLidOpenPts, lowerLidClosedPts, 0)
prevRightUpperLidPts = points_interp(upperLidOpenPts, upperLidClosedPts, 0)
prevRightLowerLidPts = points_interp(lowerLidOpenPts, lowerLidClosedPts, 0)
luRegen = False
llRegen = False
ruRegen = False
rlRegen = False
timeOfLastBlink = 0.0
timeToNextBlink = 1.0
# These are per-eye (left, right) to allow winking:
blinkStateLeft = 0 # NOBLINK
blinkStateRight = 0
blinkDurationLeft = 0.1
blinkDurationRight = 0.1
blinkStartTimeLeft = 0
blinkStartTimeRight = 0
trackingPos = 0
trackingPosR = 0
# Generate one frame of imagery
def frame(p):
global startX, startY, destX, destY, curX, curY
global startXR, startYR, destXR, destYR, curXR, curYR
global moveDuration, holdDuration, startTime, isMoving
global moveDurationR, holdDurationR, startTimeR, isMovingR
global frames
global leftIris, rightIris
global pupilMinPts, pupilMaxPts, irisPts, irisZ
global leftEye, rightEye
global leftUpperEyelid, leftLowerEyelid, rightUpperEyelid, rightLowerEyelid
global upperLidOpenPts, upperLidClosedPts, lowerLidOpenPts, lowerLidClosedPts
global upperLidEdgePts, lowerLidEdgePts
global prevLeftUpperLidPts, prevLeftLowerLidPts, prevRightUpperLidPts, prevRightLowerLidPts
global leftUpperEyelid, leftLowerEyelid, rightUpperEyelid, rightLowerEyelid
global prevLeftUpperLidWeight, prevLeftLowerLidWeight, prevRightUpperLidWeight, prevRightLowerLidWeight
global prevPupilScale
global irisRegenThreshold, upperLidRegenThreshold, lowerLidRegenThreshold
global luRegen, llRegen, ruRegen, rlRegen
global timeOfLastBlink, timeToNextBlink
global blinkStateLeft, blinkStateRight
global blinkDurationLeft, blinkDurationRight
global blinkStartTimeLeft, blinkStartTimeRight
global trackingPos
global trackingPosR
global OP_MODE
global curXSet, curYSet
DISPLAY.loop_running()
now = time.time()
dt = now - startTime
dtR = now - startTimeR
frames += 1
# if(now > beginningTime):
# print(frames/(now-beginningTime))
if(OP_MODE == 1):
# Eye position from external commands (operation/clinical mode)
curX = curXSet
curY = curYSet
else :
# Autonomous eye position
if isMoving == True:
if dt <= moveDuration:
scale = (now - startTime) / moveDuration
# Ease in/out curve: 3*t^2-2*t^3
scale = 3.0 * scale * scale - 2.0 * scale * scale * scale
curX = startX + (destX - startX) * scale
curY = startY + (destY - startY) * scale
else:
startX = destX
startY = destY
curX = destX
curY = destY
holdDuration = random.uniform(0.1, 1.1)
startTime = now
isMoving = False
else:
if dt >= holdDuration:
destX = random.uniform(-30.0, 30.0)
n = math.sqrt(900.0 - destX * destX)
destY = random.uniform(-n, n)
moveDuration = random.uniform(0.075, 0.175)
startTime = now
isMoving = True
# repeat for other eye if CRAZY_EYES
if CRAZY_EYES:
if isMovingR == True:
if dtR <= moveDurationR:
scale = (now - startTimeR) / moveDurationR
# Ease in/out curve: 3*t^2-2*t^3
scale = 3.0 * scale * scale - 2.0 * scale * scale * scale
curXR = startXR + (destXR - startXR) * scale
curYR = startYR + (destYR - startYR) * scale
else:
startXR = destXR
startYR = destYR
curXR = destXR
curYR = destYR
holdDurationR = random.uniform(0.1, 1.1)
startTimeR = now
isMovingR = False
else:
if dtR >= holdDurationR:
destXR = random.uniform(-30.0, 30.0)
n = math.sqrt(900.0 - destXR * destXR)
destYR = random.uniform(-n, n)
moveDurationR = random.uniform(0.075, 0.175)
startTimeR = now
isMovingR = True
# Regenerate iris geometry only if size changed by >= 1/4 pixel
if abs(p - prevPupilScale) >= irisRegenThreshold:
# Interpolate points between min and max pupil sizes
interPupil = points_interp(pupilMinPts, pupilMaxPts, p)
# Generate mesh between interpolated pupil and iris bounds
mesh = points_mesh((None, interPupil, irisPts), 4, -irisZ, True)
# Assign to both eyes
leftIris.re_init(pts=mesh)
rightIris.re_init(pts=mesh)
prevPupilScale = p
# Eyelid WIP
if AUTOBLINK and (now - timeOfLastBlink) >= timeToNextBlink:
timeOfLastBlink = now
duration = random.uniform(0.035, 0.06)
if blinkStateLeft != 1:
blinkStateLeft = 1 # ENBLINK
blinkStartTimeLeft = now
blinkDurationLeft = duration
if blinkStateRight != 1:
blinkStateRight = 1 # ENBLINK
blinkStartTimeRight = now
blinkDurationRight = duration
timeToNextBlink = duration * 3 + random.uniform(0.0, 4.0)
if blinkStateLeft: # Left eye currently winking/blinking?
# Check if blink time has elapsed...
if (now - blinkStartTimeLeft) >= blinkDurationLeft:
# Yes...increment blink state, unless...
if (blinkStateLeft == 1 and # Enblinking and...
((BLINK_PIN >= 0 and # blink pin held, or...
GPIO.input(BLINK_PIN) == GPIO.LOW) or
(WINK_L_PIN >= 0 and # wink pin held
GPIO.input(WINK_L_PIN) == GPIO.LOW))):
# Don't advance yet; eye is held closed
pass
else:
blinkStateLeft += 1
if blinkStateLeft > 2:
blinkStateLeft = 0 # NOBLINK
else:
blinkDurationLeft *= 2.0
blinkStartTimeLeft = now
else:
if WINK_L_PIN >= 0 and GPIO.input(WINK_L_PIN) == GPIO.LOW:
blinkStateLeft = 1 # ENBLINK
blinkStartTimeLeft = now
blinkDurationLeft = random.uniform(0.035, 0.06)
if blinkStateRight: # Right eye currently winking/blinking?
# Check if blink time has elapsed...
if (now - blinkStartTimeRight) >= blinkDurationRight:
# Yes...increment blink state, unless...
if (blinkStateRight == 1 and # Enblinking and...
((BLINK_PIN >= 0 and # blink pin held, or...
GPIO.input(BLINK_PIN) == GPIO.LOW) or
(WINK_R_PIN >= 0 and # wink pin held
GPIO.input(WINK_R_PIN) == GPIO.LOW))):
# Don't advance yet; eye is held closed
pass
else:
blinkStateRight += 1
if blinkStateRight > 2:
blinkStateRight = 0 # NOBLINK
else:
blinkDurationRight *= 2.0
blinkStartTimeRight = now
else:
if WINK_R_PIN >= 0 and GPIO.input(WINK_R_PIN) == GPIO.LOW:
blinkStateRight = 1 # ENBLINK
blinkStartTimeRight = now
blinkDurationRight = random.uniform(0.035, 0.06)
if BLINK_PIN >= 0 and GPIO.input(BLINK_PIN) == GPIO.LOW:
duration = random.uniform(0.035, 0.06)
if blinkStateLeft == 0:
blinkStateLeft = 1
blinkStartTimeLeft = now
blinkDurationLeft = duration
if blinkStateRight == 0:
blinkStateRight = 1
blinkStartTimeRight = now
blinkDurationRight = duration
if TRACKING:
n = 0.4 - curY / 60.0
if n < 0.0: n = 0.0
elif n > 1.0: n = 1.0
#trackingPos = (trackingPos * 3.0 + n) * 0.25
if CRAZY_EYES:
n = 0.4 - curYR / 60.0
if n < 0.0: n = 0.0
elif n > 1.0: n = 1.0
#trackingPosR = (trackingPosR * 3.0 + n) * 0.25
if blinkStateLeft:
n = (now - blinkStartTimeLeft) / blinkDurationLeft
if n > 1.0: n = 1.0
if blinkStateLeft == 2: n = 1.0 - n
else:
n = 0.0
newLeftUpperLidWeight = trackingPos + (n * (1.0 - trackingPos))
#newLeftLowerLidWeight = (1.0 - trackingPos) + (n * trackingPos)
newLeftLowerLidWeight = trackingPos + (n * (1.0 - trackingPos))
if blinkStateRight:
n = (now - blinkStartTimeRight) / blinkDurationRight
if n > 1.0: n = 1.0
if blinkStateRight == 2: n = 1.0 - n
else:
n = 0.0
if CRAZY_EYES:
newRightUpperLidWeight = trackingPosR + (n * (1.0 - trackingPosR))
newRightLowerLidWeight = (1.0 - trackingPosR) + (n * trackingPosR)
else:
newRightUpperLidWeight = trackingPos + (n * (1.0 - trackingPos))
newRightLowerLidWeight = trackingPos + (n * (1.0 - trackingPos))
#newRightLowerLidWeight = (1.0 - trackingPos) + (n * trackingPos)
if (luRegen or (abs(newLeftUpperLidWeight - prevLeftUpperLidWeight) >=
upperLidRegenThreshold)):
newLeftUpperLidPts = points_interp(upperLidOpenPts,
upperLidClosedPts, newLeftUpperLidWeight)
if newLeftUpperLidWeight > prevLeftUpperLidWeight:
leftUpperEyelid.re_init(pts=points_mesh(
(upperLidEdgePts, prevLeftUpperLidPts,
newLeftUpperLidPts), 5, 0, False))
else:
leftUpperEyelid.re_init(pts=points_mesh(
(upperLidEdgePts, newLeftUpperLidPts,
prevLeftUpperLidPts), 5, 0, False))
prevLeftUpperLidPts = newLeftUpperLidPts
prevLeftUpperLidWeight = newLeftUpperLidWeight
luRegen = True
else:
luRegen = False
if (llRegen or (abs(newLeftLowerLidWeight - prevLeftLowerLidWeight) >=
lowerLidRegenThreshold)):
newLeftLowerLidPts = points_interp(lowerLidOpenPts,
lowerLidClosedPts, newLeftLowerLidWeight)
if newLeftLowerLidWeight > prevLeftLowerLidWeight:
leftLowerEyelid.re_init(pts=points_mesh(
(lowerLidEdgePts, prevLeftLowerLidPts,
newLeftLowerLidPts), 5, 0, False))
else:
leftLowerEyelid.re_init(pts=points_mesh(
(lowerLidEdgePts, newLeftLowerLidPts,
prevLeftLowerLidPts), 5, 0, False))
prevLeftLowerLidWeight = newLeftLowerLidWeight
prevLeftLowerLidPts = newLeftLowerLidPts
llRegen = True
else:
llRegen = False
if (ruRegen or (abs(newRightUpperLidWeight - prevRightUpperLidWeight) >=
upperLidRegenThreshold)):
newRightUpperLidPts = points_interp(upperLidOpenPts,
upperLidClosedPts, newRightUpperLidWeight)
if newRightUpperLidWeight > prevRightUpperLidWeight:
rightUpperEyelid.re_init(pts=points_mesh(
(upperLidEdgePts, prevRightUpperLidPts,
newRightUpperLidPts), 5, 0, True))
else:
rightUpperEyelid.re_init(pts=points_mesh(
(upperLidEdgePts, newRightUpperLidPts,
prevRightUpperLidPts), 5, 0, True))
prevRightUpperLidWeight = newRightUpperLidWeight
prevRightUpperLidPts = newRightUpperLidPts
ruRegen = True
else:
ruRegen = False
if (rlRegen or (abs(newRightLowerLidWeight - prevRightLowerLidWeight) >=
lowerLidRegenThreshold)):
newRightLowerLidPts = points_interp(lowerLidOpenPts,
lowerLidClosedPts, newRightLowerLidWeight)
if newRightLowerLidWeight > prevRightLowerLidWeight:
rightLowerEyelid.re_init(pts=points_mesh(
(lowerLidEdgePts, prevRightLowerLidPts,
newRightLowerLidPts), 5, 0, True))
else:
rightLowerEyelid.re_init(pts=points_mesh(
(lowerLidEdgePts, newRightLowerLidPts,
prevRightLowerLidPts), 5, 0, True))
prevRightLowerLidWeight = newRightLowerLidWeight
prevRightLowerLidPts = newRightLowerLidPts
rlRegen = True
else:
rlRegen = False
convergence = 2.0
# Right eye (on screen left)
if CRAZY_EYES:
rightIris.rotateToX(curYR)
rightIris.rotateToY(curXR - convergence)
rightIris.draw()
rightEye.rotateToX(curYR)
rightEye.rotateToY(curXR - convergence)
else:
rightIris.rotateToX(curY)
rightIris.rotateToY(curX - convergence)
rightIris.draw()
rightEye.rotateToX(curY)
rightEye.rotateToY(curX - convergence)
rightEye.draw()
# Left eye (on screen right)
leftIris.rotateToX(curY)
leftIris.rotateToY(curX + convergence)
leftIris.draw()
leftEye.rotateToX(curY)
leftEye.rotateToY(curX + convergence)
leftEye.draw()
leftUpperEyelid.draw()
leftLowerEyelid.draw()
rightUpperEyelid.draw()
rightLowerEyelid.draw()
k = mykeys.read()
if k==27:
mykeys.close()
DISPLAY.stop()
exit(0)
def split( # Recursive simulated pupil response when no analog sensor
startValue, # Pupil scale starting value (0.0 to 1.0)
endValue, # Pupil scale ending value (")
duration, # Start-to-end time, floating-point seconds
range): # +/- random pupil scale at midpoint
startTime = time.time()
if range >= 0.125: # Limit subdivision count, because recursion
duration *= 0.5 # Split time & range in half for subdivision,
range *= 0.5 # then pick random center point within range:
midValue = ((startValue + endValue - range) * 0.5 +
random.uniform(0.0, range))
split(startValue, midValue, duration, range)
split(midValue , endValue, duration, range)
else: # No more subdivisions, do iris motion...
dv = endValue - startValue
while True:
dt = time.time() - startTime
if dt >= duration: break
v = startValue + dv * dt / duration
if v < PUPIL_MIN: v = PUPIL_MIN
elif v > PUPIL_MAX: v = PUPIL_MAX
frame(v) # Draw frame w/interim pupil scale value
# MAIN LOOP -- runs continuously -------------------------------------------
# setup the light sensor
i2c = busio.I2C(board.SCL, board.SDA)
sensorLight = adafruit_tsl2591.TSL2591(i2c)
curTime = time.time()
eyeMoveFlag = True
lux = 0 # this is for the light sensor
# start serial thread
serial_input = Serial_input(9600)
T_serial = threading.Thread(target=serial_input.getSerialString)
T_serial.start()
def pupil_size(size):
global v
v = size
def change_eye_direction(x, y):
global curXSet, curYSet
curXSet = x
curYSet = y
def change_mode(mode):
global OP_MODE
global AUTOBLINK
if(mode == "general"):
OP_MODE = 0
AUTOBLINK = True
time.sleep(0.5)
elif(mode == "clinical"):
OP_MODE = 1
AUTOBLINK = False
time.sleep(0.5)
# dog control function which splits the command string
# and changes features accordingly
def control_dog():
global temp_size
while(True):
time.sleep(2)
if serial_input.queue:
input1 = serial_input.queue.popleft()
inputList = input1.split("_")
try:
if inputList[1] == "mode":
if inputList[2] == "general":
change_mode("general")
change_eye_direction(0, 0)
serial_input.sendSerialString("change mode, general")
elif inputList[2] == "clinical":
change_mode("clinical")
serial_input.sendSerialString("change mode, clinical")
elif inputList[1] == "pupilsize":
temp_size = int(inputList[2])
serial_input.sendSerialString("change pupil size, " + str(temp_size))
elif inputList[1] == "pos":
if inputList[2] == "auto":
posX, posY = int(inputList[3]), int(inputList[4])
change_eye_direction(posX, posY)
else:
posX, posY = int(inputList[2]), int(inputList[3])
change_eye_direction(posX, posY)
serial_input.sendSerialString("change pos, " + str(posX) + ", " + str(posY))
else:
serial_input.sendSerialString("incorrect command")
except:
serial_input.sendSerialString("incorrect command")
# start dog control thread
T_control = threading.Thread(target = control_dog)
T_control.start()
temp_size = 20
# the main loop
while True:
if (time.time() - curTime > 3):
lux = sensorLight.infrared
curTime = time.time()
if (OP_MODE == 0):
if (lux > 500):
v = 0.2
elif (lux <= 500):
v = 0.8
elif (OP_MODE == 1):
pupil_size(temp_size / 100)
#print(v)
if PUPIL_IN >= 0: # Pupil scale from sensor
if v < PUPIL_MIN: v = PUPIL_MIN
elif v > PUPIL_MAX: v = PUPIL_MAX
# Scale to 0.0 to 1.0:
v = (v - PUPIL_MIN) / (PUPIL_MAX - PUPIL_MIN)
if PUPIL_SMOOTH > 0:
v = ((currentPupilScale * (PUPIL_SMOOTH - 1) + v) / PUPIL_SMOOTH)
frame(v)
else: # Fractal auto pupil scale
v = random.random()
split(currentPupilScale, v, 4.0, 1.0)
currentPupilScale = v
|
common_utils.py
|
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import math
from functools import partial
import inspect
import io
import copy
import operator
import argparse
import unittest
import warnings
import random
import contextlib
import shutil
import threading
from pathlib import Path
import socket
import subprocess
import time
from collections import OrderedDict
from collections.abc import Sequence
from contextlib import contextmanager, closing
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
import __main__ # type: ignore[import]
import errno
from typing import cast, Any, Dict, Iterable, Iterator, Optional, Union
from unittest.mock import MagicMock
import numpy as np
import expecttest
from .._core import \
(_compare_tensors_internal, _compare_scalars_internal, _compare_return_type)
import torch
import torch.cuda
from torch.testing import make_tensor
from torch._utils_internal import get_writable_path
from torch._six import string_classes
from torch import Tensor
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
torch.backends.disable_global_flags()
FILE_SCHEMA = "file://"
if sys.platform == 'win32':
FILE_SCHEMA = "file:///"
# Environment variable `IN_CI` is set in `.jenkins/common.sh`.
IS_IN_CI = os.getenv('IN_CI') == '1'
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1'
IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1'
DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json'
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
slow_tests_dict: Optional[Dict[str, Any]] = None
disabled_tests_dict: Optional[Dict[str, Any]] = None
class _TestParametrizer(object):
"""
Decorator class for parametrizing a test function, yielding a set of new tests spawned
from the original generic test, each specialized for a specific set of test inputs. For
example, parametrizing a test across the set of ops will result in a test function per op.
The decision of how to parametrize / what to parametrize over is intended to be implemented
by each derived class.
In the details, the decorator adds a 'parametrize_fn' property to the test function that is called
during device-specific test instantiation performed in instantiate_device_type_tests(). Because of this,
there is no need to parametrize over device type, as that is already handled separately.
If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new
composite 'parametrize_fn' will be created that generates tests with the product of the parameters
generated by the old and new parametrize_fns. This allows for convenient composability of decorators.
Args:
handles_dtypes (bool): If True, indicates that it is the responsibility of the decorator to handle
dtypes internally. This allows for more flexibility when needed (e.g. for op-specific dtype handling).
Default: True
"""
def __init__(self, handles_dtypes=True):
self.handles_dtypes = handles_dtypes
def _parametrize_test(self, test, generic_cls, device_cls):
"""
Parametrizes the given test function across whatever dimension is specified by the derived class.
Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all
ops, all modules, or all ops + their associated dtypes.
Args:
test (fn): Test function to parametrize over
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None
if the tests are not part of a device-specific set
Returns:
Generator object returning 3-tuples of:
test (fn): Parametrized test function; must support a device arg and args for any params
test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to
the base name of the test
param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64})
"""
raise NotImplementedError
def __call__(self, fn):
if hasattr(fn, 'parametrize_fn'):
# Do composition with the product of args.
old_parametrize_fn = fn.parametrize_fn
new_parametrize_fn = self._parametrize_test
def composite_fn(test, generic_cls, device_cls,
old_parametrize_fn=old_parametrize_fn,
new_parametrize_fn=new_parametrize_fn):
old_tests = [(test, test_name, param_kwargs) for (test, test_name, param_kwargs) in
old_parametrize_fn(test, generic_cls, device_cls)]
for (old_test, old_test_name, old_param_kwargs) in old_tests:
for (new_test, new_test_name, new_param_kwargs) in \
new_parametrize_fn(old_test, generic_cls, device_cls):
full_param_kwargs = {**old_param_kwargs, **new_param_kwargs}
yield (new_test, '{}_{}'.format(new_test_name, old_test_name), full_param_kwargs)
fn.parametrize_fn = composite_fn
old_handles_dtypes = fn.handles_dtypes if hasattr(fn, 'handles_dtypes') else False
if self.handles_dtypes and old_handles_dtypes:
raise RuntimeError('Cannot compose multiple parametrization decorators that handle dtypes; '
'their dtype handling conflicts')
fn.handles_dtypes = self.handles_dtypes or old_handles_dtypes
else:
fn.parametrize_fn = self._parametrize_test
fn.handles_dtypes = self.handles_dtypes
return fn
def instantiate_parametrized_tests(generic_cls):
"""
Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a
decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by
parametrized tests with specialized names.
Args:
generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
"""
for attr_name in tuple(dir(generic_cls)):
class_attr = getattr(generic_cls, attr_name)
if not hasattr(class_attr, 'parametrize_fn'):
continue
if hasattr(class_attr, 'handles_dtypes') and class_attr.handles_dtypes:
raise RuntimeError('instantiate_parametrized_tests() should not be used with decorators '
'that handle dtypes internally (e.g. @ops, @modules, etc.). Use '
'instantiate_device_type_tests() with these instead.')
# Remove the generic test from the test class.
delattr(generic_cls, attr_name)
# Add parametrized tests to the test class.
def instantiate_test_helper(cls, name, test, param_kwargs):
@wraps(test)
def instantiated_test(self, param_kwargs=param_kwargs):
test(self, **param_kwargs)
assert not hasattr(generic_cls, name), "Redefinition of test {0}".format(name)
setattr(generic_cls, name, instantiated_test)
for (test, test_suffix, param_kwargs) in class_attr.parametrize_fn(
class_attr, generic_cls=generic_cls, device_cls=None):
full_name = '{}_{}'.format(test.__name__, test_suffix)
instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs)
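# Illustrative sketch (hypothetical test class, not part of this module): a generic
# test decorated with the `parametrize` decorator defined below is expanded by
# instantiate_parametrized_tests() into one concrete test per parameter value,
# here test_add_commutes_x_0, test_add_commutes_x_1 and test_add_commutes_x_2.
#
# class ExampleTests(unittest.TestCase):
#     @parametrize("x", range(3))
#     def test_add_commutes(self, x):
#         self.assertEqual(x + 1, 1 + x)
#
# instantiate_parametrized_tests(ExampleTests)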
class subtest(object):
"""
Explicit subtest case for use with test parametrization.
Allows for explicit naming of individual subtest cases as well as applying
decorators to the parametrized test.
Args:
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name (str): Optional name to use for the test.
decorators (iterable): Iterable of decorators to apply to the generated test.
"""
__slots__ = ['arg_values', 'name', 'decorators']
def __init__(self, arg_values, name=None, decorators=None):
self.arg_values = arg_values
self.name = name
self.decorators = decorators if decorators else []
class parametrize(_TestParametrizer):
"""
Decorator for applying generic test parametrizations.
The interface for this decorator is modeled after `@pytest.mark.parametrize`.
Basic usage between this decorator and pytest's is identical. The first argument
should be a string containing comma-separated names of parameters for the test, and
the second argument should be an iterable returning values or tuples of values for
the case of multiple parameters.
Beyond this basic usage, the decorator provides some additional functionality that
pytest does not.
1. Parametrized tests end up as generated test functions on unittest test classes.
Since this differs from how pytest works, this decorator takes on the additional
responsibility of naming these test functions. The default test names consists of
the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"),
but custom names can be defined using `name_fn` or the `subtest` structure (see below).
2. The decorator specially handles parameter values of type `subtest`, which allows for
more fine-grained control over both test naming and test execution. In particular, it can
be used to tag subtests with explicit test names or apply arbitrary decorators (see examples
below).
Examples::
@parametrize("x", range(5))
def test_foo(self, x):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')])
def test_bar(self, x, y):
...
@parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')],
name_fn=lambda x, y: '{}_{}'.format(x, y))
def test_bar_custom_names(self, x, y):
...
@parametrize("x, y", [subtest((1, 2), name='double'),
subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]),
subtest((1, 4), name='quadruple')])
def test_baz(self, x, y):
...
Args:
arg_str (str): String of arg names separated by commas (e.g. "x,y").
arg_values (iterable): Iterable of arg values (e.g. range(10)) or
tuples of arg values (e.g. [(1, 2), (3, 4)]).
name_fn (callable): Optional function that takes in parameters and returns subtest name.
"""
def __init__(self, arg_str, arg_values, name_fn=None):
super().__init__(handles_dtypes=False)
self.arg_names = arg_str.split(',')
self.arg_values = arg_values
self.name_fn = name_fn
def _formatted_str_repr(self, name, value):
""" Returns a string representation for the given arg that is suitable for use in test function names. """
if isinstance(value, torch.dtype):
return dtype_name(value)
elif isinstance(value, torch.device):
return str(value)
# Can't use isinstance as it would cause a circular import
elif value.__class__.__name__ == 'OpInfo' or value.__class__.__name__ == 'ModuleInfo':
return value.formatted_name
else:
# Include name and value separated by underscore.
return '{}_{}'.format(name, str(value).replace('.', '_'))
def _default_subtest_name(self, values):
return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)])
def _get_subtest_name(self, values, explicit_name=None):
if explicit_name:
subtest_name = explicit_name
elif self.name_fn:
subtest_name = self.name_fn(*values)
else:
subtest_name = self._default_subtest_name(values)
return subtest_name
def _parametrize_test(self, test, generic_cls, device_cls):
if len(self.arg_names) == 0:
# No additional parameters needed for the test.
test_name = device_cls.device_type if device_cls else ''
yield (test, test_name, {})
else:
# Each "values" item is expected to be either:
# * A tuple of values with one for each arg. For a single arg, a single item is expected.
# * A subtest instance with arg_values matching the previous.
for values in self.arg_values:
maybe_name = None
if isinstance(values, subtest):
sub = values
values = sub.arg_values
maybe_name = sub.name
# Apply decorators.
@wraps(test)
def test_wrapper(*args, **kwargs):
return test(*args, **kwargs)
for decorator in sub.decorators:
test_wrapper = decorator(test_wrapper)
gen_test = test_wrapper
else:
gen_test = test
values = list(values) if len(self.arg_names) > 1 else [values]
if len(values) != len(self.arg_names):
raise RuntimeError('Expected # values == # arg names, but got: {} '
'values and {} names for test "{}"'.format(
len(values), len(self.arg_names), test.__name__))
param_kwargs = {
name: value for name, value in zip(self.arg_names, values)
}
subtest_name = self._get_subtest_name(values, explicit_name=maybe_name)
test_name = '{}{}'.format(subtest_name, '_' + device_cls.device_type if device_cls else '')
if '.' in test_name:
raise RuntimeError('Test name cannot contain periods, but got: {}'.format(test_name))
yield (gen_test, test_name, param_kwargs)
class ProfilingMode(Enum):
LEGACY = 1
SIMPLE = 2
PROFILING = 3
def cppProfilingFlagsToProfilingMode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
if old_prof_exec_state:
if old_prof_mode_state:
return ProfilingMode.PROFILING
else:
return ProfilingMode.SIMPLE
else:
return ProfilingMode.LEGACY
@contextmanager
def enable_profiling_mode_for_profiling_tests():
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def enable_profiling_mode():
old_prof_exec_state = torch._C._jit_set_profiling_executor(True)
old_prof_mode_state = torch._C._jit_set_profiling_mode(True)
try:
yield
finally:
torch._C._jit_set_profiling_executor(old_prof_exec_state)
torch._C._jit_set_profiling_mode(old_prof_mode_state)
@contextmanager
def num_profiled_runs(num_runs):
old_num_runs = torch._C._jit_set_num_profiled_runs(num_runs)
try:
yield
finally:
torch._C._jit_set_num_profiled_runs(old_num_runs)
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__
def prof_callable(callable, *args, **kwargs):
if 'profile_and_replay' in kwargs:
del kwargs['profile_and_replay']
if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
with enable_profiling_mode_for_profiling_tests():
callable(*args, **kwargs)
return callable(*args, **kwargs)
return callable(*args, **kwargs)
def prof_func_call(*args, **kwargs):
return prof_callable(func_call, *args, **kwargs)
def prof_meth_call(*args, **kwargs):
return prof_callable(meth_call, *args, **kwargs)
# TODO fix when https://github.com/python/mypy/issues/2427 is addressed
torch._C.ScriptFunction.__call__ = prof_func_call # type: ignore[assignment]
torch._C.ScriptMethod.__call__ = prof_meth_call # type: ignore[assignment]
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
parser = argparse.ArgumentParser()
parser.add_argument('--subprocess', action='store_true',
help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--jit_executor', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
parser.add_argument('--save-xml', nargs='?', type=str,
const=_get_test_report_path(),
default=_get_test_report_path() if IS_IN_CI else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE)
parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE)
# Only run when -h or --help flag is active to display both unittest and parser help messages.
def run_unittest_help(argv):
unittest.main(argv=argv)
if '-h' in sys.argv or '--help' in sys.argv:
help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
help_thread.start()
help_thread.join()
args, remaining = parser.parse_known_args()
if args.jit_executor == 'legacy':
GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
# infer flags based on the default settings
GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
expecttest.ACCEPT = args.accept
UNITTEST_ARGS = [sys.argv[0]] + remaining
torch.manual_seed(SEED)
# CI Prefix path used only on CI environment
CI_TEST_PREFIX = str(Path(os.getcwd()))
def wait_for_process(p):
try:
return p.wait()
except KeyboardInterrupt:
# Give `p` a chance to handle KeyboardInterrupt. Without this,
# `pytest` can't print errors it collected so far upon KeyboardInterrupt.
exit_status = p.wait(timeout=5)
if exit_status is not None:
return exit_status
else:
p.kill()
raise
except: # noqa: B001,E722, copied from python core library
p.kill()
raise
finally:
# Always call p.wait() to ensure exit
p.wait()
def shell(command, cwd=None, env=None):
sys.stdout.flush()
sys.stderr.flush()
# The following cool snippet is copied from the Py3 core library's subprocess.call,
# with only these changes:
# 1. `except KeyboardInterrupt` block added for SIGINT handling.
# 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
# `p.wait()` in a `finally` block for the code to be portable.
#
# https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
return wait_for_process(p)
# Used to run the same test with different tensor types
def repeat_test_for_types(dtypes):
def repeat_helper(f):
@wraps(f)
def call_helper(self, *args):
for dtype in dtypes:
with TestCase.subTest(self, dtype=dtype):
f(self, *args, dtype=dtype)
return call_helper
return repeat_helper
def discover_test_cases_recursively(suite_or_case):
if isinstance(suite_or_case, unittest.TestCase):
return [suite_or_case]
rc = []
for element in suite_or_case:
rc.extend(discover_test_cases_recursively(element))
return rc
def get_test_names(test_cases):
return ['.'.join(case.id().split('.')[-2:]) for case in test_cases]
def _print_test_names():
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
for name in get_test_names(test_cases):
print(name)
def chunk_list(lst, nchunks):
return [lst[i::nchunks] for i in range(nchunks)]
# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api
def sanitize_test_filename(filename):
# inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed
if filename.startswith(CI_TEST_PREFIX):
filename = filename[len(CI_TEST_PREFIX) + 1:]
strip_py = re.sub(r'.py$', '', filename)
return re.sub('/', r'.', strip_py)
def run_tests(argv=UNITTEST_ARGS):
# import test files.
if IMPORT_SLOW_TESTS:
if os.path.exists(IMPORT_SLOW_TESTS):
global slow_tests_dict
with open(IMPORT_SLOW_TESTS, 'r') as fp:
slow_tests_dict = json.load(fp)
else:
print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}')
if IMPORT_DISABLED_TESTS:
if os.path.exists(IMPORT_DISABLED_TESTS):
global disabled_tests_dict
with open(IMPORT_DISABLED_TESTS, 'r') as fp:
disabled_tests_dict = json.load(fp)
else:
print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}')
# Determine the test launch mechanism
if TEST_DISCOVER:
_print_test_names()
elif TEST_IN_SUBPROCESS:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
failed_tests = []
for case in test_cases:
test_case_full_name = case.id().split('.', 1)[1]
exitcode = shell([sys.executable] + argv + [test_case_full_name])
if exitcode != 0:
failed_tests.append(test_case_full_name)
assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
len(failed_tests), '\n\t'.join(failed_tests))
elif RUN_PARALLEL > 1:
suite = unittest.TestLoader().loadTestsFromModule(__main__)
test_cases = discover_test_cases_recursively(suite)
test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
processes = []
for i in range(RUN_PARALLEL):
command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
processes.append(subprocess.Popen(command, universal_newlines=True))
failed = False
for p in processes:
failed |= wait_for_process(p) != 0
assert not failed, "Some test shards have failed"
elif TEST_SAVE_XML is not None:
# import here so that non-CI doesn't need xmlrunner installed
import xmlrunner # type: ignore[import]
test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1)))
test_report_path = TEST_SAVE_XML + LOG_SUFFIX
test_report_path = os.path.join(test_report_path, test_filename)
os.makedirs(test_report_path, exist_ok=True)
verbose = '--verbose' in argv or '-v' in argv
if verbose:
print('Test results will be stored in {}'.format(test_report_path))
unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
elif REPEAT_COUNT > 1:
for _ in range(REPEAT_COUNT):
if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
sys.exit(-1)
else:
unittest.main(argv=argv)
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
def is_avx512_vnni_supported():
if sys.platform != 'linux':
return False
with open("/proc/cpuinfo", encoding="ascii") as f:
lines = f.read()
return "avx512vnni" in lines
IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported()
if IS_WINDOWS:
@contextmanager
def TemporaryFileName(*args, **kwargs):
# Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
# opens the file, and it cannot be opened multiple times in Windows. To support Windows,
# close the file after creation and try to remove it manually
if 'delete' in kwargs:
if kwargs['delete'] is not False:
raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.")
else:
kwargs['delete'] = False
f = tempfile.NamedTemporaryFile(*args, **kwargs)
try:
f.close()
yield f.name
finally:
os.unlink(f.name)
else:
@contextmanager # noqa: T484
def TemporaryFileName(*args, **kwargs):
with tempfile.NamedTemporaryFile(*args, **kwargs) as f:
yield f.name
if IS_WINDOWS:
@contextmanager
def TemporaryDirectoryName(suffix=None):
# On Windows the directory created by TemporaryDirectory is likely to be removed prematurely,
# so we first create the directory using mkdtemp and then remove it manually
try:
dir_name = tempfile.mkdtemp(suffix=suffix)
yield dir_name
finally:
shutil.rmtree(dir_name)
else:
@contextmanager # noqa: T484
def TemporaryDirectoryName(suffix=None):
with tempfile.TemporaryDirectory(suffix=suffix) as d:
yield d
IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8'
def _check_module_exists(name):
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')
# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
# See #64427
TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'
# Disables non-slow tests (these tests enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests. (I could have done an enum, but
# it felt a little awkward.)
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'
# Disables noarch tests; all but one CI configuration disables these. We don't
# disable them for local runs because you still want to run them
# (unlike slow tests!)
TEST_SKIP_NOARCH = os.getenv('PYTORCH_TEST_SKIP_NOARCH', '0') == '1'
# Determine whether to enable cuda memory leak check.
# CUDA mem leak check is expensive and thus we don't want to execute it on every
# test case / configuration.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'
# Disables tests for when on Github Actions
ON_GHA = os.getenv('GITHUB_ACTIONS', '0') == '1'
# True if CI is running TBB-enabled Pytorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")
# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
np.bool_ : torch.bool,
np.uint8 : torch.uint8,
np.int8 : torch.int8,
np.int16 : torch.int16,
np.int32 : torch.int32,
np.int64 : torch.int64,
np.float16 : torch.float16,
np.float32 : torch.float32,
np.float64 : torch.float64,
np.complex64 : torch.complex64,
np.complex128 : torch.complex128
}
if IS_WINDOWS:
# Size of `np.intc` is platform defined.
# It is returned by functions like `bitwise_not`.
# On Windows `int` is 32-bit
# https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
numpy_to_torch_dtype_dict[np.intc] = torch.int
# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}
ALL_TENSORTYPES = [torch.float,
torch.double,
torch.half]
# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
ALL_TENSORTYPES2 = [torch.float,
torch.double,
torch.half,
torch.bfloat16]
else:
ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_WITH_ROCM:
raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
else:
fn(*args, **kwargs)
return wrapper
# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
def dec_fn(fn):
@wraps(fn)
def wrap_fn(self, *args, **kwargs):
if not TEST_WITH_ROCM:
reason = "ROCm not available"
raise unittest.SkipTest(reason)
rocm_version = str(torch.version.hip)
rocm_version = rocm_version.split("-")[0] # ignore git sha
rocm_version_tuple = tuple(int(x) for x in rocm_version.split("."))
if rocm_version_tuple is None or version is None or rocm_version_tuple < tuple(version):
reason = "ROCm {0} is available but {1} required".format(rocm_version_tuple, version)
raise unittest.SkipTest(reason)
return fn(self, *args, **kwargs)
return wrap_fn
return dec_fn
def skipIfNotMiopenSuggestNHWC(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
else:
fn(*args, **kwargs)
return wrapper
# Context manager for setting deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
def __init__(self, deterministic):
self.deterministic = deterministic
def __enter__(self):
self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
torch.use_deterministic_algorithms(self.deterministic)
def __exit__(self, exception_type, exception_value, traceback):
torch.use_deterministic_algorithms(self.deterministic_restore)
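# Illustrative usage sketch (hypothetical op, not part of this module):
#
# with DeterministicGuard(True):
#     y = some_op(x)  # runs with deterministic algorithms enforced
# # the previous flag value is restored here, even if the block raised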
# Context manager for setting the CUDA sync debug mode and resetting it
# to its original value.
# We are not exposing it to the core because sync debug mode is
# global and thus not thread-safe.
class CudaSyncGuard:
def __init__(self, sync_debug_mode):
self.mode = sync_debug_mode
def __enter__(self):
self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
torch.cuda.set_sync_debug_mode(self.mode)
def __exit__(self, exception_type, exception_value, traceback):
torch.cuda.set_sync_debug_mode(self.debug_mode_restore)
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with DeterministicGuard(torch.are_deterministic_algorithms_enabled()):
class CuBLASConfigGuard:
cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'
def __enter__(self):
self.is_cuda10_2_or_higher = (
(torch.version.cuda is not None)
and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
if self.is_cuda10_2_or_higher:
self.cublas_config_restore = os.environ.get(self.cublas_var_name)
os.environ[self.cublas_var_name] = ':4096:8'
def __exit__(self, exception_type, exception_value, traceback):
if self.is_cuda10_2_or_higher:
cur_cublas_config = os.environ.get(self.cublas_var_name)
if self.cublas_config_restore is None:
if cur_cublas_config is not None:
del os.environ[self.cublas_var_name]
else:
os.environ[self.cublas_var_name] = self.cublas_config_restore
with CuBLASConfigGuard():
fn(*args, **kwargs)
return wrapper
def skipIfCompiledWithoutNumpy(fn):
# Even if the numpy module is present, if `USE_NUMPY=0` is used during the
# build, numpy tests will fail
numpy_support = TEST_NUMPY
if numpy_support:
try:
# The numpy module is present, verify that PyTorch is compiled with
# numpy support
torch.from_numpy(np.array([2, 2]))
except RuntimeError:
numpy_support = False
@wraps(fn)
def wrapper(*args, **kwargs):
if not numpy_support:
raise unittest.SkipTest("PyTorch was compiled without numpy support")
else:
fn(*args, **kwargs)
return wrapper
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not torch._C.has_lapack:
raise unittest.SkipTest('PyTorch compiled without Lapack')
else:
fn(*args, **kwargs)
return wrapper
def skipIfNotRegistered(op_name, message):
"""Wraps the decorator to hide the import of the `core`.
Args:
op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
message: message to fail with.
Usage:
@skipIfNotRegistered('MyOp', 'MyOp is not linked!')
This will check if 'MyOp' is in the caffe2.python.core
"""
try:
from caffe2.python import core
skipper = unittest.skipIf(op_name not in core._REGISTERED_OPERATORS,
message)
except ImportError:
skipper = unittest.skip("Cannot import `caffe2.python.core`")
return skipper
def skipIfNoSciPy(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_SCIPY:
            raise unittest.SkipTest("test requires SciPy, but SciPy was not found")
else:
fn(*args, **kwargs)
return wrapper
def skipIfOnGHA(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if ON_GHA:
raise unittest.SkipTest("Test disabled for GHA")
else:
fn(*args, **kwargs)
return wrapper
def skipIfTBB(message="This test makes TBB sad"):
def dec_fn(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if IS_TBB:
raise unittest.SkipTest(message)
else:
fn(*args, **kwargs)
return wrapper
return dec_fn
def slowTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
else:
fn(*args, **kwargs)
wrapper.__dict__['slow_test'] = True
return wrapper
# noarch tests are tests that should only be run on one CI configuration,
# because they don't exercise any interesting platform-specific code,
# so a single passing run indicates the test should pass everywhere.
# See https://github.com/pytorch/pytorch/issues/53743
def noarchTest(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
if TEST_SKIP_NOARCH:
raise unittest.SkipTest("test is noarch: we are skipping noarch tests due to TEST_SKIP_NOARCH")
else:
fn(*args, **kwargs)
return wrapper
def slowAwareTest(fn):
fn.__dict__['slow_test'] = True
return fn
def skipCUDAMemoryLeakCheckIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_memory_leak_check', True): # if current True
fn._do_cuda_memory_leak_check = not condition
return fn
return dec
def skipCUDANonDefaultStreamIf(condition):
def dec(fn):
if getattr(fn, '_do_cuda_non_default_stream', True): # if current True
fn._do_cuda_non_default_stream = not condition
return fn
return dec
def suppress_warnings(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fn(*args, **kwargs)
return wrapper
def to_gpu(obj, type_map=None):
if type_map is None:
type_map = {}
if isinstance(obj, torch.Tensor):
assert obj.is_leaf
t = type_map.get(obj.dtype, obj.dtype)
with torch.no_grad():
res = obj.clone().to(dtype=t, device="cuda")
res.requires_grad = obj.requires_grad
return res
elif torch.is_storage(obj):
return obj.new().resize_(obj.size()).copy_(obj)
elif isinstance(obj, list):
return [to_gpu(o, type_map) for o in obj]
elif isinstance(obj, tuple):
return tuple(to_gpu(o, type_map) for o in obj)
else:
return deepcopy(obj)
def get_function_arglist(func):
return inspect.getfullargspec(func).args
def set_rng_seed(seed):
torch.manual_seed(seed)
random.seed(seed)
if TEST_NUMPY:
np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
rng_state = torch.get_rng_state()
if torch.cuda.is_available():
cuda_rng_state = torch.cuda.get_rng_state()
yield
if torch.cuda.is_available():
torch.cuda.set_rng_state(cuda_rng_state)
torch.set_rng_state(rng_state)
@contextlib.contextmanager
def set_default_dtype(dtype):
saved_dtype = torch.get_default_dtype()
torch.set_default_dtype(dtype)
try:
yield
finally:
torch.set_default_dtype(saved_dtype)
def iter_indices(tensor):
if tensor.dim() == 0:
return range(0)
if tensor.dim() == 1:
return range(tensor.size(0))
return product(*(range(s) for s in tensor.size()))
def is_iterable(obj):
try:
iter(obj)
return True
except TypeError:
return False
def is_iterable_of_tensors(iterable, include_empty=False):
""" Returns True if iterable is an iterable of tensors and False o.w.
If the iterable is empty, the return value is :attr:`include_empty`
"""
# Tensor itself is iterable so we check this first
if isinstance(iterable, torch.Tensor):
return False
try:
if len(iterable) == 0:
return include_empty
for t in iter(iterable):
if not isinstance(t, torch.Tensor):
return False
except TypeError as te:
return False
return True
class CudaNonDefaultStream():
def __enter__(self):
# Before starting CUDA test save currently active streams on all
# CUDA devices and set new non default streams to all CUDA devices
# to ensure CUDA tests do not use default stream by mistake.
beforeDevice = torch.cuda.current_device()
self.beforeStreams = []
for d in range(torch.cuda.device_count()):
self.beforeStreams.append(torch.cuda.current_stream(d))
deviceStream = torch.cuda.Stream(device=d)
torch._C._cuda_setStream(deviceStream._cdata)
torch._C._cuda_setDevice(beforeDevice)
def __exit__(self, exec_type, exec_value, traceback):
# After completing CUDA test load previously active streams on all
# CUDA devices.
beforeDevice = torch.cuda.current_device()
for d in range(torch.cuda.device_count()):
torch._C._cuda_setStream(self.beforeStreams[d]._cdata)
torch._C._cuda_setDevice(beforeDevice)
class CudaMemoryLeakCheck():
def __init__(self, testcase, name=None):
self.name = testcase.id() if name is None else name
self.testcase = testcase
# initialize context & RNG to prevent false positive detections
# when the test is the first to initialize those
from torch.testing._internal.common_cuda import initialize_cuda_context_rng
initialize_cuda_context_rng()
@staticmethod
def get_cuda_memory_usage():
        # we don't need to call CUDA synchronize because the statistics are not tracked
        # at actual freeing, but when the block is marked as free.
num_devices = torch.cuda.device_count()
gc.collect()
return tuple(torch.cuda.memory_allocated(i) for i in range(num_devices))
def __enter__(self):
self.befores = self.get_cuda_memory_usage()
def __exit__(self, exec_type, exec_value, traceback):
# Don't check for leaks if an exception was thrown
if exec_type is not None:
return
afters = self.get_cuda_memory_usage()
for i, (before, after) in enumerate(zip(self.befores, afters)):
self.testcase.assertEqual(
before, after, msg='{} leaked {} bytes CUDA memory on device {}'.format(
self.name, after - before, i))
@contextmanager
def skip_exception_type(exc_type):
try:
yield
except exc_type as e:
raise unittest.SkipTest(f"not implemented: {e}") from e
# The "min_satisfying_examples" setting has been deprecated in hypothesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_IN_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
)
except ImportError:
    print('Failed to import hypothesis in common_utils; tests are not derandomized')
def check_if_enable(test: unittest.TestCase):
test_suite = str(test.__class__).split('\'')[1]
test_name = f'{test._testMethodName} ({test_suite})'
if slow_tests_dict is not None and test_name in slow_tests_dict:
getattr(test, test._testMethodName).__dict__['slow_test'] = True
if not TEST_WITH_SLOW:
raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
if not IS_SANDCASTLE and disabled_tests_dict is not None:
if test_name in disabled_tests_dict:
issue_url, platforms = disabled_tests_dict[test_name]
platform_to_conditional: Dict = {
"mac": IS_MACOS,
"macos": IS_MACOS,
"win": IS_WINDOWS,
"windows": IS_WINDOWS,
"linux": IS_LINUX,
"rocm": TEST_WITH_ROCM
}
if platforms == [] or any([platform_to_conditional[platform] for platform in platforms]):
                raise unittest.SkipTest(
                    f"Test is disabled because an issue exists disabling it: {issue_url}" +
                    f" for {'all ' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " +
                    "If you're seeing this on your local machine and would like to enable this test, " +
                    "please make sure IN_CI is not set and you are not using the flag --import-disabled-tests.")
if TEST_SKIP_FAST:
if not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# Acquires the comparison dtype, required since isclose
# requires that both inputs have the same dtype, and isclose is not supported
# for some device x dtype combinations.
# NOTE: Remaps bfloat16 to float32 since neither the CPU nor the CUDA device type
# supports the needed bfloat16 comparison methods.
# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't
# support the needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
# TODO: update this when promote_types supports bfloat16 and/or
# isclose supports bfloat16.
a_dtype = torch.float32 if a.dtype is torch.bfloat16 else a.dtype
b_dtype = torch.float32 if b.dtype is torch.bfloat16 else b.dtype
compare_dtype = torch.promote_types(a_dtype, b_dtype)
# non-CUDA (CPU, for example) float16 -> float32
# TODO: update this when isclose is implemented for CPU float16
if (compare_dtype is torch.float16 and
(a.device != b.device or a.device.type != 'cuda' or
b.device.type != 'cuda')):
compare_dtype = torch.float32
return compare_dtype
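# Worked example of get_comparison_dtype (a sketch of the logic above, not an
# exhaustive specification): comparing a torch.bfloat16 tensor against a
# torch.float64 tensor remaps bfloat16 -> float32 and then promotes
# (float32, float64) -> float64, so both operands are compared as float64.
# Two float16 CPU tensors promote to float16, which is then remapped to
# float32 because the comparison is not running on CUDA.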
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, seems low risk to inherit from.
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext):
def __exit__(self, exc_type, exc_value, tb):
if exc_type is not None and issubclass(exc_type, NotImplementedError):
self.test_case.skipTest(f"not_implemented: {exc_value}") # type: ignore[attr-defined]
return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
old_val = torch.is_warn_always_enabled()
torch.set_warn_always(new_val)
try:
yield
finally:
torch.set_warn_always(old_val)
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
    # Checker to terminate the test suite early if an unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
            # A CUDA device-side error will cause subsequent test cases to fail.
            # Stop the entire test suite if a RuntimeError is caught during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
except RuntimeError as rte:
return True
return False
else:
return False
@property
def precision(self) -> float:
return self._precision
@precision.setter
def precision(self, prec: float) -> None:
self._precision = prec
@property
def rel_tol(self) -> float:
return self._rel_tol
@rel_tol.setter
def rel_tol(self, prec: float) -> None:
self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
# A policy is a zero-argument function that returns a context manager.
# We don't take the context manager directly as it may be necessary to
# construct it once per test method
def wrap_method_with_policy(self, method, policy):
# Assumes that `method` is the tested function in `self`.
# NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope
# alive, so this cannot be done in setUp and tearDown because
# tearDown is run unconditionally no matter whether the test
# passes or not. For the same reason, we can't wrap the `method`
# call in try-finally and always do the check.
@wraps(method)
def wrapper(self, *args, **kwargs):
with policy():
method(*args, **kwargs)
return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
def run(self, result=None):
super().run(result=result)
# Early terminate test if necessary.
if self._should_stop_test_suite():
result.stop()
def setUp(self):
check_if_enable(self)
set_rng_seed(SEED)
@staticmethod
def _make_crow_indices(n_rows, n_cols, nnz,
*, device, dtype, random=True):
"""Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
the number of specified elements nnz.
If random is True, the column counts of rows are in random
order. Otherwise, the column counts of rows are defined by the
used sampling method.
Sampling method
---------------
The used sampling method was introduced in
https://pearu.github.io/csr_sampling.html, and here we give
only an overall description of the method.
Notice that crow_indices can be defined as cumsum(counts)
where counts is a sequence of non-negative integers satisfying
the following conditions:
len(counts) == n_rows + 1
counts.max() <= n_cols
        where counts[i + 1] is interpreted as the number of specified
        elements in the i-th row.
The used sampling method aims at increasing the diversity of
CSR samples, that is, a CSR sample should contain (i) rows
that are all filled, (ii) rows with no elements at all, and
(iii) rows that are partially filled. At the same time and for
the given total number of specified elements (nnz), there
should be minimal preference to rows with a given number of
        elements. To achieve this, the sampling method is built up on
        a sawtooth model for counts. In the simplest case, we
would have
counts = arange(n_rows + 1) % (n_cols + 1)
that has equal number of all possible column counts per row.
This formula can be used only for specific input values of
n_rows, n_cols, and nnz. To generalize this model to any
combinations of inputs, the counts model above is extended
with an incomplete sawtooth, and the right and lower
rectangular parts that will guarantee that
counts.sum() == nnz
for any combination of n_rows, n_cols, and nnz. Basically,
        we'll find a maximal window in the (n_rows + 1, n_cols + 1) grid
        that is able to hold a sequence of sawteeth and a so-called
        final correction, while the external part of the window is
        filled with counts to meet the nnz constraint exactly.
"""
assert 0 <= nnz <= n_rows * n_cols
def sawteeth(n, m):
            # Return the total number of counts in the sequence of
            # sawteeth that perfectly fits the window defined by n and m
            # inside the (n_rows+1, n_cols+1) rectangle.
M = (n_cols - m) * (n_cols - m + 1) // 2
K = (n_rows - n) % (n_cols - m + 1)
return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
# Different from the original method description, here counts
# has leading 0 required by crow_indices:
counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
n = m = 0
N = sawteeth(n, m)
if N and nnz >= max(N, n_cols):
# determine the width of the sawteeth window. We use bisection to solve
# N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
# for n
n_left = n
n_right = n_rows - 1
N_right = sawteeth(n_right, m)
while n_right - n_left > 1:
n_middle = (n_left + n_right) // 2
N_middle = sawteeth(n_middle, m)
if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
n_right, N_right = n_middle, N_middle
else:
n_left = n_middle
n, N = n_right, N_right
# fill the right rectangle with counts:
assert n
counts[-n:].fill_(n_cols)
if N and nnz - n * n_cols >= max(N, n_rows - n):
# determine the height of the sawteeth window. We use bisection to solve
# N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
# for m.
m_left = m
m_right = n_cols - 1
N_right = sawteeth(n, m_right)
while m_right - m_left > 1:
m_middle = (m_left + m_right) // 2
N_middle = sawteeth(n, m_middle)
if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
m_right, N_right = m_middle, N_middle
else:
m_left = m_middle
m, N = m_right, N_right
# fill the bottom rectangle with counts:
assert m
counts[1:n_rows - n + 1].fill_(m)
if N:
# fill the sawteeth window with counts
q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
(n_cols - m) * (n_cols - m + 1) // 2)
p = 1 + q * (n_cols - m + 1)
if sys.version_info >= (3, 8):
k = math.isqrt(2 * r)
else:
# math.isqrt(x) is available starting from Python 3.8.
                # Here we use int(math.sqrt(x)) as an approximation
                # that appears to give the exact result for all x values
                # less than 2**35 at least; the true upper limit of x is
                # TBD.
k = int(math.sqrt(2 * r))
if k * (k + 1) > 2 * r:
k -= 1
corr = r - k * (k + 1) // 2
assert not ((p > 1) and (m > 0)) # full sawteeth are never on top of a bottom rectangle
# sequence of full sawteeth:
counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
# incomplete sawtooth:
counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
else:
# given input does not support sawteeth
p = 1
corr = nnz - n * n_cols - m * (n_rows - n)
# correction that will guarantee counts.sum() == nnz:
counts[p] += corr
if random:
# randomize crow_indices by shuffling the sawteeth
# sequence:
perm = torch.randperm(n_rows, device=counts.device)
counts[1:] = counts[1:][perm]
# compute crow_indices:
crow_indices = counts
crow_indices.cumsum_(dim=0)
return crow_indices.to(device=device)
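    # Worked example of the counts -> crow_indices relationship used above
    # (illustrative only): for a 3 x 4 CSR tensor with counts [0, 2, 0, 3]
    # (the leading 0 is required by crow_indices), the cumulative sum gives
    # crow_indices = [0, 2, 2, 5], so nnz == 5 and row i owns the column
    # indices in col_indices[crow_indices[i]:crow_indices[i + 1]].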
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype):
sparse_dim = 2
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
assert len(size) == sparse_dim
def random_sparse_csr(n_rows, n_cols, nnz):
crow_indices = self._make_crow_indices(n_rows, n_cols, nnz, device=device, dtype=index_dtype)
col_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_rows):
count = crow_indices[i + 1] - crow_indices[i]
col_indices[crow_indices[i]:crow_indices[i + 1]], _ = torch.sort(
torch.randperm(n_cols, dtype=index_dtype, device=device)[:count])
values = make_tensor([nnz], device=device, dtype=dtype, low=-1, high=1)
return values, crow_indices, col_indices
values, crow_indices, col_indices = random_sparse_csr(size[0], size[1], nnz)
return torch.sparse_csr_tensor(crow_indices,
col_indices,
values, size=size, dtype=dtype, device=device)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
# Assert not given impossible combination, where the sparse dims have
# empty numel, but nnz > 0 makes the indices containing values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
return t.coalesce().to_dense()
# Compares torch function with reference function for given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
n_inp, n_args, n_kwargs = sample_input.numpy()
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
# Some analysis of tolerance by logging tests from test_torch.py can be found
# in https://github.com/pytorch/pytorch/pull/32538.
# dtype name : (rtol, atol)
dtype_precisions = {
torch.float16 : (0.001, 1e-5),
torch.bfloat16 : (0.016, 1e-5),
torch.float32 : (1.3e-6, 1e-5),
torch.float64 : (1e-7, 1e-7),
torch.complex32 : (0.001, 1e-5),
torch.complex64 : (1.3e-6, 1e-5),
torch.complex128 : (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(self, dtype0, dtype1):
rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
self.dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
self.dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
# Checks if two dense tensors are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# If exact_dtype is true both tensors must have the same dtype.
# If exact_device is true both tensors must be on the same device.
# See the "Test Framework Tensor 'Equality'" note for more details.
# NOTE: tensors on different devices are moved to the CPU to be compared when
# exact_device is False.
# NOTE: this function checks the tensors' devices, sizes, and dtypes
# and acquires the appropriate device, dtype, rtol and atol to compare
# them with. It then calls _compare_tensors_internal.
def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
exact_dtype=True, exact_device=False) -> _compare_return_type:
assert (atol is None) == (rtol is None)
if not isinstance(a, torch.Tensor):
return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
if not isinstance(b, torch.Tensor):
return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))
# Validates tensors are on the same device
if exact_device and a.device != b.device:
return (False, ("Attempted to compare equality of tensors on "
"different devices! Got devices {0} and "
"{1}.".format(a.device, b.device)))
# Compares tensors of different devices on the CPU
if a.device != b.device:
a = a.cpu()
b = b.cpu()
# Checks size matches
if a.size() != b.size():
return (False, ("Attempted to compare equality of tensors with "
"different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))
# Checks dtype (if exact_dtype)
if exact_dtype and a.dtype is not b.dtype:
return (False, ("Attempted to compare equality of tensors with "
"different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))
# Acquires rtol and atol
if rtol is None:
rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
# Converts to comparison dtype
dtype = get_comparison_dtype(a, b)
a = a.to(dtype)
b = b.to(dtype)
return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
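    # Worked example of the default-tolerance lookup above (illustrative only):
    # comparing a float16 tensor against a float32 tensor uses
    # rtol = max(1e-3, 1.3e-6) = 1e-3 and atol = max(1e-5, 1e-5) = 1e-5,
    # each then clamped from below by self.rel_tol and self.precision.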
# Checks if two scalars are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# NOTE: this function just acquires rtol and atol
# before calling _compare_scalars_internal.
def _compareScalars(self, a, b, *,
rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
# Acquires rtol and atol
assert (atol is None) == (rtol is None)
if rtol is None:
if isinstance(a, complex) or isinstance(b, complex):
rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
elif isinstance(a, float) or isinstance(b, float):
rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
else:
rtol, atol = 0, 0
rtol = cast(float, rtol)
atol = cast(float, atol)
assert atol is not None
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
return _compare_scalars_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
    # Construct assert messages based on the internal debug message and the user-provided message.
def _get_assert_msg(self, msg, debug_msg=None):
if msg is None:
return debug_msg
else:
return f"\n{msg}" if debug_msg is None else f"{debug_msg}\n{msg}"
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, that means test is written wrongly
# and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def _is_dict(self, obj):
return isinstance(obj, (dict, torch._C.ScriptDict)) # type: ignore[attr-defined]
# Compares x and y
# TODO: default exact_device to True
def assertEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None,
equal_nan=True, exact_dtype=True, exact_device=False) -> None:
assert (atol is None) == (rtol is None), "If one of atol or rtol is specified, then the other must be too"
debug_msg: Optional[str] = None
# Tensor x Number and Number x Tensor comparisons
if isinstance(x, torch.Tensor) and isinstance(y, Number):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, Number):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x np.bool
elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x Tensor
elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
debug_msg = ("Attempted to compare with different is_sparse settings: "
f"Expected: {x.is_sparse}; Actual: {y.is_sparse}.")
super().assertEqual(x.is_sparse, y.is_sparse, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
debug_msg = ("Attempted to compare with different is_quantized settings: "
f"Expected: {x.is_quantized}; Actual: {y.is_quantized}.")
super().assertEqual(x.is_quantized, y.is_quantized, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
if x.is_sparse:
if x.size() != y.size():
debug_msg_sparse = ("Attempted to compare equality of tensors with different sizes: "
f"Expected: {x.size()}; Actual: {y.size()}.")
super().assertTrue(False, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg_sparse))
x = x.coalesce()
y = y.coalesce()
indices_result, debug_msg_indices = self._compareTensors(x._indices(), y._indices(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not indices_result:
assert debug_msg_indices is not None
debug_msg = "Sparse tensor indices failed to compare as equal! " + debug_msg_indices
super().assertTrue(indices_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
values_result, debug_msg_values = self._compareTensors(x._values(), y._values(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not values_result:
assert debug_msg_values is not None
debug_msg = "Sparse tensor values failed to compare as equal! " + debug_msg_values
super().assertTrue(values_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif x.is_quantized and y.is_quantized:
self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
if x.qscheme() == torch.per_tensor_affine:
self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_zero_point(), y.q_zero_point(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif x.qscheme() == torch.per_channel_affine:
self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
result, debug_msg_compare = self._compareTensors(x.int_repr().to(torch.int32),
y.int_repr().to(torch.int32),
atol=atol, rtol=rtol,
exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_compare is not None
debug_msg = "Quantized representations failed to compare as equal! " + debug_msg_compare
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
result, debug_msg_generic = self._compareTensors(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_generic is not None
                    debug_msg = "Tensors failed to compare as equal! " + debug_msg_generic
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif isinstance(x, (np.ndarray, torch.Tensor)) or isinstance(y, (np.ndarray, torch.Tensor)):
def maybe_to_tensor(a: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
if not isinstance(a, np.ndarray):
return a
try:
return torch.from_numpy(a)
except TypeError:
# This happens if the dtype is non-numeric or not supported by torch
return a
def maybe_to_list(a: Any) -> Any:
if not isinstance(a, (np.ndarray, torch.Tensor)):
return a
return a.tolist()
x = maybe_to_tensor(x)
y = maybe_to_tensor(y)
if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
self.assertEqual(
x, y, atol=atol, rtol=rtol, msg=msg, exact_dtype=exact_dtype, exact_device=exact_device
)
else:
# In case we can't convert the array to a tensor, we fall back to comparing x and y as iterables
self.assertEqual(
maybe_to_list(x),
maybe_to_list(y),
atol=atol,
rtol=rtol,
msg=msg,
exact_dtype=exact_dtype,
exact_device=exact_device
)
elif isinstance(x, string_classes) and isinstance(y, string_classes):
debug_msg = ("Attempted to compare [string] types: "
f"Expected: {repr(x)}; Actual: {repr(y)}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif type(x) == set and type(y) == set:
debug_msg = ("Attempted to compare [set] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif self._is_dict(x) and self._is_dict(y):
if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
else:
self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
key_list = list(x.keys())
self.assertEqual([x[k] for k in key_list],
[y[k] for k in key_list],
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, type) and isinstance(y, type):
# See TestTorch.test_assert_equal_generic_meta
debug_msg = ("Attempted to compare [type] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif is_iterable(x) and is_iterable(y):
debug_msg = ("Attempted to compare the lengths of [iterable] types: "
f"Expected: {len(x)}; Actual: {len(y)}.")
super().assertEqual(len(x), len(y), msg=self._get_assert_msg(msg, debug_msg=debug_msg))
for x_, y_ in zip(x, y):
self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, bool) and isinstance(y, bool):
super().assertTrue(x == y, msg=msg)
# Scalar x Scalar
elif isinstance(x, Number) and isinstance(y, Number):
result, debug_msg_scalars = self._compareScalars(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
if not result:
assert debug_msg_scalars is not None
debug_msg = "Scalars failed to compare as equal! " + debug_msg_scalars
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
super().assertEqual(x, y, msg=msg)
def assertNotEqual(self, x, y, msg: Optional[str] = None, *, # type: ignore[override]
atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
with self.assertRaises(AssertionError, msg=msg):
self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
        # This API is used to simulate the deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaises(self, expected_exception, *args, **kwargs):
if self._ignore_not_implemented_error:
context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
AssertRaisesContextIgnoreNotImplementedError(expected_exception, self) # type: ignore[call-arg]
try:
return context.handle('assertRaises', args, kwargs) # type: ignore[union-attr]
finally:
# see https://bugs.python.org/issue23890
context = None
else:
return super().assertRaises(expected_exception, *args, **kwargs)
# Reimplemented to provide special behavior when
# _ignore_not_implemented_error is True
def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
if self._ignore_not_implemented_error:
context = AssertRaisesContextIgnoreNotImplementedError( # type: ignore[call-arg]
expected_exception, self, expected_regex)
return context.handle('assertRaisesRegex', args, kwargs) # type: ignore[attr-defined]
else:
return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
# TODO: Support context manager interface
# NB: The kwargs forwarding to callable robs the 'subname' parameter.
# If you need it, manually apply your callable in a lambda instead.
def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
subname = None
if 'subname' in kwargs:
subname = kwargs['subname']
del kwargs['subname']
try:
callable(*args, **kwargs)
except exc_type as e:
self.assertExpected(str(e), subname)
return
        # Don't put this in the try block; if exc_type is AssertionError, the
        # except clause above would swallow the failure from self.fail().
self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
@contextmanager
def assertWarnsOnceRegex(self, category, regex=''):
"""Context manager for code that *must always* warn
This filters expected warnings from the test and fails if
the expected warning is not caught. It uses set_warn_always() to force
TORCH_WARN_ONCE to behave like TORCH_WARN
"""
pattern = re.compile(regex)
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
yield
if len(ws) == 0:
self.fail('no warning caught')
self.assertTrue(any([type(w.message) is category for w in ws]))
self.assertTrue(
any([re.match(pattern, str(w.message)) for w in ws]),
f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
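    # A minimal usage sketch for assertWarnsOnceRegex (hypothetical call and
    # warning text; note that the regex is matched against the start of the
    # warning message via re.match):
    #
    #   with self.assertWarnsOnceRegex(UserWarning, "this API is deprecated"):
    #       some_call_that_warns()  # placeholder for code expected to warn once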
def assertExpected(self, s, subname=None):
r"""
Test that a string matches the recorded contents of a file
derived from the name of this test and subname. This file
is placed in the 'expect' directory in the same directory
as the test script. You can automatically update the recorded test
output using --accept.
If you call this multiple times in a single function, you must
give a unique subname each time.
"""
if not isinstance(s, str):
raise TypeError("assertExpected is strings only")
def remove_prefix(text, prefix):
if text.startswith(prefix):
return text[len(prefix):]
return text
# NB: we take __file__ from the module that defined the test
# class, so we place the expect directory where the test script
# lives, NOT where test/common_utils.py lives. This doesn't matter in
# PyTorch where all test scripts are in the same directory as
# test/common_utils.py, but it matters in onnx-pytorch
module_id = self.__class__.__module__
munged_id = remove_prefix(self.id(), module_id + ".")
test_file = os.path.realpath(sys.modules[module_id].__file__)
expected_file = os.path.join(os.path.dirname(test_file),
"expect",
munged_id)
subname_output = ""
if subname:
expected_file += "-" + subname
subname_output = " ({})".format(subname)
expected_file += ".expect"
expected = None
def accept_output(update_type):
print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
with open(expected_file, 'w') as f:
# Adjust for producer_version, leave s unmodified
                s_tag = re.sub(r'(producer_version): "[0-9.]*"',
                               r'\1: "CURRENT_VERSION"', s)
f.write(s_tag)
try:
with open(expected_file) as f:
expected = f.read()
except IOError as e:
if e.errno != errno.ENOENT:
raise
elif expecttest.ACCEPT:
return accept_output("output")
else:
raise RuntimeError(
("I got this output for {}{}:\n\n{}\n\n"
"No expect file exists; to accept the current output, run:\n"
"python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
# a hack for JIT tests
if IS_WINDOWS:
expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
# Adjust for producer_version
expected = expected.replace(
'producer_version: "CURRENT_VERSION"',
'producer_version: "{}"'.format(torch.onnx.producer_version)
)
if expecttest.ACCEPT:
if expected != s:
return accept_output("updated output")
else:
if hasattr(self, "assertMultiLineEqual"):
# Python 2.7 only
# NB: Python considers lhs "old" and rhs "new".
self.assertMultiLineEqual(expected, s)
else:
self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# run code in subprocess and capture exceptions.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove IN_CI flag since this is a wrapped test process.
# IN_CI flag should be set in the parent process only.
if "IN_CI" in env.keys():
del env["IN_CI"]
(stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
def download_file(url, binary=True):
from urllib.parse import urlsplit
from urllib import request, error
filename = os.path.basename(urlsplit(url)[2])
data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
path = os.path.join(data_dir, filename)
if os.path.exists(path):
return path
try:
data = request.urlopen(url, timeout=15).read()
with open(path, 'wb' if binary else 'w') as f:
f.write(data)
return path
except error.URLError as e:
msg = "could not download test file '{}'".format(url)
warnings.warn(msg, RuntimeWarning)
raise unittest.SkipTest(msg) from e
def find_free_port():
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('localhost', 0))
_, port = sock.getsockname()
return port
# Errors that we can get in c10d initialization and for which we should retry tests.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."
def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
"""Reruns a test if the test returns a RuntimeError and the exception
matches exactly with one of the strings in connect_errors."""
# This if block is executed when using this function as a decorator with arguments.
if func is None:
return partial(retry_on_connect_failures, connect_errors=connect_errors)
@wraps(func)
def wrapper(*args, **kwargs):
tries_remaining = 10
while True:
try:
return func(*args, **kwargs)
except RuntimeError as error:
if str(error) in connect_errors:
tries_remaining -= 1
if tries_remaining == 0:
raise
time.sleep(random.random())
continue
raise
return wrapper
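# A minimal usage sketch for retry_on_connect_failures (hypothetical test
# method; the test is rerun when it raises a RuntimeError matching one of the
# connect_errors entries):
#
#   @retry_on_connect_failures
#   def test_init_process_group(self):
#       ...  # c10d setup that may transiently fail with "Address already in use"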
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
def deco_retry(f):
@wraps(f)
def f_retry(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
print(msg)
time.sleep(mdelay)
mtries -= 1
            try:
                return f(*args, **kwargs)
            except ExceptionToCheck as e:
                if skip_after_retries:
                    raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e
                raise
return f_retry # true decorator
return deco_retry
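# A minimal usage sketch for the retry decorator (hypothetical flaky helper;
# up to `tries` attempts, sleeping `delay` seconds between them):
#
#   @retry(ConnectionError, tries=3, delay=1)
#   def fetch_remote_resource():
#       ...  # may raise ConnectionError transiently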
# Methods for matrix generation
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
assert rank <= l
A = torch.randn(l, l, dtype=dtype, device=device)
u, s, vh = torch.linalg.svd(A, full_matrices=False)
for i in range(l):
if i >= rank:
s[i] = 0
elif s[i] == 0:
s[i] = 1
return (u * s.to(dtype).unsqueeze(-2)) @ vh
def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
"""
Returns a random rectangular matrix (batch of matrices)
with singular values sampled from a Gaussian with
mean `mean` and standard deviation `sigma`.
The smaller the `sigma`, the better conditioned
the output matrix is.
"""
primitive_dtype = {
torch.float: torch.float,
torch.double: torch.double,
torch.cfloat: torch.float,
torch.cdouble: torch.double
}
x = torch.rand(shape, dtype=dtype, device=device)
m = x.size(-2)
n = x.size(-1)
u, _, vh = torch.linalg.svd(x, full_matrices=False)
s = (torch.randn(*(shape[:-2] + (min(m, n),)), dtype=primitive_dtype[dtype], device=device) * sigma + mean) \
.sort(-1, descending=True).values.to(dtype)
return (u * s.unsqueeze(-2)) @ vh
# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.transpose(-2, -1)).div_(2)
return A
# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
    t = (t + t.transpose(-2, -1)).div_(2)
return t
def random_hermitian_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
A = (A + A.transpose(-2, -1).conj()).div_(2)
return A
def random_symmetric_psd_matrix(l, *batches, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1))
def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
"""
Returns a batch of random Hermitian semi-positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
return torch.matmul(A, A.conj().transpose(-2, -1))
# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1)) \
+ torch.eye(matrix_size, dtype=dtype, device=device) * 1e-5
# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
t = torch.matmul(t, t.transpose(-2, -1))
i = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
return t + i
def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
"""
Returns a batch of random Hermitian positive-definite matrices.
The shape of the result is batch_dims + (matrix_size, matrix_size)
The following example creates a tensor of size 2 x 4 x 3 x 3
>>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
"""
A = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
dtype=dtype, device=device)
return torch.matmul(A, A.transpose(-2, -1).conj()) \
+ torch.eye(matrix_size, dtype=dtype, device=device)
# TODO: remove this (prefer make_fullrank_matrices_with_distinct_singular_values below)
def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims,
**kwargs):
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
if silent and not torch._C.has_lapack:
return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)
A = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device)
u, _, vh = torch.linalg.svd(A, full_matrices=False)
real_dtype = A.real.dtype if A.dtype.is_complex else A.dtype
s = torch.arange(1., matrix_size + 1, dtype=real_dtype, device=device).mul_(1.0 / (matrix_size + 1))
return (u * s.to(A.dtype)) @ vh
# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
# Shape must be a square matrix or batch of square matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype):
assert shape[-1] == shape[-2]
t = make_tensor(shape, device=device, dtype=dtype)
u, _, vh = torch.linalg.svd(t, full_matrices=False)
# TODO: improve the handling of complex tensors here
real_dtype = t.real.dtype if t.dtype.is_complex else t.dtype
s = torch.arange(1., shape[-1] + 1, dtype=real_dtype, device=device).mul_(1.0 / (shape[-1] + 1))
return (u * s.to(dtype)) @ vh
def random_matrix(rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices.
Parameters:
dtype - the data type
device - the device kind
singular - when True, the output will be singular
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
silent = kwargs.get("silent", False)
singular = kwargs.get("singular", False)
if silent and not torch._C.has_lapack:
return torch.ones(rows, columns, dtype=dtype, device=device)
A = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
u, _, vh = torch.linalg.svd(A, full_matrices=False)
k = min(rows, columns)
s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
if singular:
# make matrix singular
s[k - 1] = 0
if k > 2:
# increase the order of singularity so that the pivoting
# in LU factorization will be non-trivial
s[0] = 0
return (u * s.unsqueeze(-2)) @ vh
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
"""Return rectangular matrix or batches of rectangular matrices with
given rank.
"""
B = random_matrix(rows, rank, *batch_dims, **kwargs)
C = random_matrix(rank, columns, *batch_dims, **kwargs)
return B.matmul(C)
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
"""Return rectangular random sparse matrix within given density.
    The density of the result approaches the given density as the size
    of the matrix increases, provided the specified density is relatively
    small but higher than min(rows, columns)/(rows * columns)
    for non-singular matrices.
"""
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
singular = kwargs.get("singular", False)
k = min(rows, columns)
nonzero_elements = max(min(rows, columns), int(rows * columns * density))
row_indices = [i % rows for i in range(nonzero_elements)]
column_indices = [i % columns for i in range(nonzero_elements)]
random.shuffle(column_indices)
indices = [row_indices, column_indices]
values = torch.randn(nonzero_elements, dtype=dtype, device=device)
# ensure that the diagonal dominates
values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
indices_tensor = torch.tensor(indices)
A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device)
return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
"""Return random sparse positive-definite matrix with given density.
The eigenvalues of the matrix are defined as::
arange(1, matrix_size+1)/matrix_size
Algorithm:
A = diag(arange(1, matrix_size+1)/matrix_size)
while <A density is smaller than required>:
<choose random i, j in range(matrix_size), theta in [0, 2*pi]>
R = <rotation matrix (i,j,theta)>
A = R^T A R
"""
import math
torch = kwargs.get('torch', globals()['torch'])
dtype = kwargs.get('dtype', torch.double)
device = kwargs.get('device', 'cpu')
data = dict([((i, i), float(i + 1) / matrix_size)
for i in range(matrix_size)])
def multiply(data, N, i, j, cs, sn, left=True):
for k in range(N):
if left:
ik, jk = (k, i), (k, j)
else:
ik, jk = (i, k), (j, k)
aik, ajk = data.get(ik, 0), data.get(jk, 0)
aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
if aik:
data[ik] = aik
else:
data.pop(ik, None)
if ajk:
data[jk] = ajk
else:
data.pop(jk, None)
target_nnz = density * matrix_size * matrix_size
while len(data) < target_nnz:
i = random.randint(0, matrix_size - 1)
j = random.randint(0, matrix_size - 1)
if i != j:
theta = random.uniform(0, 2 * math.pi)
cs = math.cos(theta)
sn = math.sin(theta)
multiply(data, matrix_size, i, j, cs, sn, left=True)
multiply(data, matrix_size, i, j, cs, sn, left=False)
icoords, jcoords, values = [], [], []
for (i, j), v in sorted(data.items()):
icoords.append(i)
jcoords.append(j)
values.append(v)
indices_tensor = torch.tensor([icoords, jcoords])
return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
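# Illustrative usage (a sketch, not part of the original file):
#   P = random_sparse_pd_matrix(5, density=0.5)
# returns a sparse COO 5x5 positive-definite matrix whose eigenvalues are
# arange(1, 6) / 5; random Givens-style rotations are applied until the
# requested density is reached, which preserves those eigenvalues.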
def do_test_dtypes(self, dtypes, layout, device):
for dtype in dtypes:
if dtype != torch.float16:
out = torch.zeros((2, 3), dtype=dtype, layout=layout, device=device)
self.assertIs(dtype, out.dtype)
self.assertIs(layout, out.layout)
self.assertEqual(device, out.device)
def do_test_empty_full(self, dtypes, layout, device):
shape = torch.Size([2, 3])
def check_value(tensor, dtype, layout, device, value, requires_grad):
self.assertEqual(shape, tensor.shape)
self.assertIs(dtype, tensor.dtype)
self.assertIs(layout, tensor.layout)
self.assertEqual(tensor.requires_grad, requires_grad)
if tensor.is_cuda and device is not None:
self.assertEqual(device, tensor.device)
if value is not None:
fill = tensor.new(shape).fill_(value)
self.assertEqual(tensor, fill)
def get_int64_dtype(dtype):
module = '.'.join(str(dtype).split('.')[1:-1])
if not module:
return torch.int64
return operator.attrgetter(module)(torch).int64
default_dtype = torch.get_default_dtype()
check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
for dtype in dtypes:
for rg in {dtype.is_floating_point, False}:
int64_dtype = get_int64_dtype(dtype)
v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
check_value(v, dtype, layout, device, None, rg)
out = v.new()
check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, None, rg)
check_value(v.new_empty(shape), dtype, layout, device, None, False)
check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
check_value(torch.empty_like(v), dtype, layout, device, None, False)
check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, None, False)
if dtype is not torch.float16 and layout != torch.sparse_coo:
fv = 3
v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
check_value(v, dtype, layout, device, fv, rg)
check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
out = v.new()
check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
dtype, layout, device, fv + 2, rg)
check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 3, False)
check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
check_value(torch.full_like(v, fv + 5,
dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
int64_dtype, layout, device, fv + 5, False)
# this helper method is to recursively
# clone the tensor-type input of operators tested by OpInfo
def clone_input_helper(input):
if isinstance(input, torch.Tensor):
return torch.clone(input)
if isinstance(input, Sequence):
return tuple(map(clone_input_helper, input))
return input
THESE_TAKE_WAY_TOO_LONG = {
'test_Conv3d_groups',
'test_conv_double_backward',
'test_conv_double_backward_groups',
'test_Conv3d_dilated',
'test_Conv3d_stride_padding',
'test_Conv3d_dilated_strided',
'test_Conv3d',
'test_Conv2d_dilated',
'test_ConvTranspose3d_dilated',
'test_ConvTranspose2d_dilated',
'test_snli',
'test_Conv2d',
'test_Conv2d_padding',
'test_ConvTranspose2d_no_bias',
'test_ConvTranspose2d',
'test_ConvTranspose3d',
'test_Conv2d_no_bias',
'test_matmul_4d_4d',
'test_multinomial_invalid_probs',
}
running_script_path = None
def set_running_script_path():
global running_script_path
try:
running_file = os.path.abspath(os.path.realpath(sys.argv[0]))
if running_file.endswith('.py'): # skip if the running file is not a script
running_script_path = running_file
except Exception:
pass
def check_test_defined_in_running_script(test_case):
if running_script_path is None:
return
test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \
"is not defined in the running script \"{}\", but in \"{}\". Did you " \
"accidentally import a unittest.TestCase from another file?".format(
test_case.id(), running_script_path, test_case_class_file)
def load_tests(loader, tests, pattern):
set_running_script_path()
test_suite = unittest.TestSuite()
for test_group in tests:
for test in test_group:
check_test_defined_in_running_script(test)
test_suite.addTest(test)
return test_suite
class BytesIOContext(io.BytesIO):
def __enter__(self):
return self
def __exit__(self, *args):
pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
GRADCHECK_NONDET_TOL = 1e-12
def gradcheck(fn, inputs, **kwargs):
# Wrapper around gradcheck that enables certain keys by default.
# Use this testing-internal gradcheck instead of autograd.gradcheck so that new features like vmap and
# forward-mode AD are tested by default. We create this wrapper because we'd like to keep new checks
# to be disabled to default for the public-facing api to avoid breaking user code.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradcheck.
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradcheck(fn, inputs, **kwargs)
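# Illustrative call (a sketch, not part of the original file):
#   x = torch.randn(3, dtype=torch.double, requires_grad=True)
#   gradcheck(torch.sin, (x,))
# forwards to torch.autograd.gradcheck with check_batched_grad=True and fast_mode=True
# unless the caller overrides them, or PYTORCH_TEST_WITH_SLOW_GRADCHECK=ON disables fast_mode.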
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
# Wrapper around gradgradcheck that enables certain keys by default
# See gradcheck above for an explanation of why we need something like this.
#
# All PyTorch devs doing testing should use this wrapper instead of autograd.gradgradcheck
default_values = {
"check_batched_grad": True,
"fast_mode": True,
}
if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
default_values["fast_mode"] = False
for key, value in default_values.items():
# default value override values explicitly set to None
k = kwargs.get(key, None)
kwargs[key] = k if k is not None else value
return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
# call assert function rather than returning a bool since it's nicer
# if we get whether this failed on the gradcheck or the gradgradcheck.
test_case.assertTrue(gradcheck(apply_fn, inputs, **kwargs))
test_case.assertTrue(gradgradcheck(apply_fn, inputs, **kwargs))
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
old_cwd = os.getcwd()
try:
os.chdir(path)
yield
finally:
os.chdir(old_cwd)
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
dtype2prec_DONTUSE = {torch.float: 1e-5,
torch.double: 1e-5,
torch.half: 1e-2,
torch.bfloat16: 1e-1}
def _wrap_warn_once(regex):
def decorator(fn):
def inner(self, *args, **kwargs):
with self.assertWarnsOnceRegex(UserWarning, regex):
fn(self, *args, **kwargs)
return inner
return decorator
# This is a wrapper that wraps a test to run this test twice, one with
# coalesced=True, another with coalesced=False for coalesced/uncoalesced sparse tensors.
def coalescedonoff(f):
@wraps(f)
def wrapped(self, *args, **kwargs):
f(self, *args, **kwargs, coalesced=True)
f(self, *args, **kwargs, coalesced=False)
return wrapped
@contextlib.contextmanager
def disable_gc():
if gc.isenabled():
try:
gc.disable()
yield
finally:
gc.enable()
else:
yield
def find_library_location(lib_name: str) -> Path:
# return the shared library file in the installed folder if exist,
# else the file in the build folder
torch_root = Path(torch.__file__).resolve().parent
path = torch_root / 'lib' / lib_name
if os.path.exists(path):
return path
torch_root = Path(__file__).resolve().parent.parent.parent
return torch_root / 'build' / 'lib' / lib_name
def sandcastle_skip(reason):
"""
Similar to unittest.skip, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
return wrapper
return decorator
def mock_wrapper(method):
"""
Returns a function that calls the real implementation of a method
in addition to passing args to a mock object.
"""
mock = MagicMock()
@wraps(method)
def wrapper(self, *args, **kwargs):
mock(*args, **kwargs)
return method(self, *args, **kwargs)
wrapper.mock = mock # type: ignore[attr-defined]
return wrapper
def get_tensors_from(args, kwargs):
""" Returns a set of all Tensor objects in the given args and kwargs. """
return set([arg for arg in args if isinstance(arg, Tensor)] +
[v for v in kwargs.values() if isinstance(v, Tensor)])
def has_breakpad():
# We always build with breakpad in CI
if IS_IN_CI:
return True
# If not on a special build, check that the library was actually linked in
try:
torch._C._get_minidump_directory() # type: ignore[attr-defined]
return True
except RuntimeError as e:
if "Minidump handler is uninintialized" in str(e):
return True
return False
def sandcastle_skip_if(condition, reason):
"""
Similar to unittest.skipIf, however in the sandcastle environment it just
"passes" the test instead to avoid creating tasks complaining about tests
skipping continuously.
"""
def decorator(func):
if not IS_SANDCASTLE and condition:
func.__unittest_skip__ = True
func.__unittest_skip_why__ = reason
return func
@wraps(func)
def wrapper(*args, **kwargs):
if condition and IS_SANDCASTLE:
print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
return
else:
return func(*args, **kwargs)
return wrapper
return decorator
def dtype_name(dtype):
""" Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """
return str(dtype).split('.')[1]
|
util.py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Various low-level utilities.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import datetime
import json
import math
import os
import re
import select
import signal
import subprocess
import struct
import sys
import time
import errno
import threading
import shutil
import stat
import shlex
import operator
import collections
import six
from six.moves import xrange
from .extern import minify_json
nan = float('nan')
inf = float('inf')
WIN = (os.name == 'nt')
if not WIN:
try:
from select import PIPE_BUF
except ImportError:
# PIPE_BUF is not available on Python 2.6
PIPE_BUF = os.pathconf('.', os.pathconf_names['PC_PIPE_BUF'])
TIMEOUT_RETCODE = -256
class UserError(Exception):
pass
class ParallelFailure(Exception):
"""
Custom exception to work around a multiprocessing bug
https://bugs.python.org/issue9400
"""
def __new__(cls, message, exc_cls, traceback_str):
self = Exception.__new__(cls)
self.message = message
self.exc_cls = exc_cls
self.traceback_str = traceback_str
return self
def __reduce__(self):
return (ParallelFailure, (self.message, self.exc_cls, self.traceback_str))
def __str__(self):
return "{0}: {1}\n {2}".format(self.exc_cls.__name__,
self.message,
self.traceback_str.replace("\n", "\n "))
def reraise(self):
if self.exc_cls is UserError:
raise UserError(self.message)
else:
raise self
def human_list(l):
"""
Formats a list of strings in a human-friendly way.
"""
l = ["'{0}'".format(x) for x in l]
if len(l) == 0:
return 'nothing'
elif len(l) == 1:
return l[0]
elif len(l) == 2:
return ' and '.join(l)
else:
return ', '.join(l[:-1]) + ' and ' + l[-1]
def human_float(value, significant=3, truncate_small=None, significant_zeros=False):
"""
Return a string representing a float with human friendly significant digits.
Switches to scientific notation for too large/small numbers.
If `truncate_small`, then leading zeros of numbers < 1 are counted as
significant. If not `significant_zeros`, trailing unnecessary zeros are
stripped.
"""
if value == 0:
return "0"
elif math.isinf(value) or math.isnan(value):
return "{}".format(value)
elif value < 0:
sign = "-"
value = -value
else:
sign = ""
logv = math.log10(value)
magnitude = int(math.floor(logv)) + 1
if truncate_small is not None:
magnitude = max(magnitude, -truncate_small + 1)
num_digits = significant - magnitude
if magnitude <= -5 or magnitude >= 9:
# Too many digits, use scientific notation
fmt = "{{0:.{0}e}}".format(significant)
elif value == int(value):
value = int(round(value, num_digits))
fmt = "{0:d}"
elif num_digits <= 0:
value = int(round(value, num_digits))
fmt = "{0:d}"
else:
fmt = "{{0:.{0}f}}".format(num_digits)
formatted = sign + fmt.format(value)
if not significant_zeros and '.' in formatted and 'e' not in fmt:
formatted = formatted.rstrip('0')
if formatted[-1] == '.':
formatted = formatted[:-1]
if significant_zeros and '.' not in formatted:
if len(formatted) < significant:
formatted += "." + "0"*(significant - len(formatted))
return formatted
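# Illustrative outputs (worked by hand from the code above, not part of the original file):
#   human_float(3.14159, 3) -> "3.14"   (three significant digits)
#   human_float(1234.5, 3)  -> "1230"   (num_digits <= 0, so the value is rounded to an int)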
def human_file_size(size, err=None):
"""
Returns a human-friendly string representing a file size
that is 2-4 characters long.
For example, depending on the number of bytes given, can be one
of::
256b
64k
1.1G
Parameters
----------
size : int
The size of the file (in bytes)
Returns
-------
size : str
A human-friendly representation of the size of the file
"""
size = float(size)
if size < 1:
size = 0.0
suffixes = ' kMGTPEH'
if size == 0:
num_scale = 0
else:
num_scale = int(math.floor(math.log(size) / math.log(1000)))
if num_scale > 7:
suffix = '?'
else:
suffix = suffixes[num_scale].strip()
scale = int(math.pow(1000, num_scale))
value = size / scale
str_value = human_float(value, 3)
if err is None:
return "{0:s}{1}".format(str_value, suffix)
else:
str_err = human_float(err / scale, 1, truncate_small=2)
return "{0:s}±{1:s}{2}".format(str_value, str_err, suffix)
def human_time(seconds, err=None):
"""
Returns a short, human-friendly time string.
Depending on the number of seconds given, can be one of::
1w 3d
2d 4h
1h 5m
1m 4s
15s
Will be in color if console coloring is turned on.
Parameters
----------
seconds : int
The number of seconds to represent
Returns
-------
time : str
A human-friendly representation of the given number of seconds.
"""
units = [
('ns', 0.000000001),
('μs', 0.000001),
('ms', 0.001),
('s', 1),
('m', 60),
('h', 60 * 60),
('d', 60 * 60 * 24),
('w', 60 * 60 * 24 * 7),
('y', 60 * 60 * 24 * 7 * 52),
('C', 60 * 60 * 24 * 7 * 52 * 100)
]
seconds = float(seconds)
for i in xrange(len(units) - 1):
if seconds < units[i+1][1]:
str_time = human_float(seconds / units[i][1], 3, significant_zeros=True)
if err is None:
return "{0:s}{1}".format(str_time, units[i][0])
else:
str_err = human_float(err / units[i][1], 1, truncate_small=2)
return "{0:s}±{1:s}{2}".format(str_time, str_err, units[i][0])
return '~0'
def human_value(value, unit, err=None):
"""
Formats a value in a given unit in a human friendly way.
Parameters
----------
value : anything
The value to format
unit : str
The unit the value is in. Currently understands `seconds` and `bytes`.
err : float, optional
Std. error in the value
"""
if isinstance(value, (int, float)):
if value != value:
# nan
display = "n/a"
elif unit == 'seconds':
display = human_time(value, err=err)
elif unit == 'bytes':
display = human_file_size(value, err=err)
else:
display = json.dumps(value)
if err is not None:
display += "±{:.2g}".format(err)
elif value is None:
display = "failed"
else:
display = json.dumps(value)
return display
def which(filename, paths=None):
"""
Emulates the UNIX `which` command in Python.
Raises an IOError if no result is found.
"""
# Hide traceback from expected exceptions in pytest reports
__tracebackhide__ = operator.methodcaller('errisinstance', IOError)
if os.path.sep in filename:
locations = ['']
elif paths is not None:
locations = paths
else:
locations = os.environ.get("PATH", "").split(os.pathsep)
if WIN:
# On windows, an entry in %PATH% may be quoted
locations = [path[1:-1] if len(path) > 2 and path[0] == path[-1] == '"' else path
for path in locations]
if WIN:
filenames = [filename + ext for ext in ('.exe', '.bat', '.com', '')]
else:
filenames = [filename]
candidates = []
for location in locations:
for filename in filenames:
candidate = os.path.join(location, filename)
if os.path.isfile(candidate) or os.path.islink(candidate):
candidates.append(candidate)
if len(candidates) == 0:
if paths is None:
loc_info = 'PATH'
else:
loc_info = os.pathsep.join(locations)
raise IOError("Could not find '{0}' in {1}".format(filename, loc_info))
return candidates[0]
def has_command(filename):
"""
Returns `True` if the commandline utility exists.
"""
try:
which(filename)
except IOError:
return False
else:
return True
class ProcessError(subprocess.CalledProcessError):
def __init__(self, args, retcode, stdout, stderr):
self.args = args
self.retcode = retcode
self.stdout = stdout
self.stderr = stderr
def __str__(self):
if self.retcode == TIMEOUT_RETCODE:
return "Command '{0}' timed out".format(
' '.join(self.args))
else:
return "Command '{0}' returned non-zero exit status {1}".format(
' '.join(self.args), self.retcode)
def check_call(args, valid_return_codes=(0,), timeout=600, dots=True,
display_error=True, shell=False, env=None, cwd=None):
"""
Runs the given command in a subprocess, raising ProcessError if it
fails.
See `check_output` for parameters.
"""
# Hide traceback from expected exceptions in pytest reports
__tracebackhide__ = operator.methodcaller('errisinstance', ProcessError)
check_output(
args, valid_return_codes=valid_return_codes, timeout=timeout,
dots=dots, display_error=display_error, shell=shell, env=env,
cwd=cwd)
class DebugLogBuffer(object):
def __init__(self, log):
self.buf = []
self.first = True
self.linebreak_re = re.compile(b'.*\n')
self.log = log
def __call__(self, c):
if c is None:
text = b"".join(self.buf)
del self.buf[:]
elif b'\n' in c:
m = self.linebreak_re.match(c)
j = m.end()
self.buf.append(c[:j])
text = b"".join(self.buf)
self.buf[:] = [c[j:]]
else:
self.buf.append(c)
return
text = text.decode('utf-8', 'replace')
if text.endswith('\n'):
text = text[:-1]
if text:
if self.first:
self.log.debug('OUTPUT -------->', continued=True)
self.first = False
self.log.debug(text, continued=True)
def check_output(args, valid_return_codes=(0,), timeout=600, dots=True,
display_error=True, shell=False, return_stderr=False,
env=None, cwd=None, redirect_stderr=False, return_popen=False):
"""
Runs the given command in a subprocess, raising ProcessError if it
fails. Returns stdout as a string on success.
Parameters
----------
valid_return_codes : list, optional
A list of return codes to ignore. Defaults to only ignoring zero.
Setting to None ignores all return codes.
timeout : number, optional
Kill the process if it does not produce any output in `timeout`
seconds. If `None`, there is no timeout.
Default: 10 min
dots : bool, optional
If `True` (default) write a dot to the console to show
progress as the subprocess outputs content. May also be
a callback function to call (with no arguments) to indicate
progress.
display_error : bool, optional
If `True` (default) display the stdout and stderr of the
subprocess when the subprocess returns an error code.
shell : bool, optional
If `True`, run the command through the shell. Default is
`False`.
return_stderr : bool, optional
If `True`, return both the (stdout, stderr, errcode) as a
tuple.
env : dict, optional
Specify environment variables for the subprocess.
cwd : str, optional
Specify the current working directory to use when running the
process.
redirect_stderr : bool, optional
Whether to redirect stderr to stdout. In this case the returned
``stderr`` (when return_stderr == True) is an empty string.
return_popen : bool, optional
Whether to return immediately after subprocess.Popen.
Returns
-------
stdout, stderr, retcode : when return_stderr == True
stdout : otherwise
"""
from .console import log
# Hide traceback from expected exceptions in pytest reports
__tracebackhide__ = operator.methodcaller('errisinstance', ProcessError)
def get_content(header=None):
content = []
if header is not None:
content.append(header)
if redirect_stderr:
content.extend([
'OUTPUT -------->',
stdout[:-1]
])
else:
content.extend([
'STDOUT -------->',
stdout[:-1],
'STDERR -------->',
stderr[:-1]
])
return '\n'.join(content)
if isinstance(args, six.string_types):
args = [args]
log.debug("Running '{0}'".format(' '.join(args)))
if env and WIN and sys.version_info < (3,):
# Environment keys and values cannot be unicode
def _fix_env(s):
return s.encode('mbcs') if isinstance(s, unicode) else s
env = {_fix_env(k): _fix_env(v) for k, v in env.items()}
kwargs = dict(shell=shell, env=env, cwd=cwd,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if redirect_stderr:
kwargs['stderr'] = subprocess.STDOUT
if WIN:
kwargs['close_fds'] = False
kwargs['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
else:
kwargs['close_fds'] = True
posix = getattr(os, 'setpgid', None)
if posix:
# Run the subprocess in a separate process group, so that we
# can kill it and all child processes it spawns e.g. on
# timeouts. Note that subprocess.Popen will wait until exec()
# before returning in parent process, so there is no race
# condition in setting the process group vs. calls to os.killpg
kwargs['preexec_fn'] = lambda: os.setpgid(0, 0)
proc = subprocess.Popen(args, **kwargs)
if return_popen:
return proc
last_dot_time = time.time()
stdout_chunks = []
stderr_chunks = []
is_timeout = False
if log.is_debug_enabled():
debug_log = DebugLogBuffer(log)
else:
debug_log = lambda c: None
if WIN:
start_time = [time.time()]
was_timeout = [False]
def stdout_reader_run():
while True:
c = proc.stdout.read(1)
if not c:
break
start_time[0] = time.time()
stdout_chunks.append(c)
debug_log(c)
def stderr_reader_run():
while True:
c = proc.stderr.read(1)
if not c:
break
start_time[0] = time.time()
stderr_chunks.append(c)
debug_log(c)
def watcher_run():
while proc.returncode is None:
time.sleep(0.1)
if timeout is not None and time.time() - start_time[0] > timeout:
was_timeout[0] = True
proc.send_signal(signal.CTRL_BREAK_EVENT)
watcher = threading.Thread(target=watcher_run)
watcher.start()
stdout_reader = threading.Thread(target=stdout_reader_run)
stdout_reader.start()
if not redirect_stderr:
stderr_reader = threading.Thread(target=stderr_reader_run)
stderr_reader.start()
try:
proc.wait()
finally:
if proc.returncode is None:
proc.terminate()
proc.wait()
watcher.join()
if not redirect_stderr:
stderr_reader.join()
stdout_reader.join()
proc.stdout.close()
if not redirect_stderr:
proc.stderr.close()
is_timeout = was_timeout[0]
else:
try:
if posix and is_main_thread():
# Forward signals related to Ctrl-Z handling; the child
# process is in a separate process group so it won't receive
# these automatically from the terminal
def sig_forward(signum, frame):
_killpg_safe(proc.pid, signum)
if signum == signal.SIGTSTP:
os.kill(os.getpid(), signal.SIGSTOP)
signal.signal(signal.SIGTSTP, sig_forward)
signal.signal(signal.SIGCONT, sig_forward)
fds = {
proc.stdout.fileno(): stdout_chunks
}
if not redirect_stderr:
fds[proc.stderr.fileno()] = stderr_chunks
while proc.poll() is None:
try:
if timeout is None:
rlist, wlist, xlist = select.select(
list(fds.keys()), [], [])
else:
rlist, wlist, xlist = select.select(
list(fds.keys()), [], [], timeout)
except select.error as err:
if err.args[0] == errno.EINTR:
# interrupted by signal handler; try again
continue
raise
if len(rlist) == 0:
# We got a timeout
is_timeout = True
break
for f in rlist:
output = os.read(f, PIPE_BUF)
fds[f].append(output)
debug_log(output)
if dots and time.time() - last_dot_time > 0.5:
if dots is True:
log.dot()
elif dots:
dots()
last_dot_time = time.time()
finally:
if posix and is_main_thread():
# Restore signal handlers
signal.signal(signal.SIGTSTP, signal.SIG_DFL)
signal.signal(signal.SIGCONT, signal.SIG_DFL)
if proc.returncode is None:
# Timeout or another exceptional condition occurred, and
# the program is still running.
if posix:
# Terminate the whole process group
_killpg_safe(proc.pid, signal.SIGTERM)
for j in range(10):
time.sleep(0.1)
if proc.poll() is not None:
break
else:
# Didn't terminate within 1 sec, so kill it
_killpg_safe(proc.pid, signal.SIGKILL)
else:
proc.terminate()
proc.wait()
proc.stdout.flush()
if not redirect_stderr:
proc.stderr.flush()
stdout_chunks.append(proc.stdout.read())
if not redirect_stderr:
stderr_chunks.append(proc.stderr.read())
proc.stdout.close()
if not redirect_stderr:
proc.stderr.close()
debug_log(None)
stdout = b''.join(stdout_chunks)
stderr = b''.join(stderr_chunks)
stdout = stdout.decode('utf-8', 'replace')
stderr = stderr.decode('utf-8', 'replace')
if is_timeout:
retcode = TIMEOUT_RETCODE
else:
retcode = proc.returncode
if valid_return_codes is not None and retcode not in valid_return_codes:
header = 'Error running {0}'.format(' '.join(args))
if display_error:
if log.is_debug_enabled():
# Output was already printed
log.error(header)
else:
log.error(get_content(header))
raise ProcessError(args, retcode, stdout, stderr)
if return_stderr:
return (stdout, stderr, retcode)
else:
return stdout
def _killpg_safe(pgid, signo):
"""
Same as os.killpg, but tolerates OSX/BSD raising EPERM when the process group has already terminated.
"""
try:
os.killpg(pgid, signo)
except OSError as exc:
if exc.errno == errno.EPERM:
# OSX/BSD may raise EPERM on killpg if the process group
# already terminated
pass
else:
raise
def is_main_thread():
"""
Return True if the current thread is the main thread.
"""
if sys.version_info[0] >= 3:
return threading.current_thread() == threading.main_thread()
else:
return isinstance(threading.current_thread(), threading._MainThread)
def write_json(path, data, api_version=None, compact=False):
"""
Writes JSON to the given path, including indentation and sorting.
Parameters
----------
path : str
File name to write
data : object
Data to serialize as JSON
api_version : int, optional
API version number
compact : bool, optional
Whether to produce compact, non-human readable JSON.
Disables sorting and indentation.
"""
path = os.path.abspath(path)
dirname = long_path(os.path.dirname(path))
if not os.path.exists(dirname):
os.makedirs(dirname)
if api_version is not None:
data = dict(data)
data['version'] = api_version
open_kwargs = {}
if sys.version_info[0] >= 3:
open_kwargs['encoding'] = 'utf-8'
with long_path_open(path, 'w', **open_kwargs) as fd:
if not compact:
json.dump(data, fd, indent=4, sort_keys=True)
else:
json.dump(data, fd)
def load_json(path, api_version=None, cleanup=True):
"""
Loads JSON to the given path, ignoring any C-style comments.
"""
# Hide traceback from expected exceptions in pytest reports
__tracebackhide__ = operator.methodcaller('errisinstance', UserError)
path = os.path.abspath(path)
open_kwargs = {}
if sys.version_info[0] >= 3:
open_kwargs['encoding'] = 'utf-8'
with long_path_open(path, 'r', **open_kwargs) as fd:
content = fd.read()
if cleanup:
content = minify_json.json_minify(content)
content = content.replace(",]", "]")
content = content.replace(",}", "}")
try:
d = json.loads(content)
except ValueError as e:
raise UserError(
"Error parsing JSON in file '{0}': {1}".format(
path, six.text_type(e)))
if api_version is not None:
if 'version' in d:
if d['version'] < api_version:
raise UserError(
"{0} is stored in an old file format. Run "
"`asv update` to update it.".format(path))
elif d['version'] > api_version:
raise UserError(
"{0} is stored in a format that is newer than "
"what this version of asv understands. Update "
"asv to use this file.".format(path))
del d['version']
else:
raise UserError(
"No version specified in {0}.".format(path))
return d
def update_json(cls, path, api_version, cleanup=True):
"""
Perform JSON file format updates.
Parameters
----------
cls : object
Object containing methods update_to_X which updates
the given JSON tree from version X-1 to X.
path : str
Path to JSON file
api_version : int
The current API version
"""
# Hide traceback from expected exceptions in pytest reports
__tracebackhide__ = operator.methodcaller('errisinstance', UserError)
d = load_json(path, cleanup=cleanup)
if 'version' not in d:
raise UserError(
"No version specified in {0}.".format(path))
if d['version'] < api_version:
for x in six.moves.xrange(d['version'] + 1, api_version + 1):
d = getattr(cls, 'update_to_{0}'.format(x), lambda x: x)(d)
write_json(path, d, api_version)
elif d['version'] > api_version:
raise UserError(
"{0} is stored in a format that is newer than "
"what this version of asv understands. "
"Upgrade asv in order to use or add to "
"these results.".format(path))
def iter_chunks(s, n):
"""
Iterator that returns elements from s in chunks of size n.
"""
chunk = []
for x in s:
chunk.append(x)
if len(chunk) == n:
yield chunk
chunk = []
if len(chunk):
yield chunk
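# Illustrative usage (not part of the original file):
#   list(iter_chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]
# The final chunk may be shorter than n.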
def pick_n(items, n):
"""Pick n items, attempting to get equal index spacing.
"""
if not (n > 0):
raise ValueError("Invalid number of items to pick")
spacing = max(float(len(items)) / n, 1)
spaced = []
i = 0
while int(i) < len(items) and len(spaced) < n:
spaced.append(items[int(i)])
i += spacing
return spaced
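# Illustrative usage (not part of the original file):
#   pick_n(list(range(10)), 3) -> [0, 3, 6]
# The spacing is len(items)/n = 3.33, so indices 0, 3 and 6 are selected.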
def get_multiprocessing(parallel):
"""
If parallel indicates that we want to do multiprocessing,
imports the multiprocessing module and sets the parallel
value accordingly.
"""
if parallel != 1:
import multiprocessing
if parallel <= 0:
parallel = multiprocessing.cpu_count()
return parallel, multiprocessing
return parallel, None
def iter_subclasses(cls):
"""
Returns all subclasses of a class.
"""
for x in cls.__subclasses__():
yield x
for y in iter_subclasses(x):
yield y
def hash_equal(a, b):
"""
Returns `True` if a and b represent the same commit hash.
"""
min_len = min(len(a), len(b))
return a.lower()[:min_len] == b.lower()[:min_len]
def get_cpu_info():
"""
Gets a human-friendly description of this machine's CPU.
Returns '' if it can't be obtained.
"""
if sys.platform.startswith('linux'):
with open("/proc/cpuinfo", "rb") as fd:
lines = fd.readlines()
for line in lines:
if b':' in line:
key, val = line.split(b':', 1)
key = key.strip()
val = val.strip()
if key == b'model name':
return val.decode('ascii')
elif sys.platform.startswith('darwin'):
sysctl = which('sysctl')
return check_output([sysctl, '-n', 'machdep.cpu.brand_string']).strip()
return ''
def get_memsize():
"""
Returns the amount of physical memory in this machine.
Returns '' if it can't be obtained.
"""
if sys.platform.startswith('linux'):
with open("/proc/meminfo", "rb") as fd:
lines = fd.readlines()
for line in lines:
if b':' in line:
key, val = line.split(b':', 1)
key = key.strip()
val = val.strip()
if key == b'MemTotal':
return int(val.split()[0])
elif sys.platform.startswith('darwin'):
sysctl = which('sysctl')
return int(check_output([sysctl, '-n', 'hw.memsize']).strip())
return ''
def _get_terminal_size_fallback():
"""
Returns a tuple (height, width) containing the height and width of
the terminal. Fallback for when sys.get_terminal_size() doesn't
exist or fails.
"""
try:
# Unix-specific code
import fcntl
import termios
s = struct.pack(str("HHHH"), 0, 0, 0, 0)
x = fcntl.ioctl(sys.stdout, termios.TIOCGWINSZ, s)
(lines, width, xpixels, ypixels) = struct.unpack(str("HHHH"), x)
if lines > 12:
lines -= 6
if width > 10:
width -= 1
return (lines, width)
except:
# Fall back on environment variables, or if not set, (25, 80)
try:
return (int(os.environ.get('LINES')),
int(os.environ.get('COLUMNS')))
except TypeError:
return 25, 80
def get_terminal_width():
"""
Return the terminal width, or an estimate thereof.
"""
try:
# Python 3.3 and higher: this works under Windows and Unix
return os.get_terminal_size().columns
except (AttributeError, OSError):
return _get_terminal_size_fallback()[1]
def format_text_table(rows, num_headers=0,
top_header_span_start=0,
top_header_text=None):
"""
Format rows in as a reStructuredText table, in the vein of::
========== ========== ==========
-- top header text, span start 1
---------- ---------------------
row0col0 r0c1 r0c2
========== ========== ==========
row1col0 r1c1 r1c2
row2col0 r2c1 r2c2
========== ========== ==========
"""
# Format content
text_rows = [["{0}".format(item).replace("\n", " ") for item in row]
for row in rows]
# Ensure same number of items on all rows
num_items = max(len(row) for row in text_rows)
for row in text_rows:
row.extend(['']*(num_items - len(row)))
# Determine widths
col_widths = [max(len(row[j]) for row in text_rows) + 2
for j in range(num_items)]
# Pad content
text_rows = [[item.center(w) for w, item in zip(col_widths, row)]
for row in text_rows]
# Generate result
headers = [" ".join(row) for row in text_rows[:num_headers]]
content = [" ".join(row) for row in text_rows[num_headers:]]
separator = " ".join("-"*w for w in col_widths)
result = []
if top_header_text is not None:
left_span = "-".join("-"*w for w in col_widths[:top_header_span_start])
right_span = "-".join("-"*w for w in col_widths[top_header_span_start:])
if left_span and right_span:
result += ["--" + " " * (len(left_span)-1) + top_header_text.center(len(right_span))]
result += [" ".join([left_span, right_span])]
else:
result += [top_header_text.center(len(separator))]
result += ["-".join([left_span, right_span])]
result += headers
result += [separator.replace("-", "=")]
elif headers:
result += headers
result += [separator]
result += content
result = [separator.replace("-", "=")] + result
result += [separator.replace("-", "=")]
return "\n".join(result)
def _datetime_to_timestamp(dt, divisor):
delta = dt - datetime.datetime(1970, 1, 1)
microseconds = (delta.days * 86400 + delta.seconds) * 10**6 + delta.microseconds
value, remainder = divmod(microseconds, divisor)
if remainder >= divisor//2:
value += 1
return value
def datetime_to_timestamp(dt):
"""
Convert a Python datetime object to a UNIX timestamp.
"""
return _datetime_to_timestamp(dt, 10**6)
def datetime_to_js_timestamp(dt):
"""
Convert a Python datetime object to a JavaScript timestamp.
"""
return _datetime_to_timestamp(dt, 10**3)
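# Illustrative usage (not part of the original file):
#   datetime_to_timestamp(datetime.datetime(1970, 1, 2))    -> 86400      (seconds)
#   datetime_to_js_timestamp(datetime.datetime(1970, 1, 2)) -> 86400000   (milliseconds)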
def js_timestamp_to_datetime(ts):
"""
Convert a JavaScript timestamp to a Python datetime object.
"""
return datetime.datetime.fromtimestamp(ts / 1000)
def is_nan(x):
"""
Returns `True` if x is a NaN value.
"""
if isinstance(x, float):
return x != x
return False
def is_na(value):
"""
Return True if value is None or NaN
"""
return value is None or is_nan(value)
def mean_na(values):
"""
Take a mean, with the understanding that None and NaN stand for
missing data.
"""
values = [x for x in values if not is_na(x)]
if values:
return sum(values) / len(values)
else:
return None
def geom_mean_na(values):
"""
Compute geometric mean, with the understanding that None and NaN
stand for missing data.
"""
values = [x for x in values if not is_na(x)]
if values:
exponent = 1/len(values)
prod = 1.0
acc = 0
for x in values:
prod *= abs(x)**exponent
acc += x
return prod if acc >= 0 else -prod
else:
return None
def ceildiv(numerator, denominator):
"""Ceiling division"""
return -((-numerator)//denominator)
if not WIN:
long_path_open = open
long_path_rmtree = shutil.rmtree
def long_path(path):
return path
else:
def long_path(path):
if path.startswith("\\\\"):
return path
return "\\\\?\\" + os.path.abspath(path)
def _remove_readonly(func, path, exc_info):
"""Try harder to remove files on Windows"""
if isinstance(exc_info[1], OSError) and exc_info[1].errno == errno.EACCES:
# Clear read-only flag and try again
try:
os.chmod(path, stat.S_IWRITE | stat.S_IREAD)
func(path)
return
except OSError:
pass
# Reraise original error
six.reraise(*exc_info)
def long_path_open(filename, *a, **kw):
return open(long_path(filename), *a, **kw)
def long_path_rmtree(path, ignore_errors=False):
if ignore_errors:
onerror = None
else:
onerror = _remove_readonly
shutil.rmtree(long_path(path),
ignore_errors=ignore_errors,
onerror=onerror)
def sanitize_filename(filename):
"""
Replace characters to make a string safe to use in file names.
This is not a 1-to-1 mapping.
The implementation needs to match www/asv.js:escape_graph_parameter
"""
if not isinstance(filename, six.text_type):
filename = filename.decode(sys.getfilesystemencoding())
# ntfs & ext3
filename = re.sub('[<>:"/\\^|?*\x00-\x1f]', '_', filename)
# ntfs
forbidden = ["CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3",
"COM4", "COM5", "COM6", "COM7", "COM8", "COM9", "LPT1",
"LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8",
"LPT9"]
if filename.upper() in forbidden:
filename = filename + "_"
return filename
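# Illustrative usage (not part of the original file):
#   sanitize_filename('foo/bar:baz') -> 'foo_bar_baz'   (path and drive separators replaced)
#   sanitize_filename('CON')         -> 'CON_'          (reserved NTFS device name suffixed)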
def namedtuple_with_doc(name, slots, doc):
cls = collections.namedtuple(name, slots)
if sys.version_info[0] >= 3:
cls.__doc__ = doc
return cls
else:
return type(str(name), (cls,), {'__doc__': doc})
def recvall(sock, size):
"""
Receive data of given size from a socket connection
"""
data = b""
while len(data) < size:
s = sock.recv(size - len(data))
data += s
if not s:
raise RuntimeError("did not receive data from socket "
"(size {}, got only {!r})".format(size, data))
return data
def interpolate_command(command, variables):
"""
Parse a command with interpolated variables to a sequence of commands.
The command is parsed as in posix-style shell (by shlex) and split to
parts. Additional constructs recognized:
- ``ENVVAR=value <command>``: parsed as declaring an environment variable
named 'ENVVAR'.
- ``return-code=value <command>``: parsed as declaring valid return codes.
Parameters
----------
command : str
Command to execute, posix shell style.
variables : dict
Interpolation variables.
Returns
-------
command : list of str
Command arguments.
env : dict
Environment variables declared in the command.
return_codes : {set, int, None}
Valid return codes.
"""
parts = shlex.split(command)
try:
result = [c.format(**variables) for c in parts]
except KeyError as exc:
raise UserError("Configuration error: {{{0}}} not available "
"when substituting into command {1!r} "
"Available: {2!r}"
"".format(exc.args[0], command, variables))
env = {}
return_codes_set = False
return_codes = {0}
while result:
m = re.match('^([A-Za-z_][A-Za-z0-9_]*)=(.*)$', result[0])
if m:
env[m.group(1)] = m.group(2)
del result[0]
continue
if result[0].startswith('return-code='):
if return_codes_set:
raise UserError("Configuration error: multiple return-code specifications "
"in command {0!r} "
"".format(command))
break
if result[0] == 'return-code=any':
return_codes = None
return_codes_set = True
del result[0]
continue
m = re.match('^return-code=([0-9,]+)$', result[0])
if m:
try:
return_codes = set(int(x) for x in m.group(1).split(","))
return_codes_set = True
del result[0]
continue
except ValueError as exc:
pass
raise UserError("Configuration error: invalid return-code specification "
"{0!r} when substituting into command {1!r} "
"".format(result[0], command))
break
return result, env, return_codes
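# Illustrative usage (a sketch based on the docstring above, not part of the original file):
#   interpolate_command('FOO=1 echo {msg}', {'msg': 'hi'})
# returns (['echo', 'hi'], {'FOO': '1'}, {0}): the leading FOO=1 is parsed as an
# environment variable, {msg} is interpolated, and the default valid return code is 0.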
try:
from shlex import quote as shlex_quote
except ImportError:
_find_unsafe = re.compile(r'[^\w@%+=:,./-]').search
def shlex_quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
|
index.py
|
import os
import cv2
import time
import imutils
import threading
import numpy as np
from playsound import playsound
from imutils.video import VideoStream
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
def play_alert():
playsound('alert.mp3')
def detect_and_predict_mask(frame, faceNet, maskNet):
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300), (104.0, 177.0, 123.0))
faceNet.setInput(blob)
detections = faceNet.forward()
faces = []
locs = []
preds = []
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > 0.5:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
face = frame[startY:endY, startX:endX]
if face.any():
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
faces.append(face)
locs.append((startX, startY, endX, endY))
if len(faces) > 0:
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
return (locs, preds)
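# Note (added comment, not part of the original file): locs is a list of
# (startX, startY, endX, endY) face boxes and preds is the matching array of
# (mask, withoutMask) probabilities from maskNet, so the two can be zipped below.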
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
weightsPath = os.path.sep.join(
["face_detector", "res10_300x300_ssd_iter_140000.caffemodel"]
)
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
print("[INFO] loading face mask detector model...")
maskNet = load_model("mask_detector.model")
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
while True:
frame = vs.read()
frame = imutils.resize(frame, width=700)
(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
for (box, pred) in zip(locs, preds):
(startX, startY, endX, endY) = box
(mask, withoutMask) = pred
label = "Mask" if mask > withoutMask else "No Mask"
value = max(mask, withoutMask) * 100
print(f"{round(value,2)}% chances of '{label}'")
if label == "No Mask" and value > 80:
threading.Thread(target=play_alert, daemon=True).start()
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
label = "{}: {:.2f}%".format(label, value)
cv2.putText(
frame, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2
)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
cv2.imshow("Frame", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord("q"):
break
cv2.destroyAllWindows()
vs.stop()
|
manager.py
|
import argparse # noqa
import atexit # noqa
import codecs # noqa
import copy # noqa
import errno # noqa
import fnmatch # noqa
import hashlib # noqa
import os # noqa
import shutil # noqa
import signal # noqa
import sys # noqa
import threading # noqa
import traceback # noqa
from contextlib import contextmanager # noqa
from datetime import datetime, timedelta # noqa
from typing import Iterator, List, Optional, Sequence, Tuple, Type, Union # noqa
import sqlalchemy # noqa
import yaml # noqa
from loguru import logger # noqa
from sqlalchemy.exc import OperationalError # noqa
from sqlalchemy.ext.declarative import declarative_base # noqa
from sqlalchemy.orm import sessionmaker # noqa
# These need to be declared before we start importing from other flexget modules, since they might import them
from flexget.utils.sqlalchemy_utils import ContextSession # noqa
from flexget.utils.tools import get_current_flexget_version, io_encoding, pid_exists # noqa
Base = declarative_base()
Session: Type[ContextSession] = sessionmaker(class_=ContextSession)
import flexget.log # noqa
from flexget import config_schema, db_schema, plugin # noqa
from flexget.event import fire_event # noqa
from flexget.ipc import IPCClient, IPCServer # noqa
from flexget.options import ( # noqa
CoreArgumentParser,
ParserError,
get_parser,
manager_parser,
unicode_argv,
)
from flexget.task import Task # noqa
from flexget.task_queue import TaskQueue # noqa
from flexget.terminal import console, get_console_output # noqa
logger = logger.bind(name='manager')
manager = None
DB_CLEANUP_INTERVAL = timedelta(days=7)
class Manager:
"""Manager class for FlexGet
Fires events:
* manager.initialize
The first time the manager is initialized, before config is loaded
* manager.before_config_load
Before the config file is loaded from disk
* manager.before_config_validate
When updating the config, before the validator is run on it
* manager.config_updated
After a configuration file has been loaded or changed (and validated) this event is fired
* manager.startup
After manager has been initialized. This is when application becomes ready to use,
however no database lock is present, so the database must not be modified on this event.
* manager.lock_acquired
The manager does not always require a lock on startup, if one is requested,
this event will run when it has been acquired successfully
* manager.upgrade
If any plugins have declared a newer schema version than exists in the database,
this event will be fired to allow plugins to upgrade their tables
* manager.shutdown_requested
When shutdown has been requested. Any plugins which might add to
execution queue should stop when this is fired.
* manager.shutdown
When the manager is exiting
* manager.execute.completed
If execution in current process was completed
* manager.daemon.started
* manager.daemon.completed
* manager.db_cleanup
"""
unit_test = False
options = None
def __init__(self, args: Optional[Sequence]) -> None:
"""
:param args: CLI args
"""
global manager
if not self.unit_test:
assert not manager, 'Only one instance of Manager should be created at a time!'
elif manager:
logger.info('last manager was not torn down correctly')
if args is None:
# Decode all arguments to unicode before parsing
args = unicode_argv()[1:]
self.args = args
self.autoreload_config = False
self.config_file_hash = None
self.config_base = None
self.config_name = None
self.config_path = None
self.log_filename = None
self.db_filename = None
self.engine = None
self.lockfile = None
self.database_uri = None
self.db_upgraded = False
self._has_lock = False
self.is_daemon = False
self.ipc_server = None
self.task_queue = None
self.persist = None
self.initialized = False
self.config = {}
self.options = self._init_options(args)
try:
self._init_config(create=False)
except:
flexget.log.start(level=self.options.loglevel, to_file=False)
raise
manager = self
logger.debug('sys.defaultencoding: {}', sys.getdefaultencoding())
logger.debug('sys.getfilesystemencoding: {}', sys.getfilesystemencoding())
logger.debug('flexget detected io encoding: {}', io_encoding)
logger.debug('os.path.supports_unicode_filenames: {}', os.path.supports_unicode_filenames)
if (
codecs.lookup(sys.getfilesystemencoding()).name == 'ascii'
and not os.path.supports_unicode_filenames
):
logger.warning(
'Your locale declares ascii as the filesystem encoding. Any plugins reading filenames from '
'disk will not work properly for filenames containing non-ascii characters. Make sure your '
'locale env variables are set up correctly for the environment which is launching FlexGet.'
)
def _add_tray_icon_items(self, tray_icon: 'TrayIcon'):
tray_icon.add_menu_item(text='Shutdown', action=self.shutdown, index=2)
tray_icon.add_menu_item(text='Reload Config', action=self.load_config, index=3)
tray_icon.add_menu_separator(index=4)
@staticmethod
def _init_options(args: Sequence[str]) -> argparse.Namespace:
"""
Initialize argument parsing
"""
try:
options = CoreArgumentParser().parse_known_args(args, do_help=False)[0]
except ParserError as exc:
try:
# If a non-built-in command was used, we need to parse with a parser that
# doesn't define the subparsers
options = manager_parser.parse_known_args(args, do_help=False)[0]
except ParserError as e:
manager_parser.print_help()
print(f'\nError: {exc.message}')
sys.exit(1)
return options
def _init_logging(self, to_file: bool = True) -> None:
"""
Initialize logging facilities
"""
log_file = os.path.expanduser(self.options.logfile)
# If an absolute path is not specified, use the config directory.
if not os.path.isabs(log_file):
log_file = os.path.join(self.config_base, log_file)
self.log_filename = log_file
flexget.log.start(
log_file, self.options.loglevel, to_file=to_file, to_console=not self.options.cron
)
def initialize(self) -> None:
"""
Load plugins, database, and config. Also initializes (but does not start) the task queue and ipc server.
This should only be called after obtaining a lock.
"""
if self.initialized:
raise RuntimeError('Cannot call initialize on an already initialized manager.')
plugin.load_plugins(
extra_plugins=[os.path.join(self.config_base, 'plugins')],
extra_components=[os.path.join(self.config_base, 'components')],
)
# Reparse CLI options now that plugins are loaded
self.options = get_parser().parse_args(self.args)
self.task_queue = TaskQueue()
self.ipc_server = IPCServer(self, self.options.ipc_port)
self.setup_yaml()
self.init_sqlalchemy()
fire_event('manager.initialize', self)
try:
self.load_config()
except ValueError as e:
logger.critical('Failed to load config file: {}', e.args[0])
raise
# cannot be imported at module level because of circular references
from flexget.utils.simple_persistence import SimplePersistence
self.persist = SimplePersistence('manager')
if db_schema.upgrade_required():
logger.info('Database upgrade is required. Attempting now.')
fire_event('manager.upgrade', self)
if manager.db_upgraded:
fire_event('manager.db_upgraded', self)
fire_event('manager.startup', self)
self.initialized = True
@property
def tasks(self) -> List[str]:
"""A list of tasks in the config"""
if not self.config:
return []
return list(self.config.get('tasks', {}).keys())
@property
def has_lock(self) -> bool:
return self._has_lock
def execute(
self,
options: Optional[Union[dict, argparse.Namespace]] = None,
priority: int = 1,
suppress_warnings: Optional[Sequence[str]] = None,
) -> List[Tuple[str, str, threading.Event]]:
"""
Run all (can be limited with options) tasks from the config.
:param options: Either an :class:`argparse.Namespace` instance, or a dict, containing options for execution
:param priority: If there are other executions waiting to be run, they will be run in priority order,
lowest first.
:param suppress_warnings: Allows suppressing log warning about missing plugin in key phases
:returns: a list of (task_id, task_name, :class:`threading.Event`) tuples; each
event will be set when the respective task has finished running
"""
if options is None:
options = copy.copy(self.options.execute)
elif isinstance(options, dict):
options_namespace = copy.copy(self.options.execute)
options_namespace.__dict__.update(options)
options = options_namespace
task_names = self.tasks
# Only reload config if daemon
config_hash = self.hash_config()
if self.is_daemon and self.autoreload_config and self.config_file_hash != config_hash:
logger.info('Config change detected. Reloading.')
try:
self.load_config(output_to_console=False, config_file_hash=config_hash)
logger.info('Config successfully reloaded!')
except Exception as e:
logger.error('Reloading config failed: {}', e)
# Handle --tasks
if options.tasks:
# Consider * the same as not specifying tasks at all (makes sure manual plugin still works)
if options.tasks == ['*']:
options.tasks = None
else:
# Create list of tasks to run, preserving order
task_names = []
for arg in options.tasks:
matches = [
t for t in self.tasks if fnmatch.fnmatchcase(str(t).lower(), arg.lower())
]
if not matches:
msg = f'`{arg}` does not match any tasks'
logger.error(msg)
continue
task_names.extend(m for m in matches if m not in task_names)
# Set the option as a list of matching task names so plugins can use it easily
options.tasks = task_names
# TODO: 1.2 This is a hack to make task priorities work still, not sure if it's the best one
task_names = sorted(
task_names, key=lambda t: self.config['tasks'][t].get('priority', 65535)
)
finished_events = []
for task_name in task_names:
task = Task(
self,
task_name,
options=options,
output=get_console_output(),
session_id=flexget.log.get_log_session_id(),
priority=priority,
suppress_warnings=suppress_warnings,
)
self.task_queue.put(task)
finished_events.append((task.id, task.name, task.finished_event))
return finished_events
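# Illustrative usage (a sketch, not part of the original file):
#   events = manager.execute(options={'tasks': ['my-task']}, priority=5)
#   for task_id, task_name, finished in events:
#       finished.wait()
# 'my-task' is a hypothetical task name; each returned event is set once the
# corresponding task has finished running in the task queue.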
def start(self) -> None:
"""
Starting point when executing from commandline, dispatch execution to correct destination.
If there is a FlexGet process with an ipc server already running, the command will be sent there for execution
and results will be streamed back.
If not, this will attempt to obtain a lock, initialize the manager, and run the command here.
"""
# When we are in test mode, we use a different lock file and db
if self.options.test:
self.lockfile = os.path.join(self.config_base, '.test-%s-lock' % self.config_name)
# If another process is started, send the execution to the running process
ipc_info = self.check_ipc_info()
# If we are connecting to a running daemon, we don't want to log to the log file,
# the daemon is already handling that.
self._init_logging(to_file=not ipc_info)
if ipc_info:
console(
'There is a FlexGet process already running for this config, sending execution there.'
)
logger.debug('Sending command to running FlexGet process: {}', self.args)
try:
client = IPCClient(ipc_info['port'], ipc_info['password'])
except ValueError as e:
logger.error(e)
else:
try:
client.handle_cli(self.args)
except KeyboardInterrupt:
logger.error(
'Disconnecting from daemon due to ctrl-c. Executions will still continue in the '
'background.'
)
except EOFError:
logger.error('Connection from daemon was severed.')
return
if self.options.test:
logger.info('Test mode, creating a copy from database ...')
db_test_filename = os.path.join(self.config_base, 'test-%s.sqlite' % self.config_name)
if os.path.exists(self.db_filename):
shutil.copy(self.db_filename, db_test_filename)
logger.info('Test database created')
self.db_filename = db_test_filename
# No running process, we start our own to handle command
with self.acquire_lock():
self.initialize()
self.handle_cli()
self._shutdown()
def handle_cli(self, options: Optional[argparse.Namespace] = None) -> None:
"""
Dispatch a cli command to the appropriate function.
* :meth:`.execute_command`
* :meth:`.daemon_command`
* CLI plugin callback function
The manager should have a lock and be initialized before calling this method.
:param options: argparse options for command. Defaults to options that manager was instantiated with.
"""
if not options:
options = self.options
command = options.cli_command
if command is None:
raise Exception('Command missing')
command_options = getattr(options, command)
# First check for built-in commands
if command in ['execute', 'daemon']:
if command == 'execute':
self.execute_command(command_options)
elif command == 'daemon':
self.daemon_command(command_options)
else:
# Otherwise dispatch the command to the callback function
options.cli_command_callback(self, command_options)
def execute_command(self, options: argparse.Namespace) -> None:
"""
Handles the 'execute' CLI command.
If there is already a task queue running in this process, adds the execution to the queue.
If FlexGet is being invoked with this command, starts up a task queue and runs the execution.
Fires events:
* manager.execute.started
* manager.execute.completed
:param options: argparse options
"""
fire_event('manager.execute.started', self, options)
if self.task_queue.is_alive() or self.is_daemon:
if not self.task_queue.is_alive():
logger.error(
'Task queue has died unexpectedly. Restarting it. Please open an issue on Github and include'
' any previous error logs.'
)
self.task_queue = TaskQueue()
self.task_queue.start()
if len(self.task_queue):
logger.verbose('There is a task already running, execution queued.')
finished_events = self.execute(options)
if not options.cron:
# Wait until execution of all tasks has finished
for _, _, event in finished_events:
event.wait()
else:
self.task_queue.start()
self.ipc_server.start()
self.execute(options)
self.shutdown(finish_queue=True)
self.task_queue.wait()
fire_event('manager.execute.completed', self, options)
def daemon_command(self, options: argparse.Namespace) -> None:
"""
Handles the 'daemon' CLI command.
Fires events:
* manager.daemon.started
* manager.daemon.completed
:param options: argparse options
"""
# Import API so it can register to daemon.started event
if options.action == 'start':
if self.is_daemon:
logger.error('Daemon already running for this config.')
return
elif self.task_queue.is_alive():
logger.error(
'Non-daemon execution of FlexGet is running. Cannot start daemon until it is finished.'
)
return
if options.daemonize:
self.daemonize()
if options.autoreload_config:
self.autoreload_config = True
try:
signal.signal(signal.SIGTERM, self._handle_sigterm)
except ValueError as e:
# If flexget is being called from another script, e.g. windows service helper, and we are not the
# main thread, this error will occur.
logger.debug('Error registering sigterm handler: {}', e)
self.is_daemon = True
def run_daemon(tray_icon: Optional['TrayIcon'] = None):
fire_event('manager.daemon.started', self)
self.task_queue.start()
self.ipc_server.start()
self.task_queue.wait()
fire_event('manager.daemon.completed', self)
if tray_icon:
tray_icon.stop()
if options.tray_icon:
from flexget.tray_icon import tray_icon # noqa
self._add_tray_icon_items(tray_icon)
# Tray icon must be run in the main thread.
m = threading.Thread(target=run_daemon, args=(tray_icon,))
m.start()
tray_icon.run()
m.join()
else:
run_daemon()
elif options.action in ['stop', 'reload-config', 'status']:
if not self.is_daemon:
logger.error('There does not appear to be a daemon running.')
return
if options.action == 'status':
logger.info('Daemon running. (PID: {})', os.getpid())
elif options.action == 'stop':
tasks = (
'all queued tasks (if any) have'
if options.wait
else 'currently running task (if any) has'
)
logger.info(
'Daemon shutdown requested. Shutdown will commence when {} finished executing.',
tasks,
)
self.shutdown(options.wait)
elif options.action == 'reload-config':
logger.info('Reloading config from disk.')
try:
self.load_config()
except ValueError as e:
logger.error('Error loading config: {}', e.args[0])
else:
logger.info('Config successfully reloaded from disk.')
def _handle_sigterm(self, signum, frame) -> None:
logger.info('Got SIGTERM. Shutting down.')
self.shutdown(finish_queue=False)
def setup_yaml(self) -> None:
"""Sets up the yaml loader to return unicode objects for strings by default"""
def construct_yaml_str(self, node):
# Override the default string handling function
# to always return unicode objects
return self.construct_scalar(node)
yaml.Loader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)
yaml.SafeLoader.add_constructor('tag:yaml.org,2002:str', construct_yaml_str)
# Set up the dumper to not tag every string with !!python/unicode
def unicode_representer(dumper, uni):
node = yaml.ScalarNode(tag='tag:yaml.org,2002:str', value=uni)
return node
yaml.add_representer(str, unicode_representer)
# Set up the dumper to increase the indent for lists
def increase_indent_wrapper(func):
def increase_indent(self, flow=False, indentless=False):
func(self, flow, False)
return increase_indent
yaml.Dumper.increase_indent = increase_indent_wrapper(yaml.Dumper.increase_indent)
yaml.SafeDumper.increase_indent = increase_indent_wrapper(yaml.SafeDumper.increase_indent)
def _init_config(self, create: bool = False) -> None:
"""
Find and load the configuration file.
:param bool create: If a config file is not found, and create is True, one will be created in the home folder
:raises: `OSError` when no config file could be found, and `create` is False.
"""
home_path = os.path.join(os.path.expanduser('~'), '.flexget')
options_config = os.path.expanduser(self.options.config)
possible = []
if os.path.isabs(options_config):
# explicit path given, don't try anything
config = options_config
possible = [config]
else:
logger.debug('Figuring out config load paths')
try:
possible.append(os.getcwd())
except OSError:
logger.debug('current directory invalid, not searching for config there')
# for virtualenv / dev sandbox
if hasattr(sys, 'real_prefix'):
logger.debug('Adding virtualenv path')
possible.append(sys.prefix)
# normal lookup locations
possible.append(home_path)
if sys.platform.startswith('win'):
# On windows look in ~/flexget as well, as explorer does not let you create a folder starting with a dot
home_path = os.path.join(os.path.expanduser('~'), 'flexget')
possible.append(home_path)
else:
# The freedesktop.org standard config location
xdg_config = os.environ.get(
'XDG_CONFIG_HOME', os.path.join(os.path.expanduser('~'), '.config')
)
possible.append(os.path.join(xdg_config, 'flexget'))
for path in possible:
config = os.path.join(path, options_config)
if os.path.exists(config):
logger.debug('Found config: {}', config)
break
else:
config = None
if create and not (config and os.path.exists(config)):
config = os.path.join(home_path, options_config)
logger.info('Config file {} not found. Creating new config {}', options_config, config)
with open(config, 'w') as newconfig:
# Write empty tasks to the config
newconfig.write(yaml.dump({'tasks': {}}))
elif not config:
logger.critical('Failed to find configuration file {}', options_config)
logger.info('Tried to read from: {}', ', '.join(possible))
raise OSError('No configuration file found.')
if not os.path.isfile(config):
raise OSError('Config `%s` does not appear to be a file.' % config)
logger.debug('Config file {} selected', config)
self.config_path = config
self.config_name = os.path.splitext(os.path.basename(config))[0]
self.config_base = os.path.normpath(os.path.dirname(config))
self.lockfile = os.path.join(self.config_base, '.%s-lock' % self.config_name)
self.db_filename = os.path.join(self.config_base, 'db-%s.sqlite' % self.config_name)
def hash_config(self) -> Optional[str]:
if not self.config_path:
return
sha1_hash = hashlib.sha1()
with open(self.config_path, 'rb') as f:
while True:
data = f.read(65536)
if not data:
break
sha1_hash.update(data)
return sha1_hash.hexdigest()
def load_config(
self, output_to_console: bool = True, config_file_hash: Optional[str] = None
) -> None:
"""
Loads the config file from disk, validates and activates it.
:raises: `ValueError` if there is a problem loading the config file
"""
fire_event('manager.before_config_load', self)
with open(self.config_path, 'r', encoding='utf-8') as f:
try:
raw_config = f.read()
except UnicodeDecodeError:
logger.critical('Config file must be UTF-8 encoded.')
raise ValueError('Config file is not UTF-8 encoded')
try:
self.config_file_hash = config_file_hash or self.hash_config()
config = yaml.safe_load(raw_config) or {}
except Exception as e:
msg = str(e).replace('\n', ' ')
msg = ' '.join(msg.split())
logger.critical(msg)
if output_to_console:
print('')
print('-' * 79)
print(' Malformed configuration file (check messages above). Common reasons:')
print('-' * 79)
print('')
print(' o Indentation error')
print(' o Missing : from end of the line')
print(' o Non ASCII characters (use UTF8)')
print(
' o If text contains any of :[]{}% characters it must be single-quoted '
'(eg. value{1} should be \'value{1}\')\n'
)
                # Not very good practice, but we get several kinds of exceptions here and I'm not sure of all of them
                # At least: ReaderError, YmlScannerError (or something like that)
if (
hasattr(e, 'problem')
and hasattr(e, 'context_mark')
and hasattr(e, 'problem_mark')
):
lines = 0
if e.problem is not None:
print(' Reason: %s\n' % e.problem)
if e.problem == 'mapping values are not allowed here':
print(' ----> MOST LIKELY REASON: Missing : from end of the line!')
print('')
if e.context_mark is not None:
print(
' Check configuration near line %s, column %s'
% (e.context_mark.line, e.context_mark.column)
)
lines += 1
if e.problem_mark is not None:
print(
' Check configuration near line %s, column %s'
% (e.problem_mark.line, e.problem_mark.column)
)
lines += 1
if lines:
print('')
if lines == 1:
print(' Fault is almost always in this or previous line\n')
if lines == 2:
print(' Fault is almost always in one of these lines or previous ones\n')
# When --debug escalate to full stacktrace
if self.options.debug or not output_to_console:
raise
raise ValueError('Config file is not valid YAML')
# config loaded successfully
logger.debug('config_name: {}', self.config_name)
logger.debug('config_base: {}', self.config_base)
# Install the newly loaded config
self.update_config(config)
def update_config(self, config: dict) -> None:
"""
Provide a new config for the manager to use.
:raises: `ValueError` and rolls back to previous config if the provided config is not valid.
"""
new_user_config = config
old_config = self.config
try:
self.config = self.validate_config(config)
except ValueError as e:
for error in getattr(e, 'errors', []):
logger.critical('[{}] {}', error.json_pointer, error.message)
logger.debug('invalid config, rolling back')
self.config = old_config
raise
logger.debug('New config data loaded.')
self.user_config = copy.deepcopy(new_user_config)
fire_event('manager.config_updated', self)
def backup_config(self) -> str:
backup_path = os.path.join(
self.config_base,
'%s-%s.bak' % (self.config_name, datetime.now().strftime('%y%m%d%H%M%S')),
)
logger.debug('backing up old config to {} before new save', backup_path)
try:
shutil.copy(self.config_path, backup_path)
except OSError as e:
logger.warning('Config backup creation failed: {}', str(e))
raise
return backup_path
def save_config(self) -> None:
"""Dumps current config to yaml config file"""
# TODO: Only keep x number of backups..
# Back up the user's current config before overwriting
try:
self.backup_config()
except OSError:
return
with open(self.config_path, 'w') as config_file:
config_file.write(yaml.dump(self.user_config, default_flow_style=False))
def config_changed(self) -> None:
"""Makes sure that all tasks will have the config_modified flag come out true on the next run.
Useful when changing the db and all tasks need to be completely reprocessed."""
from flexget.task import config_changed
config_changed()
fire_event('manager.config_updated', self)
def validate_config(self, config: Optional[dict] = None) -> dict:
"""
Check all root level keywords are valid. Config may be modified by before_config_validate hooks. Modified
config will be returned.
:param config: Config to check. If not provided, current manager config will be checked.
:raises: `ValueError` when config fails validation. There will be an `errors` attribute with the schema errors.
:returns: Final validated config.
"""
if not config:
config = self.config
config = fire_event('manager.before_config_validate', config, self)
errors = config_schema.process_config(config)
if errors:
err = ValueError('Did not pass schema validation.')
err.errors = errors
raise err
else:
return config
def init_sqlalchemy(self) -> None:
"""Initialize SQLAlchemy"""
try:
if [int(part) for part in sqlalchemy.__version__.split('.')] < [0, 7, 0]:
print(
'FATAL: SQLAlchemy 0.7.0 or newer required. Please upgrade your SQLAlchemy.',
file=sys.stderr,
)
sys.exit(1)
except ValueError as e:
logger.critical('Failed to check SQLAlchemy version, you may need to upgrade it')
# SQLAlchemy
if self.database_uri is None:
# in case running on windows, needs double \\
filename = self.db_filename.replace('\\', '\\\\')
self.database_uri = 'sqlite:///%s' % filename
if self.db_filename and not os.path.exists(self.db_filename):
logger.verbose('Creating new database {} - DO NOT INTERRUPT ...', self.db_filename)
# fire up the engine
logger.debug('Connecting to: {}', self.database_uri)
try:
self.engine = sqlalchemy.create_engine(
self.database_uri,
echo=self.options.debug_sql,
connect_args={'check_same_thread': False, 'timeout': 10},
)
except ImportError as e:
print(
                'FATAL: Unable to use SQLite. Are you running Python 2.7, 3.3 or newer?\n'
                'Python should normally have SQLite support built in.\n'
                'If you\'re running the correct version of Python then it is not equipped with SQLite.\n'
'You can try installing `pysqlite`. If you have compiled python yourself, '
'recompile it with SQLite support.\n'
'Error: %s' % e,
file=sys.stderr,
)
sys.exit(1)
Session.configure(bind=self.engine)
# create all tables, doesn't do anything to existing tables
try:
Base.metadata.create_all(bind=self.engine)
except OperationalError as e:
if os.path.exists(self.db_filename):
print(
'%s - make sure you have write permissions to file %s'
% (e.message, self.db_filename),
file=sys.stderr,
)
else:
print(
'%s - make sure you have write permissions to directory %s'
% (e.message, self.config_base),
file=sys.stderr,
)
raise
def _read_lock(self) -> Optional[dict]:
"""
Read the values from the lock file. Returns None if there is no current lock file.
"""
if self.lockfile and os.path.exists(self.lockfile):
result = {}
with open(self.lockfile, encoding='utf-8') as f:
lines = [l for l in f.readlines() if l]
for line in lines:
try:
key, value = line.split(':', 1)
except ValueError:
logger.debug('Invalid line in lock file: {}', line)
continue
result[key.strip().lower()] = value.strip()
for key in result:
if result[key].isdigit():
result[key] = int(result[key])
result.setdefault('pid', None)
if not result['pid']:
logger.error(
'Invalid lock file. Make sure FlexGet is not running, then delete it.'
)
elif not pid_exists(result['pid']):
return None
return result
return None
def check_lock(self) -> bool:
"""Returns True if there is a lock on the database."""
lock_info = self._read_lock()
if not lock_info:
return False
# Don't count it if we hold the lock
if os.getpid() == lock_info['pid']:
return False
return True
def check_ipc_info(self) -> Optional[dict]:
"""If a daemon has a lock on the database, return info to connect to IPC."""
lock_info = self._read_lock()
if lock_info and 'port' in lock_info:
return lock_info
return None
@contextmanager
def acquire_lock(self, event: bool = True) -> Iterator:
"""
:param bool event: If True, the 'manager.lock_acquired' event will be fired after a lock is obtained
"""
acquired = False
try:
# Don't do anything if we already have a lock. This means only the outermost call will release the lock file
if not self._has_lock:
# Exit if there is an existing lock.
if self.check_lock():
with open(self.lockfile, encoding='utf-8') as f:
pid = f.read()
print(
'Another process (%s) is running, will exit.' % pid.split('\n')[0],
file=sys.stderr,
)
print(
'If you\'re sure there is no other instance running, delete %s'
% self.lockfile,
file=sys.stderr,
)
sys.exit(1)
self._has_lock = True
self.write_lock()
acquired = True
if event:
fire_event('manager.lock_acquired', self)
yield
finally:
if acquired:
self.release_lock()
self._has_lock = False
def write_lock(self, ipc_info: Optional[dict] = None) -> None:
assert self._has_lock
with open(self.lockfile, 'w', encoding='utf-8') as f:
f.write('PID: %s\n' % os.getpid())
if ipc_info:
for key in sorted(ipc_info):
f.write('%s: %s\n' % (key, ipc_info[key]))
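    # Illustrative note (not from the original source): with the IPC server
    # running, write_lock() above produces a lock file roughly like
    #
    #   PID: 12345
    #   password: <ipc password>
    #   port: 45678
    #
    # _read_lock() parses these "key: value" lines back into a dict, and
    # check_ipc_info() uses the port/password entries so that a second CLI
    # invocation can connect to the already running daemon.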
def release_lock(self) -> None:
try:
os.remove(self.lockfile)
except OSError as e:
if e.errno != errno.ENOENT:
raise
logger.debug('Lockfile {} not found', self.lockfile)
else:
logger.debug('Removed {}', self.lockfile)
def daemonize(self) -> None:
"""Daemonizes the current process. Returns the new pid"""
if sys.platform.startswith('win'):
logger.error('Cannot daemonize on windows')
return
if threading.activeCount() != 1:
logger.critical(
'There are {!r} active threads. Daemonizing now may cause strange failures.',
threading.enumerate(),
)
logger.info('Daemonizing...')
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #1 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
# decouple from parent environment
os.chdir('/')
os.setsid()
os.umask(0)
# do second fork
try:
pid = os.fork()
if pid > 0:
# Don't run the exit handlers on the parent
atexit._exithandlers = []
# exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write('fork #2 failed: %d (%s)\n' % (e.errno, e.strerror))
sys.exit(1)
logger.info('Daemonize complete. New PID: {}', os.getpid())
# redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(os.devnull, 'r')
so = open(os.devnull, 'ab+')
se = open(os.devnull, 'ab+', 0)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# If we have a lock, update the lock file with our new pid
if self._has_lock:
self.write_lock()
def db_cleanup(self, force: bool = False) -> None:
"""
Perform database cleanup if cleanup interval has been met.
Fires events:
* manager.db_cleanup
If interval was met. Gives session to do the cleanup as a parameter.
:param bool force: Run the cleanup no matter whether the interval has been met.
"""
expired = (
self.persist.get('last_cleanup', datetime(1900, 1, 1))
< datetime.now() - DB_CLEANUP_INTERVAL
)
if force or expired:
logger.info('Running database cleanup.')
with Session() as session:
fire_event('manager.db_cleanup', self, session)
# Try to VACUUM after cleanup
fire_event('manager.db_vacuum', self)
# Just in case some plugin was overzealous in its cleaning, mark the config changed
self.config_changed()
self.persist['last_cleanup'] = datetime.now()
else:
logger.debug('Not running db cleanup, last run {}', self.persist.get('last_cleanup'))
def shutdown(self, finish_queue: bool = True) -> None:
"""
Request manager shutdown.
:param bool finish_queue: Should scheduler finish the task queue
"""
if not self.initialized:
raise RuntimeError('Cannot shutdown manager that was never initialized.')
fire_event('manager.shutdown_requested', self)
self.task_queue.shutdown(finish_queue)
def _shutdown(self) -> None:
"""Runs when the manager is done processing everything."""
if self.ipc_server:
self.ipc_server.shutdown()
fire_event('manager.shutdown', self)
if not self.unit_test: # don't scroll "nosetests" summary results when logging is enabled
logger.debug('Shutting down')
self.engine.dispose()
# remove temporary database used in test mode
if self.options.test:
if 'test' not in self.db_filename:
raise Exception('trying to delete non test database?')
if self._has_lock:
os.remove(self.db_filename)
logger.info('Removed test database')
global manager
manager = None
def crash_report(self) -> str:
"""
This should be called when handling an unexpected exception. Will create a new log file containing the last 50
debug messages as well as the crash traceback.
"""
if not self.unit_test:
log_dir = os.path.dirname(self.log_filename)
filename = os.path.join(
log_dir, datetime.now().strftime('crash_report.%Y.%m.%d.%H%M%S%f.log')
)
with codecs.open(filename, 'w', encoding='utf-8') as outfile:
outfile.writelines(flexget.log.debug_buffer)
traceback.print_exc(file=outfile)
logger.critical(
'An unexpected crash has occurred. Writing crash report to {}. '
'Please verify you are running the latest version of flexget by using "flexget -V" '
'from CLI or by using version_checker plugin'
' at http://flexget.com/wiki/Plugins/version_checker. '
'You are currently using version {}',
filename,
get_current_flexget_version(),
)
logger.opt(exception=True).debug('Traceback:')
return traceback.format_exc()
|
gerbil.py
|
"""
Gerbil - Copyright (c) 2015 Michael Franzl
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import time
import re
import threading
import atexit
import os
import collections
from queue import Queue
from .interface import Interface
from .callbackloghandler import CallbackLogHandler
from gcode_machine.gcode_machine import GcodeMachine
class Gerbil:
""" A universal Grbl CNC firmware interface module for Python3
providing a convenient high-level API for scripting or integration
into parent applications like GUI's.
There are a number of streaming applications available for the Grbl
    CNC controller, but none of them seem to be a universal, re-usable
standard Python module. Gerbil attempts to fill that gap.
See README for usage examples.
    Gerbil is the name of a cute desert rodent. We chose the name due to
its similarity to the name "Grbl".
Features:
* Re-usable across projects
* Non-blocking
* Asynchronous (event-based) callbacks for the parent application
* Two streaming modes: Incremental or fast ("counting characters")
* Defined shutdown
* G-Code cleanup
* G-Code variable expansion
* Dynamic feed override
* Buffer stashing
* Job halt and resume
Callbacks:
After assigning your own callback function (callback = ...) you will receive the following signals:
on_boot
: Emitted whenever Grbl boots (e.g. after a soft reset).
: No arguments.
on_disconnected
: Emitted whenever the serial port has been closed.
: No arguments
on_log
: Emitted for informal logging or debugging messages.
: 1 argument: LogRecord instance
on_line_sent
: Emitted whenever a line is actually sent to Grbl.
: 2 arguments: job_line_number, line
on_bufsize_change
: Emitted whenever lines have been appended to the buffer
: 1 argument: linecount
on_line_number_change
: Emitted whenever the current buffer position has been changed
: 1 argument: line_number
on_processed_command
: Emitted whenever Grbl confirms a command with "ok" and is now being executed physically
: 2 arguments: processed line number, processed line
on_alarm
: Emitted whenever Grbl sends an "ALARM" line
: 1 argument: the full line Grbl sent
on_error
: Emitted whenever Grbl sends an "ERROR" line
: 3 arguments: the full line Grbl sent, the line that caused the error, the line number in the buffer that caused the error
on_rx_buffer_percent
: Reports Grbl's serial receive buffer fill in percent. Emitted frequently while streaming.
: 1 argument: percentage integer from 0 to 100
on_progress_percent
: Reports the completion of the current job/buffer in percent. Emitted frequently while streaming.
: 1 argument: percentage integer from 0 to 100
on_job_completed
: Emitted when the current job/buffer has been streamed and physically executed entirely
on_stateupdate
: Emitted whenever Grbl's state has changed
    : 3 arguments: Grbl's mode ('Idle', 'Run' etc.), machine position tuple, working position tuple
on_hash_stateupdate
: Emitted after Grbl's 'hash' EEPROM settings (`$#`) have been received
: 1 argument: dict of the settings
on_settings_downloaded
: Emitted after Grbl's EEPROM settings (`$$`) have been received
: 1 argument: dict of the settings
on_gcode_parser_stateupdate
: Emitted after Grbl's G-Code parser state has been received
: 1 argument: list of the state variables
on_simulation_finished
: Emitted when Gerbil's target is set to "simulator" and the job is executed.
: 1 argument: list of all G-Code commands that would have been sent to Grbl
on_vars_change
: Emitted after G-Code is loaded into the buffer and variables have been detected
: 1 argument: a dict of the detected variables
on_preprocessor_feed_change
: Emitted when a F keyword is parsed from the G-Code.
: 1 argument: the feed rate in mm/min
"""
__version__ = "0.5.0"
def __init__(self, callback, name="mygrbl"):
"""Straightforward initialization tasks.
@param callback
Set your own function that will be called when a number of
asynchronous events happen. Useful for UI's. The
default function will just log to stdout.
This callback function will receive two arguments. The first
is a string giving a label of the event, and the second is a variable
argument list `*args` containing data pertaining to the event.
Note that this function may be called from a Thread.
@param name
An informal name of the instance. Useful if you are running
several instances to control several CNC machines at once.
It is only used for logging output and UI messages.
"""
## @var name
# Set an informal name of the instance. Useful if you are
# running several instances to control several CNC machines at
# once. It is only used for logging output and UI messages.
self.name = name
## @var cmode
# Get Grbl's current mode.
# Will be strings 'Idle', 'Check', 'Run'
self.cmode = None
## @var cmpos
# Get a 3-tuple containing the current coordinates relative
# to the machine origin.
self.cmpos = (0, 0, 0)
## @var cwpos
# Get a 3-tuple containing the current working coordinates.
# Working coordinates are relative to the currently selected
# coordinate system.
self.cwpos = (0, 0, 0)
## @var gps
# Get list of 12 elements containing the 12 Gcode Parser State
# variables of Grbl which are obtained by sending the raw
# command `$G`. Will be available after setting
# `hash_state_requested` to True.
self.gps = [
"0", # motion mode
"54", # current coordinate system
"17", # current plane mode
"21", # units
"90", # current distance mode
"94", # feed rate mode
"0", # program mode
"0", # spindle state
"5", # coolant state
"0", # tool number
"99", # current feed
"0", # spindle speed
]
## @var poll_interval
# Set an interval in seconds for polling Grbl's state via
# the `?` command. The Grbl Wiki recommends to set this no lower
# than 0.2 (5 per second).
self.poll_interval = 0.2
## @var settings
# Get a dictionary of Grbl's EEPROM settings which can be read
# after sending the `$$` command, or more conveniently after
# calling the method `request_settings()` of this class.
self.settings = {
130: { "val": "1000", "cmt": "width" },
131: { "val": "1000", "cmt": "height" }
}
## @var settings_hash
# Get a dictionary of Grbl's 'hash' settings (also stored in the
# EEPROM) which can be read after sending the `$#` command. It
# contains things like coordinate system offsets. See Grbl
# documentation for more info. Will be available shortly after
# setting `hash_state_requested` to `True`.
self.settings_hash = {
"G54": (-600, -300, 0),
"G55": (-400, -300, 0),
"G56": (-200, -300, 0),
"G57": (-600, -600, 0),
"G58": (-400, -600, 0),
"G59": (-200, -600, 0),
"G28": (0, 0, 0),
"G30": (0, 0, 0),
"G92": (0, 0, 0),
"TLO": 0,
"PRB": (0, 0, 0),
}
## @var gcode_parser_state_requested
# Set this variable to `True` to receive a callback with the
# event string "on_gcode_parser_stateupdate" containing
# data that Grbl sends after receiving the `$G` command.
# After the callback, this variable reverts to `False`.
self.gcode_parser_state_requested = False
## @var hash_state_requested
# Set this variable to `True` to receive a callback with the
# event string "on_hash_stateupdate" containing
# the requested data. After the callback, this variable reverts
# to `False`.
self.hash_state_requested = False
## @var logger
# The logger used by this class. The default is Python's own
# logger module. Use `setup_logging()` to attach custom
# log handlers.
self.logger = logging.getLogger("gerbil")
self.logger.setLevel(5)
self.logger.propagate = False
## @var target
# Set this to change the output target. Default is "firmware"
# which means the serial port. Another target is "simulator",
        # you will receive a callback with event string
# "on_simulation_finished" and a buffer of the G-Code commands
# that would have been sent out to Grbl.
# TODO: Add "file" target.
self.target = "firmware"
## @var connected
# `True` when connected to Grbl (after boot), otherwise `False`
self.connected = False
## @var preprocessor
# All G-code commands will go through the preprocessor
# before they are sent out via the serial port. The preprocessor
# keeps track of, and can dynamically change, feed rates, as well
# as substitute variables. It has its own state and callback
# functions.
self.preprocessor = GcodeMachine()
self.preprocessor.callback = self._preprocessor_callback
## @var travel_dist_buffer
# The total distance of all G-Codes in the buffer.
self.travel_dist_buffer = {}
## @var travel_dist_current
# The currently travelled distance. Can be used to calculate ETA.
self.travel_dist_current = {}
## @var is_standstill
# If the machine is currently not moving
self.is_standstill = False
self._ifacepath = None
self._last_setting_number = 132
self._last_cmode = None
self._last_cmpos = (0, 0, 0)
self._last_cwpos = (0, 0, 0)
self._standstill_watchdog_increment = 0
self._rx_buffer_size = 128
self._rx_buffer_fill = []
self._rx_buffer_backlog = []
self._rx_buffer_backlog_line_number = []
self._rx_buffer_fill_percent = 0
self._current_line = ""
self._current_line_sent = True
self._streaming_mode = None
self._wait_empty_buffer = False
self.streaming_complete = True
self.job_finished = True
self._streaming_src_end_reached = True
self._streaming_enabled = True
self._error = False
self._incremental_streaming = False
self._hash_state_sent = False
self.buffer = []
self.buffer_size = 0
self._current_line_nr = 0
self.buffer_stash = []
self.buffer_size_stash = 0
self._current_line_nr_stash = 0
self._poll_keep_alive = False
self._iface_read_do = False
self._thread_polling = None
self._thread_read_iface = None
self._iface = None
self._queue = Queue()
self._loghandler = None
self._counter = 0 # general-purpose counter for timing tasks inside of _poll_state
self._callback = callback
atexit.register(self.disconnect)
# supply defaults to GUI to make it operational
self._callback("on_settings_downloaded", self.settings)
self._callback("on_hash_stateupdate", self.settings_hash)
self.preprocessor.cs_offsets = self.settings_hash
self._callback("on_gcode_parser_stateupdate", self.gps)
def setup_logging(self, handler=None):
"""Assign a custom log handler.
Gerbil can be used in both console applications as well as
integrated in other projects like GUI's. Therefore, logging to
stdout is not always useful. You can pass a custom log message
handler to this method. If no handler is passed in, the default
handler is an instance of class `CallbackLogHandler`
(see file `callback_loghandler.py` included in this module).
CallbackLogHandler will deliver logged strings as callbacks to
the parent application, the event string will be "on_log".
@param handler=None
An instance of a subclass inheriting from `logging.StreamHandler`
"""
if handler:
self._loghandler = handler
else:
# The default log handler shipped with this module will call
# self._callback() with first parameter "on_log" and second
# parameter with the logged string.
lh = CallbackLogHandler()
self._loghandler = lh
# attach the selected log handler
self.logger.addHandler(self._loghandler)
self._loghandler.callback = self._callback
def cnect(self, path=None, baudrate=115200):
"""
Connect to the RS232 port of the Grbl controller.
@param path=None Path to the device node
This is done by instantiating a RS232 class, included in this
module, which by itself block-listens (in a thread) to
asynchronous data sent by the Grbl controller.
"""
if path == None or path.strip() == "":
return
else:
self._ifacepath = path
if self._iface == None:
self.logger.debug("{}: Setting up interface on {}".format(self.name, self._ifacepath))
self._iface = Interface("iface_" + self.name, self._ifacepath, baudrate)
self._iface.start(self._queue)
else:
self.logger.info("{}: Cannot start another interface. There is already an interface {}.".format(self.name, self._iface))
self._iface_read_do = True
self._thread_read_iface = threading.Thread(target=self._onread)
self._thread_read_iface.start()
self.softreset()
def disconnect(self):
"""
This method provides a controlled shutdown and cleanup of this
module.
It stops all threads, joins them, then closes the serial
connection. For a safe shutdown of Grbl you may also want to
call `softreset()` before you call this method.
"""
if self.is_connected() == False: return
self.poll_stop()
self._iface.stop()
self._iface = None
self.logger.debug("{}: Please wait until reading thread has joined...".format(self.name))
self._iface_read_do = False
self._queue.put("dummy_msg_for_joining_thread")
self._thread_read_iface.join()
self.logger.debug("{}: Reading thread successfully joined.".format(self.name))
self.connected = False
self._callback("on_disconnected")
def softreset(self):
"""
Immediately sends `Ctrl-X` to Grbl.
"""
self._iface.write("\x18") # Ctrl-X
self.update_preprocessor_position()
def abort(self):
"""
An alias for `softreset()`.
"""
if self.is_connected() == False: return
self.softreset()
def hold(self):
"""
Immediately sends the feed hold command (exclamation mark)
to Grbl.
"""
if self.is_connected() == False: return
self._iface_write("!")
def resume(self):
"""
Immediately send the resume command (tilde) to Grbl.
"""
if self.is_connected() == False: return
self._iface_write("~")
def killalarm(self):
"""
Immediately send the kill alarm command ($X) to Grbl.
"""
self._iface_write("$X\n")
def homing(self):
"""
Immediately send the homing command ($H) to Grbl.
"""
self._iface_write("$H\n")
def poll_start(self):
"""
        Starts polling Grbl's status with the `?` command in an endless loop. The
polling interval is controlled by setting `self.poll_interval`.
You will receive callbacks with the "on_stateupdate" event
string containing 3 data parameters self.cmode, self.cmpos,
self.cwpos, but only when Grbl's state CHANGES.
"""
if self.is_connected() == False: return
self._poll_keep_alive = True
self._last_cmode = None
if self._thread_polling == None:
self._thread_polling = threading.Thread(target=self._poll_state)
self._thread_polling.start()
self.logger.debug("{}: Polling thread started".format(self.name))
else:
self.logger.debug("{}: Polling thread already running...".format(self.name))
def poll_stop(self):
"""
Stops polling that has been started with `poll_start()`
"""
if self.is_connected() == False: return
if self._thread_polling != None:
self._poll_keep_alive = False
self.logger.debug("{}: Please wait until polling thread has joined...".format(self.name))
self._thread_polling.join()
self.logger.debug("{}: Polling thread has successfully joined...".format(self.name))
else:
self.logger.debug("{}: Cannot start a polling thread. Another one is already running.".format(self.name))
self._thread_polling = None
def set_feed_override(self, val):
"""
Enable or disable the feed override feature.
@param val
Pass `True` or `False` as argument to enable or disable dynamic
feed override. After passing `True`, you may set the
requested feed by calling `self.request_feed()` one or many
times.
"""
self.preprocessor.do_feed_override = val
def request_feed(self, requested_feed):
"""
        Override the feed speed. Effective only when you set `set_feed_override(True)`.
@param requested_feed
The feed speed in mm/min.
"""
self.preprocessor.request_feed = float(requested_feed)
@property
def incremental_streaming(self):
return self._incremental_streaming
@incremental_streaming.setter
def incremental_streaming(self, onoff):
"""
Incremental streaming means that a new command is sent to Grbl
only after Grbl has responded with 'ok' to the last sent
command. This is necessary to flash $ settings to the EEPROM.
Non-incremental streaming means that Grbl's 100-some-byte
receive buffer will be kept as full as possible at all times,
to give its motion planner system enough data to work with.
This results in smoother and faster axis motion. This is also
called 'advanced streaming protocol based on counting
characters' -- see Grbl Wiki.
You can dynamically change the streaming method even
during streaming, while running a job. The buffer fill
percentage will reflect the change even during streaming.
@param onoff
Set to `True` to use incremental streaming. Set to `False` to
use non-incremental streaming. The default on module startup
is `False`.
"""
self._incremental_streaming = onoff
if self._incremental_streaming == True:
self._wait_empty_buffer = True
self.logger.debug("{}: Incremental streaming set to {}".format(self.name, self._incremental_streaming))
def send_immediately(self, line):
"""
G-Code command strings passed to this function will bypass
buffer management and will be sent to Grbl immediately.
Use this function with caution: Only send when you
are sure Grbl's receive buffer can handle the data volume and
when it doesn't interfere with currently running streams.
Only send single commands at a time.
Applications of this method: manual jogging, coordinate settings
etc.
@param line
A string of a single G-Code command to be sent. Doesn't have to
be \n terminated.
"""
bytes_in_firmware_buffer = sum(self._rx_buffer_fill)
if bytes_in_firmware_buffer > 0:
self.logger.error("Firmware buffer has {:d} unprocessed bytes in it. Will not send {}".format(bytes_in_firmware_buffer, line))
return
if self.cmode == "Alarm":
self.logger.error("Grbl is in ALARM state. Will not send {}.".format(line))
return
if self.cmode == "Hold":
self.logger.error("Grbl is in HOLD state. Will not send {}.".format(line))
return
if "$#" in line:
# The PRB response is sent for $# as well as when probing.
# Regular querying of the hash state needs to be done like this,
# otherwise the PRB response would be interpreted as a probe answer.
self.hash_state_requested = True
return
self.preprocessor.set_line(line)
self.preprocessor.strip()
self.preprocessor.tidy()
self.preprocessor.parse_state()
self.preprocessor.override_feed()
self._iface_write(self.preprocessor.line + "\n")
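    # Illustrative example (not part of the original file): send_immediately()
    # is meant for single commands such as manual jogging or coordinate setup,
    # bypassing the job buffer, e.g.
    #
    #   grbl.send_immediately("G91 G0 X1")   # jog 1 unit in +X (relative mode)
    #   grbl.send_immediately("G90")         # switch back to absolute mode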
def stream(self, lines):
"""
A more convenient alias for `write(lines)` and `job_run()`
@param lines
A string of G-Code commands. Each command is \n separated.
"""
self._load_lines_into_buffer(lines)
self.job_run()
def write(self, lines):
"""
G-Code command strings passed to this function will be appended
to the current queue buffer, however a job is not started
automatically. You have to call `job_run()` to start streaming.
You can call this method repeatedly, e.g. for submitting chunks
of G-Code, even while a job is running.
@param lines
A string of G-Code commands. Each command is \n separated.
"""
if type(lines) is list:
lines = "\n".join(lines)
self._load_lines_into_buffer(lines)
def load_file(self, filename):
"""
Pass a filename to this function to load its contents into the
buffer. This only works when Grbl is Idle and the previous job
has completed. The previous buffer will be cleared. After this
function has completed, the buffer's contents will be identical
to the file content. Job is not started automatically.
Call `job_run` to start the job.
@param filename
A string giving the relative or absolute file path
"""
if self.job_finished == False:
self.logger.warning("{}: Job must be finished before you can load a file".format(self.name))
return
self.job_new()
with open(filename) as f:
self._load_lines_into_buffer(f.read())
def job_run(self, linenr=None):
"""
Run the current job, i.e. start streaming the current buffer
from a specific line number.
@param linenr
If `linenr` is not specified, start streaming from the current
buffer position (`self.current_line_number`). If `linenr` is specified, start streaming from this line.
"""
if self.buffer_size == 0:
self.logger.warning("{}: Cannot run job. Nothing in the buffer!".format(self.name))
return
if linenr:
self.current_line_number = linenr
self.travel_dist_current = {}
#self.preprocessor.current_feed = None
self._set_streaming_src_end_reached(False)
self._set_streaming_complete(False)
self._streaming_enabled = True
self._current_line_sent = True
self._set_job_finished(False)
self._stream()
def job_halt(self):
"""
Stop streaming. Grbl still will continue processing
all G-Code in its internal serial receive buffer.
"""
self._streaming_enabled = False
def job_new(self):
"""
Start a new job. A "job" in our terminology means the buffer's
contents. This function will empty the buffer, set the buffer
position to 0, and reset internal state.
"""
del self.buffer[:]
self.buffer_size = 0
self._current_line_nr = 0
self._callback("on_line_number_change", 0)
self._callback("on_bufsize_change", 0)
self._set_streaming_complete(True)
self.job_finished = True
self._set_streaming_src_end_reached(True)
self._error = False
self._current_line = ""
self._current_line_sent = True
self.travel_dist_buffer = {}
self.travel_dist_current = {}
self._callback("on_vars_change", self.preprocessor.vars)
@property
def current_line_number(self):
return self._current_line_nr
@current_line_number.setter
def current_line_number(self, linenr):
if linenr < self.buffer_size:
self._current_line_nr = linenr
self._callback("on_line_number_change", self._current_line_nr)
def request_settings(self):
"""
This will send `$$` to Grbl and you will receive a callback with
the argument 1 "on_settings_downloaded", and argument 2 a dict
of the settings.
"""
self._iface_write("$$\n")
def do_buffer_stash(self):
"""
Stash the current buffer and position away and initialize a
new job. This is useful if you want to stop the current job,
stream changed $ settings to Grbl, and then resume the job
where you left off. See also `self.buffer_unstash()`.
"""
self.buffer_stash = list(self.buffer)
self.buffer_size_stash = self.buffer_size
self._current_line_nr_stash = self._current_line_nr
self.job_new()
def do_buffer_unstash(self):
"""
Restores the previous stashed buffer and position.
"""
self.buffer = list(self.buffer_stash)
self.buffer_size = self.buffer_size_stash
self.current_line_number = self._current_line_nr_stash
self._callback("on_bufsize_change", self.buffer_size)
def update_preprocessor_position(self):
# keep preprocessor informed about current working pos
self.preprocessor.position_m = list(self.cmpos)
#self.preprocessor.target = list(self.cmpos)
def _preprocessor_callback(self, event, *data):
if event == "on_preprocessor_var_undefined":
self.logger.critical("HALTED JOB BECAUSE UNDEFINED VAR {}".format(data[0]))
self._set_streaming_src_end_reached(True)
self.job_halt()
else:
self._callback(event, *data)
def _stream(self):
if self._streaming_src_end_reached:
return
if self._streaming_enabled == False:
return
if self.target == "firmware":
if self._incremental_streaming:
self._set_next_line()
if self._streaming_src_end_reached == False:
self._send_current_line()
else:
self._set_job_finished(True)
else:
self._fill_rx_buffer_until_full()
elif self.target == "simulator":
buf = []
while self._streaming_src_end_reached == False:
self._set_next_line(True)
if self._current_line_nr < self.buffer_size:
buf.append(self._current_line)
# one line still to go
self._set_next_line(True)
buf.append(self._current_line)
self._set_job_finished(True)
self._callback("on_simulation_finished", buf)
def _fill_rx_buffer_until_full(self):
while True:
if self._current_line_sent == True:
self._set_next_line()
if self._streaming_src_end_reached == False and self._rx_buf_can_receive_current_line():
self._send_current_line()
else:
break
def _set_next_line(self, send_comments=False):
progress_percent = int(100 * self._current_line_nr / self.buffer_size)
self._callback("on_progress_percent", progress_percent)
if self._current_line_nr < self.buffer_size:
# still something in _buffer, pop it
line = self.buffer[self._current_line_nr].strip()
self.preprocessor.set_line(line)
self.preprocessor.substitute_vars()
self.preprocessor.parse_state()
self.preprocessor.override_feed()
self.preprocessor.scale_spindle()
if send_comments == True:
self._current_line = self.preprocessor.line + self.preprocessor.comment
else:
self._current_line = self.preprocessor.line
self._current_line_sent = False
self._current_line_nr += 1
self.preprocessor.done()
else:
# the buffer is empty, nothing more to read
self._set_streaming_src_end_reached(True)
def _send_current_line(self):
if self._error:
self.logger.error("Firmware reported error. Halting.")
self._set_streaming_src_end_reached(True)
self._set_streaming_complete(True)
return
self._set_streaming_complete(False)
line_length = len(self._current_line) + 1 # +1 for \n which we will append below
self._rx_buffer_fill.append(line_length)
self._rx_buffer_backlog.append(self._current_line)
self._rx_buffer_backlog_line_number.append(self._current_line_nr)
self._iface_write(self._current_line + "\n")
self._current_line_sent = True
self._callback("on_line_sent", self._current_line_nr, self._current_line)
def _rx_buf_can_receive_current_line(self):
rx_free_bytes = self._rx_buffer_size - sum(self._rx_buffer_fill)
required_bytes = len(self._current_line) + 1 # +1 because \n
return rx_free_bytes >= required_bytes
def _rx_buffer_fill_pop(self):
if len(self._rx_buffer_fill) > 0:
self._rx_buffer_fill.pop(0)
processed_command = self._rx_buffer_backlog.pop(0)
ln = self._rx_buffer_backlog_line_number.pop(0) - 1
self._callback("on_processed_command", ln, processed_command)
if self._streaming_src_end_reached == True and len(self._rx_buffer_fill) == 0:
self._set_job_finished(True)
self._set_streaming_complete(True)
def _iface_write(self, line):
self._callback("on_write", line)
if self._iface:
num_written = self._iface.write(line)
def _onread(self):
while self._iface_read_do == True:
line = self._queue.get()
if len(line) > 0:
if line[0] == "<":
self._update_state(line)
elif line == "ok":
self._handle_ok()
elif re.match("^\[G[0123] .*", line):
self._update_gcode_parser_state(line)
self._callback("on_read", line)
elif line == "[MSG:Caution: Unlocked]":
# nothing to do here
pass
elif re.match("^\[...:.*", line):
self._update_hash_state(line)
self._callback("on_read", line)
if "PRB" in line:
# last line
if self.hash_state_requested == True:
self._hash_state_sent = False
self.hash_state_requested = False
self._callback("on_hash_stateupdate", self.settings_hash)
self.preprocessor.cs_offsets = self.settings_hash
else:
self._callback("on_probe", self.settings_hash["PRB"])
elif "ALARM" in line:
self.cmode = "Alarm" # grbl for some reason doesn't respond to ? polling when alarm due to soft limits
self._callback("on_stateupdate", self.cmode, self.cmpos, self.cwpos)
self._callback("on_read", line)
self._callback("on_alarm", line)
elif "error" in line:
#self.logger.debug("ERROR")
self._error = True
#self.logger.debug("%s: _rx_buffer_backlog at time of error: %s", self.name, self._rx_buffer_backlog)
if len(self._rx_buffer_backlog) > 0:
problem_command = self._rx_buffer_backlog[0]
problem_line = self._rx_buffer_backlog_line_number[0]
else:
problem_command = "unknown"
problem_line = -1
self._callback("on_error", line, problem_command, problem_line)
self._set_streaming_complete(True)
self._set_streaming_src_end_reached(True)
elif "Grbl " in line:
self._callback("on_read", line)
self._on_bootup()
self.hash_state_requested = True
self.request_settings()
self.gcode_parser_state_requested = True
else:
m = re.match("\$(.*)=(.*) \((.*)\)", line)
if m:
key = int(m.group(1))
val = m.group(2)
comment = m.group(3)
self.settings[key] = {
"val" : val,
"cmt" : comment
}
self._callback("on_read", line)
if key == self._last_setting_number:
self._callback("on_settings_downloaded", self.settings)
else:
self._callback("on_read", line)
#self.logger.info("{}: Could not parse settings: {}".format(self.name, line))
def _handle_ok(self):
if self.streaming_complete == False:
self._rx_buffer_fill_pop()
if not (self._wait_empty_buffer and len(self._rx_buffer_fill) > 0):
self._wait_empty_buffer = False
self._stream()
self._rx_buffer_fill_percent = int(100 - 100 * (self._rx_buffer_size - sum(self._rx_buffer_fill)) / self._rx_buffer_size)
self._callback("on_rx_buffer_percent", self._rx_buffer_fill_percent)
def _on_bootup(self):
self._onboot_init()
self.connected = True
self.logger.debug("{}: Grbl has booted!".format(self.name))
self._callback("on_boot")
def _update_hash_state(self, line):
line = line.replace("]", "").replace("[", "")
parts = line.split(":")
key = parts[0]
tpl_str = parts[1].split(",")
tpl = tuple([float(x) for x in tpl_str])
self.settings_hash[key] = tpl
def _update_gcode_parser_state(self, line):
m = re.match("\[G(\d) G(\d\d) G(\d\d) G(\d\d) G(\d\d) G(\d\d) M(\d) M(\d) M(\d) T(\d) F([\d.-]*?) S([\d.-]*?)\]", line)
if m:
self.gps[0] = m.group(1) # motionmode
self.gps[1] = m.group(2) # current coordinate system
self.gps[2] = m.group(3) # plane
self.gps[3] = m.group(4) # units
self.gps[4] = m.group(5) # dist
self.gps[5] = m.group(6) # feed rate mode
self.gps[6] = m.group(7) # program mode
self.gps[7] = m.group(8) # spindle state
self.gps[8] = m.group(9) # coolant state
self.gps[9] = m.group(10) # tool number
self.gps[10] = m.group(11) # current feed
self.gps[11] = m.group(12) # current rpm
self._callback("on_gcode_parser_stateupdate", self.gps)
self.update_preprocessor_position()
else:
self.logger.error("{}: Could not parse gcode parser report: '{}'".format(self.name, line))
def _update_state(self, line):
m = re.match("<(.*?),MPos:(.*?),WPos:(.*?)>", line)
if m is not None:
# GRBL v0.9
self.cmode = m.group(1)
mpos_parts = m.group(2).split(",")
wpos_parts = m.group(3).split(",")
self.cmpos = (float(mpos_parts[0]), float(mpos_parts[1]), float(mpos_parts[2]))
self.cwpos = (float(wpos_parts[0]), float(wpos_parts[1]), float(wpos_parts[2]))
else:
# GRBL v1.1
# <Idle|MPos:0.0000,0.0000,0.0000|Bf:15,128|FS:0.0,0|WCO:0.0000,0.0000,0.0000>
m = re.match("<(.*?)\|MPos:(.*?)\|", line)
if m is not None:
# machine position reported (adjustable via $10)
self.cmode = m.group(1)
mpos_parts = m.group(2).split(",")
self.cmpos = (float(mpos_parts[0]), float(mpos_parts[1]), float(mpos_parts[2]))
else:
m = re.match("<(.*?)\|WPos:(.*?)\|", line)
if m is not None:
                    # work position reported (adjustable via $10)
self.cmode = m.group(1)
wpos_parts = m.group(2).split(",")
self.cwpos = (float(wpos_parts[0]), float(wpos_parts[1]), float(wpos_parts[2]))
else:
self.logger.error("{}: Could not parse MPos or WPos: '{}'".format(self.name, line))
return
# if we made it here, we parsed MPos or WPos or both
if (self.cmode != self._last_cmode or
self.cmpos != self._last_cmpos or
self.cwpos != self._last_cwpos):
self._callback("on_stateupdate", self.cmode, self.cmpos, self.cwpos)
if self.streaming_complete == True and self.cmode == "Idle":
self.update_preprocessor_position()
self.gcode_parser_state_requested = True
if (self.cmpos != self._last_cmpos):
if self.is_standstill == True:
self._standstill_watchdog_increment = 0
self.is_standstill = False
self._callback("on_movement")
else:
# no change in positions
self._standstill_watchdog_increment += 1
if self.is_standstill == False and self._standstill_watchdog_increment > 10:
# machine is not moving
self.is_standstill = True
self._callback("on_standstill")
self._last_cmode = self.cmode
self._last_cmpos = self.cmpos
self._last_cwpos = self.cwpos
def _load_line_into_buffer(self, line):
self.preprocessor.set_line(line)
split_lines = self.preprocessor.split_lines()
for l1 in split_lines:
self.preprocessor.set_line(l1)
self.preprocessor.strip()
self.preprocessor.tidy()
self.preprocessor.parse_state()
self.preprocessor.find_vars()
fractionized_lines = self.preprocessor.fractionize()
for l2 in fractionized_lines:
self.buffer.append(l2)
self.buffer_size += 1
self.preprocessor.done()
def _load_lines_into_buffer(self, string):
lines = string.split("\n")
for line in lines:
self._load_line_into_buffer(line)
self._callback("on_bufsize_change", self.buffer_size)
self._callback("on_vars_change", self.preprocessor.vars)
def is_connected(self):
if self.connected != True:
#self.logger.info("{}: Not yet connected".format(self.name))
pass
return self.connected
def _onboot_init(self):
# called after boot. Mimics Grbl's initial state after boot.
del self._rx_buffer_fill[:]
del self._rx_buffer_backlog[:]
del self._rx_buffer_backlog_line_number[:]
self._set_streaming_complete(True)
self._set_job_finished(True)
self._set_streaming_src_end_reached(True)
self._error = False
self._current_line = ""
self._current_line_sent = True
self._clear_queue()
self.is_standstill = False
self.preprocessor.reset()
self._callback("on_progress_percent", 0)
self._callback("on_rx_buffer_percent", 0)
def _clear_queue(self):
try:
junk = self._queue.get_nowait()
self.logger.debug("Discarding junk %s", junk)
except:
#self.logger.debug("Queue was empty")
pass
def _poll_state(self):
while self._poll_keep_alive:
self._counter += 1
if self.hash_state_requested:
self.get_hash_state()
elif self.gcode_parser_state_requested:
self.get_gcode_parser_state()
self.gcode_parser_state_requested = False
else:
self._get_state()
time.sleep(self.poll_interval)
self.logger.debug("{}: Polling has been stopped".format(self.name))
def _get_state(self):
self._iface.write("?")
def get_gcode_parser_state(self):
self._iface_write("$G\n")
def get_hash_state(self):
if self.cmode == "Hold":
self.hash_state_requested = False
self.logger.info("{}: $# command not supported in Hold mode.".format(self.name))
return
if self._hash_state_sent == False:
self._iface_write("$#\n")
self._hash_state_sent = True
def _set_streaming_src_end_reached(self, a):
self._streaming_src_end_reached = a
def _set_streaming_complete(self, a):
self.streaming_complete = a
def _set_job_finished(self, a):
self.job_finished = a
if a == True:
self._callback("on_job_completed")
def _default_callback(self, status, *args):
print("GERBIL DEFAULT CALLBACK", status, args)
|
ray_dispatcher.py
|
# -*- coding: utf-8 -*-
import os
import time
import random
import string
import signal
import multiprocessing
from datetime import datetime
import psutil
import ray
from aw_nas.utils.exception import expect, ConfigException
from aw_nas.trainer.async_trainer import BaseDispatcher
class KillSignal(ray.experimental.signal.Signal):
pass
@ray.remote
class Killer(object):
def send_kill(self):
ray.experimental.signal.send(KillSignal())
print("finished sending kill signals, "
"please wait for some seconds for all these tasks to exit")
class RayDispatcher(BaseDispatcher):
NAME = "ray"
def __init__(self, redis_addr=None):
super(RayDispatcher, self).__init__()
expect(redis_addr is not None, "Redis address must be specified", ConfigException)
self.redis_addr = redis_addr
ray.init(redis_address=redis_addr)
self.killer = Killer.remote() # create the killer actor
self.evaluator = None
self.evaluate_func = None
self.ckpt_dir = None
self.executing_ids = set()
def get_evaluate_func(self):
@ray.remote(num_gpus=1)
def evaluate_func(rollout, killer):
# TODO: use subprocess to run?
gpus = ray.get_gpu_ids()
gpu_str = ",".join(map(str, gpus))
            # Unset CUDA_VISIBLE_DEVICES; select the device by ordinal consistently instead of relying on this env var
os.environ.pop("CUDA_VISIBLE_DEVICES", None)
self.evaluator.set_device(gpu_str)
if self.ckpt_dir:
random_salt = "".join([random.choice(string.ascii_letters + string.digits)
for n in range(16)])
ckpt_subdir = "{time}-{gpu}-{salt}".format(
time=datetime.now().strftime("%Y-%m-%d_%H-%M-%S"),
gpu=gpu_str, salt=random_salt)
# handle output
rollout.set_ckpt_path(os.path.join(self.ckpt_dir, ckpt_subdir))
return_dct = multiprocessing.Manager().dict()
def _evaluate_wrapper(rollout, return_dct):
rollout = self.evaluator.evaluate_rollouts([rollout], is_training=True)[0]
return_dct["_"] = rollout
proc = multiprocessing.Process(target=_evaluate_wrapper, args=(rollout, return_dct))
proc.start()
# wait for proc finish or killed
while 1:
time.sleep(10)
if proc.is_alive():
sigs = ray.experimental.signal.receive([killer], timeout=1)
if sigs:
print("ray task: receive kill signal from killer, "
"kill the working processes")
process = psutil.Process(proc.pid)
for c_proc in process.children(recursive=True):
c_proc.kill()
process.kill()
exit_status = 1
break
else:
exit_status = proc.exitcode
break
if exit_status != 0:
return None
return return_dct["_"]
return evaluate_func
def init(self, evaluator, ckpt_dir):
# self.evaluate_func = ray.remote(evaluator.evalute, num_gpus=1)
self.evaluator = evaluator
self.ckpt_dir = ckpt_dir
self.evaluate_func = self.get_evaluate_func()
self._register_signal_handler()
def stop(self):
print("Stop ray dispatcher...")
self.killer.send_kill.remote()
def shutdown(self):
print("Shutdown ray dispatcher...")
self.killer.send_kill.remote()
def start_eval_rollout(self, rollout):
res_id = self.evaluate_func.remote(rollout, self.killer)
self.executing_ids.add(res_id)
def get_finished_rollouts(self, timeout=None):
ready_ids, _ = ray.wait(list(self.executing_ids), timeout=timeout)
f_rollouts = ray.get(ready_ids)
self.executing_ids -= set(ready_ids)
if None in f_rollouts:
# sigint
print("Found None in the finished rollout! Interrupted!")
raise KeyboardInterrupt()
return f_rollouts
@property
def parallelism(self):
        # FIXME: temporary. This returns the number of available GPUs on the current node, which is not correct in general
return ray.worker._global_node.get_resource_spec().num_gpus
def _register_signal_handler(self):
pass
# ori_sigint_handler = signal.getsignal(signal.SIGINT)
# def signal_handler(sig, frame):
# print("Receive sigint, sending kill signal...")
# self.killer.send_kill.remote()
# signal.signal(signal.SIGINT, signal_handler)
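# A minimal usage sketch (not part of the original module). `evaluator` and
# `rollouts` are hypothetical objects following the aw_nas interfaces assumed
# above (evaluator.evaluate_rollouts / evaluator.set_device, rollout.set_ckpt_path):
#
#     dispatcher = RayDispatcher(redis_addr="127.0.0.1:6379")
#     dispatcher.init(evaluator, ckpt_dir="/tmp/ckpts")
#     for rollout in rollouts:
#         dispatcher.start_eval_rollout(rollout)
#     finished = dispatcher.get_finished_rollouts(timeout=60)
#     dispatcher.stop()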
|
get_name_from_camera_feed.py
|
import face_recognition
import numpy as np
import cv2, queue, threading, time
import requests, os, re
# bufferless VideoCapture
class VideoCapture:
def __init__(self, name):
self.cap = cv2.VideoCapture(name)
self.q = queue.Queue()
t = threading.Thread(target=self._reader)
t.daemon = True
t.start()
# read frames as soon as they are available, keeping only most recent one
def _reader(self):
while True:
ret, frame = self.cap.read()
if not ret:
break
if not self.q.empty():
try:
self.q.get_nowait() # discard previous (unprocessed) frame
except queue.Empty:
pass
self.q.put(frame)
def read(self):
return self.q.get()
# Select the webcam of the computer
# video_capture = VideoCapture('https://stream-eu1-charlie.dropcam.com:443/nexus_aac/b85a6ec812c045cd921f4164e8e7ecc0/playlist.m3u8?public=GqJifk6U25')
video_capture = VideoCapture(0)
# video_capture.set(5,1)
# * -------------------- USERS -------------------- *
known_face_encodings = []
known_face_names = []
known_faces_filenames = []
for (dirpath, dirnames, filenames) in os.walk('assets/img/users/'):
known_faces_filenames.extend(filenames)
break
for filename in known_faces_filenames:
face = face_recognition.load_image_file('assets/img/users/' + filename)
known_face_names.append(re.sub("[0-9]",'', filename[:-4]))
known_face_encodings.append(face_recognition.face_encodings(face)[0])
face_locations = []
face_encodings = []
face_names = []
process_this_frame = True
while True:
# for i in range(5):
# video_capture.grab()
# Grab a single frame of video
frame = video_capture.read()
# # Resize frame of video to 1/4 size for faster face recognition processing
# small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
# print(sys.exc_info())
# # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
# frame = small_frame[:, :, ::-1]
# Process every frame only one time
if process_this_frame:
# Find all the faces and face encodings in the current frame of video
face_locations = face_recognition.face_locations(frame)
face_encodings = face_recognition.face_encodings(frame, face_locations)
# Initialize an array for the name of the detected users
face_names = []
        # * ---------- Initialize JSON to EXPORT --------- *
json_to_export = {}
for face_encoding in face_encodings:
# See if the face is a match for the known face(s)
matches = face_recognition.compare_faces(known_face_encodings, face_encoding)
name = "Unknown"
# # If a match was found in known_face_encodings, just use the first one.
# if True in matches:
# first_match_index = matches.index(True)
# name = known_face_names[first_match_index]
# Or instead, use the known face with the smallest distance to the new face
face_distances = face_recognition.face_distance(known_face_encodings, face_encoding)
best_match_index = np.argmin(face_distances)
if matches[best_match_index]:
name = known_face_names[best_match_index]
# * ---------- SAVE data to send to the API -------- *
json_to_export['name'] = name
json_to_export['hour'] = f'{time.localtime().tm_hour}:{time.localtime().tm_min}'
json_to_export['date'] = f'{time.localtime().tm_year}-{time.localtime().tm_mon}-{time.localtime().tm_mday}'
json_to_export['picture_array'] = frame.tolist()
# * ---------- SEND data to API --------- *
r = requests.post(url='http://127.0.0.1:5000/receive_data', json=json_to_export)
print("Status: ", r.status_code)
face_names.append(name)
process_this_frame = not process_this_frame
# Display the results
for (top, right, bottom, left), name in zip(face_locations, face_names):
        # Scale back up face locations if the frame had been resized to 1/4 size (the resize above is currently commented out)
# top *= 4
# right *= 4
# bottom *= 4
# left *= 4
# Draw a box around the face
cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)
# Draw a label with a name below the face
# cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (0, 0, 255), cv2.FILLED)
font = cv2.FONT_HERSHEY_DUPLEX
cv2.putText(frame, name, (left + 6, bottom - 6), font, 1.0, (255, 255, 255), 1)
# Display the resulting image
cv2.imshow('Video', frame)
# Hit 'q' on the keyboard to quit!
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# Release handle to the webcam
video_capture.release()
cv2.destroyAllWindows()
|
interface_info.py
|
# Copyright 2019 Anjali Thontakudi
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import grpc
from multiprocessing import Process
import structlog
import openolt_pb2_grpc
import openolt_pb2
log = structlog.get_logger()
class InterfaceInfo(object):
def __init__(self, host_and_port, intf_id):
        super(InterfaceInfo, self).__init__()
self.host_and_port = host_and_port
# self.intf_id = (int)(intf_id)
try:
self.process = Process(target=self.run, args=((int)(intf_id),))
except Exception as err:
log.exception("Failed to initialize interface", e=err)
def start(self):
try:
self.process.start()
except Exception as err:
log.exception("Failed to start interface", e=err)
try:
self.process.join()
except KeyboardInterrupt:
self.process.terminate()
def run(self, intf_id):
channel = grpc.insecure_channel(self.host_and_port)
self.stub = openolt_pb2_grpc.OpenoltStub(channel)
self.get_status(intf_id)
def get_status(self, if_id):
try:
status = self.stub.GetPonIf(openolt_pb2.Interface(intf_id=if_id))
print('PON interface status is ' + status.oper_state)
except Exception as err:
log.exception("Failed to retrieve interface status", e=err)
if __name__ == '__main__':
if len(sys.argv) < 3:
        print('Usage: interface_info.py <host_and_port> <intf_id>')
sys.exit(1)
port_and_host = sys.argv[1]
if_id = sys.argv[2]
ifinfo = InterfaceInfo(port_and_host, if_id)
ifinfo.start()
|
joystick.py
|
import os
import threading
from picraftzero.utils import arduino_map
from picraftzero.log import logger
from picraftzero.zero import Button
from picraftzero.config import get_config
config = get_config()
USE_EVENT = True
USE_PYGAME = True
USE_BLUEDOT = config.getboolean('joystick', 'use_bluedot', fallback=False)
HAVE_EVENT = False
HAVE_PYGAME = False
HAVE_BLUEDOT = False
# ---------------------------------------------------------------------------------------------------------
# TODO: other joypads
# TODO: test the PyGame impl. on Windows
# TODO: shared common behaviour refactor
# ---------------------------------------------------------------------------------------------------------
# Look for Event support first (Linux) then PyGame (Linux, Windows, Mac, other)
try:
from evdev import InputDevice, categorize, AbsEvent, KeyEvent, list_devices
from evdev.ecodes import KEY, SYN, REL, ABS
HAVE_EVENT = True
except ImportError:
logger.info("Optional EventDev library not found")
HAVE_EVENT = False
try:
import pygame
from pygame.locals import *
HAVE_PYGAME = True
except ImportError:
logger.info("Optional PyGame library not found")
HAVE_PYGAME = False
try:
from bluedot import BlueDot
HAVE_BLUEDOT = True
except ImportError:
logger.info("Optional BlueDot library not found")
HAVE_BLUEDOT = False
# ---------------------------------------------------------------------------------------------------------
if HAVE_BLUEDOT and USE_BLUEDOT:
logger.info("Using BlueDot implementation")
logger.warning("Only 1 Joystick axis will be available")
class InputController:
def __init__(self, controller_id=0):
self._listener = None
if 0 == controller_id:
self.bd = BlueDot()
self.bd.when_moved = self._when_moved
self.bd.when_released = self._when_released
else:
logger.error("Only 1 joystick (id==0) is supported when using BlueDot")
self._x = 0
self._y = 0
def stop(self):
pass
def get_value(self, name):
if name == 'rx':
value = self._x
elif name == 'ry':
value = self._y
else:
value = 0
return value
def _when_moved(self, position):
self._x = int(position.x * 100)
self._y = int(position.y * 100)
if self._listener:
self._listener(self)
def _when_released(self, position):
self._x = 0
self._y = 0
if self._listener:
self._listener(self)
def add_listener(self, func):
self._listener = func
elif HAVE_EVENT and USE_EVENT:
logger.info("Using EventDev implementation")
ROCKCANDY_AXIS_DEADZONE = 5
ROCKCANDY_MAPPING = {
'lx': {'event_name': 'ABS_X', 'mapfunc': lambda x: arduino_map(x, 0, 255, -100, 100) if abs(x-128) > ROCKCANDY_AXIS_DEADZONE else 0},
'ly': {'event_name': 'ABS_Y', 'mapfunc': lambda x: arduino_map(x, 0, 255, 100, -100) if abs(x-128) > ROCKCANDY_AXIS_DEADZONE else 0},
'rx': {'event_name': 'ABS_Z', 'mapfunc': lambda x: arduino_map(x, 0, 255, -100, 100) if abs(x-128) > ROCKCANDY_AXIS_DEADZONE else 0},
'ry': {'event_name': 'ABS_RZ','mapfunc': lambda x: arduino_map(x, 0, 255, 100, -100) if abs(x-128) > ROCKCANDY_AXIS_DEADZONE else 0},
'BTN_A': 0,
'BTN_B': 1,
'BTN_C': 2,
'BTN_X': 3,
}
AFTERGLOW_MAPPING = ROCKCANDY_MAPPING
PIHUT_HJDX_MAPPING = ROCKCANDY_MAPPING
XB360_AXIS_DEADZONE = 500
XB360_MAPPING = {
'lx': {'event_name': 'ABS_X', 'mapfunc': lambda x: arduino_map(x, -32768, 32767, -100, 100) if abs(x) > XB360_AXIS_DEADZONE else 0},
'ly': {'event_name': 'ABS_Y', 'mapfunc': lambda x: arduino_map(x, -32768, 32767, 100, -100) if abs(x) > XB360_AXIS_DEADZONE else 0},
'rx': {'event_name': 'ABS_RX','mapfunc': lambda x: arduino_map(x, -32768, 32767, -100, 100) if abs(x) > XB360_AXIS_DEADZONE else 0},
'ry': {'event_name': 'ABS_RY','mapfunc': lambda x: arduino_map(x, -32768, 32767, 100, -100) if abs(x) > XB360_AXIS_DEADZONE else 0},
}
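    # Note on the mapping lambdas above: arduino_map (from picraftzero.utils) is
    # assumed here to behave like Arduino's map(), i.e. a linear rescale
    #     arduino_map(x, in_min, in_max, out_min, out_max)
    #         == (x - in_min) * (out_max - out_min) // (in_max - in_min) + out_min
    # so for the Rock Candy pad arduino_map(0, 0, 255, -100, 100) -> -100,
    # arduino_map(255, 0, 255, -100, 100) -> 100, and a centred stick (raw value
    # 128, inside the +/-5 deadzone) is forced to 0 by the deadzone check.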
VENDOR_PRODUCT_MAPPINGS = {
"3695:296": ROCKCANDY_MAPPING,
"9571:1318": PIHUT_HJDX_MAPPING,
"1118:654": XB360_MAPPING, # Wired XBox360
"1118:673": XB360_MAPPING, # Wireless XBox360
"3695:532": AFTERGLOW_MAPPING,
"DEFAULT": XB360_MAPPING
}
class InputController:
def __init__(self, joystick_id=0):
self.controller_state = {}
self.keep_running = True
self.mapping = {}
self._listener = None
devices = list_devices()
if not len(devices) > 0:
logger.info("No input devices found")
return
logger.info("Found input devices: {}".format(devices))
device_path = devices[0] # Just joysticks on the first controller, for now
logger.info("Using device: {}".format(device_path))
self.input_device = InputDevice(device_path)
self.thread = threading.Thread(target=self._start, name="InputController"+str(joystick_id))
self.thread.daemon = True
self.thread.start()
vpid = "{}:{}".format(self.input_device.info.vendor, self.input_device.info.product)
logger.info("Device USB VPID: {}".format(vpid))
if vpid in VENDOR_PRODUCT_MAPPINGS:
self.mapping = VENDOR_PRODUCT_MAPPINGS[vpid]
else:
logger.warning("Input device USB VPID not found for '{}', using default".format(vpid))
self.mapping = VENDOR_PRODUCT_MAPPINGS["DEFAULT"]
def _start(self):
for event in self.input_device.read_loop():
cat_event = categorize(event)
if isinstance(cat_event, AbsEvent):
#logger.debug("Event= {} Cat={}".format(event, cat_event))
axis_key = ABS[cat_event.event.code]
axis_val = event.value
# TODO: move to init
logger.debug("Joypad event: {} = {}".format(axis_key, axis_val))
if axis_key not in self.controller_state:
self.controller_state[axis_key] = 0
if self.controller_state[axis_key] != axis_val:
self.controller_state[axis_key] = axis_val
if self._listener:
self._listener(self)
elif isinstance(cat_event, KeyEvent):
logger.debug("KeyEvent, keycode={}, scancode={}, keystate={}".format(cat_event.keycode, cat_event.scancode, cat_event.keystate))
button_id = self._get_button_id(cat_event.keycode)
if button_id is not None:
Button(button_id).value = 1 if bool(cat_event.keystate) else 0
else:
logger.debug("{}, {}".format(event, cat_event))
if not self.keep_running:
break
def stop(self):
self.keep_running = False
def add_listener(self, func):
self._listener = func
def get_value(self, name):
value = 0
if name in self.mapping:
event_info = self.mapping[name]
event_name = event_info['event_name']
if event_name in self.controller_state:
mapfunc = event_info['mapfunc']
value = mapfunc(self.controller_state[event_name])
return value
def _get_button_id(self, keycodes):
if not isinstance(keycodes, list):
keycodes = [keycodes]
for keycode in keycodes:
if keycode in self.mapping:
return self.mapping[keycode]
return None
# ---------------------------------------------------------------------------------------------------------
elif HAVE_PYGAME and USE_PYGAME:
logger.info("Using PyGame implementation")
from time import sleep
from picraftzero.utils import mainthread_dispatch
ROCKCANDY_AXIS_DEADZONE = 0.05
ROCKCANDY_MAPPING = {
'lx': {'event_name': 'AXIS0', 'mapfunc': lambda x: int(x * 100) if abs(x) > ROCKCANDY_AXIS_DEADZONE else 0},
'ly': {'event_name': 'AXIS1', 'mapfunc': lambda x: int(x * -100) if abs(x) > ROCKCANDY_AXIS_DEADZONE else 0},
'rx': {'event_name': 'AXIS2', 'mapfunc': lambda x: int(x * 100) if abs(x) > ROCKCANDY_AXIS_DEADZONE else 0},
'ry': {'event_name': 'AXIS3', 'mapfunc': lambda x: int(x * -100) if abs(x) > ROCKCANDY_AXIS_DEADZONE else 0},
}
NIMBUS_AXIS_DEADZONE = 0.05
NIMBUS_MAPPING = {
'lx': {'event_name': 'AXIS0', 'mapfunc': lambda x: int(x * 100) if abs(x) > NIMBUS_AXIS_DEADZONE else 0},
'ly': {'event_name': 'AXIS1', 'mapfunc': lambda x: int(x * -100) if abs(x) > NIMBUS_AXIS_DEADZONE else 0},
'rx': {'event_name': 'AXIS2', 'mapfunc': lambda x: int(x * 100) if abs(x) > NIMBUS_AXIS_DEADZONE else 0},
'ry': {'event_name': 'AXIS3', 'mapfunc': lambda x: int(x * -100) if abs(x) > NIMBUS_AXIS_DEADZONE else 0},
}
PS3_AXIS_DEADZONE = 0.05
PS3_MAPPING = {
'lx': {'event_name': 'AXIS0', 'mapfunc': lambda x: int(x * 100) if abs(x) > PS3_AXIS_DEADZONE else 0},
'ly': {'event_name': 'AXIS1', 'mapfunc': lambda x: int(x * -100) if abs(x) > PS3_AXIS_DEADZONE else 0},
'rx': {'event_name': 'AXIS2', 'mapfunc': lambda x: int(x * 100) if abs(x) > PS3_AXIS_DEADZONE else 0},
'ry': {'event_name': 'AXIS3', 'mapfunc': lambda x: int(x * -100) if abs(x) > PS3_AXIS_DEADZONE else 0},
}
XB360_AXIS_DEADZONE = 0.05
XB360_MAPPING = {
'lx': {'event_name': 'AXIS0', 'mapfunc': lambda x: int(x * 100) if abs(x) > XB360_AXIS_DEADZONE else 0},
'ly': {'event_name': 'AXIS1', 'mapfunc': lambda x: int(x * -100) if abs(x) > XB360_AXIS_DEADZONE else 0},
'rx': {'event_name': 'AXIS2', 'mapfunc': lambda x: int(x * 100) if abs(x) > XB360_AXIS_DEADZONE else 0},
'ry': {'event_name': 'AXIS3', 'mapfunc': lambda x: int(x * -100) if abs(x) > XB360_AXIS_DEADZONE else 0},
}
JOYSTICK_NAME_MAPPINGS = {
"Rock Candy Wireless Gamepad for PS3": ROCKCANDY_MAPPING, # Mac
"Performance Designed Products Rock Candy Wireless Gamepad for PS3": ROCKCANDY_MAPPING, # Pi
"Nimbus": NIMBUS_MAPPING,
"PLAYSTATION(R)3 Controller": PS3_MAPPING,
"Wireless 360 Controller": XB360_MAPPING,
"Xbox 360 Wired Controller": XB360_MAPPING, # Mac *1
"Microsoft X-Box 360 pad": XB360_MAPPING, # Pi *1 *1 = same actual controller
"hongjingda HJD-X": ROCKCANDY_MAPPING # PiHut JoyPad
}
# Make an attempt to setup video in order to get the event sub-system up and running
    # TODO: This assumes always headless; users may not want this
def _setup_video():
"Ininitializes a new pygame screen using the framebuffer"
# Based on "Python GUI in Linux frame buffer"
# http://www.karoltomala.com/blog/?p=679
disp_no = os.getenv("DISPLAY")
# Check which frame buffer drivers are available
# Start with fbcon since directfb hangs with composite output
drivers = ['x11', 'fbcon', 'directfb', 'svgalib', 'Quartz']
found = False
for driver in drivers:
# Make sure that SDL_VIDEODRIVER is set
if not os.getenv('SDL_VIDEODRIVER'):
os.putenv('SDL_VIDEODRIVER', driver)
try:
pygame.display.init()
except pygame.error:
logger.error('Driver: {0} failed.'.format(driver))
continue
found = True
break
if not found:
logger.error('No suitable SDL video driver found to start the event subsystem, pygame joysticks may not work.')
try:
pygame.init()
_setup_video()
joystick_count = pygame.joystick.get_count()
joystick_names = []
for i in range(0, joystick_count):
joystick_names.append(pygame.joystick.Joystick(i).get_name())
logger.info("Joysticks count {} : {}".format(joystick_count, joystick_names))
if joystick_count > 0:
joystick_0 = pygame.joystick.Joystick(0)
joystick_0.init()
joystick_0_name = joystick_0.get_name()
except pygame.error as e:
logger.exception("PyGame error during joystick setup, {}".format(e))
class InputController:
def __init__(self, joystick_id=0):
self.keep_running = True
self.controller_state = {}
self._listener = None
if joystick_count < 1 or not (joystick_0_name in JOYSTICK_NAME_MAPPINGS):
return
self.joystick = joystick_0
self.mapping = JOYSTICK_NAME_MAPPINGS[joystick_0_name]
self.thread = threading.Thread(target=self._start, name="InputController"+str(joystick_id))
self.thread.daemon = True
self.thread.start()
def stop(self):
self.keep_running = False
def _start(self):
logger.info("Using Joystick : {}".format(self.joystick.get_name()))
while self.keep_running:
mainthread_dispatch(lambda: self._process_events(pygame.event.get()))
sleep(0.01)
def _process_events(self, events):
# TODO: to play nicely with other toys in the future this really should be fed from a global event loop
for e in events:
logger.debug("Joystick event: {}".format(e))
if e.type == JOYAXISMOTION:
for axis_num in range(0, self.joystick.get_numaxes()):
axis_key = 'AXIS'+str(axis_num)
axis_val = self.joystick.get_axis(axis_num)
if axis_key not in self.controller_state or self.controller_state[axis_key] != axis_val:
self.controller_state[axis_key] = axis_val
if self._listener:
self._listener(self)
elif e.type == JOYBUTTONDOWN:
logger.debug(e.button)
Button(e.button).value = 1
elif e.type == JOYBUTTONUP:
logger.debug(e.button)
Button(e.button).value = 0
else:
logger.debug(e)
def add_listener(self, func):
self._listener = func
def get_value(self, name):
value = 0
if name in self.mapping:
event_info = self.mapping[name]
event_name = event_info['event_name']
if event_name in self.controller_state:
mapfunc = event_info['mapfunc']
value = mapfunc(self.controller_state[event_name])
return value
# ---------------------------------------------------------------------------------------------------------
else: #Stub
logger.warning("Failing back to stub implementation")
class InputController:
def __init__(self, controller_id=0):
pass
def stop(self):
pass
def get_value(self, name):
return 0
def add_listener(self, func):
pass
|
generate_tfrecord.py
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Original code: https://github.com/tensorflow/models/blob/master/inception/inception/data/build_image_data.py
Modified by Victor Campos
Converts image data to TFRecords file format with Example protos.
The image data set is given by a text file with the following structure:
path1 label1_anp label1_noun label1_adj
path2 label2_anp label2_noun label2_adj
...
pathN labelN_anp labelN_noun labelN_adj
This TensorFlow script converts the training and evaluation data into
a sharded data set consisting of TFRecord files
output_directory/name-00000-of-N_shards
output_directory/name-00001-of-N_shards
...
output_directory/name-N_shards-1-of-N_shards
Each record within the TFRecord file is a serialized Example proto. The
Example proto contains the following fields:
image/encoded: string containing JPEG encoded image in RGB colorspace
image/height: integer, image height in pixels
image/width: integer, image width in pixels
image/colorspace: string, specifying the colorspace, always 'RGB'
image/channels: integer, specifying the number of channels, always 3
image/format: string, specifying the format, always 'JPEG'
image/filename: string containing the basename of the image file
e.g. 'n01440764_10026.JPEG' or 'ILSVRC2012_val_00000293.JPEG'
image/class/label: list of integers specifying the indices in a classification layer.
  The labels range over [0, num_labels-1] and are given as [anp, noun, adjective].
image/class/text: string specifying the human-readable version of the labels
e.g. 'dog'
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import random
import sys
import threading
import numpy as np
import tensorflow as tf
tf.app.flags.DEFINE_string('images_directory', None, 'Image data directory')
tf.app.flags.DEFINE_string('input_file', None, 'Image data directory')
tf.app.flags.DEFINE_string('output_directory', None, 'Output data directory')
tf.app.flags.DEFINE_string('name', None, 'Name for the subset')
tf.app.flags.DEFINE_string('anp_list', None, 'File with the ANP labels')
tf.app.flags.DEFINE_string('noun_list', None, 'File with the Noun labels')
tf.app.flags.DEFINE_string('adj_list', None, 'File with the Adjective labels')
tf.app.flags.DEFINE_integer('num_shards', 2, 'Number of shards in training TFRecord files.')
tf.app.flags.DEFINE_integer('num_threads', 2, 'Number of threads to preprocess the images.')
tf.app.flags.DEFINE_integer('anp_offset', 0, 'Label offset for ANPs.')
tf.app.flags.DEFINE_integer('noun_offset', 0, 'Label offset for Nouns.')
tf.app.flags.DEFINE_integer('adj_offset', 0, 'Label offset for Adjectives.')
tf.app.flags.DEFINE_boolean('anp_only', False, 'Encode only ANPs, setting Noun and Adj labels to -1.')
FLAGS = tf.app.flags.FLAGS
def _int64_feature(value):
"""Wrapper for inserting int64 features into Example proto."""
if not isinstance(value, list):
value = [value]
return tf.train.Feature(int64_list=tf.train.Int64List(value=value))
def _bytes_feature(value):
"""Wrapper for inserting bytes features into Example proto."""
if isinstance(value, list):
value = value[0]
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _convert_to_example(filename, image_buffer, label, text, height, width):
"""Build an Example proto for an example.
Args:
filename: string, path to an image file, e.g., '/path/to/example.JPG'
image_buffer: string, JPEG encoding of RGB image
label: integer, identifier for the ground truth for the network
text: string, unique human-readable, e.g. 'dog'
height: integer, image height in pixels
width: integer, image width in pixels
Returns:
Example proto
"""
colorspace = 'RGB'
channels = 3
image_format = 'JPEG'
example = tf.train.Example(features=tf.train.Features(feature={
'image/height': _int64_feature(height),
'image/width': _int64_feature(width),
'image/colorspace': _bytes_feature(colorspace),
'image/channels': _int64_feature(channels),
'image/class/label': _int64_feature(label),
'image/class/text': _bytes_feature(text),
'image/format': _bytes_feature(image_format),
'image/filename': _bytes_feature(os.path.basename(filename)),
'image/encoded': _bytes_feature(image_buffer)}))
return example
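# Illustrative sketch (not used by this script, assuming the TF 1.x parsing API):
# records written by _convert_to_example can be decoded again as follows. The
# label is stored as a length-3 int64 list ([anp, noun, adjective]).
def _example_parse_fn(serialized_example):
    """Parse one serialized Example back into (image, label) tensors (sketch only)."""
    features = tf.parse_single_example(
        serialized_example,
        features={
            'image/encoded': tf.FixedLenFeature([], tf.string),
            'image/class/label': tf.FixedLenFeature([3], tf.int64),
        })
    image = tf.image.decode_jpeg(features['image/encoded'], channels=3)
    return image, features['image/class/label']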
class ImageCoder(object):
"""Helper class that provides TensorFlow image coding utilities."""
def __init__(self):
# Create a single Session to run all image coding calls.
self._sess = tf.Session()
# Initializes function that converts PNG to JPEG data.
self._png_data = tf.placeholder(dtype=tf.string)
image = tf.image.decode_png(self._png_data, channels=3)
self._png_to_jpeg = tf.image.encode_jpeg(image, format='rgb', quality=100)
# Initializes function that decodes RGB JPEG data.
self._decode_jpeg_data = tf.placeholder(dtype=tf.string)
self._decode_jpeg = tf.image.decode_jpeg(self._decode_jpeg_data, channels=3)
def png_to_jpeg(self, image_data):
return self._sess.run(self._png_to_jpeg,
feed_dict={self._png_data: image_data})
def decode_jpeg(self, image_data):
image = self._sess.run(self._decode_jpeg,
feed_dict={self._decode_jpeg_data: image_data})
assert len(image.shape) == 3
assert image.shape[2] == 3
return image
def _is_png(filename):
"""Determine if a file contains a PNG format image.
Args:
filename: string, path of the image file.
Returns:
boolean indicating if the image is a PNG.
"""
return '.png' in filename
def _process_image(filename, coder):
"""Process a single image file.
Args:
filename: string, path to an image file e.g., '/path/to/example.JPG'.
coder: instance of ImageCoder to provide TensorFlow image coding utils.
Returns:
image_buffer: string, JPEG encoding of RGB image.
height: integer, image height in pixels.
width: integer, image width in pixels.
"""
# Read the image file.
    image_data = tf.gfile.FastGFile(filename, 'rb').read()
# Convert any PNG to JPEG's for consistency.
if _is_png(filename):
print('Converting PNG to JPEG for %s' % filename)
image_data = coder.png_to_jpeg(image_data)
# Decode the RGB JPEG.
image = coder.decode_jpeg(image_data)
# Check that image converted to RGB
assert len(image.shape) == 3
height = image.shape[0]
width = image.shape[1]
assert image.shape[2] == 3
return image_data, height, width
def _process_image_files_batch(coder, thread_index, ranges, name, filenames,
texts, labels, num_shards):
"""Processes and saves list of images as TFRecord in 1 thread.
Args:
coder: instance of ImageCoder to provide TensorFlow image coding utils.
    thread_index: integer, index of the unique batch to run, within [0, len(ranges)).
ranges: list of pairs of integers specifying ranges of each batches to
analyze in parallel.
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
# Each thread produces N shards where N = int(num_shards / num_threads).
# For instance, if num_shards = 128, and the num_threads = 2, then the first
# thread would produce shards [0, 64).
num_threads = len(ranges)
assert not num_shards % num_threads
num_shards_per_batch = int(num_shards / num_threads)
shard_ranges = np.linspace(ranges[thread_index][0],
ranges[thread_index][1],
num_shards_per_batch + 1).astype(int)
num_files_in_thread = ranges[thread_index][1] - ranges[thread_index][0]
counter = 0
for s in xrange(num_shards_per_batch):
# Generate a sharded version of the file name, e.g. 'train-00002-of-00010'
shard = thread_index * num_shards_per_batch + s
output_filename = '%s-%.5d-of-%.5d' % (name, shard, num_shards)
output_file = os.path.join(FLAGS.output_directory, output_filename)
writer = tf.python_io.TFRecordWriter(output_file)
shard_counter = 0
files_in_shard = np.arange(shard_ranges[s], shard_ranges[s + 1], dtype=int)
for i in files_in_shard:
filename = filenames[i]
label = labels[i]
text = texts[i]
image_buffer, height, width = _process_image(filename, coder)
example = _convert_to_example(filename, image_buffer, label,
text, height, width)
writer.write(example.SerializeToString())
shard_counter += 1
counter += 1
if not counter % 1000:
print('%s [thread %d]: Processed %d of %d images in thread batch.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
print('%s [thread %d]: Wrote %d images to %s' %
(datetime.now(), thread_index, shard_counter, output_file))
sys.stdout.flush()
shard_counter = 0
print('%s [thread %d]: Wrote %d images to %d shards.' %
(datetime.now(), thread_index, counter, num_files_in_thread))
sys.stdout.flush()
def _process_image_files(name, filenames, texts, labels, num_shards):
"""Process and save list of images as TFRecord of Example protos.
Args:
name: string, unique identifier specifying the data set
filenames: list of strings; each string is a path to an image file
texts: list of strings; each string is human readable, e.g. 'dog'
labels: list of integer; each integer identifies the ground truth
num_shards: integer number of shards for this data set.
"""
assert len(filenames) == len(texts)
assert len(filenames) == len(labels)
# Break all images into batches with a [ranges[i][0], ranges[i][1]].
spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
ranges = []
threads = []
for i in xrange(len(spacing) - 1):
ranges.append([spacing[i], spacing[i+1]])
# Launch a thread for each batch.
print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
sys.stdout.flush()
# Create a mechanism for monitoring when all threads are finished.
coord = tf.train.Coordinator()
# Create a generic TensorFlow-based utility for converting all image codings.
coder = ImageCoder()
threads = []
for thread_index in xrange(len(ranges)):
args = (coder, thread_index, ranges, name, filenames,
texts, labels, num_shards)
t = threading.Thread(target=_process_image_files_batch, args=args)
t.start()
threads.append(t)
# Wait for all the threads to terminate.
coord.join(threads)
print('%s: Finished writing all %d images in data set.' %
(datetime.now(), len(filenames)))
sys.stdout.flush()
def _find_image_files(input_file, anp_list_path, noun_list_path, adj_list_path, dataset_dir):
"""Build a list of all images files and labels in the data set.
Args:
input_file: path to the file listing (path, anp_label, noun_label, adj_label) tuples
anp_list_path: path to the file with the class id -> class name mapping for ANPs
noun_list_path: path to the file with the class id -> class name mapping for Nouns
adj_list_path: path to the file with the class id -> class name mapping for Adjectives
Returns:
filenames: list of strings; each string is a path to an image file.
texts: list of string tuples; each string is the tuple of classes, e.g. ('happy dog', 'happy', 'dog')
labels: list of integer tuples; each tuple identifies the ground truth: (anp_id, noun_id, adj_id)
"""
lines = [line.strip() for line in open(input_file, 'r')]
anp_list = [line.strip() for line in open(anp_list_path, 'r')]
if not FLAGS.anp_only:
noun_list = [line.strip() for line in open(noun_list_path, 'r')]
adj_list = [line.strip() for line in open(adj_list_path, 'r')]
filenames = list()
texts = list()
labels = list()
for line in lines:
if FLAGS.anp_only:
img, anp_id = line.split()
else:
img, anp_id, noun_id, adj_id = line.split()
filenames.append(os.path.join(dataset_dir, img))
if FLAGS.anp_only:
labels.append([int(anp_id) + FLAGS.anp_offset, -1, -1])
texts.append([anp_list[int(anp_id)], 'no_noun_label', 'no_adjective_label'])
else:
labels.append([int(anp_id)+FLAGS.anp_offset, int(noun_id)+FLAGS.noun_offset, int(adj_id)+FLAGS.adj_offset])
texts.append([anp_list[int(anp_id)], noun_list[int(noun_id)], adj_list[int(adj_id)]])
# Shuffle the ordering of all image files in order to guarantee
# random ordering of the images with respect to label in the
# saved TFRecord files. Make the randomization repeatable.
shuffled_index = range(len(filenames))
random.seed(12345)
random.shuffle(shuffled_index)
filenames = [filenames[i] for i in shuffled_index]
texts = [texts[i] for i in shuffled_index]
labels = [labels[i] for i in shuffled_index]
print('Found %d JPEG files.' % len(filenames))
return filenames, texts, labels
def _process_dataset(name, input_file, dataset_dir, num_shards, anp_list_path, noun_list_path, adj_list_path):
"""Process a complete data set and save it as a TFRecord.
Args:
name: string, unique identifier specifying the data set.
input_file: path to the file listing (path, anp_label, noun_label, adj_label) tuples
num_shards: integer number of shards for this data set.
anp_list_path: string, path to the labels file.
noun_list_path: string, path to the labels file.
adj_list_path: string, path to the labels file.
"""
filenames, texts, labels = _find_image_files(input_file, anp_list_path, noun_list_path, adj_list_path, dataset_dir)
_process_image_files(name, filenames, texts, labels, num_shards)
def main(unused_argv):
assert not FLAGS.num_shards % FLAGS.num_threads, (
        'Please make the FLAGS.num_threads commensurate with FLAGS.num_shards')
print('Saving results to %s' % FLAGS.output_directory)
# Run it!
_process_dataset(FLAGS.name, FLAGS.input_file, FLAGS.images_directory, FLAGS.num_shards,
FLAGS.anp_list, FLAGS.noun_list, FLAGS.adj_list)
if __name__ == '__main__':
tf.app.run()
|
main.py
|
from time import sleep,asctime,strftime
from BeautifulSoup import BeautifulSoup
import threading
from threading import Thread
import Queue
from scapy.all import *
import logging
from plugins import *
import sys
import netifaces
class CoreMitm(object):
''' core mitmkin main thread'''
def __init__(self,options):
self.options = options
self.interface = self.options.interface
self.filter = self.options.filter
self.stopped = False
def run(self):
self.main()
def check_interface(self, iface):
if iface in netifaces.interfaces():
return True
print('[-] [{}] interface is not found...'.format(iface))
return False
def sniffer(self, q):
while not self.stopped:
try:
sniff(iface=self.interface,filter=self.filter,
prn =lambda x : q.put(x), store=0)
except Exception:
pass
if self.stopped:
break
def main(self):
self.plugins = {}
if not self.check_interface(self.interface):
return
self.plugin_classes = plugin.PSniffer.__subclasses__()
for p in self.plugin_classes:
self.plugins[p._name] = p()
print('[*] plugin::{0:17} status:On'.format(p._name))
self.plugins['Hexdump'].getInstance()._activated = True
print('\n')
q = Queue.Queue()
sniff = Thread(target =self.sniffer, args = (q,))
sniff.daemon = True
sniff.start()
while (not self.stopped):
try:
pkt = q.get(timeout = 1)
self.snifferParser(pkt)
for Active in self.plugins.keys():
if self.plugins[Active].getInstance()._activated:
self.plugins[Active].filterPackets(pkt)
except Queue.Empty:
pass
def snifferParser(self,pkt):
try:
if pkt.haslayer(Ether) and pkt.haslayer(Raw) and not pkt.haslayer(IP) and not pkt.haslayer(IPv6):
return
self.dport = pkt[TCP].dport
self.sport = pkt[TCP].sport
if pkt.haslayer(TCP) and pkt.haslayer(Raw) and pkt.haslayer(IP):
self.src_ip_port = str(pkt[IP].src)+':'+str(self.sport)
self.dst_ip_port = str(pkt[IP].dst)+':'+str(self.dport)
if pkt.haslayer(Raw):
self.load = pkt[Raw].load
if self.load.startswith('GET'):
self.get_http_GET(self.src_ip_port,self.dst_ip_port,self.load)
self.searchBingGET(self.load.split('\n', 1)[0].split('&')[0])
elif self.load.startswith('POST'):
header,url = self.get_http_POST(self.load)
self.getCredentials_POST(pkt.getlayer(Raw).load,url,header)
except:
pass
def searchBingGET(self,search):
if 'search?q' in search :
searched = search.split('search?q=',1)[1]
searched = searched.replace('+',' ')
print 'Search::BING { %s }'%(searched)
def getCredentials_POST(self,payload,url,header):
user_regex = '([Ee]mail|%5B[Ee]mail%5D|[Uu]ser|[Uu]sername|' \
'[Nn]ame|[Ll]ogin|[Ll]og|[Ll]ogin[Ii][Dd])=([^&|;]*)'
pw_regex = '([Pp]assword|[Pp]ass|[Pp]asswd|[Pp]wd|[Pp][Ss][Ww]|' \
'[Pp]asswrd|[Pp]assw|%5B[Pp]assword%5D)=([^&|;]*)'
username = re.findall(user_regex, payload)
password = re.findall(pw_regex, payload)
if not username ==[] and not password == []:
if url != None:
print('Request::POST {} '.format(url))
print('[-] Username: {}'.format(username[0][1]))
print('[-] Password: {}'.format(password[0][1]))
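    # Example with hypothetical data: for a POST body such as
    #     email=alice%40example.com&password=hunter2
    # the regexes above yield [('email', 'alice%40example.com')] and
    # [('password', 'hunter2')], which getCredentials_POST prints together with
    # the Referer URL extracted by get_http_POST.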
def get_http_POST(self,load):
dict_head = {}
try:
headers, body = load.split("\r\n\r\n", 1)
header_lines = headers.split('\r\n')
for item in header_lines:
try:
dict_head[item.split()[0]] = item.split()[1]
except Exception:
pass
if 'Referer:' in dict_head.keys():
return dict_head ,dict_head['Referer:']
except ValueError:
return None,None
return dict_head, None
def get_http_GET(self,src,dst,load):
if 'Referer:' in load:
print ('[{} > {}] GET {}'.format(src.split(':')[0],dst.split(':')[0],
load.split('Referer: ')[1].split('\n',1)[0]))
def stop(self):
self.stopped = True
        print 'Stop Sniffer::Core'
|
install_models.py
|
# type: ignore
import os
import shutil
import threading
from pathlib import Path
from typing import Optional
import requests
from pydeepspeech.paths import PATH_MODELS
# AI model used for the application
_VERSION = "v0.9.3"
URL_PBMM = "https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.pbmm"
URL_SCORER = "https://github.com/mozilla/DeepSpeech/releases/download/v0.9.3/deepspeech-0.9.3-models.scorer"
MODEL_DIR = os.path.join(PATH_MODELS, _VERSION)
# Marks the model created.
IS_FINISHED_STAMP = os.path.join(MODEL_DIR, "is_finished")
def download_file(url, outfile) -> None:
if os.path.isfile(url):
# Actually it's a file, so we can just copy it.
shutil.copyfile(url, outfile)
return
# NOTE the stream=True parameter below
try:
tmp = f"{outfile}.tmp"
if os.path.exists(tmp):
os.remove(tmp)
with requests.get(url, stream=True) as r:
r.raise_for_status()
with open(tmp, "wb") as f:
for chunk in r.iter_content(chunk_size=8192):
# If you have chunk encoded response uncomment if
# and set chunk_size parameter to None.
# if chunk:
f.write(chunk)
os.rename(tmp, outfile)
except KeyboardInterrupt:
print(f"Aborted download of {url}")
return
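# A minimal usage sketch (hypothetical destination paths): download_file streams a
# remote URL to a .tmp file and renames it on success, but it also accepts a local
# file path as `url`, in which case the file is simply copied:
#
#     download_file(URL_PBMM, os.path.join(MODEL_DIR, "deepspeech-0.9.3-models.pbmm"))
#     download_file("/tmp/local.scorer", "/tmp/copy.scorer")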
def url_to_local_name(url: str) -> str:
return os.path.join(MODEL_DIR, url.split("/")[-1])
def is_models_installed() -> bool:
return os.path.exists(IS_FINISHED_STAMP)
def clean_dir(path: str) -> None:
"""
Removes all files in the directory.
"""
if not os.path.exists(path):
return
for f in os.listdir(path):
f = os.path.join(path, f)
if os.path.isfile(f):
os.remove(f)
def install_deepspeechmodules(
url_pbmm: Optional[str] = None,
url_scorer: Optional[str] = None,
) -> None:
url_pbmm = url_pbmm or URL_PBMM
url_scorer = url_scorer or URL_SCORER
os.makedirs(MODEL_DIR, exist_ok=True)
clean_dir(MODEL_DIR)
threads = {}
print(
"Downloading and installing the models for the first time. This may take a while."
)
for url in [url_pbmm, url_scorer]:
local_filename = url_to_local_name(url)
t = threading.Thread(target=download_file, args=(url, local_filename))
print(f"Downloading {url} -> {local_filename}")
threads[url] = t
t.daemon = True
t.start()
for url, t in threads.items():
while t.is_alive(): # allows keyboard interrupt.
t.join(0.2)
print(f"Finished downloading {url}")
Path(IS_FINISHED_STAMP).touch()
def install_dependencies_if_necessary() -> str: # pylint: disable=invalid-name
print(f"Model directory is: {MODEL_DIR}")
if not is_models_installed():
install_deepspeechmodules(url_pbmm=URL_PBMM, url_scorer=URL_SCORER)
return MODEL_DIR
if __name__ == "__main__":
install_dependencies_if_necessary()
|
omsagent.py
|
#!/usr/bin/env python
#
# OmsAgentForLinux Extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import pwd
import grp
import re
import sys
import traceback
import time
import platform
import subprocess
import json
import base64
import inspect
import urllib
import urllib2
import watcherutil
import shutil
from threading import Thread
try:
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as HUtil
except Exception as e:
# These utils have checks around the use of them; this is not an exit case
print('Importing utils failed with error: {0}'.format(e))
# Global Variables
PackagesDirectory = 'packages'
BundleFileName = 'omsagent-1.8.1-256.universal.x64.sh'
GUIDRegex = r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
GUIDOnlyRegex = r'^' + GUIDRegex + '$'
SCOMCertIssuerRegex = r'^[\s]*Issuer:[\s]*CN=SCX-Certificate/title=SCX' + GUIDRegex + ', DC=.*$'
SCOMPort = 1270
PostOnboardingSleepSeconds = 5
InitialRetrySleepSeconds = 30
IsUpgrade = False
# Paths
OMSAdminPath = '/opt/microsoft/omsagent/bin/omsadmin.sh'
OMSAgentServiceScript = '/opt/microsoft/omsagent/bin/service_control'
OMIConfigEditorPath = '/opt/omi/bin/omiconfigeditor'
OMIServerConfPath = '/etc/opt/omi/conf/omiserver.conf'
EtcOMSAgentPath = '/etc/opt/microsoft/omsagent/'
VarOMSAgentPath = '/var/opt/microsoft/omsagent/'
SCOMCertPath = '/etc/opt/microsoft/scx/ssl/scx.pem'
ExtensionStateSubdirectory = 'state'
# Commands
# Always use upgrade - will handle install if scx, omi are not installed or
# upgrade if they are
InstallCommandTemplate = '{0} --upgrade'
UninstallCommandTemplate = '{0} --remove'
WorkspaceCheckCommand = '{0} -l'.format(OMSAdminPath)
OnboardCommandWithOptionalParams = '{0} -w {1} -s {2} {3}'
RestartOMSAgentServiceCommand = '{0} restart'.format(OMSAgentServiceScript)
DisableOMSAgentServiceCommand = '{0} disable'.format(OMSAgentServiceScript)
# Error codes
DPKGLockedErrorCode = 55 #56, temporary as it excludes from SLA
InstallErrorCurlNotInstalled = 55 #64, temporary as it excludes from SLA
EnableErrorOMSReturned403 = 5
EnableErrorOMSReturnedNon200 = 6
EnableErrorResolvingHost = 7
EnableErrorOnboarding = 8
EnableCalledBeforeSuccessfulInstall = 9
UnsupportedOpenSSL = 55 #60, temporary as it excludes from SLA
# OneClick error codes
OneClickErrorCode = 40
ManagedIdentityExtMissingErrorCode = 41
ManagedIdentityExtErrorCode = 42
MetadataAPIErrorCode = 43
OMSServiceOneClickErrorCode = 44
MissingorInvalidParameterErrorCode = 11
UnwantedMultipleConnectionsErrorCode = 10
CannotConnectToOMSErrorCode = 55
UnsupportedOperatingSystem = 51
# Configuration
HUtilObject = None
SettingsSequenceNumber = None
HandlerEnvironment = None
SettingsDict = None
# OneClick Constants
ManagedIdentityExtListeningURLPath = '/var/lib/waagent/ManagedIdentity-Settings'
GUIDRegex = '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
OAuthTokenResource = 'https://management.core.windows.net/'
OMSServiceValidationEndpoint = 'https://global.oms.opinsights.azure.com/ManagedIdentityService.svc/Validate'
AutoManagedWorkspaceCreationSleepSeconds = 20
# vmResourceId Metadata Service
VMResourceIDMetadataHost = '169.254.169.254'
VMResourceIDMetadataEndpoint = 'http://{0}/metadata/instance?api-version=2017-12-01'.format(VMResourceIDMetadataHost)
# agent permissions
AgentUser='omsagent'
AgentGroup='omiusers'
# Change permission of log path - if we fail, that is not an exit case
try:
ext_log_path = '/var/log/azure/'
if os.path.exists(ext_log_path):
        os.chmod(ext_log_path, 0700)
except:
pass
def main():
"""
Main method
Parse out operation from argument, invoke the operation, and finish.
"""
init_waagent_logger()
waagent_log_info('OmsAgentForLinux started to handle.')
global IsUpgrade
# Determine the operation being executed
operation = None
try:
option = sys.argv[1]
if re.match('^([-/]*)(disable)', option):
operation = 'Disable'
elif re.match('^([-/]*)(uninstall)', option):
operation = 'Uninstall'
elif re.match('^([-/]*)(install)', option):
operation = 'Install'
elif re.match('^([-/]*)(enable)', option):
operation = 'Enable'
elif re.match('^([-/]*)(update)', option):
operation = 'Update'
IsUpgrade = True
elif re.match('^([-/]*)(telemetry)', option):
operation = 'Telemetry'
except Exception as e:
waagent_log_error(str(e))
if operation is None:
log_and_exit('Unknown', 1, 'No valid operation provided')
# Set up for exit code and any error messages
exit_code = 0
message = '{0} succeeded'.format(operation)
# Invoke operation
try:
global HUtilObject
HUtilObject = parse_context(operation)
exit_code = operations[operation]()
# Exit code 1 indicates a general problem that doesn't have a more
# specific error code; it often indicates a missing dependency
if exit_code is 1 and operation == 'Install':
message = 'Install failed with exit code 1. Please check that ' \
'dependencies are installed. For details, check logs ' \
'in /var/log/azure/Microsoft.EnterpriseCloud.' \
'Monitoring.OmsAgentForLinux'
elif exit_code is DPKGLockedErrorCode and operation == 'Install':
message = 'Install failed with exit code {0} because the ' \
'package manager on the VM is currently locked: ' \
'please wait and try again'.format(DPKGLockedErrorCode)
elif exit_code is not 0:
message = '{0} failed with exit code {1}'.format(operation,
exit_code)
except OmsAgentForLinuxException as e:
exit_code = e.error_code
message = e.get_error_message(operation)
except Exception as e:
exit_code = 1
message = '{0} failed with error: {1}\n' \
'Stacktrace: {2}'.format(operation, e,
traceback.format_exc())
# Finish up and log messages
log_and_exit(operation, exit_code, message)
def stop_telemetry_process():
pids_filepath = os.path.join(os.getcwd(),'omstelemetry.pid')
# kill existing telemetry watcher
if os.path.exists(pids_filepath):
with open(pids_filepath, "r") as f:
for pids in f.readlines():
kill_cmd = "kill " + pids
run_command_and_log(kill_cmd)
run_command_and_log("rm "+pids_filepath)
def start_telemetry_process():
"""
Start telemetry process that performs periodic monitoring activities
:return: None
"""
stop_telemetry_process()
#start telemetry watcher
omsagent_filepath = os.path.join(os.getcwd(),'omsagent.py')
args = ['python', omsagent_filepath, '-telemetry']
log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
HUtilObject.log('start watcher process '+str(args))
subprocess.Popen(args, stdout=log, stderr=log)
def telemetry():
pids_filepath = os.path.join(os.getcwd(), 'omstelemetry.pid')
py_pid = os.getpid()
with open(pids_filepath, 'w') as f:
f.write(str(py_pid) + '\n')
watcher = watcherutil.Watcher(HUtilObject.error, HUtilObject.log, log_to_console=True)
watcher_thread = Thread(target = watcher.watch)
self_mon_thread = Thread(target = watcher.monitor_health)
watcher_thread.start()
self_mon_thread.start()
watcher_thread.join()
self_mon_thread.join()
def prepare_update():
"""
Copy / move configuration directory to the backup
"""
    # First check if a backup directory was previously created for the given workspace.
    # If it was created with all the files, we need not move the files again.
public_settings, _ = get_settings()
workspaceId = public_settings.get('workspaceId')
etc_remove_path = os.path.join(EtcOMSAgentPath, workspaceId)
etc_move_path = os.path.join(EtcOMSAgentPath, ExtensionStateSubdirectory, workspaceId)
if (not os.path.isdir(etc_move_path)):
shutil.move(etc_remove_path, etc_move_path)
return 0
def restore_state(workspaceId):
"""
Copy / move state from backup to the expected location.
"""
try:
etc_backup_path = os.path.join(EtcOMSAgentPath, ExtensionStateSubdirectory, workspaceId)
etc_final_path = os.path.join(EtcOMSAgentPath, workspaceId)
if (os.path.isdir(etc_backup_path) and not os.path.isdir(etc_final_path)):
shutil.move(etc_backup_path, etc_final_path)
except Exception as e:
hutil_log_error("Error while restoring the state. Exception : "+traceback.format_exc())
def install():
"""
Ensure that this VM distro and version are supported.
Install the OMSAgent shell bundle, using retries.
Note: install operation times out from WAAgent at 15 minutes, so do not
wait longer.
"""
exit_if_vm_not_supported('Install')
public_settings, protected_settings = get_settings()
if public_settings is None:
raise ParameterMissingException('Public configuration must be ' \
'provided')
workspaceId = public_settings.get('workspaceId')
check_workspace_id(workspaceId)
# Take the backup of the state for given workspace.
restore_state(workspaceId)
# In the case where a SCOM connection is already present, we should not
# create conflicts by installing the OMSAgent packages
stopOnMultipleConnections = public_settings.get('stopOnMultipleConnections')
if (stopOnMultipleConnections is not None
and stopOnMultipleConnections is True):
detect_multiple_connections(workspaceId)
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
os.chmod(bundle_path, 100)
cmd = InstallCommandTemplate.format(bundle_path)
hutil_log_info('Running command "{0}"'.format(cmd))
# Retry, since install can fail due to concurrent package operations
exit_code = run_command_with_retries(cmd, retries = 15,
retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
final_check = final_check_if_dpkg_locked)
return exit_code
def uninstall():
"""
Uninstall the OMSAgent shell bundle.
This is a somewhat soft uninstall. It is not a purge.
Note: uninstall operation times out from WAAgent at 5 minutes
"""
package_directory = os.path.join(os.getcwd(), PackagesDirectory)
bundle_path = os.path.join(package_directory, BundleFileName)
global IsUpgrade
os.chmod(bundle_path, 100)
cmd = UninstallCommandTemplate.format(bundle_path)
hutil_log_info('Running command "{0}"'.format(cmd))
# Retry, since uninstall can fail due to concurrent package operations
exit_code = run_command_with_retries(cmd, retries = 5,
retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
final_check = final_check_if_dpkg_locked)
if IsUpgrade:
IsUpgrade = False
else:
remove_workspace_configuration()
return exit_code
def enable():
"""
Onboard the OMSAgent to the specified OMS workspace.
This includes enabling the OMS process on the VM.
This call will return non-zero or throw an exception if
the settings provided are incomplete or incorrect.
Note: enable operation times out from WAAgent at 5 minutes
"""
exit_if_vm_not_supported('Enable')
public_settings, protected_settings = get_settings()
if public_settings is None:
raise ParameterMissingException('Public configuration must be ' \
'provided')
if protected_settings is None:
raise ParameterMissingException('Private configuration must be ' \
'provided')
vmResourceId = protected_settings.get('vmResourceId')
# If vmResourceId is not provided in private settings, get it from metadata API
if vmResourceId is None or not vmResourceId:
vmResourceId = get_vmresourceid_from_metadata()
hutil_log_info('vmResourceId from Metadata API is {0}'.format(vmResourceId))
if vmResourceId is None:
hutil_log_info('This may be a classic VM')
enableAutomaticManagement = public_settings.get('enableAutomaticManagement')
if (enableAutomaticManagement is not None
and enableAutomaticManagement is True):
hutil_log_info('enableAutomaticManagement is set to true; the ' \
'workspace ID and key will be determined by the OMS ' \
'service.')
workspaceInfo = retrieve_managed_workspace(vmResourceId)
if (workspaceInfo is None or 'WorkspaceId' not in workspaceInfo
or 'WorkspaceKey' not in workspaceInfo):
raise OneClickException('Workspace info was not determined')
else:
# Note: do NOT log workspace keys!
hutil_log_info('Managed workspaceInfo has been retrieved')
workspaceId = workspaceInfo['WorkspaceId']
workspaceKey = workspaceInfo['WorkspaceKey']
try:
check_workspace_id_and_key(workspaceId, workspaceKey)
except InvalidParameterError as e:
raise OMSServiceOneClickException('Received invalid ' \
'workspace info: ' \
'{0}'.format(e))
else:
workspaceId = public_settings.get('workspaceId')
workspaceKey = protected_settings.get('workspaceKey')
check_workspace_id_and_key(workspaceId, workspaceKey)
# Check if omsadmin script is available
if not os.path.exists(OMSAdminPath):
log_and_exit('Enable', EnableCalledBeforeSuccessfulInstall,
'OMSAgent onboarding script {0} does not exist. Enable ' \
'cannot be called before install.'.format(OMSAdminPath))
vmResourceIdParam = '-a {0}'.format(vmResourceId)
proxy = protected_settings.get('proxy')
proxyParam = ''
if proxy is not None:
proxyParam = '-p {0}'.format(proxy)
optionalParams = '{0} {1}'.format(proxyParam, vmResourceIdParam)
onboard_cmd = OnboardCommandWithOptionalParams.format(OMSAdminPath,
workspaceId,
workspaceKey,
optionalParams)
hutil_log_info('Handler initiating onboarding.')
exit_code = run_command_with_retries(onboard_cmd, retries = 5,
retry_check = retry_onboarding,
final_check = raise_if_no_internet,
check_error = True, log_cmd = False)
# now ensure the permissions and ownership is set recursively
workspaceId = public_settings.get('workspaceId')
etc_final_path = os.path.join(EtcOMSAgentPath, workspaceId)
if (os.path.isdir(etc_final_path)):
uid = pwd.getpwnam(AgentUser).pw_uid
gid = grp.getgrnam(AgentGroup).gr_gid
os.chown(etc_final_path, uid, gid)
os.chmod(etc_final_path, 0750)
for root, dirs, files in os.walk(etc_final_path):
for d in dirs:
os.chown(os.path.join(root, d), uid, gid)
os.chmod(os.path.join(root, d), 0750)
for f in files:
os.chown(os.path.join(root, f), uid, gid)
os.chmod(os.path.join(root, f), 0640)
if exit_code is 0:
# Create a marker file to denote the workspace that was
# onboarded using the extension. This will allow supporting
# multi-homing through the extension like Windows does
extension_marker_path = os.path.join(EtcOMSAgentPath, workspaceId,
'conf/.azure_extension_marker')
if os.path.exists(extension_marker_path):
hutil_log_info('Extension marker file {0} already ' \
'created'.format(extension_marker_path))
else:
try:
open(extension_marker_path, 'w').close()
hutil_log_info('Created extension marker file ' \
'{0}'.format(extension_marker_path))
except IOError as e:
hutil_log_error('Error creating {0} with error: ' \
'{1}'.format(extension_marker_path, e))
# Sleep to prevent bombarding the processes, then restart all processes
# to resolve any issues with auto-started processes from --upgrade
time.sleep(PostOnboardingSleepSeconds)
run_command_and_log(RestartOMSAgentServiceCommand)
#start telemetry process if enable is successful
start_telemetry_process()
return exit_code
def remove_workspace_configuration():
"""
    This is needed to distinguish between extension removal and extension upgrade.
    It is a workaround for the waagent upgrade routine calling 'remove' on the old
    version before calling 'upgrade' on the new extension version.
    In the upgrade case the workspace configuration needs to persist, whereas in
    the remove case all the files need to be removed.
    This method removes all files/folders from the workspace path in Etc and Var.
"""
public_settings, _ = get_settings()
workspaceId = public_settings.get('workspaceId')
etc_remove_path = os.path.join(EtcOMSAgentPath, workspaceId)
var_remove_path = os.path.join(VarOMSAgentPath, workspaceId)
shutil.rmtree(etc_remove_path, True)
shutil.rmtree(var_remove_path, True)
hutil_log_info('Moved oms etc configuration directory and cleaned up var directory')
def get_vmresourceid_from_metadata():
req = urllib2.Request(VMResourceIDMetadataEndpoint)
req.add_header('Metadata', 'True')
try:
response = json.loads(urllib2.urlopen(req).read())
if ('compute' not in response or response['compute'] is None):
return None #classic vm
if response['compute']['vmScaleSetName']:
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachineScaleSets/{2}/virtualMachines/{3}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['vmScaleSetName'],response['compute']['name'])
else:
return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachines/{2}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['name'])
except urllib2.HTTPError as e:
hutil_log_error('Request to Metadata service URL ' \
'failed with an HTTPError: {0}'.format(e))
hutil_log_info('Response from Metadata service: ' \
'{0}'.format(e.read()))
return None
except:
hutil_log_error('Unexpected error from Metadata service')
return None
def retrieve_managed_workspace(vm_resource_id):
"""
EnableAutomaticManagement has been set to true; the
ManagedIdentity extension and the VM Resource ID are also
required for the OneClick scenario
Using these and the Metadata API, we will call the OMS service
to determine what workspace ID and key to onboard to
"""
# Check for OneClick scenario requirements:
if not os.path.exists(ManagedIdentityExtListeningURLPath):
raise ManagedIdentityExtMissingException
# Determine the Tenant ID using the Metadata API
tenant_id = get_tenant_id_from_metadata_api(vm_resource_id)
# Retrieve an OAuth token using the ManagedIdentity extension
if tenant_id is not None:
hutil_log_info('Tenant ID from Metadata API is {0}'.format(tenant_id))
access_token = get_access_token(tenant_id, OAuthTokenResource)
else:
return None
# Query OMS service for the workspace info for onboarding
if tenant_id is not None and access_token is not None:
return get_workspace_info_from_oms(vm_resource_id, tenant_id,
access_token)
else:
return None
def disable():
"""
Disable all OMS workspace processes on the VM.
Note: disable operation times out from WAAgent at 15 minutes
"""
#stop the telemetry process
stop_telemetry_process()
# Check if the service control script is available
if not os.path.exists(OMSAgentServiceScript):
        log_and_exit('Disable', 1, 'OMSAgent service control script {0} does ' \
'not exist. Disable cannot be called ' \
'before install.'.format(OMSAgentServiceScript))
return 1
exit_code, output = run_command_and_log(DisableOMSAgentServiceCommand)
return exit_code
# Dictionary of operations strings to methods
operations = {'Disable' : disable,
'Uninstall' : uninstall,
'Install' : install,
'Enable' : enable,
# For update call we will only prepare the update by taking some backup of the state
# since omsagent.py->install() will be called
# everytime upgrade is done due to upgradeMode =
# "UpgradeWithInstall" set in HandlerManifest
'Update' : prepare_update,
'Telemetry' : telemetry
}
def parse_context(operation):
"""
Initialize a HandlerUtil object for this operation.
If the required modules have not been imported, this will return None.
"""
hutil = None
if ('Utils.WAAgentUtil' in sys.modules
and 'Utils.HandlerUtil' in sys.modules):
try:
logFileName = 'extension.log'
if (operation == 'Telemetry'):
logFileName = 'watcher.log'
hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error, logFileName=logFileName)
hutil.do_parse_context(operation)
# parse_context may throw KeyError if necessary JSON key is not
# present in settings
except KeyError as e:
waagent_log_error('Unable to parse context with error: ' \
'{0}'.format(e))
raise ParameterMissingException
return hutil
def is_vm_supported_for_extension():
"""
Checks if the VM this extension is running on is supported by OMSAgent
Returns for platform.linux_distribution() vary widely in format, such as
'7.3.1611' returned for a VM with CentOS 7, so the first provided
digits must match
The supported distros of the OMSAgent-for-Linux are allowed to utilize
this VM extension. All other distros will get error code 51
"""
supported_dists = {'redhat' : ['6', '7'], # CentOS
'centos' : ['6', '7'], # CentOS
'red hat' : ['6', '7'], # Oracle, RHEL
'oracle' : ['6', '7'], # Oracle
'debian' : ['8', '9'], # Debian
'ubuntu' : ['14.04', '16.04', '18.04'], # Ubuntu
'suse' : ['12'] #SLES
}
try:
vm_dist, vm_ver, vm_id = platform.linux_distribution()
except AttributeError:
vm_dist, vm_ver, vm_id = platform.dist()
vm_supported = False
# Find this VM distribution in the supported list
for supported_dist in supported_dists.keys():
if not vm_dist.lower().startswith(supported_dist):
continue
# Check if this VM distribution version is supported
vm_ver_split = vm_ver.split('.')
for supported_ver in supported_dists[supported_dist]:
supported_ver_split = supported_ver.split('.')
# If vm_ver is at least as precise (at least as many digits) as
# supported_ver and matches all the supported_ver digits, then
# this VM is guaranteed to be supported
vm_ver_match = True
for idx, supported_ver_num in enumerate(supported_ver_split):
try:
supported_ver_num = int(supported_ver_num)
vm_ver_num = int(vm_ver_split[idx])
except IndexError:
vm_ver_match = False
break
                if vm_ver_num != supported_ver_num:
vm_ver_match = False
break
if vm_ver_match:
vm_supported = True
break
if vm_supported:
break
return vm_supported, vm_dist, vm_ver
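# Illustrative note (not part of the original handler): the prefix-matching rule
# above means a VM reporting ('centos', '7.3.1611', 'Core') is treated as
# supported, because every digit group of the supported version '7' matches the
# corresponding group of '7.3.1611', while a report of ('ubuntu', '17.10', ...)
# is rejected since none of '14.04', '16.04', '18.04' match its leading digits.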
def exit_if_vm_not_supported(operation):
"""
Check if this VM distro and version are supported by the OMSAgent.
If this VM is not supported, log the proper error code and exit.
"""
vm_supported, vm_dist, vm_ver = is_vm_supported_for_extension()
if not vm_supported:
log_and_exit(operation, UnsupportedOperatingSystem, 'Unsupported operating system: ' \
'{0} {1}'.format(vm_dist, vm_ver))
return 0
def exit_if_openssl_unavailable(operation):
"""
Check if the openssl commandline interface is available to use
If not, throw error to return UnsupportedOpenSSL error code
"""
exit_code, output = run_get_output('which openssl', True, False)
    if exit_code != 0:
log_and_exit(operation, UnsupportedOpenSSL, 'OpenSSL is not available')
return 0
def check_workspace_id_and_key(workspace_id, workspace_key):
"""
Validate formats of workspace_id and workspace_key
"""
check_workspace_id(workspace_id)
# Validate that workspace_key is of the correct format (base64-encoded)
if workspace_key is None:
raise ParameterMissingException('Workspace key must be provided')
try:
encoded_key = base64.b64encode(base64.b64decode(workspace_key))
if encoded_key != workspace_key:
raise InvalidParameterError('Workspace key is invalid')
except TypeError:
raise InvalidParameterError('Workspace key is invalid')
def check_workspace_id(workspace_id):
"""
Validate that workspace_id matches the GUID regex
"""
if workspace_id is None:
raise ParameterMissingException('Workspace ID must be provided')
search = re.compile(GUIDOnlyRegex, re.M)
if not search.match(workspace_id):
raise InvalidParameterError('Workspace ID is invalid')
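# Illustrative examples (assumptions, not part of the original handler) of how
# the two validators above behave:
#
#   check_workspace_id('12345678-1234-1234-1234-123456789012')   # GUID-shaped, passes
#   check_workspace_id('not-a-guid')                              # raises InvalidParameterError
#   check_workspace_id_and_key('12345678-1234-1234-1234-123456789012',
#                              base64.b64encode('some shared key'))  # round-trips, passes
#
# The key check relies on base64.b64encode(base64.b64decode(key)) reproducing the
# original string, so any value that is not valid base64 raises InvalidParameterError.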
def detect_multiple_connections(workspace_id):
"""
If the VM already has a workspace/SCOM configured, then we should
disallow a new connection when stopOnMultipleConnections is used
Throw an exception in these cases:
- The workspace with the given workspace_id has not been onboarded
to the VM, but at least one other workspace has been
- The workspace with the given workspace_id has not been onboarded
to the VM, and the VM is connected to SCOM
If the extension operation is connecting to an already-configured
workspace, it is not a stopping case
"""
other_connection_exists = False
if os.path.exists(OMSAdminPath):
exit_code, output = run_get_output(WorkspaceCheckCommand,
chk_err = False)
if output.strip().lower() != 'no workspace':
for line in output.split('\n'):
if workspace_id in line:
hutil_log_info('The workspace to be enabled has already ' \
'been configured on the VM before; ' \
'continuing despite ' \
'stopOnMultipleConnections flag')
return
else:
# Note: if scom workspace dir is created, a line containing
# "Workspace(SCOM Workspace): scom" will be here
# If any other line is here, it may start sending data later
other_connection_exists = True
else:
for dir_name, sub_dirs, files in os.walk(EtcOMSAgentPath):
for sub_dir in sub_dirs:
sub_dir_name = os.path.basename(sub_dir)
workspace_search = re.compile(GUIDOnlyRegex, re.M)
if sub_dir_name == workspace_id:
hutil_log_info('The workspace to be enabled has already ' \
'been configured on the VM before; ' \
'continuing despite ' \
'stopOnMultipleConnections flag')
return
elif (workspace_search.match(sub_dir_name)
or sub_dir_name == 'scom'):
other_connection_exists = True
if other_connection_exists:
err_msg = ('This machine is already connected to some other Log ' \
'Analytics workspace, please set ' \
'stopOnMultipleConnections to false in public ' \
'settings or remove this property, so this machine ' \
'can connect to new workspaces, also it means this ' \
'machine will get billed multiple times for each ' \
'workspace it report to. ' \
'(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
# This exception will get caught by the main method
raise UnwantedMultipleConnectionsException(err_msg)
else:
detect_scom_connection()
def detect_scom_connection():
"""
If these two conditions are met, then we can assume the
VM is monitored
by SCOM:
1. SCOMPort is open and omiserver is listening on it
2. scx certificate is signed by SCOM server
To determine it check for existence of below two
conditions:
1. SCOMPort is open and omiserver is listening on it:
/etc/omi/conf/omiserver.conf can be parsed to
determine it.
2. scx certificate is signed by SCOM server: scom cert
is present @ /etc/opt/omi/ssl/omi-host-<hostname>.pem
(/etc/opt/microsoft/scx/ssl/scx.pem is a softlink to
this). If the VM is monitored by SCOM then issuer
field of the certificate will have a value like
CN=SCX-Certificate/title=<GUID>, DC=<SCOM server hostname>
(e.g CN=SCX-Certificate/title=SCX94a1f46d-2ced-4739-9b6a-1f06156ca4ac,
DC=NEB-OM-1502733)
Otherwise, if a scom configuration directory has been
created, we assume SCOM is in use
"""
    scom_port_open = None # return early once this is determined to be False
cert_signed_by_scom = False
if os.path.exists(OMSAdminPath):
scom_port_open = detect_scom_using_omsadmin()
if scom_port_open is False:
return
# If omsadmin.sh option is not available, use omiconfigeditor
if (scom_port_open is None and os.path.exists(OMIConfigEditorPath)
and os.path.exists(OMIServerConfPath)):
scom_port_open = detect_scom_using_omiconfigeditor()
if scom_port_open is False:
return
# If omiconfigeditor option is not available, directly parse omiserver.conf
if scom_port_open is None and os.path.exists(OMIServerConfPath):
scom_port_open = detect_scom_using_omiserver_conf()
if scom_port_open is False:
return
if scom_port_open is None:
hutil_log_info('SCOM port could not be determined to be open')
return
# Parse the certificate to determine if SCOM issued it
if os.path.exists(SCOMCertPath):
exit_if_openssl_unavailable('Install')
cert_cmd = 'openssl x509 -in {0} -noout -text'.format(SCOMCertPath)
cert_exit_code, cert_output = run_get_output(cert_cmd, chk_err = False,
log_cmd = False)
        if cert_exit_code == 0:
issuer_re = re.compile(SCOMCertIssuerRegex, re.M)
if issuer_re.search(cert_output):
hutil_log_info('SCOM cert exists and is signed by SCOM server')
cert_signed_by_scom = True
else:
hutil_log_info('SCOM cert exists but is not signed by SCOM ' \
'server')
else:
hutil_log_error('Error reading SCOM cert; cert could not be ' \
'determined to be signed by SCOM server')
else:
hutil_log_info('SCOM cert does not exist')
if scom_port_open and cert_signed_by_scom:
err_msg = ('This machine may already be connected to a System ' \
'Center Operations Manager server. Please set ' \
'stopOnMultipleConnections to false in public settings ' \
'or remove this property to allow connection to the Log ' \
'Analytics workspace. ' \
'(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
raise UnwantedMultipleConnectionsException(err_msg)
def detect_scom_using_omsadmin():
"""
This method assumes that OMSAdminPath exists; if packages have not
been installed yet, this may not exist
Returns True if omsadmin.sh indicates that SCOM port is open
"""
omsadmin_cmd = '{0} -o'.format(OMSAdminPath)
exit_code, output = run_get_output(omsadmin_cmd, False, False)
# Guard against older omsadmin.sh versions
if ('illegal option' not in output.lower()
and 'unknown option' not in output.lower()):
        if exit_code == 0:
hutil_log_info('According to {0}, SCOM port is ' \
'open'.format(omsadmin_cmd))
return True
        elif exit_code == 1:
hutil_log_info('According to {0}, SCOM port is not ' \
'open'.format(omsadmin_cmd))
return False
def detect_scom_using_omiconfigeditor():
"""
This method assumes that the relevant files exist
Returns True if omiconfigeditor indicates that SCOM port is open
"""
omi_cmd = '{0} httpsport -q {1} < {2}'.format(OMIConfigEditorPath,
SCOMPort, OMIServerConfPath)
exit_code, output = run_get_output(omi_cmd, False, False)
# Guard against older omiconfigeditor versions
if ('illegal option' not in output.lower()
and 'unknown option' not in output.lower()):
        if exit_code == 0:
hutil_log_info('According to {0}, SCOM port is ' \
'open'.format(omi_cmd))
return True
        elif exit_code == 1:
hutil_log_info('According to {0}, SCOM port is not ' \
'open'.format(omi_cmd))
return False
def detect_scom_using_omiserver_conf():
"""
This method assumes that the relevant files exist
Returns True if omiserver.conf indicates that SCOM port is open
"""
with open(OMIServerConfPath, 'r') as omiserver_file:
omiserver_txt = omiserver_file.read()
httpsport_search = r'^[\s]*httpsport[\s]*=(.*)$'
httpsport_re = re.compile(httpsport_search, re.M)
httpsport_matches = httpsport_re.search(omiserver_txt)
if (httpsport_matches is not None and
httpsport_matches.group(1) is not None):
ports = httpsport_matches.group(1)
ports = ports.replace(',', ' ')
ports_list = ports.split(' ')
if str(SCOMPort) in ports_list:
hutil_log_info('SCOM port is listed in ' \
'{0}'.format(OMIServerConfPath))
return True
else:
hutil_log_info('SCOM port is not listed in ' \
'{0}'.format(OMIServerConfPath))
else:
hutil_log_info('SCOM port is not listed in ' \
'{0}'.format(OMIServerConfPath))
return False
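# Illustrative note (an assumption about the omiserver.conf format, not part of
# the original handler): a configuration line such as
#
#   httpsport=1270,1271
#
# is matched by the regex above, normalized by replacing commas with spaces,
# split into ['1270', '1271'], and SCOM is considered configured whenever
# str(SCOMPort) appears in that list.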
def run_command_and_log(cmd, check_error = True, log_cmd = True):
"""
Run the provided shell command and log its output, including stdout and
stderr.
The output should not contain any PII, but the command might. In this case,
log_cmd should be set to False.
"""
exit_code, output = run_get_output(cmd, check_error, log_cmd)
if log_cmd:
hutil_log_info('Output of command "{0}": \n{1}'.format(cmd, output))
else:
hutil_log_info('Output: \n{0}'.format(output))
return exit_code, output
def run_command_with_retries(cmd, retries, retry_check, final_check = None,
check_error = True, log_cmd = True,
initial_sleep_time = InitialRetrySleepSeconds,
sleep_increase_factor = 1):
"""
Caller provides a method, retry_check, to use to determine if a retry
should be performed. This must be a function with two parameters:
exit_code and output
The final_check can be provided as a method to perform a final check after
retries have been exhausted
Logic used: will retry up to retries times with initial_sleep_time in
between tries
If the retry_check returns True for retry_verbosely, we will try cmd with
the standard -v verbose flag added
"""
try_count = 0
sleep_time = initial_sleep_time
run_cmd = cmd
run_verbosely = False
while try_count <= retries:
if run_verbosely:
run_cmd = cmd + ' -v'
exit_code, output = run_command_and_log(run_cmd, check_error, log_cmd)
should_retry, retry_message, run_verbosely = retry_check(exit_code,
output)
if not should_retry:
break
try_count += 1
hutil_log_info(retry_message)
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if final_check is not None:
exit_code = final_check(exit_code, output)
return exit_code
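# Illustrative sketch (not part of the original handler): a retry_check callable
# passed to run_command_with_retries only has to return the tuple
# (should_retry, retry_message, retry_verbosely), for example:
#
#   def retry_on_any_failure(exit_code, output):
#       return exit_code != 0, 'Retrying command after a non-zero exit code', False
#
#   exit_code = run_command_with_retries('apt-get update', 3, retry_on_any_failure,
#                                        final_check = final_check_if_dpkg_locked)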
def is_dpkg_locked(exit_code, output):
"""
If dpkg is locked, the output will contain a message similar to 'dpkg
status database is locked by another process'
"""
    if exit_code != 0:
dpkg_locked_search = r'^.*dpkg.+lock.*$'
dpkg_locked_re = re.compile(dpkg_locked_search, re.M)
if dpkg_locked_re.search(output):
return True
return False
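# Illustrative note (not part of the original handler): output such as
# "E: Could not get lock /var/lib/dpkg/lock - open (11: Resource temporarily
# unavailable)" contains 'dpkg' followed by 'lock' on a single line and
# therefore matches the regex above whenever the exit code is non-zero.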
def was_curl_found(exit_code, output):
"""
Returns false if exit_code indicates that curl was not installed; this can
occur when package lists need to be updated, or when some archives are
out-of-date
"""
    if exit_code == InstallErrorCurlNotInstalled:
return False
return True
def retry_if_dpkg_locked_or_curl_is_not_found(exit_code, output):
"""
Some commands fail because the package manager is locked (apt-get/dpkg
only); this will allow retries on failing commands.
Sometimes curl's dependencies (i.e. libcurl) are not installed; if this
is the case on a VM with apt-get, 'apt-get -f install' should be run
Sometimes curl is not installed and is also not found in the package list;
if this is the case on a VM with apt-get, update the package list
"""
retry_verbosely = False
dpkg_locked = is_dpkg_locked(exit_code, output)
curl_found = was_curl_found(exit_code, output)
apt_get_exit_code, apt_get_output = run_get_output('which apt-get',
chk_err = False,
log_cmd = False)
if dpkg_locked:
return True, 'Retrying command because package manager is locked.', \
retry_verbosely
    elif (not curl_found and apt_get_exit_code == 0 and
('apt-get -f install' in output
or 'Unmet dependencies' in output.lower())):
hutil_log_info('Installing all dependencies of curl:')
run_command_and_log('apt-get -f install')
return True, 'Retrying command because curl and its dependencies ' \
'needed to be installed', retry_verbosely
    elif not curl_found and apt_get_exit_code == 0:
hutil_log_info('Updating package lists to make curl available')
run_command_and_log('apt-get update')
return True, 'Retrying command because package lists needed to be ' \
'updated', retry_verbosely
else:
return False, '', False
def final_check_if_dpkg_locked(exit_code, output):
"""
If dpkg is still locked after the retries, we want to return a specific
error code
"""
dpkg_locked = is_dpkg_locked(exit_code, output)
if dpkg_locked:
exit_code = DPKGLockedErrorCode
return exit_code
def retry_onboarding(exit_code, output):
"""
Retry under any of these conditions:
- If the onboarding request returns 403: this may indicate that the agent
GUID and certificate should be re-generated
- If the onboarding request returns a different non-200 code: the OMS
service may be temporarily unavailable
- If the onboarding curl command returns an unaccounted-for error code,
we should retry with verbose logging
"""
retry_verbosely = False
    if exit_code == EnableErrorOMSReturned403:
return True, 'Retrying the onboarding command to attempt generating ' \
'a new agent ID and certificate.', retry_verbosely
    elif exit_code == EnableErrorOMSReturnedNon200:
return True, 'Retrying; the OMS service may be temporarily ' \
'unavailable.', retry_verbosely
    elif exit_code == EnableErrorOnboarding:
return True, 'Retrying with verbose logging.', True
return False, '', False
def raise_if_no_internet(exit_code, output):
"""
Raise the CannotConnectToOMSException exception if the onboarding
script returns the error code to indicate that the OMS service can't be
resolved
"""
    if exit_code == EnableErrorResolvingHost:
raise CannotConnectToOMSException
return exit_code
def get_settings():
"""
Retrieve the configuration for this extension operation
"""
global SettingsDict
public_settings = None
protected_settings = None
if HUtilObject is not None:
public_settings = HUtilObject.get_public_settings()
protected_settings = HUtilObject.get_protected_settings()
elif SettingsDict is not None:
public_settings = SettingsDict['public_settings']
protected_settings = SettingsDict['protected_settings']
else:
SettingsDict = {}
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
seq_no = get_latest_seq_no()
settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))
try:
with open(settings_path, 'r') as settings_file:
settings_txt = settings_file.read()
settings = json.loads(settings_txt)
h_settings = settings['runtimeSettings'][0]['handlerSettings']
public_settings = h_settings['publicSettings']
SettingsDict['public_settings'] = public_settings
except:
hutil_log_error('Unable to load handler settings from ' \
'{0}'.format(settings_path))
if (h_settings.has_key('protectedSettings')
and h_settings.has_key('protectedSettingsCertThumbprint')
and h_settings['protectedSettings'] is not None
and h_settings['protectedSettingsCertThumbprint'] is not None):
encoded_settings = h_settings['protectedSettings']
settings_thumbprint = h_settings['protectedSettingsCertThumbprint']
encoded_cert_path = os.path.join('/var/lib/waagent',
'{0}.crt'.format(
settings_thumbprint))
encoded_key_path = os.path.join('/var/lib/waagent',
'{0}.prv'.format(
settings_thumbprint))
decoded_settings = base64.standard_b64decode(encoded_settings)
decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \
'-inkey {1}'.format(encoded_cert_path,
encoded_key_path)
try:
session = subprocess.Popen([decrypt_cmd], shell = True,
stdin = subprocess.PIPE,
stderr = subprocess.STDOUT,
stdout = subprocess.PIPE)
output = session.communicate(decoded_settings)
except OSError:
pass
protected_settings_str = output[0]
if protected_settings_str is None:
log_and_exit('Enable', 1, 'Failed decrypting ' \
'protectedSettings')
protected_settings = ''
try:
protected_settings = json.loads(protected_settings_str)
except:
hutil_log_error('JSON exception decoding protected settings')
SettingsDict['protected_settings'] = protected_settings
return public_settings, protected_settings
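# Illustrative sketch (an assumption, not part of the original handler) of the
# shape of the values returned by get_settings(); only keys referenced elsewhere
# in this handler are shown, and real settings may contain more:
#
#   public_settings    = {'workspaceId': '<GUID>',
#                         'stopOnMultipleConnections': True}
#   protected_settings = {'workspaceKey': '<base64 shared key>'}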
def update_status_file(operation, exit_code, exit_status, message):
"""
Mimic HandlerUtil method do_status_report in case hutil method is not
available
Write status to status file
"""
handler_env = get_handler_env()
try:
extension_version = str(handler_env['version'])
status_dir = str(handler_env['handlerEnvironment']['statusFolder'])
except:
extension_version = "1.0"
status_dir = os.path.join(os.getcwd(), 'status')
status_txt = [{
"version" : extension_version,
"timestampUTC" : time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
"status" : {
"name" : "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux",
"operation" : operation,
"status" : exit_status,
"code" : exit_code,
"formattedMessage" : {
"lang" : "en-US",
"message" : message
}
}
}]
status_json = json.dumps(status_txt)
# Find the most recently changed config file and then use the
# corresponding status file
latest_seq_no = get_latest_seq_no()
status_path = os.path.join(status_dir, '{0}.status'.format(latest_seq_no))
status_tmp = '{0}.tmp'.format(status_path)
with open(status_tmp, 'w+') as tmp_file:
tmp_file.write(status_json)
os.rename(status_tmp, status_path)
def get_handler_env():
"""
Set and retrieve the contents of HandlerEnvironment.json as JSON
"""
global HandlerEnvironment
if HandlerEnvironment is None:
handler_env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
try:
with open(handler_env_path, 'r') as handler_env_file:
handler_env_txt = handler_env_file.read()
handler_env = json.loads(handler_env_txt)
if type(handler_env) == list:
handler_env = handler_env[0]
HandlerEnvironment = handler_env
except Exception as e:
waagent_log_error(str(e))
return HandlerEnvironment
def get_latest_seq_no():
"""
Determine the latest operation settings number to use
"""
global SettingsSequenceNumber
if SettingsSequenceNumber is None:
handler_env = get_handler_env()
try:
config_dir = str(handler_env['handlerEnvironment']['configFolder'])
except:
config_dir = os.path.join(os.getcwd(), 'config')
latest_seq_no = -1
cur_seq_no = -1
latest_time = None
try:
for dir_name, sub_dirs, file_names in os.walk(config_dir):
for file_name in file_names:
file_basename = os.path.basename(file_name)
match = re.match(r'[0-9]{1,10}\.settings', file_basename)
if match is None:
continue
cur_seq_no = int(file_basename.split('.')[0])
file_path = os.path.join(config_dir, file_name)
cur_time = os.path.getmtime(file_path)
if latest_time is None or cur_time > latest_time:
latest_time = cur_time
latest_seq_no = cur_seq_no
except:
pass
if latest_seq_no < 0:
latest_seq_no = 0
SettingsSequenceNumber = latest_seq_no
return SettingsSequenceNumber
def run_get_output(cmd, chk_err = False, log_cmd = True):
"""
    Mimic waagent method RunGetOutput in case waagent is not available
Run shell command and return exit code and output
"""
if 'Utils.WAAgentUtil' in sys.modules:
# WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
# If checking the number of parameters fails, pass 2
try:
sig = inspect.signature(waagent.RunGetOutput)
params = sig.parameters
waagent_params = len(params)
except:
try:
spec = inspect.getargspec(waagent.RunGetOutput)
params = spec.args
waagent_params = len(params)
except:
waagent_params = 2
if waagent_params >= 3:
exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
else:
exit_code, output = waagent.RunGetOutput(cmd, chk_err)
else:
try:
output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,
shell = True)
exit_code = 0
except subprocess.CalledProcessError as e:
exit_code = e.returncode
output = e.output
return exit_code, output.encode('utf-8').strip()
def get_tenant_id_from_metadata_api(vm_resource_id):
"""
Retrieve the Tenant ID using the Metadata API of the VM resource ID
Since we have not authenticated, the Metadata API will throw a 401, but
the headers of the 401 response will contain the tenant ID
"""
tenant_id = None
metadata_endpoint = get_metadata_api_endpoint(vm_resource_id)
metadata_request = urllib2.Request(metadata_endpoint)
try:
# This request should fail with code 401
metadata_response = urllib2.urlopen(metadata_request)
hutil_log_info('Request to Metadata API did not fail as expected; ' \
'attempting to use headers from response to ' \
'determine Tenant ID')
metadata_headers = metadata_response.headers
except urllib2.HTTPError as e:
metadata_headers = e.headers
if metadata_headers is not None and 'WWW-Authenticate' in metadata_headers:
auth_header = metadata_headers['WWW-Authenticate']
auth_header_regex = r'authorization_uri=\"https:\/\/login\.windows\.net/(' + GUIDRegex + ')\"'
auth_header_search = re.compile(auth_header_regex)
auth_header_matches = auth_header_search.search(auth_header)
if not auth_header_matches:
raise MetadataAPIException('The WWW-Authenticate header in the ' \
'response does not contain expected ' \
'authorization_uri format')
else:
tenant_id = auth_header_matches.group(1)
else:
raise MetadataAPIException('Expected information from Metadata API ' \
'is not present')
return tenant_id
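# Illustrative note (not part of the original handler): the 401 response from
# the Metadata API carries a header of roughly this shape, from which the GUID
# after 'login.windows.net/' is extracted as the tenant ID:
#
#   WWW-Authenticate: Bearer authorization_uri="https://login.windows.net/<tenant GUID>", ...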
def get_metadata_api_endpoint(vm_resource_id):
"""
Extrapolate Metadata API endpoint from VM Resource ID
Example VM resource ID: /subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup/providers/Microsoft.Compute/virtualMachines/lagalbraOCUb16C
Corresponding example endpoint: https://management.azure.com/subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup?api-version=2016-09-01
"""
# Will match for ARM and Classic VMs, Availability Sets, VM Scale Sets
vm_resource_id_regex = r'^\/subscriptions\/(' + GUIDRegex + ')\/' \
'resourceGroups\/([^\/]+)\/providers\/Microsoft' \
'\.(?:Classic){0,1}Compute\/(?:virtualMachines|' \
'availabilitySets|virtualMachineScaleSets)' \
'\/[^\/]+$'
vm_resource_id_search = re.compile(vm_resource_id_regex, re.M)
vm_resource_id_matches = vm_resource_id_search.search(vm_resource_id)
if not vm_resource_id_matches:
raise InvalidParameterError('VM Resource ID is invalid')
else:
subscription_id = vm_resource_id_matches.group(1)
resource_group = vm_resource_id_matches.group(2)
metadata_url = 'https://management.azure.com/subscriptions/{0}' \
'/resourceGroups/{1}'.format(subscription_id,
resource_group)
metadata_data = urllib.urlencode({'api-version' : '2016-09-01'})
metadata_endpoint = '{0}?{1}'.format(metadata_url, metadata_data)
return metadata_endpoint
def get_access_token(tenant_id, resource):
"""
Retrieve an OAuth token by sending an OAuth2 token exchange
request to the local URL that the ManagedIdentity extension is
listening to
"""
# Extract the endpoint that the ManagedIdentity extension is listening on
with open(ManagedIdentityExtListeningURLPath, 'r') as listening_file:
listening_settings_txt = listening_file.read()
try:
listening_settings = json.loads(listening_settings_txt)
listening_url = listening_settings['url']
except:
raise ManagedIdentityExtException('Could not extract listening URL ' \
'from settings file')
# Send an OAuth token exchange request
oauth_data = {'authority' : 'https://login.microsoftonline.com/' \
'{0}'.format(tenant_id),
'resource' : resource
}
oauth_request = urllib2.Request(listening_url + '/oauth2/token',
urllib.urlencode(oauth_data))
oauth_request.add_header('Metadata', 'true')
try:
oauth_response = urllib2.urlopen(oauth_request)
oauth_response_txt = oauth_response.read()
except urllib2.HTTPError as e:
hutil_log_error('Request to ManagedIdentity extension listening URL ' \
'failed with an HTTPError: {0}'.format(e))
hutil_log_info('Response from ManagedIdentity extension: ' \
'{0}'.format(e.read()))
raise ManagedIdentityExtException('Request to listening URL failed ' \
'with HTTPError {0}'.format(e))
except:
raise ManagedIdentityExtException('Unexpected error from request to ' \
'listening URL')
try:
oauth_response_json = json.loads(oauth_response_txt)
except:
raise ManagedIdentityExtException('Error parsing JSON from ' \
'listening URL response')
if (oauth_response_json is not None
and 'access_token' in oauth_response_json):
return oauth_response_json['access_token']
else:
raise ManagedIdentityExtException('Could not retrieve access token ' \
'in the listening URL response')
def get_workspace_info_from_oms(vm_resource_id, tenant_id, access_token):
"""
Send a request to the OMS service with the VM information to
determine the workspace the OMSAgent should onboard to
"""
oms_data = {'ResourceId' : vm_resource_id,
'TenantId' : tenant_id,
'JwtToken' : access_token
}
oms_request_json = json.dumps(oms_data)
oms_request = urllib2.Request(OMSServiceValidationEndpoint)
oms_request.add_header('Content-Type', 'application/json')
retries = 5
initial_sleep_time = AutoManagedWorkspaceCreationSleepSeconds
sleep_increase_factor = 1
try_count = 0
sleep_time = initial_sleep_time
# Workspace may not be provisioned yet; sleep and retry if
# provisioning has been accepted
while try_count <= retries:
try:
oms_response = urllib2.urlopen(oms_request, oms_request_json)
oms_response_txt = oms_response.read()
except urllib2.HTTPError as e:
hutil_log_error('Request to OMS threw HTTPError: {0}'.format(e))
hutil_log_info('Response from OMS: {0}'.format(e.read()))
raise OMSServiceOneClickException('ValidateMachineIdentity ' \
'request returned an error ' \
'HTTP code: {0}'.format(e))
except:
raise OMSServiceOneClickException('Unexpected error from ' \
'ValidateMachineIdentity ' \
'request')
should_retry = retry_get_workspace_info_from_oms(oms_response)
if not should_retry:
# TESTED
break
elif try_count == retries:
# TESTED
hutil_log_error('Retries for ValidateMachineIdentity request ran ' \
'out: required workspace information cannot be ' \
'extracted')
raise OneClickException('Workspace provisioning did not complete ' \
'within the allotted time')
# TESTED
try_count += 1
time.sleep(sleep_time)
sleep_time *= sleep_increase_factor
if not oms_response_txt:
raise OMSServiceOneClickException('Body from ValidateMachineIdentity ' \
'response is empty; required ' \
'workspace information cannot be ' \
'extracted')
try:
oms_response_json = json.loads(oms_response_txt)
except:
raise OMSServiceOneClickException('Error parsing JSON from ' \
'ValidateMachineIdentity response')
if (oms_response_json is not None and 'WorkspaceId' in oms_response_json
and 'WorkspaceKey' in oms_response_json):
return oms_response_json
else:
hutil_log_error('Could not retrieve both workspace ID and key from ' \
'the OMS service response {0}; cannot determine ' \
'workspace ID and key'.format(oms_response_json))
raise OMSServiceOneClickException('Required workspace information ' \
'was not found in the ' \
'ValidateMachineIdentity response')
def retry_get_workspace_info_from_oms(oms_response):
"""
Return True to retry if the response from OMS for the
    ValidateMachineIdentity request indicates that the request has
been accepted, but the managed workspace is still being
provisioned
"""
try:
oms_response_http_code = oms_response.getcode()
except:
        hutil_log_error('Unable to get HTTP code from OMS response')
return False
    if (oms_response_http_code == 202 or oms_response_http_code == 204
            or oms_response_http_code == 404):
hutil_log_info('Retrying ValidateMachineIdentity OMS request ' \
'because workspace is still being provisioned; HTTP ' \
'code from OMS is {0}'.format(oms_response_http_code))
return True
else:
hutil_log_info('Workspace is provisioned; HTTP code from OMS is ' \
'{0}'.format(oms_response_http_code))
return False
def init_waagent_logger():
"""
Initialize waagent logger
If waagent has not been imported, catch the exception
"""
try:
waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)
except Exception as e:
print('Unable to initialize waagent log because of exception ' \
'{0}'.format(e))
def waagent_log_info(message):
"""
Log informational message, being cautious of possibility that waagent may
not be imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Log(message)
else:
print('Info: {0}'.format(message))
def waagent_log_error(message):
"""
Log error message, being cautious of possibility that waagent may not be
imported
"""
if 'Utils.WAAgentUtil' in sys.modules:
waagent.Error(message)
else:
print('Error: {0}'.format(message))
def hutil_log_info(message):
"""
Log informational message, being cautious of possibility that hutil may
not be imported and configured
"""
if HUtilObject is not None:
HUtilObject.log(message)
else:
print('Info: {0}'.format(message))
def hutil_log_error(message):
"""
Log error message, being cautious of possibility that hutil may not be
imported and configured
"""
if HUtilObject is not None:
HUtilObject.error(message)
else:
print('Error: {0}'.format(message))
def log_and_exit(operation, exit_code = 1, message = ''):
"""
Log the exit message and perform the exit
"""
    if exit_code == 0:
waagent_log_info(message)
hutil_log_info(message)
exit_status = 'success'
else:
waagent_log_error(message)
hutil_log_error(message)
exit_status = 'failed'
if HUtilObject is not None:
HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),
message)
else:
update_status_file(operation, str(exit_code), exit_status, message)
sys.exit(exit_code)
# Exceptions
# If these exceptions are expected to be caught by the main method, they
# include an error_code field with an integer with which to exit from main
class OmsAgentForLinuxException(Exception):
"""
Base exception class for all exceptions; as such, its error code is the
basic error code traditionally returned in Linux: 1
"""
error_code = 1
def get_error_message(self, operation):
"""
Return a descriptive error message based on this type of exception
"""
return '{0} failed with exit code {1}'.format(operation,
self.error_code)
class ParameterMissingException(OmsAgentForLinuxException):
"""
There is a missing parameter for the OmsAgentForLinux Extension
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to a missing parameter: {1}'.format(operation,
self)
class InvalidParameterError(OmsAgentForLinuxException):
"""
There is an invalid parameter for the OmsAgentForLinux Extension
ex. Workspace ID does not match GUID regex
"""
error_code = MissingorInvalidParameterErrorCode
def get_error_message(self, operation):
return '{0} failed due to an invalid parameter: {1}'.format(operation,
self)
class UnwantedMultipleConnectionsException(OmsAgentForLinuxException):
"""
This VM is already connected to a different Log Analytics workspace
and stopOnMultipleConnections is set to true
"""
error_code = UnwantedMultipleConnectionsErrorCode
def get_error_message(self, operation):
return '{0} failed due to multiple connections: {1}'.format(operation,
self)
class CannotConnectToOMSException(OmsAgentForLinuxException):
"""
The OMSAgent cannot connect to the OMS service
"""
error_code = CannotConnectToOMSErrorCode # error code to indicate no internet access
def get_error_message(self, operation):
return 'The agent could not connect to the Microsoft Operations ' \
'Management Suite service. Please check that the system ' \
'either has Internet access, or that a valid HTTP proxy has ' \
'been configured for the agent. Please also check the ' \
'correctness of the workspace ID.'
class OneClickException(OmsAgentForLinuxException):
"""
A generic exception for OneClick-related issues
"""
error_code = OneClickErrorCode
def get_error_message(self, operation):
return 'Encountered an issue related to the OneClick scenario: ' \
'{0}'.format(self)
class ManagedIdentityExtMissingException(OneClickException):
"""
This extension being present is required for the OneClick scenario
"""
error_code = ManagedIdentityExtMissingErrorCode
def get_error_message(self, operation):
return 'The ManagedIdentity extension is required to be installed ' \
'for Automatic Management to be enabled. Please set ' \
'EnableAutomaticManagement to false in public settings or ' \
'install the ManagedIdentityExtensionForLinux Azure VM ' \
'extension.'
class ManagedIdentityExtException(OneClickException):
"""
Thrown when we encounter an issue with ManagedIdentityExtensionForLinux
"""
error_code = ManagedIdentityExtErrorCode
def get_error_message(self, operation):
return 'Encountered an issue with the ManagedIdentity extension: ' \
'{0}'.format(self)
class MetadataAPIException(OneClickException):
"""
Thrown when we encounter an issue with Metadata API
"""
error_code = MetadataAPIErrorCode
def get_error_message(self, operation):
return 'Encountered an issue with the Metadata API: {0}'.format(self)
class OMSServiceOneClickException(OneClickException):
"""
Thrown when prerequisites were satisfied but could not retrieve the managed
workspace information from OMS service
"""
error_code = OMSServiceOneClickErrorCode
def get_error_message(self, operation):
return 'Encountered an issue with the OMS service: ' \
'{0}'.format(self)
if __name__ == '__main__' :
main()
Arlo.py
##
# Copyright 2016 Jeffrey D. Walter
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# 14 Sep 2016, Len Shustek: Added Logout()
# 17 Jul 2017, Andreas Jakl: Port to Python 3 (https://www.andreasjakl.com/using-netgear-arlo-security-cameras-for-periodic-recording/)
import datetime
#import logging
import json
import math
import monotonic
import os
import random
import requests
import signal
import sseclient
import threading
import time
import sys
if sys.version[0] == '2':
import Queue as queue
else:
import queue as queue
#logging.basicConfig(level=logging.DEBUG,format='[%(levelname)s] (%(threadName)-10s) %(message)s',)
class EventStream(object):
def __init__(self, event_handler, ping_handler, args):
self.connected = False
self.registered = False
self.queue = queue.Queue()
self.ping_stop_event = threading.Event()
self.arlo = args[0]
self.ping_handler = ping_handler
event_stream = sseclient.SSEClient('https://arlo.netgear.com/hmsweb/client/subscribe?token='+self.arlo.request.session.headers.get('Authorization'), session=self.arlo.request.session)
self.thread = threading.Thread(name="EventStream", target=event_handler, args=(args[0], event_stream, ))
self.thread.setDaemon(True)
def Get(self, block=True, timeout=None):
if sys.version[0] == '2' and block:
if timeout:
timeout += monotonic.monotonic()
# If timeout is None, then just pick some arbitrarily large # for the timeout value.
else:
timeout = 1000000 + monotonic.monotonic()
while True:
try:
# Allow check for Ctrl-C every second
item = self.queue.get(timeout=min(1, timeout - monotonic.monotonic()))
self.queue.task_done()
return item
except queue.Empty:
if monotonic.monotonic() > timeout:
raise
else:
pass
else:
item = self.queue.get(block=block, timeout=timeout)
self.queue.task_done()
return item
def Start(self):
self.thread.start()
return self
def Connect(self):
self.connected = True
def Disconnect(self):
self.connected = False
self.Unregister()
if self.queue:
self.queue.put(None)
def Register(self):
ping_thread = threading.Thread(name='PingThread', target=self.ping_handler, args=(self.arlo, self.ping_stop_event, ))
ping_thread.setDaemon(True)
ping_thread.start()
self.registered = True
def Unregister(self):
self.ping_stop_event.set()
self.registered = False
class Request(object):
"""HTTP helper class"""
def __init__(self):
self.session = requests.Session()
def _request(self, url, method='GET', params={}, headers={}, stream=False):
if method == 'GET':
r = self.session.get(url, headers=headers, stream=stream)
if stream is True:
return r
elif method == 'PUT':
r = self.session.put(url, json=params, headers=headers)
elif method == 'POST':
r = self.session.post(url, json=params, headers=headers)
r.raise_for_status()
body = r.json()
if body['success'] == True:
if 'data' in body:
return body['data']
else:
raise Exception('Request ({0} {1}) failed'.format(method, url), body)
def get(self, url, headers={}, stream=False):
return self._request(url, 'GET', {}, headers, stream)
def put(self, url, params={}, headers={}):
return self._request(url, 'PUT', params, headers)
def post(self, url, params={}, headers={}):
return self._request(url, 'POST', params, headers)
class Arlo(object):
TRANSID_PREFIX = 'web'
def __init__(self, username, password):
# signals only work in main thread
try:
signal.signal(signal.SIGINT, self.interrupt_handler)
except:
pass
self.event_streams = {}
self.request = None
self.Login(username, password)
def interrupt_handler(self, signum, frame):
print("Caught Ctrl-C, exiting.")
#for basestation_id in self.event_streams:
# self.event_streams[basestation_id].Disconnect()
os._exit(1)
def genTransId(self, trans_type=TRANSID_PREFIX):
def float2hex(f):
MAXHEXADECIMALS = 15
w = f // 1
d = f % 1
# Do the whole:
if w == 0: result = '0'
else: result = ''
while w:
w, r = divmod(w, 16)
r = int(r)
if r > 9: r = chr(r+55)
else: r = str(r)
result = r + result
# And now the part:
if d == 0: return result
result += '.'
count = 0
while d:
d = d * 16
w, d = divmod(d, 1)
w = int(w)
if w > 9: w = chr(w+55)
else: w = str(w)
result += w
count += 1
if count > MAXHEXADECIMALS: break
return result
now = datetime.datetime.today()
return trans_type+"!" + float2hex(random.random() * math.pow(2, 32)).lower() + "!" + str(int((time.mktime(now.timetuple())*1e3 + now.microsecond/1e3)))
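    ##
    # Illustrative note (not part of the original library): genTransId() produces
    # identifiers of the form
    #
    #   web!<random 32-bit value rendered as lowercase hex>!<current time in milliseconds>
    #
    # e.g. something like 'web!5c2e3a1f.8d!1472961381000', which matches the
    # transId values shown in the example payloads further below.
    ##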
##
# This call returns the following:
#{
# "userId":"XXX-XXXXXXX",
# "email":"user@example.com",
# "token":"2_5HicFJMXXXXX-S_7IuK2EqOUHXXXXXXXXXXX1CXKWTThgU18Va_XXXXXX5S00hUafv3PV_if_Bl_rhiFsDHYwhxI3CxlVnR5f3q2XXXXXX-Wnt9F7D82uN1f4cXXXXX-FMUsWF_6tMBqwn6DpzOaIB7ciJrnr2QJyKewbQouGM6",
# "paymentId":"XXXXXXXX",
# "authenticated":1472961381,
# "accountStatus":"registered",
# "serialNumber":"XXXXXXXXXXXXX",
# "countryCode":"US",
# "tocUpdate":false,
# "policyUpdate":false,
# "validEmail":true
#}
##
def Login(self, username, password):
self.username = username
self.password = password
self.request = Request()
body = self.request.post('https://arlo.netgear.com/hmsweb/login/v2', {'email': self.username, 'password': self.password})
headers = {
'DNT':'1',
'Host': 'arlo.netgear.com',
'Referer': 'https://arlo.netgear.com/',
'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_2 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B202 NETGEAR/v1 (iOS Vuezone)',
'Authorization': body['token']
}
self.request.session.headers.update(headers)
self.user_id = body['userId']
return body
def Logout(self):
return self.request.put('https://arlo.netgear.com/hmsweb/logout')
##
# Arlo uses the EventStream interface in the browser to do pub/sub style messaging.
# Unfortunately, this appears to be the only way Arlo communicates these messages.
#
# This function makes the initial GET request to /subscribe, which returns the EventStream socket.
# Once we have that socket, the API requires a POST request to /notify with the "subscriptions" resource.
# This call "registers" the device (which should be the basestation) so that events will be sent to the EventStream
# when subsequent calls to /notify are made.
#
    # Since this interface is asynchronous, and this is a quick and dirty hack to get this working, I'm using a thread
# to listen to the EventStream. This thread puts events into a queue. Some polling is required (see NotifyAndGetResponse()) because
# the event messages aren't guaranteed to be delivered in any specific order, but I wanted to maintain a synchronous style API.
#
# You generally shouldn't need to call Subscribe() directly, although I'm leaving it "public" for now.
##
def Subscribe(self, basestation):
basestation_id = basestation.get('deviceId')
def Register(self):
if basestation_id in self.event_streams and self.event_streams[basestation_id].connected:
self.Notify(basestation, {"action":"set","resource":"subscriptions/"+self.user_id+"_web","publishResponse":False,"properties":{"devices":[basestation_id]}})
event = self.event_streams[basestation_id].Get(block=True, timeout=120)
if event:
self.event_streams[basestation_id].Register()
return event
def Ping(self, stop_event):
while not stop_event.wait(25.0):
self.Notify(basestation, {"action":"set","resource":"subscriptions/"+self.user_id+"_web","publishResponse":False,"properties":{"devices":[basestation_id]}})
def QueueEvents(self, event_stream):
for event in event_stream:
response = json.loads(event.data)
if basestation_id in self.event_streams:
if self.event_streams[basestation_id].connected:
if response.get('action') == 'logout':
self.event_streams[basestation_id].Disconnect()
else:
self.event_streams[basestation_id].queue.put(response)
elif response.get('status') == 'connected':
self.event_streams[basestation_id].Connect()
if basestation_id not in self.event_streams or not self.event_streams[basestation_id].connected:
self.event_streams[basestation_id] = EventStream(QueueEvents, Ping, args=(self, )).Start()
while not self.event_streams[basestation_id].connected:
time.sleep(1)
if not self.event_streams[basestation_id].registered:
Register(self)
##
# This method stops the EventStream subscription and removes it from the event_stream collection.
##
def Unsubscribe(self, basestation):
basestation_id = basestation.get('deviceId')
if basestation_id in self.event_streams:
if self.event_streams[basestation_id].connected:
                self.request.get('https://arlo.netgear.com/hmsweb/client/unsubscribe')
            self.event_streams[basestation_id].Disconnect()
            del self.event_streams[basestation_id]
##
# The following are examples of the json you would need to pass in the body of the Notify() call to interact with Arlo:
#
###############################################################################################################################
###############################################################################################################################
# NOTE: While you can call Notify() directly, responses from these notify calls are sent to the EventStream (see Subscribe()),
# and so it's better to use the Get/Set methods that are implemented using the NotifyAndGetResponse() method.
###############################################################################################################################
###############################################################################################################################
#
# Set System Mode (Armed, Disarmed) - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"modes","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"active":"mode0"}}
# Set System Mode (Calendar) - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"schedule","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"active":true}}
# Configure The Schedule (Calendar) - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"schedule","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"schedule":[{"modeId":"mode0","startTime":0},{"modeId":"mode2","startTime":28800000},{"modeId":"mode0","startTime":64800000},{"modeId":"mode0","startTime":86400000},{"modeId":"mode2","startTime":115200000},{"modeId":"mode0","startTime":151200000},{"modeId":"mode0","startTime":172800000},{"modeId":"mode2","startTime":201600000},{"modeId":"mode0","startTime":237600000},{"modeId":"mode0","startTime":259200000},{"modeId":"mode2","startTime":288000000},{"modeId":"mode0","startTime":324000000},{"modeId":"mode0","startTime":345600000},{"modeId":"mode2","startTime":374400000},{"modeId":"mode0","startTime":410400000},{"modeId":"mode0","startTime":432000000},{"modeId":"mode0","startTime":518400000}]}
# Create Mode -
# {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"add","resource":"rules","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"name":"Record video on Camera 1 if Camera 1 detects motion","id":"ruleNew","triggers":[{"type":"pirMotionActive","deviceId":"XXXXXXXXXXXXX","sensitivity":80}],"actions":[{"deviceId":"XXXXXXXXXXXXX","type":"recordVideo","stopCondition":{"type":"timeout","timeout":15}},{"type":"sendEmailAlert","recipients":["__OWNER_EMAIL__"]},{"type":"pushNotification"}]}}
# {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"add","resource":"modes","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"name":"Test","rules":["rule3"]}}
# Delete Mode - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"delete","resource":"modes/mode3","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true}
# Camera Off - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"cameras/XXXXXXXXXXXXX","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"privacyActive":false}}
# Night Vision On - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"cameras/XXXXXXXXXXXXX","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"zoom":{"topleftx":0,"toplefty":0,"bottomrightx":1280,"bottomrighty":720},"mirror":true,"flip":true,"nightVisionMode":1,"powerSaveMode":2}}
# Motion Detection Test - {"from":"XXX-XXXXXXX_web","to":"XXXXXXXXXXXXX","action":"set","resource":"cameras/XXXXXXXXXXXXX","transId":"web!XXXXXXXX.XXXXXXXXXXXXXXXXXXXX","publishResponse":true,"properties":{"motionSetupModeEnabled":true,"motionSetupModeSensitivity":80}}
#
# device_id = locations.data.uniqueIds
#
# System Properties: ("resource":"modes")
# active (string) - Mode Selection (mode2 = All Motion On, mode1 = Armed, mode0 = Disarmed, etc.)
#
# System Properties: ("resource":"schedule")
# active (bool) - Mode Selection (true = Calendar)
#
# Camera Properties: ("resource":"cameras/{id}")
# privacyActive (bool) - Camera On/Off
# zoom (topleftx (int), toplefty (int), bottomrightx (int), bottomrighty (int)) - Camera Zoom Level
# mirror (bool) - Mirror Image (left-to-right or right-to-left)
# flip (bool) - Flip Image Vertically
# nightVisionMode (int) - Night Mode Enabled/Disabled (1, 0)
# powerSaveMode (int) - PowerSaver Mode (3 = Best Video, 2 = Optimized, 1 = Best Battery Life)
# motionSetupModeEnabled (bool) - Motion Detection Setup Enabled/Disabled
# motionSetupModeSensitivity (int 0-100) - Motion Detection Sensitivity
##
def Notify(self, basestation, body):
basestation_id = basestation.get('deviceId')
body['transId'] = self.genTransId()
body['from'] = self.user_id+'_web'
body['to'] = basestation_id
self.request.post('https://arlo.netgear.com/hmsweb/users/devices/notify/'+body['to'], body, headers={"xcloudId":basestation.get('xCloudId')})
return body.get('transId')
def NotifyAndGetResponse(self, basestation, body, timeout=120):
basestation_id = basestation.get('deviceId')
self.Subscribe(basestation)
if basestation_id in self.event_streams and self.event_streams[basestation_id].connected and self.event_streams[basestation_id].registered:
transId = self.Notify(basestation, body)
event = self.event_streams[basestation_id].Get(block=True, timeout=timeout)
while self.event_streams[basestation_id].connected and event.get('transId') != transId:
self.event_streams[basestation_id].queue.put(event)
event = self.event_streams[basestation_id].Get(block=True, timeout=timeout)
return event
# Use this method to subscribe to motion events. You must provide a callback function which will get called once per motion event.
#
# The callback function should have the following signature:
    # def callback(self, basestation, event)
#
# This is an example of handling a specific event, in reality, you'd probably want to write a callback for HandleEvents()
# that has a big switch statement in it to handle all the various events Arlo produces.
def SubscribeToMotionEvents(self, basestation, callback, timeout=None):
def callbackwrapper(self, basestation, event):
if event.get('properties', {}).get('motionDetected'):
callback(self, basestation, event)
self.HandleEvents(basestation, callbackwrapper, timeout)
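    ##
    # Illustrative sketch (not part of the original library): a minimal callback
    # for SubscribeToMotionEvents(), using the (self, basestation, event)
    # signature described above:
    #
    #   def on_motion(arlo, basestation, event):
    #       print('Motion detected on ' + event.get('resource', ''))
    #
    #   arlo.SubscribeToMotionEvents(basestation, on_motion)
    ##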
    # Use this method to subscribe to the event stream and provide a callback that will be called for every event received.
# This function will allow you to potentially write a callback that can handle all of the events received from the event stream.
def HandleEvents(self, basestation, callback, timeout=None):
if not callable(callback):
raise Exception('The callback(self, basestation, event) should be a callable function!')
basestation_id = basestation.get('deviceId')
self.Subscribe(basestation)
if basestation_id in self.event_streams and self.event_streams[basestation_id].connected and self.event_streams[basestation_id].registered:
while basestation_id in self.event_streams and self.event_streams[basestation_id].connected:
event = self.event_streams[basestation_id].Get(block=True, timeout=timeout)
if event:
callback(self, basestation, event)
def GetBaseStationState(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"basestation","publishResponse":False})
def GetCameraState(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"cameras","publishResponse":False})
def GetRules(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"rules","publishResponse":False})
def GetModes(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"modes","publishResponse":False})
def GetCalendar(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"get","resource":"schedule","publishResponse":False})
def SirenOn(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"siren","publishResponse":True,"properties":{"sirenState":"on","duration":300,"volume":8,"pattern":"alarm"}})
def SirenOff(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"siren","publishResponse":True,"properties":{"sirenState":"off","duration":300,"volume":8,"pattern":"alarm"}})
def Arm(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"modes","publishResponse":True,"properties":{"active":"mode1"}})
def Disarm(self, basestation):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"modes","publishResponse":True,"properties":{"active":"mode0"}})
# NOTE: The Arlo API seems to disable calendar mode when switching to other modes, if it's enabled.
# You should probably do the same, although, the UI reflects the switch from calendar mode to say armed mode without explicitly setting calendar mode to inactive.
def Calendar(self, basestation, active=True):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"schedule","publishResponse":True,"properties":{"active":active}})
def CustomMode(self, basestation, mode):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"modes","publishResponse":True,"properties":{"active":mode}})
def DeleteMode(self, basestation, mode):
return self.NotifyAndGetResponse(basestation, {"action":"delete","resource":"modes/"+mode,"publishResponse":True})
# Privacy active = True - Camera is off.
# Privacy active = False - Camera is on.
def ToggleCamera(self, basestation, camera, active=True):
return self.NotifyAndGetResponse(basestation, {"action":"set","resource":"cameras/"+camera.get('deviceId'),"publishResponse":True,"properties":{"privacyActive":active}})
def Reset(self):
return self.request.get('https://arlo.netgear.com/hmsweb/users/library/reset')
def GetServiceLevel(self):
return self.request.get('https://arlo.netgear.com/hmsweb/users/serviceLevel')
def GetPaymentOffers(self):
return self.request.get('https://arlo.netgear.com/hmsweb/users/payment/offers')
def GetProfile(self):
return self.request.get('https://arlo.netgear.com/hmsweb/users/profile')
##
# {"userId":"336-4764296","email":"jeffreydwalter@gmail.com","token":"2_5BtvCDVr5K_KJyGKaq8H61hLybT7D69krsmaZeCG0tvs-yw5vm0Y1LKVVoVI9Id19Fk9vFcGFnMja0z_5eNNqP_BOXIX9rzekS2SgTjz7Ao6mPzGs86_yCBPqfaCZCkr0ogErwffuFIZsvh_XGodqkTehzkfQ4Xl8u1h9FhqDR2z","paymentId":"27432411","accountStatus":"registered","serialNumber":"48935B7SA9847","countryCode":"US","tocUpdate":false,"policyUpdate":false,"validEmail":true,"arlo":true,"dateCreated":1463975008658}
##
def GetSession(self):
return self.request.get('https://arlo.netgear.com/hmsweb/users/session')
def GetFriends(self):
return self.request.get('https://arlo.netgear.com/hmsweb/users/friends')
##
# This call returns the following:
#{
# "id":"XXX-XXXXXXX_20160823042047",
# "name":"Home",
# "ownerId":"XXX-XXXXXXX",
# "longitude":X.XXXXXXXXXXXXXXXX,
# "latitude":X.XXXXXXXXXXXXXXXX,
# "address":"123 Middle Of Nowhere Bumbfuck, EG, 12345",
# "homeMode":"schedule",
# "awayMode":"mode1",
# "geoEnabled":false,
# "geoRadius":150.0,
# "uniqueIds":[
# "XXX-XXXXXXX_XXXXXXXXXXXXX"
# ],
# "smartDevices":[
# "XXXXXXXXXX",
# "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"
# ],
# "pushNotifyDevices":[
# "XXXXXXXXXX"
# ]
#}
##
def GetLocations(self):
return self.request.get('https://arlo.netgear.com/hmsweb/users/locations')
# Get location_id is the id field from the return of GetLocations()
# NOTE: The Arlo API seems to disable geofencing mode when switching to other modes, if it's enabled.
    # You should probably do the same, although the UI reflects the switch from geofencing mode to, say, armed mode without explicitly setting geofencing mode to inactive.
def Geofencing(self, location_id, active=True):
return self.request.put('https://arlo.netgear.com/hmsweb/users/locations/'+location_id, {"geoEnabled":active})
##
# This method returns an array that contains the basestation, cameras, etc. and their metadata.
# If you pass in a valid device type ('basestation', 'camera', etc.), this method will return an array of just those devices that match that type.
##
def GetDevices(self, device_type=None):
devices = self.request.get('https://arlo.netgear.com/hmsweb/users/devices')
if device_type:
return [ device for device in devices if device['deviceType'] == device_type]
return devices
def GetLibraryMetaData(self, from_date, to_date):
return self.request.post('https://arlo.netgear.com/hmsweb/users/library/metadata', {'dateFrom':from_date, 'dateTo':to_date})
def UpdateProfile(self, first_name, last_name):
return self.request.put('https://arlo.netgear.com/hmsweb/users/profile', {'firstName': first_name, 'lastName': last_name})
def UpdatePassword(self, password):
r = self.request.post('https://arlo.netgear.com/hmsweb/users/changePassword', {'currentPassword':self.password,'newPassword':password})
self.password = password
return r
##
# This is an example of the json you would pass in the body to UpdateFriends():
#{
# "firstName":"Some",
# "lastName":"Body",
# "devices":{
# "XXXXXXXXXXXXX":"Camera 1",
# "XXXXXXXXXXXXX":"Camera 2 ",
# "XXXXXXXXXXXXX":"Camera 3"
# },
# "lastModified":1463977440911,
# "adminUser":true,
# "email":"user@example.com",
# "id":"XXX-XXXXXXX"
#}
##
def UpdateFriends(self, body):
return self.request.put('https://arlo.netgear.com/hmsweb/users/friends', body)
def UpdateDeviceName(self, device, name):
return self.request.put('https://arlo.netgear.com/hmsweb/users/devices/renameDevice', {'deviceId':device.get('deviceId'), 'deviceName':name, 'parentId':device.get('parentId')})
##
# This is an example of the json you would pass in the body to UpdateDisplayOrder() of your devices in the UI.
#
# XXXXXXXXXXXXX is the device id of each camera. You can get this from GetDevices().
#{
# "devices":{
# "XXXXXXXXXXXXX":1,
# "XXXXXXXXXXXXX":2,
# "XXXXXXXXXXXXX":3
# }
#}
##
def UpdateDisplayOrder(self, body):
return self.request.post('https://arlo.netgear.com/hmsweb/users/devices/displayOrder', body)
##
# This call returns the following:
# presignedContentUrl is a link to the actual video in Amazon AWS.
# presignedThumbnailUrl is a link to the thumbnail .jpg of the actual video in Amazon AWS.
#
#[
# {
# "mediaDurationSecond": 30,
# "contentType": "video/mp4",
# "name": "XXXXXXXXXXXXX",
# "presignedContentUrl": "https://arlos3-prod-z2.s3.amazonaws.com/XXXXXXX_XXXX_XXXX_XXXX_XXXXXXXXXXXXX/XXX-XXXXXXX/XXXXXXXXXXXXX/recordings/XXXXXXXXXXXXX.mp4?AWSAccessKeyId=XXXXXXXXXXXXXXXXXXXX&Expires=1472968703&Signature=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
# "lastModified": 1472881430181,
# "localCreatedDate": XXXXXXXXXXXXX,
# "presignedThumbnailUrl": "https://arlos3-prod-z2.s3.amazonaws.com/XXXXXXX_XXXX_XXXX_XXXX_XXXXXXXXXXXXX/XXX-XXXXXXX/XXXXXXXXXXXXX/recordings/XXXXXXXXXXXXX_thumb.jpg?AWSAccessKeyId=XXXXXXXXXXXXXXXXXXXX&Expires=1472968703&Signature=XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
# "reason": "motionRecord",
# "deviceId": "XXXXXXXXXXXXX",
# "createdBy": "XXXXXXXXXXXXX",
# "createdDate": "20160903",
# "timeZone": "America/Chicago",
# "ownerId": "XXX-XXXXXXX",
# "utcCreatedDate": XXXXXXXXXXXXX,
# "currentState": "new",
# "mediaDuration": "00:00:30"
# }
#]
##
def GetLibrary(self, from_date, to_date):
return self.request.post('https://arlo.netgear.com/hmsweb/users/library', {'dateFrom':from_date, 'dateTo':to_date})
##
# Delete a single video recording from Arlo.
#
# All of the date info and device id you need to pass into this method are given in the results of the GetLibrary() call.
#
##
def DeleteRecording(self, camera, created_date, utc_created_date):
return self.request.post('https://arlo.netgear.com/hmsweb/users/library/recycle', {'data':[{'createdDate':created_date,'utcCreatedDate':utc_created_date,'deviceId':camera.get('deviceId')}]})
##
# Delete a batch of video recordings from Arlo.
#
# The GetLibrary() call response json can be passed directly to this method if you'd like to delete the same list of videos you queried for.
# If you want to delete some other batch of videos, then you need to send an array of objects representing each video you want to delete.
#
#[
# {
# "createdDate":"20160904",
# "utcCreatedDate":1473010280395,
# "deviceId":"XXXXXXXXXXXXX"
# },
# {
# "createdDate":"20160904",
# "utcCreatedDate":1473010280395,
# "deviceId":"XXXXXXXXXXXXX"
# }
#]
##
def BatchDeleteRecordings(self, recording_metadata):
return self.request.post('https://arlo.netgear.com/hmsweb/users/library/recycle', {'data':recording_metadata})
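    ##
    # A minimal usage sketch (not part of the library): it assumes an authenticated Arlo
    # instance named `arlo` and 'YYYYMMDD' date strings like those shown above, and deletes
    # every recording that GetLibrary() returns for the date range.
    #
    #   library = arlo.GetLibrary(from_date, to_date)
    #   arlo.BatchDeleteRecordings(library)
    ##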
##
# Returns the whole video from the presignedContentUrl.
#
# Obviously, this function is generic and could be used to download anything. :)
##
def GetRecording(self, url, chunk_size=4096):
        video = b''  # collect raw bytes so the concatenation also works on Python 3
r = requests.get(url, stream=True)
r.raise_for_status()
for chunk in r.iter_content(chunk_size):
if chunk: video += chunk
return video
##
# Returns a generator that is the chunked video stream from the presignedContentUrl.
#
# Obviously, this function is generic and could be used to download anything. :)
##
def StreamRecording(self, url, chunk_size=4096):
r = requests.get(url, stream=True)
r.raise_for_status()
for chunk in r.iter_content(chunk_size):
yield chunk
##
# Writes a video to a given local file path.
# url: presignedContentUrl
# to: path where the file should be written
##
    def DownloadRecording(self, url, to):
        stream = self.StreamRecording(url)
        # Open in binary mode and write the raw byte chunks; this works on both
        # Python 2.7 and 3 (plain file objects have no .buffer attribute), and the
        # with-statement already closes the file.
        with open(to, 'wb') as f:
            for chunk in stream:
                f.write(chunk)
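    ##
    # A minimal usage sketch (not part of the library): `arlo`, `from_date` and `to_date`
    # are assumed names for illustration. It saves each recording returned by GetLibrary()
    # to a local .mp4 named after its localCreatedDate field (see the GetLibrary() example above).
    #
    #   for recording in arlo.GetLibrary(from_date, to_date):
    #       arlo.DownloadRecording(recording['presignedContentUrl'],
    #                              str(recording['localCreatedDate']) + '.mp4')
    ##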
##
# This function returns the url of the rtsp video stream
# This stream needs to be called within 30 seconds or else it becomes invalid
# It can be streamed via ffmpeg -re -i 'rtsps://<url>' -acodec copy -vcodec copy test.mp4
# The request to /users/devices/startStream returns:
#{ "url":"rtsp://<url>:443/vzmodulelive?egressToken=b<xx>&userAgent=iOS&cameraId=<camid>" }
#
##
def StartStream(self, camera):
stream_url_dict = self.request.post('https://arlo.netgear.com/hmsweb/users/devices/startStream', {"to":camera.get('parentId'),"from":self.user_id+"_web","resource":"cameras/"+camera.get('deviceId'),"action":"set","publishResponse":True,"transId":self.genTransId(),"properties":{"activityState":"startUserStream","cameraId":camera.get('deviceId')}}, headers={"xcloudId":camera.get('xCloudId')})
return stream_url_dict['url'].replace("rtsp://", "rtsps://")
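    ##
    # A usage sketch (not part of the library): the returned rtsps:// URL must be consumed
    # within roughly 30 seconds, e.g. by handing it straight to ffmpeg as noted above.
    # `arlo` and `camera` are assumed names, and the snippet relies on the standard-library
    # subprocess module.
    #
    #   url = arlo.StartStream(camera)
    #   subprocess.call(['ffmpeg', '-re', '-i', url, '-acodec', 'copy', '-vcodec', 'copy', 'test.mp4'])
    ##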
##
# This function causes the camera to record a snapshot.
#
# You can get the timezone from GetDevices().
##
def TakeSnapshot(self, camera):
stream_url = self.StartStream(camera)
self.request.post('https://arlo.netgear.com/hmsweb/users/devices/takeSnapshot', {'xcloudId':camera.get('xCloudId'),'parentId':camera.get('parentId'),'deviceId':camera.get('deviceId'),'olsonTimeZone':camera.get('properties', {}).get('olsonTimeZone')}, headers={"xcloudId":camera.get('xCloudId')})
        return stream_url
##
# This function causes the camera to start recording.
#
# You can get the timezone from GetDevices().
##
def StartRecording(self, camera):
stream_url = self.StartStream(camera)
self.request.post('https://arlo.netgear.com/hmsweb/users/devices/startRecord', {'xcloudId':camera.get('xCloudId'),'parentId':camera.get('parentId'),'deviceId':camera.get('deviceId'),'olsonTimeZone':camera.get('properties', {}).get('olsonTimeZone')}, headers={"xcloudId":camera.get('xCloudId')})
return stream_url
##
# This function causes the camera to stop recording.
#
# You can get the timezone from GetDevices().
##
def StopRecording(self, camera):
return self.request.post('https://arlo.netgear.com/hmsweb/users/devices/stopRecord', {'xcloudId':camera.get('xCloudId'),'parentId':camera.get('parentId'),'deviceId':camera.get('deviceId'),'olsonTimeZone':camera.get('properties', {}).get('olsonTimeZone')}, headers={"xcloudId":camera.get('xCloudId')})
|
ScInit.py
|
import sys, getopt, struct, time, termios, fcntl, os, colorsys, threading, datetime, subprocess, json
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/fbtft')
from RenderManager import RenderManager
from WanemManager import WanemManager
from HttpUtil import HttpUtil
from LogReporter import LogReporter
from ScBase import ScBase
from gfx import Rect
from DataAsset import CTX
from pprint import pprint
class ScInit(ScBase):
def __init__(self, pCTX, pRender, pWanem):
super(ScInit, self).__init__(pCTX, pRender, pWanem)
self.tickCnt = 0
self.tickDuration = 3
self.prevTickCnt = -1
self.stepLabel = [".","..","...","OK","ERR"]
self.worker = None
self.workerRet = 0
self.STATE_CHK_NETWORK = 1
self.STATE_GET_INFO = 2
self.STATE_CHK_EMULATE_DAT = 3
self.STATE_CHK_WIFI_DONGLE = 4
self.STATE_SETUP_AP = 5
self.STATE_CHK_LAN_INTERFACE = 6
def CheckHttpConnectivity(self):
print "-------------------------------"
while HttpUtil.CheckConnectivity(self.pCTX.connectivityCheckUrl, 1) == False:
time.sleep(1)
self.workerRet = 3
print "-------------------------------"
return
def GetApiInfo(self):
apiUrl = self.pCTX.infoApiUrl
savePath = "/tmp/WanemApiInfo.json"
print "-------------------------------"
while HttpUtil.Get(apiUrl, savePath, 1) == False:
time.sleep(1)
file = open(savePath)
dat = json.load(file)
file.close()
#pprint(dat)
self.pCTX.apiStatus = dat["status"]["maintStatus"]
self.workerRet = 3
print "-------------------------------"
return
def CheckWanemDat(self):
print "-------------------------------"
cmd = "php /home/pi/EM-uNetPi/scripts/php/SyncDat.php"
print cmd
ret = False
try:
subprocess.check_call(cmd.strip().split(" "))
ret = True
self.workerRet = 3
except subprocess.CalledProcessError:
ret = False
self.workerRet = 4
print str(ret)
print "-------------------------------"
return
def SetupAP(self):
print "-------------------------------"
cmd = "php /home/pi/EM-uNetPi/scripts/php/UpdateHostapdConf.php wanem-" + self.GetSelfId()
print cmd
ret = False
try:
subprocess.check_call(cmd.strip().split(" "))
ret = True
self.workerRet = 3
except subprocess.CalledProcessError:
ret = False
self.workerRet = 4
print str(ret)
print "-------------------------------"
return
def CheckLanInterface(self):
print "-------------------------------"
cmd = "ifconfig eth2"
ret = False
try:
subprocess.check_call(cmd.strip().split(" "))
self.pCTX.lanMode = self.pCTX.LAN_MODE_HYBRID
ret = True
self.workerRet = 3
except subprocess.CalledProcessError:
self.pCTX.lanMode = self.pCTX.LAN_MODE_WLAN
ret = True
self.workerRet = 3
print str(ret)
print "-------------------------------"
return
def CheckWifiDongle(self):
print "-------------------------------"
cmd = "lsusb -d 0411:0242"
ret = False
try:
subprocess.check_call(cmd.strip().split(" "))
self.pCTX.wifiDongleExist = True
ret = True
self.workerRet = 3
except subprocess.CalledProcessError:
self.pCTX.wifiDongleExist = False
ret = True
self.workerRet = 3
cmd = "cat /etc/wanem/apmode.prop"
try:
currentApMode = int(subprocess.check_output(cmd.strip().split(" ")).replace('\n',''))
except subprocess.CalledProcessError:
currentApMode = 0
# OverWrite to 2.4GHz Mode
if currentApMode == 1 and self.pCTX.wifiDongleExist == False:
cmd = "cp /etc/wanem/tpl/0.prop /etc/wanem/apmode.prop"
try:
subprocess.check_call(cmd.strip().split(" "))
except subprocess.CalledProcessError:
print ("Update Ap Mode Fail")
cmd = "cp /etc/wanem/tpl/2.prop /etc/wanem/apchannel.prop"
try:
subprocess.check_call(cmd.strip().split(" "))
except subprocess.CalledProcessError:
print ("Update Ap Channel Fail")
cmd = "cp /etc/wanem/tpl/raspi-blacklist-5.conf /etc/modprobe.d/raspi-blacklist.conf"
try:
subprocess.check_call(cmd.strip().split(" "))
except subprocess.CalledProcessError:
print ("Update Module Blacklist Fail")
print "WifiDongle Exist : " + str(self.pCTX.wifiDongleExist)
print "-------------------------------"
return
def Start(self):
super(ScInit, self).Start()
##[ INIT STATE ]################################################################
self.state = self.STATE_TERM
self.nextScene = "Menu"
#self.nextScene = "ManualEx"
self.state = self.STATE_CHK_NETWORK
#self.state = self.STATE_CHK_EMULATE_DAT
#self.workerRet = 0
self.worker = threading.Thread(target=self.CheckHttpConnectivity, args=())
#self.worker = threading.Thread(target=self.CheckWanemDat, args=())
self.worker.start()
##[ RENDER ]################################################################
self.pRender.UpdateTitle("Boot - rev : " + self.pCTX.revision)
c = yellow = self.pRender.fb.rgb(255,255,0)
self.pRender.fb.draw.rect(c, Rect(0, 54, self.pRender.xres, 1), 0)
label = "%-18s [ ]" % "CHK NETWORK"
self.pRender.fb.putstr(20, 74 + 32*0, label, self.pRender.W, 2)
label = "%-18s [ ]" % "GET API INFO"
self.pRender.fb.putstr(20, 74 + 32*1, label, self.pRender.W, 2)
label = "%-18s [ ]" % "CHK WIFI DONGLE"
self.pRender.fb.putstr(20, 74 + 32*2, label, self.pRender.W, 2)
label = "%-18s [ ]" % "SETUP AP"
self.pRender.fb.putstr(20, 74 + 32*3, label, self.pRender.W, 2)
label = "%-18s [ ]" % "CHK LAN INTERFACE"
self.pRender.fb.putstr(20, 74 + 32*4, label, self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32*0, " - ", self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32*1, " - ", self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32*2, " - ", self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32*3, " - ", self.pRender.W, 2)
self.pRender.fb.putstr(273, 74 + 32*4, " - ", self.pRender.W, 2)
#self.pRender.fb.draw.rect(self.pRender.W, Rect(271, 74, 40, 16), 0)
return
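    # Update() below steps through the boot checks in order:
    # STATE_CHK_NETWORK -> STATE_GET_INFO -> STATE_CHK_WIFI_DONGLE -> STATE_SETUP_AP
    # -> STATE_CHK_LAN_INTERFACE -> STATE_TERM. Each check runs on a worker thread while
    # the "." / ".." / "..." progress label animates until the thread finishes.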
def Update(self):
if self.pCTX.tick == 1:
self.tickCnt = (self.tickCnt + 1) % self.tickDuration
if self.state == self.STATE_CHK_NETWORK:
if self.worker.isAlive() == False:
self.worker.join();
self.UpdateProgress(0, self.workerRet)
self.state = self.STATE_GET_INFO
self.worker = None
self.workerRet = 0
self.worker = threading.Thread(target=self.GetApiInfo, args=())
self.worker.start()
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(0, self.tickCnt)
elif self.state == self.STATE_GET_INFO:
if self.worker.isAlive() == False:
self.worker.join();
self.UpdateProgress(1, self.workerRet)
self.state = self.STATE_CHK_WIFI_DONGLE
self.worker = None
self.workerRet = 0
self.worker = threading.Thread(target=self.CheckWifiDongle, args=())
self.worker.start()
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(1, self.tickCnt)
elif self.state == self.STATE_CHK_WIFI_DONGLE:
if self.worker.isAlive() == False:
self.worker.join();
self.UpdateProgress(2, self.workerRet)
self.state = self.STATE_SETUP_AP
self.worker = None
self.workerRet = 0
self.worker = threading.Thread(target=self.SetupAP, args=())
self.worker.start()
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(2, self.tickCnt)
elif self.state == self.STATE_SETUP_AP:
if self.worker.isAlive() == False:
self.worker.join();
self.UpdateProgress(3, self.workerRet)
self.state = self.STATE_CHK_LAN_INTERFACE
self.worker = None
self.workerRet = 0
self.worker = threading.Thread(target=self.CheckLanInterface, args=())
self.worker.start()
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(3, self.tickCnt)
elif self.state == self.STATE_CHK_LAN_INTERFACE:
if self.worker.isAlive() == False:
self.worker.join();
self.UpdateProgress(4, self.workerRet)
if self.pCTX.apiStatus == 0:
LogReporter.SendLog(self.pCTX, 1, "StartUp")
self.state = self.STATE_TERM
self.worker = None
print self.pCTX.lanMode
else:
if self.tickCnt != self.prevTickCnt:
self.UpdateProgress(4, self.tickCnt)
self.prevTickCnt = self.tickCnt
return
def UpdateProgress(self, target, step):
if step == 3:
c = self.pRender.G
elif step == 4:
c = self.pRender.R
else:
c = self.pRender.W
self.pRender.fb.draw.rect(self.pRender.N, Rect(271, 74 + 32*target, 40, 16), 0)
self.pRender.fb.putstr(273, 74 + 32*target, self.stepLabel[step], c, 2)
|
run_nvmf.py
|
#!/usr/bin/env python3
from json.decoder import JSONDecodeError
import os
import re
import sys
import argparse
import json
import zipfile
import threading
import subprocess
# check_output / CalledProcessError are used without the "subprocess." prefix below
from subprocess import CalledProcessError, check_output
import itertools
import configparser
import time
import uuid
from collections import OrderedDict
import paramiko
import pandas as pd
import rpc
import rpc.client
from common import *
class Server:
def __init__(self, name, general_config, server_config):
self.name = name
self.username = general_config["username"]
self.password = general_config["password"]
self.transport = general_config["transport"].lower()
self.nic_ips = server_config["nic_ips"]
self.mode = server_config["mode"]
self.irq_scripts_dir = "/usr/src/local/mlnx-tools/ofed_scripts"
if "irq_scripts_dir" in server_config and server_config["irq_scripts_dir"]:
self.irq_scripts_dir = server_config["irq_scripts_dir"]
self.local_nic_info = []
self._nics_json_obj = {}
self.svc_restore_dict = {}
self.sysctl_restore_dict = {}
self.tuned_restore_dict = {}
self.governor_restore = ""
self.tuned_profile = ""
self.enable_adq = False
self.adq_priority = None
if "adq_enable" in server_config and server_config["adq_enable"]:
self.enable_adq = server_config["adq_enable"]
self.adq_priority = 1
if "tuned_profile" in server_config:
self.tuned_profile = server_config["tuned_profile"]
if not re.match("^[A-Za-z0-9]*$", name):
self.log_print("Please use a name which contains only letters or numbers")
sys.exit(1)
def log_print(self, msg):
print("[%s] %s" % (self.name, msg), flush=True)
def get_uncommented_lines(self, lines):
return [line for line in lines if line and not line.startswith('#')]
def get_nic_name_by_ip(self, ip):
if not self._nics_json_obj:
nics_json_obj = self.exec_cmd(["ip", "-j", "address", "show"])
self._nics_json_obj = list(filter(lambda x: x["addr_info"], json.loads(nics_json_obj)))
for nic in self._nics_json_obj:
for addr in nic["addr_info"]:
if ip in addr["local"]:
return nic["ifname"]
def set_local_nic_info_helper(self):
pass
def set_local_nic_info(self, pci_info):
def extract_network_elements(json_obj):
nic_list = []
if isinstance(json_obj, list):
for x in json_obj:
nic_list.extend(extract_network_elements(x))
elif isinstance(json_obj, dict):
if "children" in json_obj:
nic_list.extend(extract_network_elements(json_obj["children"]))
if "class" in json_obj.keys() and "network" in json_obj["class"]:
nic_list.append(json_obj)
return nic_list
self.local_nic_info = extract_network_elements(pci_info)
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
return ""
def configure_system(self):
self.configure_services()
self.configure_sysctl()
self.configure_tuned()
self.configure_cpu_governor()
self.configure_irq_affinity()
def configure_adq(self):
if self.mode == "kernel":
self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
return
self.adq_load_modules()
self.adq_configure_nic()
def adq_load_modules(self):
self.log_print("Modprobing ADQ-related Linux modules...")
adq_module_deps = ["sch_mqprio", "act_mirred", "cls_flower"]
for module in adq_module_deps:
try:
self.exec_cmd(["sudo", "modprobe", module])
self.log_print("%s loaded!" % module)
except CalledProcessError as e:
self.log_print("ERROR: failed to load module %s" % module)
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
def adq_configure_tc(self):
self.log_print("Configuring ADQ Traffic classess and filters...")
if self.mode == "kernel":
self.log_print("WARNING: ADQ setup not yet supported for Kernel mode. Skipping configuration.")
return
num_queues_tc0 = 2 # 2 is minimum number of queues for TC0
num_queues_tc1 = self.num_cores
port_param = "dst_port" if isinstance(self, Target) else "src_port"
port = "4420"
xps_script_path = os.path.join(self.spdk_dir, "scripts", "perf", "nvmf", "set_xps_rxqs")
for nic_ip in self.nic_ips:
nic_name = self.get_nic_name_by_ip(nic_ip)
tc_qdisc_map_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name,
"root", "mqprio", "num_tc", "2", "map", "0", "1",
"queues", "%s@0" % num_queues_tc0,
"%s@%s" % (num_queues_tc1, num_queues_tc0),
"hw", "1", "mode", "channel"]
self.log_print(" ".join(tc_qdisc_map_cmd))
self.exec_cmd(tc_qdisc_map_cmd)
time.sleep(5)
tc_qdisc_ingress_cmd = ["sudo", "tc", "qdisc", "add", "dev", nic_name, "ingress"]
self.log_print(" ".join(tc_qdisc_ingress_cmd))
self.exec_cmd(tc_qdisc_ingress_cmd)
tc_filter_cmd = ["sudo", "tc", "filter", "add", "dev", nic_name,
"protocol", "ip", "ingress", "prio", "1", "flower",
"dst_ip", "%s/32" % nic_ip, "ip_proto", "tcp", port_param, port,
"skip_sw", "hw_tc", "1"]
self.log_print(" ".join(tc_filter_cmd))
self.exec_cmd(tc_filter_cmd)
# show tc configuration
self.log_print("Show tc configuration for %s NIC..." % nic_name)
tc_disk_out = self.exec_cmd(["sudo", "tc", "qdisc", "show", "dev", nic_name])
tc_filter_out = self.exec_cmd(["sudo", "tc", "filter", "show", "dev", nic_name, "ingress"])
self.log_print("%s" % tc_disk_out)
self.log_print("%s" % tc_filter_out)
            # Ethtool coalesce settings must be applied after configuring traffic classes
self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-rx", "off", "rx-usecs", "0"])
self.exec_cmd(["sudo", "ethtool", "--coalesce", nic_name, "adaptive-tx", "off", "tx-usecs", "500"])
self.log_print("Running set_xps_rxqs script for %s NIC..." % nic_name)
xps_cmd = ["sudo", xps_script_path, nic_name]
self.log_print(xps_cmd)
self.exec_cmd(xps_cmd)
def adq_configure_nic(self):
self.log_print("Configuring NIC port settings for ADQ testing...")
# Reload the driver first, to make sure any previous settings are re-set.
try:
self.exec_cmd(["sudo", "rmmod", "ice"])
self.exec_cmd(["sudo", "modprobe", "ice"])
except CalledProcessError as e:
self.log_print("ERROR: failed to reload ice module!")
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
self.log_print(nic)
try:
self.exec_cmd(["sudo", "ethtool", "-K", nic,
"hw-tc-offload", "on"]) # Enable hardware TC offload
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-inline-flow-director", "on"]) # Enable Intel Flow Director
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic, "fw-lldp-agent", "off"]) # Disable LLDP
# As temporary workaround for ADQ, channel packet inspection optimization is turned on during connection establishment.
# Then turned off before fio ramp_up expires in ethtool_after_fio_ramp().
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-pkt-inspect-optimize", "on"])
except CalledProcessError as e:
self.log_print("ERROR: failed to configure NIC port using ethtool!")
self.log_print("%s resulted in error: %s" % (e.cmd, e.output))
self.log_print("Please update your NIC driver and firmware versions and try again.")
self.log_print(self.exec_cmd(["sudo", "ethtool", "-k", nic]))
self.log_print(self.exec_cmd(["sudo", "ethtool", "--show-priv-flags", nic]))
def configure_services(self):
self.log_print("Configuring active services...")
svc_config = configparser.ConfigParser(strict=False)
# Below list is valid only for RHEL / Fedora systems and might not
# contain valid names for other distributions.
svc_target_state = {
"firewalld": "inactive",
"irqbalance": "inactive",
"lldpad.service": "inactive",
"lldpad.socket": "inactive"
}
for service in svc_target_state:
out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
out = "\n".join(["[%s]" % service, out])
svc_config.read_string(out)
if "LoadError" in svc_config[service] and "not found" in svc_config[service]["LoadError"]:
continue
service_state = svc_config[service]["ActiveState"]
self.log_print("Current state of %s service is %s" % (service, service_state))
self.svc_restore_dict.update({service: service_state})
if service_state != "inactive":
self.log_print("Disabling %s. It will be restored after the test has finished." % service)
self.exec_cmd(["sudo", "systemctl", "stop", service])
def configure_sysctl(self):
self.log_print("Tuning sysctl settings...")
busy_read = 0
if self.enable_adq and self.mode == "spdk":
busy_read = 1
sysctl_opts = {
"net.core.busy_poll": 0,
"net.core.busy_read": busy_read,
"net.core.somaxconn": 4096,
"net.core.netdev_max_backlog": 8192,
"net.ipv4.tcp_max_syn_backlog": 16384,
"net.core.rmem_max": 268435456,
"net.core.wmem_max": 268435456,
"net.ipv4.tcp_mem": "268435456 268435456 268435456",
"net.ipv4.tcp_rmem": "8192 1048576 33554432",
"net.ipv4.tcp_wmem": "8192 1048576 33554432",
"net.ipv4.route.flush": 1,
"vm.overcommit_memory": 1,
}
for opt, value in sysctl_opts.items():
self.sysctl_restore_dict.update({opt: self.exec_cmd(["sysctl", "-n", opt]).strip()})
self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
def configure_tuned(self):
if not self.tuned_profile:
self.log_print("WARNING: Tuned profile not set in configration file. Skipping configuration.")
return
self.log_print("Configuring tuned-adm profile to %s." % self.tuned_profile)
service = "tuned"
tuned_config = configparser.ConfigParser(strict=False)
out = self.exec_cmd(["sudo", "systemctl", "show", "--no-page", service])
out = "\n".join(["[%s]" % service, out])
tuned_config.read_string(out)
tuned_state = tuned_config[service]["ActiveState"]
self.svc_restore_dict.update({service: tuned_state})
if tuned_state != "inactive":
profile = self.exec_cmd(["cat", "/etc/tuned/active_profile"]).strip()
profile_mode = self.exec_cmd(["cat", "/etc/tuned/profile_mode"]).strip()
self.tuned_restore_dict = {
"profile": profile,
"mode": profile_mode
}
self.exec_cmd(["sudo", "systemctl", "start", service])
self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_profile])
self.log_print("Tuned profile set to %s." % self.exec_cmd(["cat", "/etc/tuned/active_profile"]))
def configure_cpu_governor(self):
self.log_print("Setting CPU governor to performance...")
# This assumes that there is the same CPU scaling governor on each CPU
self.governor_restore = self.exec_cmd(["cat", "/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor"]).strip()
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "performance"])
def configure_irq_affinity(self):
self.log_print("Setting NIC irq affinity for NICs...")
irq_script_path = os.path.join(self.irq_scripts_dir, "set_irq_affinity.sh")
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
irq_cmd = ["sudo", irq_script_path, nic]
self.log_print(irq_cmd)
self.exec_cmd(irq_cmd, change_dir=self.irq_scripts_dir)
def restore_services(self):
self.log_print("Restoring services...")
for service, state in self.svc_restore_dict.items():
cmd = "stop" if state == "inactive" else "start"
self.exec_cmd(["sudo", "systemctl", cmd, service])
def restore_sysctl(self):
self.log_print("Restoring sysctl settings...")
for opt, value in self.sysctl_restore_dict.items():
self.log_print(self.exec_cmd(["sudo", "sysctl", "-w", "%s=%s" % (opt, value)]).strip())
def restore_tuned(self):
self.log_print("Restoring tuned-adm settings...")
if not self.tuned_restore_dict:
return
if self.tuned_restore_dict["mode"] == "auto":
self.exec_cmd(["sudo", "tuned-adm", "auto_profile"])
self.log_print("Reverted tuned-adm to auto_profile.")
else:
self.exec_cmd(["sudo", "tuned-adm", "profile", self.tuned_restore_dict["profile"]])
self.log_print("Reverted tuned-adm to %s profile." % self.tuned_restore_dict["profile"])
def restore_governor(self):
self.log_print("Restoring CPU governor setting...")
if self.governor_restore:
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", self.governor_restore])
self.log_print("Reverted CPU governor to %s." % self.governor_restore)
class Target(Server):
def __init__(self, name, general_config, target_config):
super(Target, self).__init__(name, general_config, target_config)
# Defaults
self.enable_sar = False
self.sar_delay = 0
self.sar_interval = 0
self.sar_count = 0
self.enable_pcm = False
self.pcm_dir = ""
self.pcm_delay = 0
self.pcm_interval = 0
self.pcm_count = 0
self.enable_bandwidth = 0
self.bandwidth_count = 0
self.enable_dpdk_memory = False
self.dpdk_wait_time = 0
self.enable_zcopy = False
self.scheduler_name = "static"
self.null_block = 0
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
self.subsystem_info_list = []
if "null_block_devices" in target_config:
self.null_block = target_config["null_block_devices"]
if "sar_settings" in target_config:
self.enable_sar, self.sar_delay, self.sar_interval, self.sar_count = target_config["sar_settings"]
if "pcm_settings" in target_config:
self.enable_pcm = True
self.pcm_dir, self.pcm_delay, self.pcm_interval, self.pcm_count = target_config["pcm_settings"]
if "enable_bandwidth" in target_config:
self.enable_bandwidth, self.bandwidth_count = target_config["enable_bandwidth"]
if "enable_dpdk_memory" in target_config:
self.enable_dpdk_memory, self.dpdk_wait_time = target_config["enable_dpdk_memory"]
if "scheduler_settings" in target_config:
self.scheduler_name = target_config["scheduler_settings"]
if "zcopy_settings" in target_config:
self.enable_zcopy = target_config["zcopy_settings"]
if "results_dir" in target_config:
self.results_dir = target_config["results_dir"]
self.script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
self.spdk_dir = os.path.abspath(os.path.join(self.script_dir, "../../../"))
self.set_local_nic_info(self.set_local_nic_info_helper())
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.zip_spdk_sources(self.spdk_dir, "/tmp/spdk.zip")
self.configure_system()
if self.enable_adq:
self.configure_adq()
self.sys_config()
def set_local_nic_info_helper(self):
return json.loads(self.exec_cmd(["lshw", "-json"]))
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
stderr_opt = None
if stderr_redirect:
stderr_opt = subprocess.STDOUT
if change_dir:
old_cwd = os.getcwd()
os.chdir(change_dir)
self.log_print("Changing directory to %s" % change_dir)
out = check_output(cmd, stderr=stderr_opt).decode(encoding="utf-8")
if change_dir:
os.chdir(old_cwd)
self.log_print("Changing directory to %s" % old_cwd)
return out
def zip_spdk_sources(self, spdk_dir, dest_file):
self.log_print("Zipping SPDK source directory")
fh = zipfile.ZipFile(dest_file, "w", zipfile.ZIP_DEFLATED)
for root, directories, files in os.walk(spdk_dir, followlinks=True):
for file in files:
fh.write(os.path.relpath(os.path.join(root, file)))
fh.close()
self.log_print("Done zipping")
def read_json_stats(self, file):
with open(file, "r") as json_data:
data = json.load(json_data)
            job_pos = 0  # job_pos = 0 because we use aggregated results from the whole job
# Check if latency is in nano or microseconds to choose correct dict key
def get_lat_unit(key_prefix, dict_section):
# key prefix - lat, clat or slat.
# dict section - portion of json containing latency bucket in question
# Return dict key to access the bucket and unit as string
for k, _ in dict_section.items():
if k.startswith(key_prefix):
return k, k.split("_")[1]
def get_clat_percentiles(clat_dict_leaf):
if "percentile" in clat_dict_leaf:
p99_lat = float(clat_dict_leaf["percentile"]["99.000000"])
p99_9_lat = float(clat_dict_leaf["percentile"]["99.900000"])
p99_99_lat = float(clat_dict_leaf["percentile"]["99.990000"])
p99_999_lat = float(clat_dict_leaf["percentile"]["99.999000"])
return [p99_lat, p99_9_lat, p99_99_lat, p99_999_lat]
else:
# Latest fio versions do not provide "percentile" results if no
# measurements were done, so just return zeroes
return [0, 0, 0, 0]
read_iops = float(data["jobs"][job_pos]["read"]["iops"])
read_bw = float(data["jobs"][job_pos]["read"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["read"])
read_avg_lat = float(data["jobs"][job_pos]["read"][lat_key]["mean"])
read_min_lat = float(data["jobs"][job_pos]["read"][lat_key]["min"])
read_max_lat = float(data["jobs"][job_pos]["read"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["read"])
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["read"][clat_key])
if "ns" in lat_unit:
read_avg_lat, read_min_lat, read_max_lat = [x / 1000 for x in [read_avg_lat, read_min_lat, read_max_lat]]
if "ns" in clat_unit:
read_p99_lat = read_p99_lat / 1000
read_p99_9_lat = read_p99_9_lat / 1000
read_p99_99_lat = read_p99_99_lat / 1000
read_p99_999_lat = read_p99_999_lat / 1000
write_iops = float(data["jobs"][job_pos]["write"]["iops"])
write_bw = float(data["jobs"][job_pos]["write"]["bw"])
lat_key, lat_unit = get_lat_unit("lat", data["jobs"][job_pos]["write"])
write_avg_lat = float(data["jobs"][job_pos]["write"][lat_key]["mean"])
write_min_lat = float(data["jobs"][job_pos]["write"][lat_key]["min"])
write_max_lat = float(data["jobs"][job_pos]["write"][lat_key]["max"])
clat_key, clat_unit = get_lat_unit("clat", data["jobs"][job_pos]["write"])
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat = get_clat_percentiles(
data["jobs"][job_pos]["write"][clat_key])
if "ns" in lat_unit:
write_avg_lat, write_min_lat, write_max_lat = [x / 1000 for x in [write_avg_lat, write_min_lat, write_max_lat]]
if "ns" in clat_unit:
write_p99_lat = write_p99_lat / 1000
write_p99_9_lat = write_p99_9_lat / 1000
write_p99_99_lat = write_p99_99_lat / 1000
write_p99_999_lat = write_p99_999_lat / 1000
return [read_iops, read_bw, read_avg_lat, read_min_lat, read_max_lat,
read_p99_lat, read_p99_9_lat, read_p99_99_lat, read_p99_999_lat,
write_iops, write_bw, write_avg_lat, write_min_lat, write_max_lat,
write_p99_lat, write_p99_9_lat, write_p99_99_lat, write_p99_999_lat]
def parse_results(self, results_dir, csv_file):
files = os.listdir(results_dir)
fio_files = filter(lambda x: ".fio" in x, files)
json_files = [x for x in files if ".json" in x]
headers = ["read_iops", "read_bw", "read_avg_lat_us", "read_min_lat_us", "read_max_lat_us",
"read_p99_lat_us", "read_p99.9_lat_us", "read_p99.99_lat_us", "read_p99.999_lat_us",
"write_iops", "write_bw", "write_avg_lat_us", "write_min_lat_us", "write_max_lat_us",
"write_p99_lat_us", "write_p99.9_lat_us", "write_p99.99_lat_us", "write_p99.999_lat_us"]
aggr_headers = ["iops", "bw", "avg_lat_us", "min_lat_us", "max_lat_us",
"p99_lat_us", "p99.9_lat_us", "p99.99_lat_us", "p99.999_lat_us"]
header_line = ",".join(["Name", *headers])
aggr_header_line = ",".join(["Name", *aggr_headers])
# Create empty results file
with open(os.path.join(results_dir, csv_file), "w") as fh:
fh.write(aggr_header_line + "\n")
rows = set()
for fio_config in fio_files:
self.log_print("Getting FIO stats for %s" % fio_config)
job_name, _ = os.path.splitext(fio_config)
# Look in the filename for rwmixread value. Function arguments do
# not have that information.
# TODO: Improve this function by directly using workload params instead
# of regexing through filenames.
if "read" in job_name:
rw_mixread = 1
elif "write" in job_name:
rw_mixread = 0
else:
rw_mixread = float(re.search(r"m_(\d+)", job_name).group(1)) / 100
# If "_CPU" exists in name - ignore it
            # Initiators for the same job could have different num_cores parameter
job_name = re.sub(r"_\d+CPU", "", job_name)
job_result_files = [x for x in json_files if job_name in x]
self.log_print("Matching result files for current fio config:")
for j in job_result_files:
self.log_print("\t %s" % j)
# There may have been more than 1 initiator used in test, need to check that
# Result files are created so that string after last "_" separator is server name
inits_names = set([os.path.splitext(x)[0].split("_")[-1] for x in job_result_files])
inits_avg_results = []
for i in inits_names:
self.log_print("\tGetting stats for initiator %s" % i)
# There may have been more than 1 test run for this job, calculate average results for initiator
i_results = [x for x in job_result_files if i in x]
i_results_filename = re.sub(r"run_\d+_", "", i_results[0].replace("json", "csv"))
separate_stats = []
for r in i_results:
try:
stats = self.read_json_stats(os.path.join(results_dir, r))
separate_stats.append(stats)
self.log_print(stats)
except JSONDecodeError as e:
self.log_print("ERROR: Failed to parse %s results! Results might be incomplete!")
init_results = [sum(x) for x in zip(*separate_stats)]
init_results = [x / len(separate_stats) for x in init_results]
inits_avg_results.append(init_results)
self.log_print("\tAverage results for initiator %s" % i)
self.log_print(init_results)
with open(os.path.join(results_dir, i_results_filename), "w") as fh:
fh.write(header_line + "\n")
fh.write(",".join([job_name, *["{0:.3f}".format(x) for x in init_results]]) + "\n")
# Sum results of all initiators running this FIO job.
            # Latency results are an average of latencies from across all initiators.
inits_avg_results = [sum(x) for x in zip(*inits_avg_results)]
inits_avg_results = OrderedDict(zip(headers, inits_avg_results))
for key in inits_avg_results:
if "lat" in key:
inits_avg_results[key] /= len(inits_names)
# Aggregate separate read/write values into common labels
# Take rw_mixread into consideration for mixed read/write workloads.
aggregate_results = OrderedDict()
for h in aggr_headers:
read_stat, write_stat = [float(value) for key, value in inits_avg_results.items() if h in key]
if "lat" in h:
_ = rw_mixread * read_stat + (1 - rw_mixread) * write_stat
else:
_ = read_stat + write_stat
aggregate_results[h] = "{0:.3f}".format(_)
rows.add(",".join([job_name, *aggregate_results.values()]))
# Save results to file
for row in rows:
with open(os.path.join(results_dir, csv_file), "a") as fh:
fh.write(row + "\n")
self.log_print("You can find the test results in the file %s" % os.path.join(results_dir, csv_file))
def measure_sar(self, results_dir, sar_file_name):
self.log_print("Waiting %d delay before measuring SAR stats" % self.sar_delay)
cpu_number = os.cpu_count()
sar_idle_sum = 0
time.sleep(self.sar_delay)
out = self.exec_cmd(["sar", "-P", "ALL", "%s" % self.sar_interval, "%s" % self.sar_count])
with open(os.path.join(results_dir, sar_file_name), "w") as fh:
for line in out.split("\n"):
if "Average" in line:
if "CPU" in line:
self.log_print("Summary CPU utilization from SAR:")
self.log_print(line)
elif "all" in line:
self.log_print(line)
else:
sar_idle_sum += float(line.split()[7])
fh.write(out)
sar_cpu_usage = cpu_number * 100 - sar_idle_sum
with open(os.path.join(results_dir, sar_file_name), "a") as f:
f.write("Total CPU used: " + str(sar_cpu_usage))
def ethtool_after_fio_ramp(self, fio_ramp_time):
time.sleep(fio_ramp_time//2)
nic_names = [self.get_nic_name_by_ip(n) for n in self.nic_ips]
for nic in nic_names:
self.log_print(nic)
self.exec_cmd(["sudo", "ethtool", "--set-priv-flags", nic,
"channel-pkt-inspect-optimize", "off"]) # Disable channel packet inspection optimization
def measure_pcm_memory(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm-memory.x" % self.pcm_dir, "%s" % self.pcm_interval, "-csv=%s/%s" % (results_dir, pcm_file_name)]
pcm_memory = subprocess.Popen(cmd)
time.sleep(self.pcm_count)
pcm_memory.terminate()
def measure_pcm(self, results_dir, pcm_file_name):
time.sleep(self.pcm_delay)
cmd = ["%s/pcm.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count, "-csv=%s/%s" % (results_dir, pcm_file_name)]
subprocess.run(cmd)
df = pd.read_csv(os.path.join(results_dir, pcm_file_name), header=[0, 1])
df = df.rename(columns=lambda x: re.sub(r'Unnamed:[\w\s]*$', '', x))
skt = df.loc[:, df.columns.get_level_values(1).isin({'UPI0', 'UPI1', 'UPI2'})]
skt_pcm_file_name = "_".join(["skt", pcm_file_name])
skt.to_csv(os.path.join(results_dir, skt_pcm_file_name), index=False)
def measure_pcm_power(self, results_dir, pcm_power_file_name):
time.sleep(self.pcm_delay)
out = self.exec_cmd(["%s/pcm-power.x" % self.pcm_dir, "%s" % self.pcm_interval, "-i=%s" % self.pcm_count])
with open(os.path.join(results_dir, pcm_power_file_name), "w") as fh:
fh.write(out)
def measure_network_bandwidth(self, results_dir, bandwidth_file_name):
self.log_print("INFO: starting network bandwidth measure")
self.exec_cmd(["bwm-ng", "-o", "csv", "-F", "%s/%s" % (results_dir, bandwidth_file_name),
"-a", "1", "-t", "1000", "-c", str(self.bandwidth_count)])
def measure_dpdk_memory(self, results_dir):
self.log_print("INFO: waiting to generate DPDK memory usage")
time.sleep(self.dpdk_wait_time)
self.log_print("INFO: generating DPDK memory usage")
        rpc.env.env_dpdk_get_mem_stats(self.client)  # trigger the RPC that dumps stats to /tmp/spdk_mem_dump.txt
os.rename("/tmp/spdk_mem_dump.txt", "%s/spdk_mem_dump.txt" % (results_dir))
def sys_config(self):
self.log_print("====Kernel release:====")
self.log_print(os.uname().release)
self.log_print("====Kernel command line:====")
with open('/proc/cmdline') as f:
cmdline = f.readlines()
self.log_print('\n'.join(self.get_uncommented_lines(cmdline)))
self.log_print("====sysctl conf:====")
with open('/etc/sysctl.conf') as f:
sysctl = f.readlines()
self.log_print('\n'.join(self.get_uncommented_lines(sysctl)))
self.log_print("====Cpu power info:====")
self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
self.log_print("====zcopy settings:====")
self.log_print("zcopy enabled: %s" % (self.enable_zcopy))
self.log_print("====Scheduler settings:====")
self.log_print("SPDK scheduler: %s" % (self.scheduler_name))
class Initiator(Server):
def __init__(self, name, general_config, initiator_config):
super(Initiator, self).__init__(name, general_config, initiator_config)
# Required fields
self.ip = initiator_config["ip"]
self.target_nic_ips = initiator_config["target_nic_ips"]
# Defaults
self.cpus_allowed = None
self.cpus_allowed_policy = "shared"
self.spdk_dir = "/tmp/spdk"
self.fio_bin = "/usr/src/fio/fio"
self.nvmecli_bin = "nvme"
self.cpu_frequency = None
self.subsystem_info_list = []
if "spdk_dir" in initiator_config:
self.spdk_dir = initiator_config["spdk_dir"]
if "fio_bin" in initiator_config:
self.fio_bin = initiator_config["fio_bin"]
if "nvmecli_bin" in initiator_config:
self.nvmecli_bin = initiator_config["nvmecli_bin"]
if "cpus_allowed" in initiator_config:
self.cpus_allowed = initiator_config["cpus_allowed"]
if "cpus_allowed_policy" in initiator_config:
self.cpus_allowed_policy = initiator_config["cpus_allowed_policy"]
if "cpu_frequency" in initiator_config:
self.cpu_frequency = initiator_config["cpu_frequency"]
if os.getenv('SPDK_WORKSPACE'):
self.spdk_dir = os.getenv('SPDK_WORKSPACE')
self.ssh_connection = paramiko.SSHClient()
self.ssh_connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.ssh_connection.connect(self.ip, username=self.username, password=self.password)
self.exec_cmd(["sudo", "rm", "-rf", "%s/nvmf_perf" % self.spdk_dir])
self.exec_cmd(["mkdir", "-p", "%s" % self.spdk_dir])
self._nics_json_obj = json.loads(self.exec_cmd(["ip", "-j", "address", "show"]))
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.copy_spdk("/tmp/spdk.zip")
self.set_local_nic_info(self.set_local_nic_info_helper())
self.set_cpu_frequency()
self.configure_system()
if self.enable_adq:
self.configure_adq()
self.sys_config()
def set_local_nic_info_helper(self):
return json.loads(self.exec_cmd(["lshw", "-json"]))
def __del__(self):
self.ssh_connection.close()
def exec_cmd(self, cmd, stderr_redirect=False, change_dir=None):
if change_dir:
cmd = ["cd", change_dir, ";", *cmd]
# In case one of the command elements contains whitespace and is not
        # already quoted (e.g. when calling sysctl), quote it again to prevent expansion
# when sending to remote system.
for i, c in enumerate(cmd):
if (" " in c or "\t" in c) and not (c.startswith("'") and c.endswith("'")):
cmd[i] = '"%s"' % c
cmd = " ".join(cmd)
        # Redirect stderr to stdout using the get_pty option if needed
_, stdout, _ = self.ssh_connection.exec_command(cmd, get_pty=stderr_redirect)
out = stdout.read().decode(encoding="utf-8")
# Check the return code
rc = stdout.channel.recv_exit_status()
if rc:
raise CalledProcessError(int(rc), cmd, out)
return out
def put_file(self, local, remote_dest):
ftp = self.ssh_connection.open_sftp()
ftp.put(local, remote_dest)
ftp.close()
def get_file(self, remote, local_dest):
ftp = self.ssh_connection.open_sftp()
ftp.get(remote, local_dest)
ftp.close()
def copy_spdk(self, local_spdk_zip):
self.log_print("Copying SPDK sources to initiator %s" % self.name)
self.put_file(local_spdk_zip, "/tmp/spdk_drop.zip")
self.log_print("Copied sources zip from target")
self.exec_cmd(["unzip", "-qo", "/tmp/spdk_drop.zip", "-d", self.spdk_dir])
self.log_print("Sources unpacked")
def copy_result_files(self, dest_dir):
self.log_print("Copying results")
if not os.path.exists(dest_dir):
os.mkdir(dest_dir)
# Get list of result files from initiator and copy them back to target
file_list = self.exec_cmd(["ls", "%s/nvmf_perf" % self.spdk_dir]).strip().split("\n")
for file in file_list:
self.get_file(os.path.join(self.spdk_dir, "nvmf_perf", file),
os.path.join(dest_dir, file))
self.log_print("Done copying results")
def discover_subsystems(self, address_list, subsys_no):
num_nvmes = range(0, subsys_no)
nvme_discover_output = ""
for ip, subsys_no in itertools.product(address_list, num_nvmes):
self.log_print("Trying to discover: %s:%s" % (ip, 4420 + subsys_no))
nvme_discover_cmd = ["sudo",
"%s" % self.nvmecli_bin,
"discover", "-t", "%s" % self.transport,
"-s", "%s" % (4420 + subsys_no),
"-a", "%s" % ip]
try:
stdout = self.exec_cmd(nvme_discover_cmd)
if stdout:
nvme_discover_output = nvme_discover_output + stdout
except CalledProcessError:
# Do nothing. In case of discovering remote subsystems of kernel target
# we expect "nvme discover" to fail a bunch of times because we basically
# scan ports.
pass
subsystems = re.findall(r'trsvcid:\s(\d+)\s+' # get svcid number
r'subnqn:\s+([a-zA-Z0-9\.\-\:]+)\s+' # get NQN id
r'traddr:\s+(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})', # get IP address
nvme_discover_output) # from nvme discovery output
subsystems = filter(lambda x: x[-1] in address_list, subsystems)
subsystems = list(set(subsystems))
subsystems.sort(key=lambda x: x[1])
self.log_print("Found matching subsystems on target side:")
for s in subsystems:
self.log_print(s)
self.subsystem_info_list = subsystems
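        # Each entry is a (trsvcid, subnqn, traddr) tuple in the order captured by the regex
        # above, e.g. ('4420', 'nqn.2018-09.io.spdk:cnode1', '10.0.0.1') (illustrative values).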
def gen_fio_filename_conf(self, *args, **kwargs):
# Logic implemented in SPDKInitiator and KernelInitiator classes
pass
def gen_fio_config(self, rw, rwmixread, block_size, io_depth, subsys_no, num_jobs=None, ramp_time=0, run_time=10, rate_iops=0):
fio_conf_template = """
[global]
ioengine={ioengine}
{spdk_conf}
thread=1
group_reporting=1
direct=1
percentile_list=50:90:99:99.5:99.9:99.99:99.999
norandommap=1
rw={rw}
rwmixread={rwmixread}
bs={block_size}
time_based=1
ramp_time={ramp_time}
runtime={run_time}
rate_iops={rate_iops}
"""
if "spdk" in self.mode:
bdev_conf = self.gen_spdk_bdev_conf(self.subsystem_info_list)
self.exec_cmd(["echo", "'%s'" % bdev_conf, ">", "%s/bdev.conf" % self.spdk_dir])
ioengine = "%s/build/fio/spdk_bdev" % self.spdk_dir
spdk_conf = "spdk_json_conf=%s/bdev.conf" % self.spdk_dir
else:
ioengine = self.ioengine
spdk_conf = ""
out = self.exec_cmd(["sudo", "nvme", "list", "|", "grep", "-E", "'SPDK|Linux'",
"|", "awk", "'{print $1}'"])
subsystems = [x for x in out.split("\n") if "nvme" in x]
if self.cpus_allowed is not None:
self.log_print("Limiting FIO workload execution on specific cores %s" % self.cpus_allowed)
cpus_num = 0
cpus = self.cpus_allowed.split(",")
for cpu in cpus:
if "-" in cpu:
a, b = cpu.split("-")
a = int(a)
b = int(b)
                    cpus_num += len(range(a, b + 1))  # "a-b" CPU ranges are inclusive of both ends
else:
cpus_num += 1
self.num_cores = cpus_num
threads = range(0, self.num_cores)
elif hasattr(self, 'num_cores'):
self.log_print("Limiting FIO workload execution to %s cores" % self.num_cores)
threads = range(0, int(self.num_cores))
else:
self.num_cores = len(subsystems)
threads = range(0, len(subsystems))
if "spdk" in self.mode:
filename_section = self.gen_fio_filename_conf(self.subsystem_info_list, threads, io_depth, num_jobs)
else:
filename_section = self.gen_fio_filename_conf(threads, io_depth, num_jobs)
fio_config = fio_conf_template.format(ioengine=ioengine, spdk_conf=spdk_conf,
rw=rw, rwmixread=rwmixread, block_size=block_size,
ramp_time=ramp_time, run_time=run_time, rate_iops=rate_iops)
# TODO: hipri disabled for now, as it causes fio errors:
# io_u error on file /dev/nvme2n1: Operation not supported
# See comment in KernelInitiator class, kernel_init_connect() function
if hasattr(self, "ioengine") and "io_uring" in self.ioengine:
fio_config = fio_config + """
fixedbufs=1
registerfiles=1
#hipri=1
"""
if num_jobs:
fio_config = fio_config + "numjobs=%s \n" % num_jobs
if self.cpus_allowed is not None:
fio_config = fio_config + "cpus_allowed=%s \n" % self.cpus_allowed
fio_config = fio_config + "cpus_allowed_policy=%s \n" % self.cpus_allowed_policy
fio_config = fio_config + filename_section
fio_config_filename = "%s_%s_%s_m_%s" % (block_size, io_depth, rw, rwmixread)
if hasattr(self, "num_cores"):
fio_config_filename += "_%sCPU" % self.num_cores
fio_config_filename += ".fio"
self.exec_cmd(["mkdir", "-p", "%s/nvmf_perf" % self.spdk_dir])
self.exec_cmd(["echo", "'%s'" % fio_config, ">", "%s/nvmf_perf/%s" % (self.spdk_dir, fio_config_filename)])
self.log_print("Created FIO Config:")
self.log_print(fio_config)
return os.path.join(self.spdk_dir, "nvmf_perf", fio_config_filename)
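    # Illustrative example (hypothetical argument values): gen_fio_config("randrw", 70, "4k", 128, 12)
    # on an initiator that ends up with num_cores == 4 writes "4k_128_randrw_m_70_4CPU.fio";
    # parse_results() later recovers rwmixread from the "m_70" part and strips the "_4CPU"
    # suffix when matching result files.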
def set_cpu_frequency(self):
if self.cpu_frequency is not None:
try:
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-g", "userspace"], True)
self.exec_cmd(["sudo", "cpupower", "frequency-set", "-f", "%s" % self.cpu_frequency], True)
self.log_print(self.exec_cmd(["sudo", "cpupower", "frequency-info"]))
except Exception:
self.log_print("ERROR: cpu_frequency will not work when intel_pstate is enabled!")
sys.exit()
else:
self.log_print("WARNING: you have disabled intel_pstate and using default cpu governance.")
def run_fio(self, fio_config_file, run_num=None):
job_name, _ = os.path.splitext(fio_config_file)
self.log_print("Starting FIO run for job: %s" % job_name)
self.log_print("Using FIO: %s" % self.fio_bin)
if run_num:
for i in range(1, run_num + 1):
output_filename = job_name + "_run_" + str(i) + "_" + self.name + ".json"
try:
output = self.exec_cmd(["sudo", self.fio_bin, fio_config_file, "--output-format=json",
"--output=%s" % output_filename, "--eta=never"], True)
self.log_print(output)
except subprocess.CalledProcessError as e:
self.log_print("ERROR: Fio process failed!")
self.log_print(e.stdout)
else:
output_filename = job_name + "_" + self.name + ".json"
output = self.exec_cmd(["sudo", self.fio_bin,
fio_config_file, "--output-format=json",
"--output" % output_filename], True)
self.log_print(output)
self.log_print("FIO run finished. Results in: %s" % output_filename)
def sys_config(self):
self.log_print("====Kernel release:====")
self.log_print(self.exec_cmd(["uname", "-r"]))
self.log_print("====Kernel command line:====")
cmdline = self.exec_cmd(["cat", "/proc/cmdline"])
self.log_print('\n'.join(self.get_uncommented_lines(cmdline.splitlines())))
self.log_print("====sysctl conf:====")
sysctl = self.exec_cmd(["cat", "/etc/sysctl.conf"])
self.log_print('\n'.join(self.get_uncommented_lines(sysctl.splitlines())))
self.log_print("====Cpu power info:====")
self.log_print(self.exec_cmd(["cpupower", "frequency-info"]))
class KernelTarget(Target):
def __init__(self, name, general_config, target_config):
super(KernelTarget, self).__init__(name, general_config, target_config)
# Defaults
self.nvmet_bin = "nvmetcli"
if "nvmet_bin" in target_config:
self.nvmet_bin = target_config["nvmet_bin"]
def __del__(self):
nvmet_command(self.nvmet_bin, "clear")
def kernel_tgt_gen_subsystem_conf(self, nvme_list, address_list):
nvmet_cfg = {
"ports": [],
"hosts": [],
"subsystems": [],
}
# Split disks between NIC IP's
disks_per_ip = int(len(nvme_list) / len(address_list))
disk_chunks = [nvme_list[i * disks_per_ip:disks_per_ip + disks_per_ip * i] for i in range(0, len(address_list))]
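        # Illustrative example: with 4 NVMe devices and 2 NIC IPs, disks_per_ip == 2 and
        # disk_chunks == [[disk0, disk1], [disk2, disk3]]; if the counts do not divide evenly,
        # the remainder disks are simply left unassigned.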
subsys_no = 1
port_no = 0
for ip, chunk in zip(address_list, disk_chunks):
for disk in chunk:
nqn = "nqn.2018-09.io.spdk:cnode%s" % subsys_no
nvmet_cfg["subsystems"].append({
"allowed_hosts": [],
"attr": {
"allow_any_host": "1",
"serial": "SPDK00%s" % subsys_no,
"version": "1.3"
},
"namespaces": [
{
"device": {
"path": disk,
"uuid": "%s" % uuid.uuid4()
},
"enable": 1,
"nsid": subsys_no
}
],
"nqn": nqn
})
nvmet_cfg["ports"].append({
"addr": {
"adrfam": "ipv4",
"traddr": ip,
"trsvcid": "%s" % (4420 + port_no),
"trtype": "%s" % self.transport
},
"portid": subsys_no,
"referrals": [],
"subsystems": [nqn]
})
subsys_no += 1
port_no += 1
self.subsystem_info_list.append([port_no, nqn, ip])
with open("kernel.conf", "w") as fh:
fh.write(json.dumps(nvmet_cfg, indent=2))
def tgt_start(self):
self.log_print("Configuring kernel NVMeOF Target")
if self.null_block:
print("Configuring with null block device.")
null_blk_list = ["/dev/nullb{}".format(x) for x in range(self.null_block)]
self.kernel_tgt_gen_subsystem_conf(null_blk_list, self.nic_ips)
self.subsys_no = len(null_blk_list)
else:
print("Configuring with NVMe drives.")
nvme_list = get_nvme_devices()
self.kernel_tgt_gen_subsystem_conf(nvme_list, self.nic_ips)
self.subsys_no = len(nvme_list)
nvmet_command(self.nvmet_bin, "clear")
nvmet_command(self.nvmet_bin, "restore kernel.conf")
if self.enable_adq:
self.adq_configure_tc()
self.log_print("Done configuring kernel NVMeOF Target")
class SPDKTarget(Target):
def __init__(self, name, general_config, target_config):
super(SPDKTarget, self).__init__(name, general_config, target_config)
# Required fields
self.core_mask = target_config["core_mask"]
self.num_cores = self.get_num_cores(self.core_mask)
# Defaults
self.dif_insert_strip = False
self.null_block_dif_type = 0
self.num_shared_buffers = 4096
self.bpf_proc = None
self.bpf_scripts = []
if "num_shared_buffers" in target_config:
self.num_shared_buffers = target_config["num_shared_buffers"]
if "null_block_dif_type" in target_config:
self.null_block_dif_type = target_config["null_block_dif_type"]
if "dif_insert_strip" in target_config:
self.dif_insert_strip = target_config["dif_insert_strip"]
if "bpf_scripts" in target_config:
self.bpf_scripts = target_config["bpf_scripts"]
def get_num_cores(self, core_mask):
if "0x" in core_mask:
return bin(int(core_mask, 16)).count("1")
else:
num_cores = 0
core_mask = core_mask.replace("[", "")
core_mask = core_mask.replace("]", "")
for i in core_mask.split(","):
if "-" in i:
x, y = i.split("-")
num_cores += len(range(int(x), int(y))) + 1
else:
num_cores += 1
return num_cores
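    # Illustrative examples of the two accepted core_mask formats:
    #   get_num_cores("0xF") == 4 (hex bitmask, counts the set bits)
    #   get_num_cores("[0-3,5]") == 5 (core list, ranges are inclusive)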
def spdk_tgt_configure(self):
self.log_print("Configuring SPDK NVMeOF target via RPC")
if self.enable_adq:
self.adq_configure_tc()
# Create RDMA transport layer
rpc.nvmf.nvmf_create_transport(self.client, trtype=self.transport,
num_shared_buffers=self.num_shared_buffers,
dif_insert_or_strip=self.dif_insert_strip,
sock_priority=self.adq_priority)
self.log_print("SPDK NVMeOF transport layer:")
rpc.client.print_dict(rpc.nvmf.nvmf_get_transports(self.client))
if self.null_block:
self.spdk_tgt_add_nullblock(self.null_block)
self.spdk_tgt_add_subsystem_conf(self.nic_ips, self.null_block)
else:
self.spdk_tgt_add_nvme_conf()
self.spdk_tgt_add_subsystem_conf(self.nic_ips)
self.log_print("Done configuring SPDK NVMeOF Target")
def spdk_tgt_add_nullblock(self, null_block_count):
md_size = 0
block_size = 4096
if self.null_block_dif_type != 0:
md_size = 128
self.log_print("Adding null block bdevices to config via RPC")
for i in range(null_block_count):
self.log_print("Setting bdev protection to :%s" % self.null_block_dif_type)
rpc.bdev.bdev_null_create(self.client, 102400, block_size + md_size, "Nvme{}n1".format(i),
dif_type=self.null_block_dif_type, md_size=md_size)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
def spdk_tgt_add_nvme_conf(self, req_num_disks=None):
self.log_print("Adding NVMe bdevs to config via RPC")
bdfs = get_nvme_devices_bdf()
bdfs = [b.replace(":", ".") for b in bdfs]
if req_num_disks:
if req_num_disks > len(bdfs):
self.log_print("ERROR: Requested number of disks is more than available %s" % len(bdfs))
sys.exit(1)
else:
bdfs = bdfs[0:req_num_disks]
for i, bdf in enumerate(bdfs):
rpc.bdev.bdev_nvme_attach_controller(self.client, name="Nvme%s" % i, trtype="PCIe", traddr=bdf)
self.log_print("SPDK Bdevs configuration:")
rpc.client.print_dict(rpc.bdev.bdev_get_bdevs(self.client))
def spdk_tgt_add_subsystem_conf(self, ips=None, req_num_disks=None):
self.log_print("Adding subsystems to config")
port = "4420"
if not req_num_disks:
req_num_disks = get_nvme_devices_count()
# Distribute bdevs between provided NICs
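# Illustrative example: 4 disks and 2 target IPs give disks_per_ip == 2 and
# disk chunks [0, 1] and [2, 3], so each listener address exposes two subsystems.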
num_disks = range(0, req_num_disks)
if len(num_disks) == 1:
disks_per_ip = 1
else:
disks_per_ip = int(len(num_disks) / len(ips))
disk_chunks = [num_disks[i * disks_per_ip:(i + 1) * disks_per_ip] for i in range(0, len(ips))]
# Create subsystems, add bdevs to namespaces, add listeners
for ip, chunk in zip(ips, disk_chunks):
for c in chunk:
nqn = "nqn.2018-09.io.spdk:cnode%s" % c
serial = "SPDK00%s" % c
bdev_name = "Nvme%sn1" % c
rpc.nvmf.nvmf_create_subsystem(self.client, nqn, serial,
allow_any_host=True, max_namespaces=8)
rpc.nvmf.nvmf_subsystem_add_ns(self.client, nqn, bdev_name)
rpc.nvmf.nvmf_subsystem_add_listener(self.client,
nqn=nqn,
trtype=self.transport,
traddr=ip,
trsvcid=port,
adrfam="ipv4")
self.subsystem_info_list.append([port, nqn, ip])
self.log_print("SPDK NVMeOF subsystem configuration:")
rpc.client.print_dict(rpc.nvmf.nvmf_get_subsystems(self.client))
def bpf_start(self):
self.log_print("Starting BPF Trace scripts: %s" % self.bpf_scripts)
bpf_script = os.path.join(self.spdk_dir, "scripts/bpftrace.sh")
bpf_traces = [os.path.join(self.spdk_dir, "scripts/bpf", trace) for trace in self.bpf_scripts]
results_path = os.path.join(self.results_dir, "bpf_traces.txt")
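# The bpftrace.sh helper is handed the nvmf_tgt PID plus the trace scripts to
# attach; BPF_OUTFILE (read by that script) points it at the results file.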
with open(self.pid, "r") as fh:
nvmf_pid = str(fh.readline())
cmd = [bpf_script, nvmf_pid, *bpf_traces]
self.log_print(cmd)
self.bpf_proc = subprocess.Popen(cmd, env={"BPF_OUTFILE": results_path})
def tgt_start(self):
if self.null_block:
self.subsys_no = 1
else:
self.subsys_no = get_nvme_devices_count()
self.log_print("Starting SPDK NVMeOF Target process")
nvmf_app_path = os.path.join(self.spdk_dir, "build/bin/nvmf_tgt")
proc = subprocess.Popen([nvmf_app_path, "--wait-for-rpc", "-m", self.core_mask])
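# --wait-for-rpc makes nvmf_tgt create only the RPC socket and defer full
# initialization until framework_start_init is called below, which allows the
# socket and bdev options to be tuned first.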
self.pid = os.path.join(self.spdk_dir, "nvmf.pid")
with open(self.pid, "w") as fh:
fh.write(str(proc.pid))
self.nvmf_proc = proc
self.log_print("SPDK NVMeOF Target PID=%s" % self.pid)
self.log_print("Waiting for spdk to initilize...")
while True:
if os.path.exists("/var/tmp/spdk.sock"):
break
time.sleep(1)
self.client = rpc.client.JSONRPCClient("/var/tmp/spdk.sock")
if self.enable_zcopy:
rpc.sock.sock_impl_set_options(self.client, impl_name="posix",
enable_zerocopy_send_server=True)
self.log_print("Target socket options:")
rpc.client.print_dict(rpc.sock.sock_impl_get_options(self.client, impl_name="posix"))
if self.enable_adq:
rpc.sock.sock_impl_set_options(self.client, impl_name="posix", enable_placement_id=1)
rpc.bdev.bdev_nvme_set_options(self.client, timeout_us=0, action_on_timeout=None,
nvme_adminq_poll_period_us=100000, retry_count=4)
rpc.nvmf.nvmf_set_config(self.client, acceptor_poll_rate=10000)
rpc.app.framework_set_scheduler(self.client, name=self.scheduler_name)
rpc.framework_start_init(self.client)
if self.bpf_scripts:
self.bpf_start()
self.spdk_tgt_configure()
def __del__(self):
if self.bpf_proc:
self.log_print("Stopping BPF Trace script")
self.bpf_proc.terminate()
self.bpf_proc.wait()
if hasattr(self, "nvmf_proc"):
try:
self.nvmf_proc.terminate()
self.nvmf_proc.wait()
except Exception as e:
self.log_print(e)
self.nvmf_proc.kill()
self.nvmf_proc.communicate()
class KernelInitiator(Initiator):
def __init__(self, name, general_config, initiator_config):
super(KernelInitiator, self).__init__(name, general_config, initiator_config)
# Defaults
self.extra_params = ""
self.ioengine = "libaio"
if "extra_params" in initiator_config:
self.extra_params = initiator_config["extra_params"]
if "kernel_engine" in initiator_config:
self.ioengine = initiator_config["kernel_engine"]
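# For io_uring the connect call requests kernel poll queues
# (nvme-cli's --nr-poll-queues); 8 queues is this script's fixed choice.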
if "io_uring" in self.ioengine:
self.extra_params = "--nr-poll-queues=8"
def __del__(self):
self.ssh_connection.close()
def get_connected_nvme_list(self):
json_obj = json.loads(self.exec_cmd(["sudo", "nvme", "list", "-o", "json"]))
nvme_list = [os.path.basename(x["DevicePath"]) for x in json_obj["Devices"]
if "SPDK" in x["ModelNumber"] or "Linux" in x["ModelNumber"]]
return nvme_list
def kernel_init_connect(self):
self.log_print("Below connection attempts may result in error messages, this is expected!")
for subsystem in self.subsystem_info_list:
self.log_print("Trying to connect %s %s %s" % subsystem)
self.exec_cmd(["sudo", self.nvmecli_bin, "connect", "-t", self.transport,
"-s", subsystem[0], "-n", subsystem[1], "-a", subsystem[2], self.extra_params])
time.sleep(2)
if "io_uring" in self.ioengine:
self.log_print("Setting block layer settings for io_uring.")
# TODO: io_poll=1 and io_poll_delay=-1 params not set here, because
# apparently it's not possible for connected subsystems.
# Results in "error: Invalid argument"
block_sysfs_settings = {
"iostats": "0",
"rq_affinity": "0",
"nomerges": "2"
}
for disk in self.get_connected_nvme_list():
sysfs = os.path.join("/sys/block", disk, "queue")
for k, v in block_sysfs_settings.items():
sysfs_opt_path = os.path.join(sysfs, k)
try:
self.exec_cmd(["sudo", "bash", "-c", "echo %s > %s" % (v, sysfs_opt_path)], stderr_redirect=True)
except subprocess.CalledProcessError as e:
self.log_print("Warning: command %s failed due to error %s. %s was not set!" % (e.cmd, e.output, v))
finally:
_ = self.exec_cmd(["sudo", "cat", "%s" % (sysfs_opt_path)])
self.log_print("%s=%s" % (sysfs_opt_path, _))
def kernel_init_disconnect(self):
for subsystem in self.subsystem_info_list:
self.exec_cmd(["sudo", self.nvmecli_bin, "disconnect", "-n", subsystem[1]])
time.sleep(1)
def gen_fio_filename_conf(self, threads, io_depth, num_jobs=1):
nvme_list = [os.path.join("/dev", nvme) for nvme in self.get_connected_nvme_list()]
filename_section = ""
nvme_per_split = int(len(nvme_list) / len(threads))
remainder = len(nvme_list) % len(threads)
iterator = iter(nvme_list)
result = []
for i in range(len(threads)):
result.append([])
for _ in range(nvme_per_split):
result[i].append(next(iterator))
if remainder:
result[i].append(next(iterator))
remainder -= 1
for i, r in enumerate(result):
header = "[filename%s]" % i
disks = "\n".join(["filename=%s" % x for x in r])
job_section_qd = round((io_depth * len(r)) / num_jobs)
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
return filename_section
class SPDKInitiator(Initiator):
def __init__(self, name, general_config, initiator_config):
super(SPDKInitiator, self).__init__(name, general_config, initiator_config)
if "skip_spdk_install" not in general_config or general_config["skip_spdk_install"] is False:
self.install_spdk()
# Required fields
self.num_cores = initiator_config["num_cores"]
def install_spdk(self):
self.log_print("Using fio binary %s" % self.fio_bin)
self.exec_cmd(["git", "-C", self.spdk_dir, "submodule", "update", "--init"])
self.exec_cmd(["git", "-C", self.spdk_dir, "clean", "-ffdx"])
self.exec_cmd(["cd", self.spdk_dir, "&&", "./configure", "--with-rdma", "--with-fio=%s" % os.path.dirname(self.fio_bin)])
self.exec_cmd(["make", "-C", self.spdk_dir, "clean"])
self.exec_cmd(["make", "-C", self.spdk_dir, "-j$(($(nproc)*2))"])
self.log_print("SPDK built")
self.exec_cmd(["sudo", "%s/scripts/setup.sh" % self.spdk_dir])
def gen_spdk_bdev_conf(self, remote_subsystem_list):
bdev_cfg_section = {
"subsystems": [
{
"subsystem": "bdev",
"config": []
}
]
}
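# Each remote subsystem is translated into a bdev_nvme_attach_controller entry
# appended to the config above, e.g. (illustrative values):
#   {"method": "bdev_nvme_attach_controller",
#    "params": {"name": "Nvme0", "trtype": "RDMA", "traddr": "10.0.0.1",
#               "trsvcid": "4420", "subnqn": "nqn.2018-09.io.spdk:cnode0",
#               "adrfam": "IPv4"}}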
for i, subsys in enumerate(remote_subsystem_list):
sub_port, sub_nqn, sub_addr = map(lambda x: str(x), subsys)
nvme_ctrl = {
"method": "bdev_nvme_attach_controller",
"params": {
"name": "Nvme{}".format(i),
"trtype": self.transport,
"traddr": sub_addr,
"trsvcid": sub_port,
"subnqn": sub_nqn,
"adrfam": "IPv4"
}
}
if self.enable_adq:
nvme_ctrl["params"].update({"priority": "1"})
bdev_cfg_section["subsystems"][0]["config"].append(nvme_ctrl)
return json.dumps(bdev_cfg_section, indent=2)
def gen_fio_filename_conf(self, subsystems, threads, io_depth, num_jobs=1):
filename_section = ""
if len(threads) >= len(subsystems):
threads = range(0, len(subsystems))
filenames = ["Nvme%sn1" % x for x in range(0, len(subsystems))]
nvme_per_split = int(len(subsystems) / len(threads))
remainder = len(subsystems) % len(threads)
iterator = iter(filenames)
result = []
for i in range(len(threads)):
result.append([])
for _ in range(nvme_per_split):
result[i].append(next(iterator))
if remainder:
result[i].append(next(iterator))
remainder -= 1
for i, r in enumerate(result):
header = "[filename%s]" % i
disks = "\n".join(["filename=%s" % x for x in r])
job_section_qd = round((io_depth * len(r)) / num_jobs)
if job_section_qd == 0:
job_section_qd = 1
iodepth = "iodepth=%s" % job_section_qd
filename_section = "\n".join([filename_section, header, disks, iodepth])
return filename_section
if __name__ == "__main__":
script_full_dir = os.path.dirname(os.path.realpath(__file__))
default_config_file_path = os.path.relpath(os.path.join(script_full_dir, "config.json"))
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-c', '--config', type=str, default=default_config_file_path,
help='Configuration file.')
parser.add_argument('-r', '--results', type=str, default='/tmp/results',
help='Results directory.')
parser.add_argument('-s', '--csv-filename', type=str, default='nvmf_results.csv',
help='CSV results filename.')
args = parser.parse_args()
print("Using config file: %s" % args.config)
with open(args.config, "r") as config:
data = json.load(config)
initiators = []
fio_cases = []
general_config = data["general"]
target_config = data["target"]
initiator_configs = [data[x] for x in data.keys() if "initiator" in x]
for k, v in data.items():
if "target" in k:
v.update({"results_dir": args.results})
if data[k]["mode"] == "spdk":
target_obj = SPDKTarget(k, data["general"], v)
elif data[k]["mode"] == "kernel":
target_obj = KernelTarget(k, data["general"], v)
elif "initiator" in k:
if data[k]["mode"] == "spdk":
init_obj = SPDKInitiator(k, data["general"], v)
elif data[k]["mode"] == "kernel":
init_obj = KernelInitiator(k, data["general"], v)
initiators.append(init_obj)
elif "fio" in k:
fio_workloads = itertools.product(data[k]["bs"],
data[k]["qd"],
data[k]["rw"])
fio_run_time = data[k]["run_time"]
fio_ramp_time = data[k]["ramp_time"]
fio_rw_mix_read = data[k]["rwmixread"]
fio_run_num = data[k]["run_num"] if "run_num" in data[k].keys() else None
fio_num_jobs = data[k]["num_jobs"] if "num_jobs" in data[k].keys() else None
fio_rate_iops = 0
if "rate_iops" in data[k]:
fio_rate_iops = data[k]["rate_iops"]
else:
continue
try:
os.mkdir(args.results)
except FileExistsError:
pass
target_obj.tgt_start()
for i in initiators:
i.discover_subsystems(i.target_nic_ips, target_obj.subsys_no)
if i.enable_adq:
i.adq_configure_tc()
# Poor man's threading
# Run FIO tests
for block_size, io_depth, rw in fio_workloads:
threads = []
configs = []
for i in initiators:
if i.mode == "kernel":
i.kernel_init_connect()
cfg = i.gen_fio_config(rw, fio_rw_mix_read, block_size, io_depth, target_obj.subsys_no,
fio_num_jobs, fio_ramp_time, fio_run_time, fio_rate_iops)
configs.append(cfg)
for i, cfg in zip(initiators, configs):
t = threading.Thread(target=i.run_fio, args=(cfg, fio_run_num))
threads.append(t)
if target_obj.enable_sar:
sar_file_name = "_".join([str(block_size), str(rw), str(io_depth), "sar"])
sar_file_name = ".".join([sar_file_name, "txt"])
t = threading.Thread(target=target_obj.measure_sar, args=(args.results, sar_file_name))
threads.append(t)
if target_obj.enable_pcm:
pcm_fnames = ["%s_%s_%s_%s.csv" % (block_size, rw, io_depth, x) for x in ["pcm_cpu", "pcm_memory", "pcm_power"]]
pcm_cpu_t = threading.Thread(target=target_obj.measure_pcm, args=(args.results, pcm_fnames[0],))
pcm_mem_t = threading.Thread(target=target_obj.measure_pcm_memory, args=(args.results, pcm_fnames[1],))
pcm_pow_t = threading.Thread(target=target_obj.measure_pcm_power, args=(args.results, pcm_fnames[2],))
threads.append(pcm_cpu_t)
threads.append(pcm_mem_t)
threads.append(pcm_pow_t)
if target_obj.enable_bandwidth:
bandwidth_file_name = "_".join(["bandwidth", str(block_size), str(rw), str(io_depth)])
bandwidth_file_name = ".".join([bandwidth_file_name, "csv"])
t = threading.Thread(target=target_obj.measure_network_bandwidth, args=(args.results, bandwidth_file_name,))
threads.append(t)
if target_obj.enable_dpdk_memory:
t = threading.Thread(target=target_obj.measure_dpdk_memory, args=(args.results,))
threads.append(t)
if target_obj.enable_adq:
ethtool_thread = threading.Thread(target=target_obj.ethtool_after_fio_ramp, args=(fio_ramp_time,))
threads.append(ethtool_thread)
for t in threads:
t.start()
for t in threads:
t.join()
for i in initiators:
if i.mode == "kernel":
i.kernel_init_disconnect()
i.copy_result_files(args.results)
target_obj.restore_governor()
target_obj.restore_tuned()
target_obj.restore_services()
target_obj.restore_sysctl()
for i in initiators:
i.restore_governor()
i.restore_tuned()
i.restore_services()
i.restore_sysctl()
target_obj.parse_results(args.results, args.csv_filename)
|
__main__.py
|
import sys
from threading import Thread
from .actions import ActionRegistry
from .backends import BluetoothBackend, HidrawBackend
from .config import load_options
from .daemon import Daemon
from .eventloop import EventLoop
from .exceptions import BackendError
class DS4Controller(object):
def __init__(self, index, options, dynamic=False):
self.index = index
self.dynamic = dynamic
self.logger = Daemon.logger.new_module("controller {0}".format(index))
self.error = None
self.device = None
self.loop = EventLoop()
self.actions = [cls(self) for cls in ActionRegistry.actions]
self.bindings = options.parent.bindings
self.current_profile = "default"
self.default_profile = options
self.options = self.default_profile
self.profiles = options.profiles
self.profile_options = dict(options.parent.profiles)
self.profile_options["default"] = self.default_profile
if self.profiles:
self.profiles.append("default")
self.load_options(self.options)
def fire_event(self, event, *args):
self.loop.fire_event(event, *args)
def load_profile(self, profile):
if profile == self.current_profile:
return
profile_options = self.profile_options.get(profile)
if profile_options:
self.logger.info("Switching to profile: {0}", profile)
self.load_options(profile_options)
self.current_profile = profile
self.fire_event("load-profile", profile)
else:
self.logger.warning("Ignoring invalid profile: {0}", profile)
def next_profile(self):
if not self.profiles:
return
next_index = self.profiles.index(self.current_profile) + 1
if next_index >= len(self.profiles):
next_index = 0
self.load_profile(self.profiles[next_index])
def prev_profile(self):
if not self.profiles:
return
next_index = self.profiles.index(self.current_profile) - 1
if next_index < 0:
next_index = len(self.profiles) - 1
self.load_profile(self.profiles[next_index])
def setup_device(self, device):
self.logger.info("Connected to {0}", device.name)
self.device = device
self.device.set_led(*self.options.led)
self.fire_event("device-setup", device)
self.loop.add_watcher(device.report_fd, self.read_report)
self.load_options(self.options)
def cleanup_device(self):
self.logger.info("Disconnected")
self.fire_event("device-cleanup")
self.loop.remove_watcher(self.device.report_fd)
self.device.close()
self.device = None
if self.dynamic:
self.loop.stop()
def load_options(self, options):
self.fire_event("load-options", options)
self.options = options
def read_report(self):
report = self.device.read_report()
if not report:
if report is False:
return
self.cleanup_device()
return
self.fire_event("device-report", report)
def run(self):
self.loop.run()
def exit(self, *args):
if self.device:
self.cleanup_device()
self.logger.error(*args)
self.error = True
def create_controller_thread(index, controller_options, dynamic=False):
controller = DS4Controller(index, controller_options, dynamic=dynamic)
thread = Thread(target=controller.run)
thread.daemon = True
thread.controller = controller
thread.start()
return thread
def main():
try:
options = load_options()
except ValueError as err:
Daemon.exit("Failed to parse options: {0}", err)
if options.hidraw:
backend = HidrawBackend(Daemon.logger)
else:
backend = BluetoothBackend(Daemon.logger)
try:
backend.setup()
except BackendError as err:
Daemon.exit(err)
if options.daemon:
Daemon.fork(options.daemon_log, options.daemon_pid)
threads = []
for index, controller_options in enumerate(options.controllers):
thread = create_controller_thread(index + 1, controller_options)
threads.append(thread)
for device in backend.devices:
connected_devices = []
for thread in threads:
# Controller has received a fatal error, exit
if thread.controller.error:
sys.exit(1)
if thread.controller.device:
connected_devices.append(thread.controller.device.device_addr)
# Clean up dynamic threads
if not thread.is_alive():
threads.remove(thread)
if device.device_addr in connected_devices:
backend.logger.warning("Ignoring already connected device: {0}",
device.device_addr)
continue
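# Reuse the first controller thread without a device; the for/else below only
# creates a new dynamic thread when every existing thread is already paired.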
for thread in filter(lambda t: not t.controller.device, threads):
break
else:
thread = create_controller_thread(len(threads) + 1,
options.default_controller,
dynamic=True)
threads.append(thread)
thread.controller.setup_device(device)
if __name__ == "__main__":
main()
|
actor_factory.py
|
#!/usr/bin/env python
#
# Copyright (c) 2020 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
import time
from threading import Thread, Lock
import itertools
from enum import Enum
try:
import queue
except ImportError:
import Queue as queue
from carla_ros_bridge.actor import Actor
from carla_ros_bridge.spectator import Spectator
from carla_ros_bridge.traffic import Traffic, TrafficLight
from carla_ros_bridge.vehicle import Vehicle
from carla_ros_bridge.lidar import Lidar, SemanticLidar
from carla_ros_bridge.radar import Radar
from carla_ros_bridge.gnss import Gnss
from carla_ros_bridge.pseudo_actor import PseudoActor
from carla_ros_bridge.imu import ImuSensor
from carla_ros_bridge.ego_vehicle import EgoVehicle
from carla_ros_bridge.collision_sensor import CollisionSensor
from carla_ros_bridge.lane_invasion_sensor import LaneInvasionSensor
from carla_ros_bridge.camera import Camera, RgbCamera, DepthCamera, SemanticSegmentationCamera, DVSCamera
from carla_ros_bridge.object_sensor import ObjectSensor
from carla_ros_bridge.rss_sensor import RssSensor
from carla_ros_bridge.walker import Walker
from carla_ros_bridge.traffic_lights_sensor import TrafficLightsSensor
from carla_ros_bridge.odom_sensor import OdometrySensor
from carla_ros_bridge.speedometer_sensor import SpeedometerSensor
from carla_ros_bridge.tf_sensor import TFSensor
from carla_ros_bridge.marker_sensor import MarkerSensor
from carla_ros_bridge.actor_list_sensor import ActorListSensor
from carla_ros_bridge.opendrive_sensor import OpenDriveSensor
from carla_ros_bridge.actor_control import ActorControl
from carla_ros_bridge.sensor import Sensor
import carla_common.transforms as trans
import carla
import numpy as np
# used to generate a random spawning position for vehicles
import random
secure_random = random.SystemRandom()
class ActorFactory(object):
TIME_BETWEEN_UPDATES = 0.1
class TaskType(Enum):
SPAWN_PSEUDO_ACTOR = 0
DESTROY_ACTOR = 1
SYNC = 2
def __init__(self, node, world, sync_mode=False):
self.node = node
self.world = world
self.blueprint_lib = self.world.get_blueprint_library()
self.spawn_points = self.world.get_map().get_spawn_points()
self.sync_mode = sync_mode
self._previous_actor_ids = []
self.actors = {}
self._task_queue = queue.Queue()
self._known_actor_ids = [] # used to immediately reply to spawn_actor/destroy_actor calls
self.lock = Lock()
self.spawn_lock = Lock()
# id generator for pseudo sensors
self.id_gen = itertools.count(10000)
self.thread = Thread(target=self._update_thread)
def start(self):
# create initially existing actors
self.update_available_objects()
self.thread.start()
def _update_thread(self):
"""
execution loop for async mode actor discovery
"""
while not self.node.shutdown.is_set():
time.sleep(ActorFactory.TIME_BETWEEN_UPDATES)
self.world.wait_for_tick()
self.update_available_objects()
def update_available_objects(self):
"""
update the available actors
"""
# get only carla actors
previous_actors = self._previous_actor_ids
current_actors = [x.id for x in self.world.get_actors()]
self._previous_actor_ids = current_actors
new_actors = [x for x in current_actors if x not in previous_actors]
deleted_actors = [x for x in previous_actors if x not in current_actors]
# Actual creation/removal of objects
self.lock.acquire()
for actor_id in new_actors:
carla_actor = self.world.get_actor(actor_id)
self._create_object_from_actor(carla_actor)
for actor_id in deleted_actors:
self._destroy_object(actor_id, delete_actor=False)
# update objects for pseudo actors here as they might have a carla actor as parent
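# Tasks queued by spawn_actor()/destroy_actor() are executed here while holding
# spawn_lock; a SYNC entry appears to act as a barrier, deferring later tasks
# until a freshly spawned carla actor has been picked up by the diff above.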
with self.spawn_lock:
while not self._task_queue.empty():
task = self._task_queue.get()
if task[0] == ActorFactory.TaskType.SPAWN_PSEUDO_ACTOR and not self.node.shutdown.is_set():
pseudo_object = task[1]
self._create_object(pseudo_object[0], pseudo_object[1].type, pseudo_object[1].id,
pseudo_object[1].attach_to, pseudo_object[1].transform)
elif task[0] == ActorFactory.TaskType.DESTROY_ACTOR:
actor_id = task[1]
self._destroy_object(actor_id, delete_actor=True)
elif task[0] == ActorFactory.TaskType.SYNC and not self.node.shutdown.is_set():
break
self.lock.release()
def update_actor_states(self, frame_id, timestamp):
"""
update the state of all known actors
"""
with self.lock:
for actor_id in self.actors:
try:
self.actors[actor_id].update(frame_id, timestamp)
except RuntimeError as e:
self.node.logwarn("Update actor {}({}) failed: {}".format(
self.actors[actor_id].__class__.__name__, actor_id, e))
continue
def clear(self):
for _, actor in self.actors.items():
actor.destroy()
self.actors.clear()
def spawn_actor(self, req):
"""
spawns an object
No object instances are created here. Instead, carla actors are created,
and pseudo objects are appended to a list to be created later.
"""
with self.spawn_lock:
if "pseudo" in req.type:
# only allow spawning pseudo objects if parent actor already exists in carla
if req.attach_to != 0:
carla_actor = self.world.get_actor(req.attach_to)
if carla_actor is None:
raise IndexError("Parent actor {} not found".format(req.attach_to))
id_ = next(self.id_gen)
self._task_queue.put((ActorFactory.TaskType.SPAWN_PSEUDO_ACTOR, (id_, req)))
else:
id_ = self._spawn_carla_actor(req)
self._task_queue.put((ActorFactory.TaskType.SYNC, None))
self._known_actor_ids.append(id_)
return id_
def destroy_actor(self, uid):
with self.spawn_lock:
objects_to_destroy = set(self._destroy_actor(uid))
for obj in objects_to_destroy:
self._task_queue.put((ActorFactory.TaskType.DESTROY_ACTOR, obj))
return objects_to_destroy
def _destroy_actor(self, uid):
objects_to_destroy = []
if uid in self._known_actor_ids:
objects_to_destroy.append(uid)
self._known_actor_ids.remove(uid)
# remove actors that have the actor to be removed as parent.
for actor in list(self.actors.values()):
if actor.parent is not None and actor.parent.uid == uid:
objects_to_destroy.extend(self._destroy_actor(actor.uid))
return objects_to_destroy
def _spawn_carla_actor(self, req):
"""
spawns an actor in carla
"""
if "*" in req.type:
blueprint = secure_random.choice(
self.blueprint_lib.filter(req.type))
else:
blueprint = self.blueprint_lib.find(req.type)
blueprint.set_attribute('role_name', req.id)
for attribute in req.attributes:
blueprint.set_attribute(attribute.key, attribute.value)
if req.random_pose is False:
transform = trans.ros_pose_to_carla_transform(req.transform)
else:
# get a random pose
transform = secure_random.choice(
self.spawn_points) if self.spawn_points else carla.Transform()
attach_to = None
if req.attach_to != 0:
attach_to = self.world.get_actor(req.attach_to)
if attach_to is None:
raise IndexError("Parent actor {} not found".format(req.attach_to))
carla_actor = self.world.spawn_actor(blueprint, transform, attach_to)
return carla_actor.id
def _create_object_from_actor(self, carla_actor):
"""
create an object for a given carla actor
Also creates the object for its parent, if it does not yet exist
"""
parent = None
relative_transform = None
if carla_actor.parent:
if carla_actor.parent.id in self.actors:
parent = self.actors[carla_actor.parent.id]
else:
parent = self._create_object_from_actor(carla_actor.parent)
# calculate relative transform
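# The child pose is re-expressed in the parent frame:
#   T_rel = inv(T_parent) * T_actor
# so an attached object (e.g. a sensor) keeps its mounting offset.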
actor_transform_matrix = trans.ros_pose_to_transform_matrix(
trans.carla_transform_to_ros_pose(carla_actor.get_transform()))
parent_transform_matrix = trans.ros_pose_to_transform_matrix(
trans.carla_transform_to_ros_pose(carla_actor.parent.get_transform()))
relative_transform_matrix = np.matrix(
parent_transform_matrix).getI() * np.matrix(actor_transform_matrix)
relative_transform = trans.transform_matrix_to_ros_pose(relative_transform_matrix)
parent_id = 0
if parent is not None:
parent_id = parent.uid
name = carla_actor.attributes.get("role_name", "")
if not name:
name = str(carla_actor.id)
obj = self._create_object(carla_actor.id, carla_actor.type_id, name,
parent_id, relative_transform, carla_actor)
return obj
def _destroy_object(self, actor_id, delete_actor):
if actor_id not in self.actors:
return
actor = self.actors[actor_id]
del self.actors[actor_id]
carla_actor = None
if isinstance(actor, Actor):
carla_actor = actor.carla_actor
actor.destroy()
if carla_actor and delete_actor:
carla_actor.destroy()
self.node.loginfo("Removed {}(id={})".format(actor.__class__.__name__, actor.uid))
def get_pseudo_sensor_types(self):
pseudo_sensors = []
for cls in PseudoActor.__subclasses__():
if cls.__name__ != "Actor":
pseudo_sensors.append(cls.get_blueprint_name())
return pseudo_sensors
def _create_object(self, uid, type_id, name, attach_to, spawn_pose, carla_actor=None):
# check that the actor is not already created.
if carla_actor is not None and carla_actor.id in self.actors:
return None
if attach_to != 0:
if attach_to not in self.actors:
raise IndexError("Parent object {} not found".format(attach_to))
parent = self.actors[attach_to]
else:
parent = None
if type_id == TFSensor.get_blueprint_name():
actor = TFSensor(uid=uid, name=name, parent=parent, node=self.node)
elif type_id == OdometrySensor.get_blueprint_name():
actor = OdometrySensor(uid=uid,
name=name,
parent=parent,
node=self.node)
elif type_id == SpeedometerSensor.get_blueprint_name():
actor = SpeedometerSensor(uid=uid,
name=name,
parent=parent,
node=self.node)
elif type_id == MarkerSensor.get_blueprint_name():
actor = MarkerSensor(uid=uid,
name=name,
parent=parent,
node=self.node,
actor_list=self.actors)
elif type_id == ActorListSensor.get_blueprint_name():
actor = ActorListSensor(uid=uid,
name=name,
parent=parent,
node=self.node,
actor_list=self.actors)
elif type_id == ObjectSensor.get_blueprint_name():
actor = ObjectSensor(
uid=uid,
name=name,
parent=parent,
node=self.node,
actor_list=self.actors,
)
elif type_id == TrafficLightsSensor.get_blueprint_name():
actor = TrafficLightsSensor(
uid=uid,
name=name,
parent=parent,
node=self.node,
actor_list=self.actors,
)
elif type_id == OpenDriveSensor.get_blueprint_name():
actor = OpenDriveSensor(uid=uid,
name=name,
parent=parent,
node=self.node,
carla_map=self.world.get_map())
elif type_id == ActorControl.get_blueprint_name():
actor = ActorControl(uid=uid,
name=name,
parent=parent,
node=self.node)
elif carla_actor.type_id.startswith('traffic'):
if carla_actor.type_id == "traffic.traffic_light":
actor = TrafficLight(uid, name, parent, self.node, carla_actor)
else:
actor = Traffic(uid, name, parent, self.node, carla_actor)
elif carla_actor.type_id.startswith("vehicle"):
if carla_actor.attributes.get('role_name')\
in self.node.parameters['ego_vehicle']['role_name']:
actor = EgoVehicle(
uid, name, parent, self.node, carla_actor,
self.node._ego_vehicle_control_applied_callback)
else:
actor = Vehicle(uid, name, parent, self.node, carla_actor)
elif carla_actor.type_id.startswith("sensor"):
if carla_actor.type_id.startswith("sensor.camera"):
if carla_actor.type_id.startswith("sensor.camera.rgb"):
actor = RgbCamera(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.camera.depth"):
actor = DepthCamera(uid, name, parent, spawn_pose,
self.node, carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith(
"sensor.camera.semantic_segmentation"):
actor = SemanticSegmentationCamera(uid, name, parent,
spawn_pose, self.node,
carla_actor,
self.sync_mode)
elif carla_actor.type_id.startswith("sensor.camera.dvs"):
actor = DVSCamera(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
else:
actor = Camera(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.lidar"):
if carla_actor.type_id.endswith("sensor.lidar.ray_cast"):
actor = Lidar(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.endswith(
"sensor.lidar.ray_cast_semantic"):
actor = SemanticLidar(uid, name, parent, spawn_pose,
self.node, carla_actor,
self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.radar"):
actor = Radar(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.gnss"):
actor = Gnss(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.imu"):
actor = ImuSensor(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.collision"):
actor = CollisionSensor(uid, name, parent, spawn_pose,
self.node, carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.rss"):
actor = RssSensor(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("sensor.other.lane_invasion"):
actor = LaneInvasionSensor(uid, name, parent, spawn_pose,
self.node, carla_actor,
self.sync_mode)
else:
actor = Sensor(uid, name, parent, spawn_pose, self.node,
carla_actor, self.sync_mode)
elif carla_actor.type_id.startswith("spectator"):
actor = Spectator(uid, name, parent, self.node, carla_actor)
elif carla_actor.type_id.startswith("walker"):
actor = Walker(uid, name, parent, self.node, carla_actor)
else:
actor = Actor(uid, name, parent, self.node, carla_actor)
self.actors[actor.uid] = actor
self.node.loginfo("Created {}(id={})".format(actor.__class__.__name__, actor.uid))
return actor
|
app_tests.py
|
# -*- encoding: utf8 -*-
'''
Mantra - test selection page
'''
import os
import json
import shutil
import threading
import time
import csv
from operator import attrgetter
import logging
import dash_html_components as html
import dash.dependencies as dd
import dash_core_components as dcc
from app import app
from config import cfg
import utils
# - Module logger
log = logging.getLogger(cfg.app_name)
# log = getlogger(__name__)
log.debug('logging via %s', log.name)
# - PAGE globals
PATH = '/tests'
PAGE_ID = 'app-{}'.format(PATH)
ID = '{}-{{}}'.format(__name__).format
CONTROLS = utils.get_domid('controls', PATH)
DISPLAY = utils.get_domid('display', PATH)
TESTS = [] # Global list of available tests
#-- helpers
# def test_actions(idx):
# 'return array of dcc.Links for possible actions on idx'
# buttons = {
# utils.F_PLAYABLE: ('far fa-play-circle fa-1x', 'play'),
# utils.F_OUTDATED: ('fas fa-recycle fa-1x', 'compile'),
# utils.F_DSTERROR: ('fas fa-times-circle', 'clear'),
# utils.F_SRCERROR: ('fas fa-question-circle', 'error'),
# }
# rv = []
# for flag in buttons.keys():
# if idx.cflags & flag:
# button, action = buttons.get(flag)
# rv.append(
# dcc.Link(
# html.I(className=button),
# href='/{};{}'.format(action, idx.test_id),
# className='btn-awesome',
# )
# )
# return rv
def action_icon(test_id, flag):
klass = {
'U': ('fas fa-sync fa-1x', 'update'),
'P': ('far fa-play-circle fa-1x', 'run'),
'C': ('fas fa-wrench fa-1x', 'compile'),
'O': ('fas fa-child', 'revert')
}
button, action = klass.get(flag)
return dcc.Link(
html.I(className=button, title=action),
href='/{};{}'.format(action, test_id),
className='btn-awesome')
def action_menu(test_id, flag):
# links = [dcc.Link(x, href='/{};{}'.format(x, test_id)) for x in act]
# U(pdatable) -> run, (re)compile, preview, delete
# P(layable) -> run, (re)compile, preview, delete
# C(reatable) -> compile, no dsts files yet: cannot run, preview, delete
# O(rphaned) -> run, preview, delete, revert, missing source files
flagged = [
# new pages
('UPO', dcc.Link('run', href='/run;{}'.format(test_id))),
('UPC', dcc.Link('compile', href='/compile;{}'.format(test_id))),
# in-page actions
('UPO', dcc.Link('preview',
href='{};{}?action=preview'.format(PATH, test_id))),
('O', dcc.Link('revert',
href='{};{}?action=revert'.format(PATH, test_id))),
('UPO', dcc.Link('delete',
href='{};{}?action=delete'.format(PATH, test_id)))
]
# not all links make sense all the time
links = [link for flags, link in flagged if flag in flags]
return html.Div(className="dropdown",
children=[
html.I(className='fa fa-ellipsis-v dropbtn'),
html.Div(
className='dropdown-content',
id='app-menu-content',
children=links)
])
def test_table2(categories):
'load mantra.idx from disk and return as html.Table'
idxs = utils.MantraIdx(cfg.src_dir, cfg.dst_dir)
rows = [
# Header row
html.Tr([
html.Th('Tests'),
html.Th(),
html.Th(),
html.Th(),
])
]
for idx in idxs:
if len(categories) and idx.category not in categories:
continue
# 'no_op' disables the link -> see mantra.css
# link inactive if dsts has yet to be created
linkClassName = 'no_op' if idx.flag == 'C' else ''
row = html.Tr([
html.Td(action_icon(idx.test_id, idx.flag)),
html.Td(html.A(href='/run;{}'.format(idx.test_id),
children=os.path.basename(idx.src),
className=linkClassName,
)),
html.Td(idx.category),
html.Td(action_menu(idx.test_id, idx.flag)),
]) # , title=rowTitle)
rows.append(row)
return html.Table(rows)
# def test_table(categories):
# 'load mantra.idx from disk and return as html.Table'
# idxs = []
# try:
# idxs = utils.mtr_idx_read(cfg.dst_dir).values()
# except FileNotFoundError:
# pass
# rows = [
# # Header row
# html.Tr([
# html.Th('Category'),
# html.Th('Test'),
# html.Th('Score'),
# html.Th('Test ID'),
# html.Th('#Q\'s'),
# html.Th('Actions'),
# ])
# ]
# idxs = sorted(idxs, key=attrgetter('category', 'score'))
# for idx in idxs:
# if len(categories) and idx.category not in categories:
# continue
# # see mantra.css for no_op to flag inactive link
# linkClassName = '' if idx.cflags & utils.F_PLAYABLE else 'no_op'
# row = html.Tr([
# html.Td(idx.category),
# html.Td(html.A(href='/play;{}'.format(idx.test_id),
# children=os.path.basename(idx.src_file),
# className=linkClassName,
# )),
# html.Td('{}%'.format(idx.score)),
# html.Td(idx.test_id),
# html.Td(idx.numq),
# html.Td(test_actions(idx)),
# ]) # , title=rowTitle)
# rows.append(row)
# return html.Table(rows)
def category_options():
'setup categories for category filter on test table'
try:
tests = utils.MantraIdx(cfg.src_dir, cfg.dst_dir).idx.values()
except OSError:
tests = []
rv = []
for cat in sorted(set([test.category for test in tests])):
rv.append({'label': cat, 'value': cat})
return rv
_layout = html.Div(
className='row',
id=PAGE_ID,
children=[
# Controls | Html table
html.Div(
className='four columns',
children=[
html.Div(dcc.Dropdown(
id=ID('category'),
value=[],
multi=True,
placeholder='Category ...',
options=category_options()
),
style={'display': 'block'}),
html.Div('loading ...', id=DISPLAY)
]),
html.Progress(
max=100, value=60
),
# modal display
html.Div(
html.Div(
html.Div([
html.Div([
html.I(id=ID('modal-close'),
n_clicks=0,
className='fas fa-times w3-button w3-display-topright'),
html.H1(' ', id=ID('modal-header')),
],
className='w3-container w3-teal'),
html.Div([
dcc.Interval(interval=500, id=ID('modal-timer'),
n_intervals=0),
html.Div([], id=ID('modal-text'),
className='w3-container')
]),
],
className='w3-modal-content w3-animate-top w3-card-4'),
className='w3-modal',
style={'display': 'none'},
id=ID('modal-1')),
className='seven columns')
])
def layout(nav, controls):
# return static layout with cached controls settings, if any
log.debug('nav %s', nav)
log.debug('controls %s', controls)
if len(nav.query):
_layout[ID('modal-1')].style = {'display': 'block'}
_layout[ID('modal-timer')].interval = 500 # refresh every 500 ms
_layout[ID('modal-header')].children = [
html.I(className='far fa-file-code'),
' {}'.format(nav.test_id)
]
# handle query in diff. thread
QueryHandler(nav.test_id).start(nav, cfg)
else:
_layout[ID('modal-1')].style = {'display': 'none'}
_layout[ID('modal-timer')].interval = 1000*3600*24 # refresh 1/day
return utils.set_controls(_layout, controls)
# -- process nav.query
class QueryHandler(metaclass=utils.Cached):
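# utils.Cached is assumed to be a memoizing metaclass: constructing
# QueryHandler with the same job id returns the same instance, and find()
# looks that instance up so the Dash callbacks can poll the handler that
# layout() started for a given test_id.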
def __init__(self, job):
self.job = job
self.msgs = []
self.running = False
def start(self, nav, cfg):
self.nav = nav
self.cfg = cfg
threading.Thread(target=self._run, name=self.job).start()
return self
@classmethod
def find(cls, *args):
'find instance for args or return None'
return cls._Cached__cache.get(args, None)
def _run(self):
self.running = True
log.debug('QueryHandler(%s) - started', self.job)
for action in self.nav.query.get('action', []):
meth = getattr(self, 'do_{}'.format(action), None)
if meth is None:
self.msgs.append('ignore unknown action {}'.format(action))
continue
else:
self.msgs.append('-> {}'.format(action))
meth()
time.sleep(2) # give modal update callback time to fire
self.running = False # let the timer callback switch updates off
def do_delete(self):
'delete compiled output files'
dst_dir = os.path.join(self.cfg.dst_dir, self.nav.test_id)
for fname in utils.glob_files(dst_dir, ['*']):
fullname = os.path.join(dst_dir, fname)
log.debug('del %s', fullname)
os.remove(fullname)
self.msgs.append('rm {}'.format(fullname))
shutil.rmtree(dst_dir)
self.msgs.append('rmtree {}'.format(dst_dir))
return self
def do_clear_history(self):
'clear logged history of tests'
pass
# -- Page controls
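# The category dropdown value is serialized as JSON into the hidden CONTROLS
# element; the DISPLAY callback below re-renders the test table whenever that
# cached state changes, which also restores the filter when the page is revisited.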
@app.callback(
dd.Output(CONTROLS, 'children'),
[dd.Input(ID('category'), 'value')])
def controls(category):
'store page state in cache and controls for revisits'
controls = json.dumps([
(ID('category'), 'value', category),
])
log.debug('save controls %s', controls)
return controls
# - display html table, trigger is page cache
@app.callback(
dd.Output(DISPLAY, 'children'),
[dd.Input(CONTROLS, 'children')])
def display(controls):
controls = json.loads(controls)
categories = []
for id_, attr, val in controls:
if id_ == ID('category'):
categories = val
return test_table2(categories)
@app.callback(
dd.Output('app-url', 'search'),
[dd.Input('app-url', 'href')])
def reset_url(pathname):
'clear query/search from url'
log.debug('resetting path %s', pathname)
return ''
@app.callback(
dd.Output(ID('modal-1'), 'style'),
[dd.Input(ID('modal-close'), 'n_clicks'),
dd.Input(ID('modal-text'), 'n_clicks')],
[dd.State(ID('modal-1'), 'style')])
def toggle_modal(n_close, n_modal, style):
'modal close button was clicked, so hide the modal'
n_close = 0 if n_close is None else n_close
n_modal = 0 if n_modal is None else n_modal
clicks = n_close + n_modal
style = {'display': 'none'} if clicks > 0 else style
return style
@app.callback(
dd.Output(ID('modal-timer'), 'interval'),
[dd.Input(ID('modal-1'), 'style'),
dd.Input(ID('modal-timer'), 'n_intervals')],
[dd.State('app-nav', 'children')])
def toggle_updates(style, nvals, nav):
'stop updating if there is no QueryHandler for nav anymore'
ON = '500' # refresh every 500 ms
OFF = '86400000' # refresh once a day
nav = utils.UrlNav(*json.loads(nav))
qh = QueryHandler.find(nav.test_id)
rv = ON if qh and qh.running else OFF
msg = 'running -> ON' if qh and qh.running else 'not found -> OFF'
log.debug('QueryHandler(%s) - %s', nav.test_id, msg)
return rv
@app.callback(
dd.Output(ID('modal-text'), 'children'),
[dd.Input(ID('modal-timer'), 'n_intervals')],
[dd.State('app-nav', 'children'),
dd.State(ID('modal-text'), 'children')])
def update_modal(nvals, nav, kids):
'display QueryHandler.msgs while it is running'
nav = utils.UrlNav(*json.loads(nav))
log.debug('[%s] update_modal', nvals)
qh = QueryHandler.find(nav.test_id)
if qh and qh.running:
log.debug(' - return %s QueryHandler.msgs', len(qh.msgs))
return html.Pre('\n'.join(qh.msgs))
log.debug(' - returning %s kids', len(kids))
return kids
|
video.py
|
"""
This file contains routines that help with processing streaming videos,
reading frames, and picking pixel locations.
Code written by Daiki Horiike and Robin Scheibler, 2018
"""
import cv2
from threading import Thread
import queue
class FrameGrabber(object):
def __init__(self):
self.the_frame = None
def process(self, frame):
self.the_frame = frame
def extract(self):
return self.the_frame
class ThreadedVideoStream(object):
"""
This class capture a video (either from live stream or file) using
the opencv api in a separate thread.
Parameters
----------
video: int or str
If an int, this is the index of the video stream. If a str, this is a filename.
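Example (illustrative; "video.mp4" is just a placeholder filename)::

    cap = ThreadedVideoStream("video.mp4")
    frame = cap.read()   # blocking read of a single RGB frame
    cap.stop()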
"""
def __init__(self, video, start=0, end=None, qmax_len=200):
# The maximum number of frames to buffer
self.qmax_len = qmax_len
# we'll store frames there
self.queue = queue.Queue()
self.video_source = video
self._start = start
if end == -1:
self._end = None
else:
self._end = end
# Try to access the device
self.capture = cv2.VideoCapture(self.video_source)
if not self.capture:
raise ValueError("Couldn" "t open the device.")
if self._start != 0:
# So CAP_PROP_POS_FRAMES seems to be broken,
# per https://github.com/opencv/opencv/issues/9053
self.pos_frames = self._start // 2 # set first frame to read
print(
"Warning: setting start frame seems buggy "
"(https://github.com/opencv/opencv/issues/9053). "
"A hack was used. Use at your own risk"
)
if self._end is not None:
self._end = self._end - self._start
""" Start the stream """
self._stopped = False # Use a flag to know the state of the process
self._count = 0
self.thread = Thread(target=self._frame_read_loop, args=())
self.thread.start()
def __len__(self):
return self.queue.qsize()
@property
def is_streaming(self):
""" Returns True if capture is running, and False otherwise. """
return not self._stopped
@property
def available(self):
return self.is_streaming and self.queue.qsize() > 0
@property
def width(self):
return int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))
@property
def height(self):
return int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
@property
def brightness(self):
return self.capture.get(cv2.CAP_PROP_BRIGHTNESS)
@brightness.setter
def brightness(self, value):
self.capture.set(cv2.CAP_PROP_BRIGHTNESS, value)
@property
def exposure(self):
return self.capture.get(cv2.CAP_PROP_EXPOSURE)
@exposure.setter
def exposure(self, value):
self.capture.set(cv2.CAP_PROP_EXPOSURE, value)
@property
def auto_exposure(self):
return self.capture.get(cv2.CAP_PROP_AUTO_EXPOSURE)
@auto_exposure.setter
def auto_exposure(self, value):
self.capture.set(cv2.CAP_PROP_AUTO_EXPOSURE, value)
@property
def fps(self):
return self.capture.get(cv2.CAP_PROP_FPS)
@property
def shape(self):
""" The frame shape in numpy format """
return (self.height, self.width)
@property
def pos_frames(self):
return self.capture.get(cv2.CAP_PROP_POS_FRAMES)
@pos_frames.setter
def pos_frames(self, value):
self.capture.set(cv2.CAP_PROP_POS_FRAMES, value)
def __del__(self):
self.stop() # stop, just in case
def stop(self):
""" Stop the stream """
self._stopped = True
self.thread.join() # wait for frame reading loop to stop
with self.queue.mutex:
self.queue.queue.clear()
# close the video feed
if self.capture.isOpened():
self.capture.release()
def read(self, n=1, block=True, timeout=1):
"""
Read some frames.
Parameters
----------
n: int
Number of frames to retrieve
block: optional, bool
Whether to do a blocking call or not
"""
if self.queue.qsize() == 0 and self._stopped:
return None
if n == 1:
while not self._stopped:
try:
return self.queue.get(block=block, timeout=timeout)
except queue.Empty:
if self._stopped:
return None
elif n > 1:
ret = []
while len(ret) < n:
try:
ret.append(self.queue.get(block=block, timeout=timeout))
except queue.Empty:
if self._stopped:
return ret
return ret
else:
raise ValueError("n must be strictly positive")
def _frame_read_loop(self):
""" This method will fetch the frames in a concurrent thread """
while not self._stopped:
if self.queue.qsize() >= self.qmax_len:
continue
ret, frame = self.capture.read()
self._count += 1
if not ret:
break
else:
# Return RGB frame
self.queue.put(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
if self._end is not None and self._count >= self._end:
break
self._stopped = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.stop()
def video_stream(video, start=0, end=None, callback=None, show=False):
"""
Streams a video for display or processing
Parameters
----------
video: int or str
If an int, this is the index of the video stream. If a str, this is a filename.
start: int
The frame number where to start the streaming
end: int
The last frame to stream
callback: func
A function to call on each frame
show: bool
If True, the video is displayed during streaming
"""
if show:
cv2.namedWindow("image", cv2.WINDOW_AUTOSIZE)
with ThreadedVideoStream(video, start=start, end=end) as cap:
fps = cap.fps
while cap.is_streaming:
frame = cap.read()
if frame is None:
break
if show:
cv2.imshow("image", frame)
if cv2.waitKey(1) & 0xFF == ord("q"):
break
if callback is not None:
callback(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
if show:
cv2.destroyAllWindows()
return fps
def frame_grabber(video, frame=0, show=False):
"""
Retrieve a single frame from a video file or stream
Parameters
----------
video: int or str
If an int, this is the index of the video stream. If a str, this is a filename.
frame: int, optional
The frame index to grab
show: bool, optional
Display the frame using matplotlib
"""
grabber = FrameGrabber()
video_stream(video, start=frame, end=frame, callback=grabber.process)
if show:
import matplotlib.pyplot as plt
import numpy as np
plt.imshow(np.array(grabber.extract()))
plt.show()
return grabber.extract()
class MouseParam(object):
""" Helper object to get a mouse click coordinates in an image """
def __init__(self, input_img_name):
# parameters of the mouse click
self.mouse_event = {"x": None, "y": None, "event": None, "flags": None}
# setting of the mouse callback
cv2.setMouseCallback(input_img_name, self.__callback, None)
# callback function
def __callback(self, event_type, x, y, flags, userdata):
self.mouse_event["x"] = x
self.mouse_event["y"] = y
self.mouse_event["event"] = event_type
self.mouse_event["flags"] = flags
def get(self, param_name):
return self.mouse_event[param_name]
def get_pos(self):
return (self.mouse_event["x"], self.mouse_event["y"])
def pixel_from_click(frame):
"""
Obtain the location of a pixel clicked with the mouse in a frame
"""
window_name = "input window"
cv2.imshow(window_name, frame)
mouse_data = MouseParam(window_name)
while 1:
cv2.waitKey(20)
if mouse_data.get("event") == cv2.EVENT_LBUTTONDOWN: # left click
y, x = mouse_data.get_pos()
break
cv2.destroyAllWindows()
return x, y
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Video loader/stream player")
parser.add_argument("video", default=0, type=str, help="Stream index or file name")
parser.add_argument(
"--display",
"-d",
action="store_true",
help="Displays the video as it is streamed",
)
args = parser.parse_args()
try:
video = int(args.video)
except:
video = args.video
# Start capture
video_stream(video, show=args.display)
|
__init__.py
|
#! /usr/bin/env python3
# Example wiring (LT-8900 on board to Raspberry Pi):
#
# LT-8900
# _--------------------------------------------------------_
# | VCC | RST | MISO | MOSI | SCK | CS | GND |
# |-------+-------+----- -+-------+-------+-------+--------|
# | 3.3v | Reset | SPI | SPI | SPI | SPI | Ground |
# | | | | | Clock | CE0 | |
# -___+___|___+___|___+___|___+___|___+___|___+___|___+____-
# | | | | | | |
# | | | | | | |
# | | | | | | |
# _---+-------+-------+-------+-------+-------+-------+----_
# | 3.3v | GPIO5 | MISO | MOSI | SCLK | CE0 | 0v |
# |-------+-------+-------+-------+-------+-------+--------|
# | P1-17 | P1-18 | P1-21 | P1-19 | P1-23 | P1-24 | P1-25 |
# -________________________________________________________-
# Raspberry Pi
import spidev
import time
import threading
import collections
class dummy_context_mgr():
def __enter__(self):
return None
def __exit__(self, exc_type, exc_value, traceback):
return False
class Radio:
_default_register_values = {
'format_config': {
'crc_enabled': 1,
'scramble_enabled': 0,
'packet_length_encoded': 1,
'auto_term_tx': 1,
'auto_ack': 1,
'pkt_fifo_polarity': 0,
'crc_initial_data': 0
},
'radio_state': {'tx_enabled': 0, 'rx_enabled': 0, 'channel': 76},
'undocumented_power': {'value': 0x6c90},
'power': {
'current': 8,
'reserved_1': 1,
'gain': 0
},
'rssi_power': {'mode': 0},
'crystal': {'trim_adjust': 0},
'packet_config': {
'preamble_len': 2,
'syncword_len': 1,
'trailer_len': 0,
'packet_type': 0,
'fec_type': 0,
'br_clock_sel': 0
},
'chip_power': {
'power_down': 0,
'sleep_mode': 0,
'br_clock_on_sleep': 0,
'rexmit_times': 10,
'miso_tri_opt': 0,
'scramble_value': 0
},
'thresholds': {
'fifo_empty_threshold': 8,
'fifo_full_threshold': 16,
'syncword_error_bits': 2
},
'scan_rssi': {'channel': 63, 'ack_time': 176},
'gain_block': {'enabled': 1},
'vco_calibrate': {'enabled': 0},
'scan_rssi_state': {'enabled': 0, 'channel_offset': 0, 'wait_time': 15}
}
_register_map = [
{'name': "Unknown"}, # 0
{'name': "Unknown"}, # 1
{'name': "Unknown"}, # 2
{ # 3
'name': 'phase_lock',
'reserved_1': [13, 15],
'rf_synth_lock': [12, 12],
'reserved_2': [0, 11]
},
{'name': "Unknown"}, # 4
{'name': "Unknown"}, # 5
{ # 6
'name': "raw_rssi",
'raw_rssi': [10, 15],
'reserved_1': [0, 9]
},
{ # 7
'name': "radio_state",
'reserved_1': [9, 15],
'tx_enabled': [8, 8],
'rx_enabled': [7, 7],
'channel': [0, 6]
},
{ # 8
'name': "undocumented_power",
'value': [0, 15]
},
{ # 9
'name': "power",
'current': [12, 15],
'reserved_1': [11, 11],
'gain': [7, 10],
'reserved_2': [0, 6]
},
{ # 10
'name': "gain_block",
'reserved_1': [1, 15],
'enabled': [0, 0]
},
{ # 11
'name': "rssi_power",
'reserved_1': [9, 15],
'mode': [8, 8],
'reserved_2': [0, 7]
},
{'name': "Unknown"}, # 12
{'name': "Unknown"}, # 13
{'name': "Unknown"}, # 14
{'name': "Unknown"}, # 15
{'name': "Unknown"}, # 16
{'name': "Unknown"}, # 17
{'name': "Unknown"}, # 18
{'name': "Unknown"}, # 19
{'name': "Unknown"}, # 20
{'name': "Unknown"}, # 21
{'name': "Unknown"}, # 22
{ # 23
'name': "vco_calibrate",
'reserved_1': [3, 15],
'enabled': [2, 2],
'reserved_2': [0, 1]
},
{'name': "Unknown"}, # 24
{'name': "Unknown"}, # 25
{'name': "Unknown"}, # 26
{ # 27
'name': "crystal",
'reserved_1': [6, 15],
'trim_adjust': [0, 5]
},
{'name': "Unknown"}, # 28
{ # 29
'name': "minor_version",
'reserved_1': [8, 15],
'rf': [4, 7],
'reserved_2': [3, 3],
'digital': [0, 2]
},
{ # 30
'name': "manufacture_1",
'manuf_code_low': [0, 15]
},
{ # 31
'name': "manufacture_2",
'rf_code': [12, 15],
'manuf_code_high': [0, 11]
},
{ # 32
'name': "packet_config",
'preamble_len': [13, 15],
'syncword_len': [11, 12],
'trailer_len': [8, 10],
'packet_type': [6, 7],
'fec_type': [4, 5],
'br_clock_sel': [1, 3],
'reserved_1': [0, 0]
},
{ # 33
'name': "vco_pa_delays",
'vco_on_delay': [8, 15],
'pa_off_delay': [6, 7],
'pa_tx_delay': [0, 5]
},
{ # 34
'name': "tx_packet_delays",
'packet_control_direct': [15, 15],
'tx_cw_delay': [8, 14],
'reserved_1': [6, 7],
'tx_sw_on_delay': [0, 5]
},
{ # 35
'name': "chip_power",
'power_down': [15, 15],
'sleep_mode': [14, 14],
'reserved_1': [13, 13],
'br_clock_on_sleep': [12, 12],
'rexmit_times': [8, 11],
'miso_tri_opt': [7, 7],
'scramble_value': [0, 6]
},
{ # 36
'name': "syncword_0",
'value': [0, 15]
},
{ # 37
'name': "syncword_1",
'value': [0, 15]
},
{ # 38
'name': "syncword_2",
'value': [0, 15]
},
{ # 39
'name': "syncword_3",
'value': [0, 15]
},
{ # 40
'name': "thresholds",
'fifo_empty_threshold': [11, 15],
'fifo_full_threshold': [6, 10],
'syncword_error_bits': [0, 5]
},
{ # 41
'name': "format_config",
'crc_enabled': [15, 15],
'scramble_enabled': [14, 14],
'packet_length_encoded': [13, 13],
'auto_term_tx': [12, 12],
'auto_ack': [11, 11],
'pkt_fifo_polarity': [10, 10],
'reserved_1': [8, 9],
'crc_initial_data': [0, 7]
},
{ # 42
'name': "scan_rssi",
'channel': [10, 15],
'reserved_1': [8, 9],
'ack_time': [0, 7]
},
{ # 43
'name': "scan_rssi_state",
'enabled': [15, 15],
'channel_offset': [8, 14],
'wait_time': [0, 7]
},
{'name': "Unknown"}, # 44
{'name': "Unknown"}, # 45
{'name': "Unknown"}, # 46
{'name': "Unknown"}, # 47
{ # 48
'name': "status",
'crc_error': [15, 15],
'fec_error': [14, 14],
'framer_status': [8, 13],
'syncword_rx': [7, 7],
'packet_flag': [6, 6],
'fifo_flag': [5, 5],
'reserved_1': [0, 4]
},
{'name': "Unknown"}, # 49
{ # 50
'name': "fifo",
'value': [0, 15]
},
{'name': "Unknown"}, # 51
{ # 52
'name': "fifo_state",
'clear_write': [15, 15],
'reserved_1': [14, 14],
'write_ptr': [8, 13],
'clear_read': [7, 7],
'reserved_2': [6, 6],
'read_ptr': [0, 5]
}
]
def __init__(self, spi_bus, spi_dev, config = None):
spi = spidev.SpiDev()
spi.open(spi_bus, spi_dev)
self._spi = spi
self._dequeue_thread = None
self._last_syncword = None
self._software_tx_queue = {}
self._software_tx_queue_next_time = {}
self.configure(config, update = False)
if len(self._register_map) != 53:
raise ValueError('Inconsistent register map!')
return None
def __del__(self):
self._debug('Deleting object')
self._config['use_software_tx_queue'] = False
self._spi.close()
def _debug(self, message):
if 'debug_log_command' in self._config:
self._config['debug_log_command'](message)
return None
def _info(self, message):
log_command = None
if 'info_log_command' in self._config:
log_command = self._config['info_log_command']
elif 'debug_log_command' in self._config:
log_command = self._config['debug_log_command']
if log_command is None:
return None
log_command(message)
return None
def _error(self, message):
log_command = None
if 'error_log_command' in self._config:
log_command = self._config['error_log_command']
elif 'info_log_command' in self._config:
log_command = self._config['info_log_command']
elif 'debug_log_command' in self._config:
log_command = self._config['debug_log_command']
if log_command is None:
return None
log_command(message)
return None
def _get_mutex(self, real_mutex = True):
if not real_mutex:
return dummy_context_mgr()
mutex = self._config.get('mutex', dummy_context_mgr())
return mutex
def _reset_device(self):
self._info("Resetting radio {}".format(__name__))
reset_command = self._config.get('reset_command', None)
if reset_command is None:
return None
reset_command()
return None
def _should_use_queue(self):
if 'use_software_tx_queue' in self._config:
return self._config['use_software_tx_queue']
return False
def _register_name(self, reg_number):
return self._register_map[reg_number]['name']
def _register_number(self, reg_string):
reg_string_orig = reg_string
if isinstance(reg_string, int):
return reg_string
if reg_string.isnumeric():
return int(reg_string)
for reg_number, reg_info in enumerate(self._register_map):
if reg_info['name'] == reg_string:
return reg_number
raise NameError("Invalid register value {}".format(reg_string_orig))
def _check_radio(self):
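# Registers 0 and 1 hold fixed signature values on the LT-8900 (0x6FE0 and
# 0x5681 according to this driver); anything else means the chip did not
# answer correctly on the SPI bus.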
value1 = self.get_register(0)
value2 = self.get_register(1)
if value1 == 0x6fe0 and value2 == 0x5681:
return True
self._debug(f'Expected 0x6fe0, 0x5681 and got 0x{value1:04x}, 0x{value2:04x}')
return False
def _get_default_register_value(self, register):
return self._default_register_values.get(register, {})
def _set_default_register_values(self):
self._last_format_config = {}
for register_name, register_value in self._default_register_values.items():
if register_name == 'format_config':
self._apply_packet_format_config({})
continue
self.put_register_bits(register_name, register_value)
return True
def _put_register_high_low(self, reg, high, low, delay = None):
if delay is None:
delay = 10
reg = self._register_number(reg)
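# One transfer is three bytes on the wire: the register address (bit 7 set for
# a read) followed by the 16-bit register value, most significant byte first.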
result = self._spi.xfer([reg, high, low], self._spi.max_speed_hz, delay)
if reg & 0x80 == 0x80:
self._debug(" regRead[%02X] = %s" % ((reg & 0x7f), result))
else:
self._debug("regWrite[%02X:0x%02X%02X] = %s" % (reg, high, low, result))
return result
def put_register(self, reg, value, delay = None):
high = (value >> 8) & 0xff
low = value & 0xff
return self._put_register_high_low(reg, high, low, delay = delay)
def put_register_bits(self, reg, bits_dict, delay = None):
# Convert register to an integer
reg = self._register_number(reg)
# Lookup register in the register map
register_info = self._register_map[reg]
# Create a dictionary to hold the parsed results
value = 0
for key in bits_dict:
if key == "name":
continue
bit_range = register_info[key]
mask = ((1 << (bit_range[1] - bit_range[0] + 1)) - 1) << bit_range[0]
key_value = (bits_dict[key] << bit_range[0]) & mask
value = value | key_value
result = self.put_register(reg, value, delay = delay)
return result
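    # Worked example (illustrative only, not executed): packing bits for the
    # 'fifo_state' register defined in the register map above, whose fields are
    # given as [low_bit, high_bit]: clear_write=[15,15], write_ptr=[8,13],
    # clear_read=[7,7], read_ptr=[0,5].
    # For bits_dict = {'clear_write': 1, 'clear_read': 1}:
    #   clear_write: mask = ((1 << 1) - 1) << 15 = 0x8000 -> contributes 0x8000
    #   clear_read:  mask = ((1 << 1) - 1) << 7  = 0x0080 -> contributes 0x0080
    #   value = 0x8000 | 0x0080 = 0x8080, which is what put_register() then writes.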
def get_register(self, reg):
# Convert register to an integer
reg = self._register_number(reg)
# Reading of a register is indicated by setting high bit
read_reg = reg | 0b10000000
# Put the request with space for the reply
value = self._put_register_high_low(read_reg, 0, 0)
# The reply is stored in the lower two bytes
result = value[1] << 8 | value[2]
# Return result
return result
def get_register_bits(self, reg, value = None):
# Convert register to an integer
reg = self._register_number(reg)
# Get the register's value (unless one was supplied)
if value is None:
value = self.get_register(reg)
# Lookup register in the register map
register_info = self._register_map[reg]
# Create a dictionary to hold the parsed results
result = {'name': register_info['name']}
for key in register_info:
if key == "name":
continue
bit_range = register_info[key]
mask = ((1 << (bit_range[1] - bit_range[0] + 1)) - 1) << bit_range[0]
key_value = (value & mask) >> bit_range[0]
result[key] = key_value
# Return the filled in structure
return result
def configure(self, config, update = True):
if config is None:
config = {}
if update:
self._config.update(config)
else:
self._config = config
with self._get_mutex():
self._spi.max_speed_hz = self._config.get('frequency', 4000000)
self._spi.bits_per_word = self._config.get('bits_per_word', 8)
self._spi.cshigh = self._config.get('csigh', False)
self._spi.no_cs = self._config.get('no_cs', False)
self._spi.lsbfirst = self._config.get('lsbfirst', False)
self._spi.threewire = self._config.get('threewire', False)
self._spi.mode = self._config.get('mode', 1)
# If using a queue, start a thread to run the queue
if self._should_use_queue():
if self._dequeue_thread is None:
self._dequeue_thread = threading.Thread(target = self._run_queue, daemon = True)
self._software_tx_queue_mutex = threading.Lock()
self._dequeue_thread.start()
else:
if self._dequeue_thread is not None:
self._debug("Joining existing thread to wait for termination")
self._dequeue_thread.join()
self._dequeue_thread = None
self._software_tx_queue_mutex = None
return None
def initialize(self):
self._reset_device()
self._set_default_register_values()
if not self._check_radio():
return False
return True
def _reinitialize(self):
self.initialize()
self.set_syncword(self._last_syncword, submit_queue = None, force = True)
self._apply_packet_format_config(self._last_format_config)
def set_channel(self, channel):
state = self.get_register_bits('radio_state')
state['channel'] = channel
self.put_register_bits('radio_state', state, delay = 130)
return state
def set_syncword(self, syncword, force = False, submit_queue = '__DEFAULT__'):
# If queuing is being used, just store this message
if submit_queue is not None and self._should_use_queue():
self._enqueue(submit_queue, syncword, None, None, post_delay = 0)
return None
# Do not set the syncword again if it's not needed
if not force:
if self._last_syncword is not None:
if syncword == self._last_syncword:
return None
self._last_syncword = syncword
packet_config = self.get_register_bits('packet_config')
packet_config['syncword_len'] = len(syncword) - 1
self.put_register_bits('packet_config', packet_config)
if len(syncword) == 1:
self.put_register("syncword_0", syncword[0])
elif len(syncword) == 2:
self.put_register("syncword_0", syncword[1])
self.put_register("syncword_3", syncword[0])
elif len(syncword) == 3:
self.put_register("syncword_0", syncword[2])
self.put_register("syncword_2", syncword[1])
self.put_register("syncword_3", syncword[0])
elif len(syncword) == 4:
self.put_register("syncword_0", syncword[3])
self.put_register("syncword_1", syncword[2])
self.put_register("syncword_2", syncword[1])
self.put_register("syncword_3", syncword[0])
elif len(syncword) > 4:
raise ValueError("SyncWord length must be less than 5")
return None
def fill_fifo(self, message, include_length = True, lock = True):
new_message = [self._register_number('fifo')]
if include_length:
new_message = new_message + [len(message)]
new_message = new_message + message
log_message = new_message.copy()
delay = 10 * len(message)
# Transfer the message
with self._get_mutex(lock):
result = self._spi.xfer(new_message, self._spi.max_speed_hz, delay)
self._debug("Writing: {} = {}".format(log_message, result))
need_reset = False
for check_result in result:
if check_result != 1:
need_reset = True
if need_reset:
self._error("While transmitting we got an error, reinitializing everything")
self._reinitialize()
return new_message
def transmit(self, message, channel = None, lock = True, post_delay = 0, syncword = None, submit_queue = '__DEFAULT__', format_config = None):
# If we are using a radio transmit queue, just queue this message
# (unless we are called from the dequeue procedure)
if submit_queue is not None and self._should_use_queue():
if syncword is None:
syncword = self._last_syncword
self._enqueue(submit_queue, syncword, message, channel, post_delay = post_delay, format_config = format_config)
return True
sent_packet = True
with self._get_mutex(lock):
# Set the syncword
if syncword is not None:
self.set_syncword(syncword, submit_queue = None)
# Apply any format changes
radio_format_config = self._apply_packet_format_config(format_config)
self._debug("Radio format_config = {}".format(radio_format_config))
# Determine if the length should be included
if radio_format_config['packet_length_encoded'] == 1:
include_length = True
else:
include_length = False
if radio_format_config['auto_term_tx'] == 1:
manual_terminate = False
else:
manual_terminate = True
if channel is None:
state = self.get_register_bits('radio_state')
channel = state['channel']
# Initialize the transmitter
self.put_register_bits('radio_state', {
'tx_enabled': 0,
'rx_enabled': 0,
'channel': 0
})
self.put_register_bits('fifo_state', {
'clear_read': 1,
'clear_write': 1
})
# Format message to send to fifo
self.fill_fifo(message, include_length = include_length, lock = False)
# Tell the radio to transmit the FIFO buffer to the specified channel
self.put_register_bits('radio_state', {
'tx_enabled': 1,
'rx_enabled': 0,
'channel': channel
}, delay = 1000)
while not manual_terminate:
radio_status = self.get_register_bits('status')
self._debug("radio_status={}".format(radio_status))
if radio_status['packet_flag'] == 1:
break
if radio_status['framer_status'] == 0:
sent_packet = False
break
time.sleep(0.001)
# Stop transmitting, if needed
if manual_terminate:
self.put_register_bits('radio_state', {
'tx_enabled': 0,
'rx_enabled': 0,
'channel': channel
})
if post_delay != 0:
time.sleep(post_delay)
return sent_packet
def multi_transmit(self, message, channels, retries = 3, delay = 0.1, syncword = None, submit_queue = '__DEFAULT__', format_config = None):
if len(channels) == 0 or retries == 0:
self._error("Asked to send the message {} a total of zero times ({} channels, {} retries)".format(message, channels, retries))
# Wait at-least 650 microseconds between frames
min_delay = 650.0 / 1000000.0
post_delay = min_delay
final_delay = delay
for channel_idx in range(len(channels)):
if channel_idx == (len(channels) - 1):
retries -= 1
channel = channels[channel_idx]
for i in range(retries):
if not self.transmit(message, channel, post_delay = post_delay, syncword = syncword, submit_queue = submit_queue, format_config = format_config):
return False
if not self.transmit(message, channel, post_delay = final_delay, syncword = syncword, submit_queue = submit_queue, format_config = format_config):
return False
return True
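    # Behaviour sketch (descriptive of the code above; channel and retry values are
    # example numbers only): multi_transmit(msg, channels=[4, 9], retries=3) sends
    # 3 frames with the minimum inter-frame delay plus one final frame with `delay`
    # on channel 4, then, because the retry count is decremented for the last
    # channel, 2 such frames plus one final frame on channel 9.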
def _enqueue(self, submit_queue, syncword, message, channel, post_delay = 0, format_config = None):
if not self._should_use_queue():
raise ValueError('internal error: _enqueue called with queueing disabled')
with self._software_tx_queue_mutex:
if submit_queue not in self._software_tx_queue:
self._software_tx_queue[submit_queue] = collections.deque([])
self._software_tx_queue[submit_queue].append({
'syncword': syncword,
'message': message,
'channel': channel,
'post_delay': post_delay,
'format_config': format_config
})
return None
def _run_queue(self):
self._debug("Started run_queue process")
sleep_time = 0
while True:
if sleep_time != 0:
self._debug("Sleeping for {} seconds".format(sleep_time))
time.sleep(sleep_time)
with self._software_tx_queue_mutex:
for queue in self._software_tx_queue:
if len(self._software_tx_queue[queue]) != 0:
self._debug("Running the queue named {}: {} items left".format(queue, len(self._software_tx_queue[queue])))
try:
[processed_items, remaining_items] = self._run_queue_once()
except Exception as error_info:
self._error("Failed to run queue: {}".format(error_info.args))
processed_items = 0
remaining_items = 0
self._debug("Completed running the queue, did {} items and {} items left (continue queue = {})".format(processed_items, remaining_items, self._should_use_queue()))
if remaining_items == 0:
# If the queue is empty and we are no longer queuing
# events, exit this function (which should be joined)
if not self._should_use_queue():
self._debug("Request to stop run_queue process, exiting")
return None
# If there are no events, wait a bit
# longer before trying again
sleep_time = 0.5
continue
if processed_items == 0:
# If we processed no items, but there are items left to
# process, back off slightly
if sleep_time == 0:
sleep_time = 0.001
else:
sleep_time = min(0.5, sleep_time * 2)
continue
# If there are more events to process, try again
sleep_time = 0
return None
def _run_queue_once(self):
to_transmit = []
remaining_items = 0
now = time.time()
with self._software_tx_queue_mutex:
for submit_queue in self._software_tx_queue:
# Determine if we should run this queue yet
if submit_queue not in self._software_tx_queue_next_time:
self._software_tx_queue_next_time[submit_queue] = now
queue_next_time = self._software_tx_queue_next_time[submit_queue]
if now < queue_next_time:
remaining_items += len(self._software_tx_queue[submit_queue])
continue
# Record how many items to pop off this queue
pop_items = 0
for item in self._software_tx_queue[submit_queue]:
pop_items += 1
# If the last item we're about to transmit requires a delay, make
# a note of it in the queue time and don't pull anything else
# from this queue
item['submit_queue'] = submit_queue
if item['post_delay'] != 0:
break
# Pop off the items to transmit in this run into a list
if pop_items != 0:
self._debug("Found {} items to transmit in the {} queue".format(pop_items, submit_queue))
while pop_items != 0:
to_transmit.append(self._software_tx_queue[submit_queue].popleft())
pop_items -= 1
remaining_items += len(self._software_tx_queue[submit_queue])
to_transmit_ordered = {}
default_syncword = None
for item in to_transmit:
syncword = item['syncword']
channel = item['channel']
format_config = item['format_config']
message = item['message']
if syncword is not None:
default_syncword = syncword
else:
syncword = default_syncword
item['syncword'] = syncword
if message is None or channel is None:
continue
key = str([syncword, channel])
if key not in to_transmit_ordered:
to_transmit_ordered[key] = []
to_transmit_ordered[key].append(item)
self._debug("Getting ready to transmit {} items".format(len(to_transmit)))
with self._get_mutex():
for (key, items) in to_transmit_ordered.items():
for item in items:
self._debug("Transmitting item {}".format(item))
syncword = item['syncword']
channel = item['channel']
format_config = item['format_config']
message = item['message']
self.transmit(message, channel, lock = False, submit_queue = None, syncword = syncword, post_delay = 0, format_config = format_config)
self._software_tx_queue_next_time[item['submit_queue']] = time.time() + item['post_delay']
return [len(to_transmit), remaining_items]
def start_listening(self, channel):
# Initialize the receiver
self.stop_listening()
# Go into listening mode
self.put_register_bits('radio_state', {
'tx_enabled': 0,
'rx_enabled': 1,
'channel': channel
})
return True
def stop_listening(self):
# Initialize the receiver
self.put_register_bits('radio_state', {
'tx_enabled': 0,
'rx_enabled': 0,
'channel': 0
})
self.put_register_bits('fifo_state', {
'clear_read': 1,
'clear_write': 1
})
return True
def _apply_packet_format_config(self, format_config):
# Apply radio format configuration difference from baseline
radio_format_config = self._get_default_register_value('format_config').copy()
# If a configuration was supplied, update what we want to apply
if format_config is not None:
radio_format_config.update(format_config)
if radio_format_config == self._last_format_config:
return radio_format_config
self._last_format_config = radio_format_config
self.put_register_bits('format_config', radio_format_config, delay = 5000)
new_config = self.get_register_bits('format_config')
self._info("Updated format_config to be {}".format(new_config))
return radio_format_config
def receive(self, channel = None, wait = False, length = None, format_config = None, wait_time = 0.1):
# If a length is supplied, assume that the packet is not length encoded
# but allow the user to override that by supplying a format config
if length is not None:
if format_config is None:
format_config = {}
if 'packet_length_encoded' not in format_config:
format_config = format_config.copy()
format_config['packet_length_encoded'] = 0
with self._get_mutex():
# Apply the current configuration, if it is already applied
# this will be a no-op
self._apply_packet_format_config(format_config)
if wait:
if channel is None:
state = self.get_register_bits('radio_state')
channel = state['channel']
self.start_listening(channel)
message = []
crc_error_count = 0
while True:
radio_status = self.get_register_bits('status')
self._debug("radio_status={}".format(radio_status))
if radio_status['crc_error'] == 1:
crc_error_count += 1
if crc_error_count > 30:
self._reinitialize()
self.start_listening(channel)
continue
crc_error_count = 0
if radio_status['packet_flag'] == 0:
if wait:
time.sleep(wait_time)
continue
else:
return None
# Data is available, read it from the FIFO register
# The first result will include the length
fifo_data = self.get_register('fifo')
if length is not None:
message_length = length
message += [fifo_data >> 8]
message_length -= 1
else:
message_length = fifo_data >> 8
if message_length == 0:
self.start_listening(channel)
continue
# Keep track of the total message length to truncate it
final_message_length = message_length
message += [fifo_data & 0xff]
message_length -= 1
# Read subsequent bytes from the FIFO register until
# there are no more bytes to read
while message_length > 0:
fifo_data = self.get_register('fifo')
message += [fifo_data >> 8, fifo_data & 0xff]
message_length -= 2
# Truncate the message to its final size, since we have
# to read in 16-bit words, we may have an extra byte
message = message[0:final_message_length]
break
return message
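# Illustrative usage sketch for the radio class above (comments only; 'Radio' is a
# placeholder for the actual class name, and the SPI bus/device numbers and config
# values are assumptions):
#
#   radio = Radio(0, 0, config={
#       'frequency': 4000000,
#       'use_software_tx_queue': True,
#       'debug_log_command': print,
#   })
#   if radio.initialize():
#       radio.set_syncword([0x258b, 0x147a])
#       radio.multi_transmit([0x01, 0x02, 0x03], channels=[4, 9], retries=3)
#       reply = radio.receive(channel=4, wait=True)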
|
logging_util.py
|
__package__ = 'archivebox'
import re
import os
import sys
import stat
import time
import argparse
from math import log
from multiprocessing import Process
from pathlib import Path
from datetime import datetime, timezone
from dataclasses import dataclass
from typing import Any, Optional, List, Dict, Union, IO, TYPE_CHECKING
if TYPE_CHECKING:
from .index.schema import Link, ArchiveResult
from .system import get_dir_size
from .util import enforce_types
from .config import (
ConfigDict,
OUTPUT_DIR,
PYTHON_ENCODING,
VERSION,
ANSI,
IS_TTY,
IN_DOCKER,
TERM_WIDTH,
SHOW_PROGRESS,
SOURCES_DIR_NAME,
stderr,
)
@dataclass
class RuntimeStats:
"""mutable stats counter for logging archiving timing info to CLI output"""
skipped: int = 0
succeeded: int = 0
failed: int = 0
parse_start_ts: Optional[datetime] = None
parse_end_ts: Optional[datetime] = None
index_start_ts: Optional[datetime] = None
index_end_ts: Optional[datetime] = None
archiving_start_ts: Optional[datetime] = None
archiving_end_ts: Optional[datetime] = None
# globals are bad, mmkay
_LAST_RUN_STATS = RuntimeStats()
def debug_dict_summary(obj: Dict[Any, Any]) -> None:
stderr(' '.join(f'{key}={str(val).ljust(6)}' for key, val in obj.items()))
def get_fd_info(fd) -> Dict[str, Any]:
NAME = fd.name[1:-1]
FILENO = fd.fileno()
MODE = os.fstat(FILENO).st_mode
IS_TTY = hasattr(fd, 'isatty') and fd.isatty()
IS_PIPE = stat.S_ISFIFO(MODE)
IS_FILE = stat.S_ISREG(MODE)
IS_TERMINAL = not (IS_PIPE or IS_FILE)
IS_LINE_BUFFERED = fd.line_buffering
IS_READABLE = fd.readable()
return {
'NAME': NAME, 'FILENO': FILENO, 'MODE': MODE,
'IS_TTY': IS_TTY, 'IS_PIPE': IS_PIPE, 'IS_FILE': IS_FILE,
'IS_TERMINAL': IS_TERMINAL, 'IS_LINE_BUFFERED': IS_LINE_BUFFERED,
'IS_READABLE': IS_READABLE,
}
# # Log debug information about stdin, stdout, and stderr
# sys.stdout.write('[>&1] this is python stdout\n')
# sys.stderr.write('[>&2] this is python stderr\n')
# debug_dict_summary(get_fd_info(sys.stdin))
# debug_dict_summary(get_fd_info(sys.stdout))
# debug_dict_summary(get_fd_info(sys.stderr))
class SmartFormatter(argparse.HelpFormatter):
"""Patched formatter that prints newlines in argparse help strings"""
def _split_lines(self, text, width):
if '\n' in text:
return text.splitlines()
return argparse.HelpFormatter._split_lines(self, text, width)
def reject_stdin(caller: str, stdin: Optional[IO]=sys.stdin) -> None:
"""Tell the user they passed stdin to a command that doesn't accept it"""
if not stdin:
return None
if IN_DOCKER:
        # when TTY is disabled in docker we can't tell if stdin is being piped in or not
        # if we try to read stdin when it's not piped we will hang indefinitely waiting for it
return None
if not stdin.isatty():
# stderr('READING STDIN TO REJECT...')
stdin_raw_text = stdin.read()
if stdin_raw_text.strip():
# stderr('GOT STDIN!', len(stdin_str))
stderr(f'[!] The "{caller}" command does not accept stdin (ignoring).', color='red')
stderr(f' Run archivebox "{caller} --help" to see usage and examples.')
stderr()
# raise SystemExit(1)
return None
def accept_stdin(stdin: Optional[IO]=sys.stdin) -> Optional[str]:
"""accept any standard input and return it as a string or None"""
if not stdin:
return None
if not stdin.isatty():
# stderr('READING STDIN TO ACCEPT...')
stdin_str = stdin.read()
if stdin_str:
# stderr('GOT STDIN...', len(stdin_str))
return stdin_str
return None
class TimedProgress:
"""Show a progress bar and measure elapsed time until .end() is called"""
def __init__(self, seconds, prefix=''):
self.SHOW_PROGRESS = SHOW_PROGRESS
if self.SHOW_PROGRESS:
self.p = Process(target=progress_bar, args=(seconds, prefix))
self.p.start()
self.stats = {'start_ts': datetime.now(timezone.utc), 'end_ts': None}
def end(self):
"""immediately end progress, clear the progressbar line, and save end_ts"""
end_ts = datetime.now(timezone.utc)
self.stats['end_ts'] = end_ts
if self.SHOW_PROGRESS:
            # terminate if we haven't already terminated
try:
# kill the progress bar subprocess
try:
                    self.p.close() # must be closed *before* it's terminated
except (KeyboardInterrupt, SystemExit):
print()
raise
except BaseException: # lgtm [py/catch-base-exception]
pass
self.p.terminate()
self.p.join()
# clear whole terminal line
try:
sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH()), ANSI['reset']))
except (IOError, BrokenPipeError):
# ignore when the parent proc has stopped listening to our stdout
pass
except ValueError:
pass
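# Minimal usage sketch for TimedProgress (based on the class API above; the timeout
# value and do_some_work() are hypothetical placeholders):
#   timer = TimedProgress(60, prefix='      ')
#   try:
#       do_some_work()
#   finally:
#       timer.end()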
@enforce_types
def progress_bar(seconds: int, prefix: str='') -> None:
"""show timer in the form of progress bar, with percentage and seconds remaining"""
chunk = '█' if PYTHON_ENCODING == 'UTF-8' else '#'
last_width = TERM_WIDTH()
chunks = last_width - len(prefix) - 20 # number of progress chunks to show (aka max bar width)
try:
for s in range(seconds * chunks):
max_width = TERM_WIDTH()
if max_width < last_width:
# when the terminal size is shrunk, we have to write a newline
# otherwise the progress bar will keep wrapping incorrectly
sys.stdout.write('\r\n')
sys.stdout.flush()
chunks = max_width - len(prefix) - 20
pct_complete = s / chunks / seconds * 100
log_pct = (log(pct_complete or 1, 10) / 2) * 100 # everyone likes faster progress bars ;)
bar_width = round(log_pct/(100/chunks))
last_width = max_width
# ████████████████████ 0.9% (1/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format(
prefix,
ANSI['green' if pct_complete < 80 else 'lightyellow'],
(chunk * bar_width).ljust(chunks),
ANSI['reset'],
round(pct_complete, 1),
round(s/chunks),
seconds,
))
sys.stdout.flush()
time.sleep(1 / chunks)
# ██████████████████████████████████ 100.0% (60/60sec)
sys.stdout.write('\r{0}{1}{2}{3} {4}% ({5}/{6}sec)'.format(
prefix,
ANSI['red'],
chunk * chunks,
ANSI['reset'],
100.0,
seconds,
seconds,
))
sys.stdout.flush()
# uncomment to have it disappear when it hits 100% instead of staying full red:
# time.sleep(0.5)
# sys.stdout.write('\r{}{}\r'.format((' ' * TERM_WIDTH()), ANSI['reset']))
# sys.stdout.flush()
except (KeyboardInterrupt, BrokenPipeError):
print()
def log_cli_command(subcommand: str, subcommand_args: List[str], stdin: Optional[str], pwd: str):
cmd = ' '.join(('archivebox', subcommand, *subcommand_args))
stderr('{black}[i] [{now}] ArchiveBox v{VERSION}: {cmd}{reset}'.format(
now=datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
VERSION=VERSION,
cmd=cmd,
**ANSI,
))
stderr('{black} > {pwd}{reset}'.format(pwd=pwd, **ANSI))
stderr()
### Parsing Stage
def log_importing_started(urls: Union[str, List[str]], depth: int, index_only: bool):
_LAST_RUN_STATS.parse_start_ts = datetime.now(timezone.utc)
print('{green}[+] [{}] Adding {} links to index (crawl depth={}){}...{reset}'.format(
_LAST_RUN_STATS.parse_start_ts.strftime('%Y-%m-%d %H:%M:%S'),
len(urls) if isinstance(urls, list) else len(urls.split('\n')),
depth,
' (index only)' if index_only else '',
**ANSI,
))
def log_source_saved(source_file: str):
print(' > Saved verbatim input to {}/{}'.format(SOURCES_DIR_NAME, source_file.rsplit('/', 1)[-1]))
def log_parsing_finished(num_parsed: int, parser_name: str):
_LAST_RUN_STATS.parse_end_ts = datetime.now(timezone.utc)
print(' > Parsed {} URLs from input ({})'.format(num_parsed, parser_name))
def log_deduping_finished(num_new_links: int):
print(' > Found {} new URLs not already in index'.format(num_new_links))
def log_crawl_started(new_links):
print()
print('{green}[*] Starting crawl of {} sites 1 hop out from starting point{reset}'.format(len(new_links), **ANSI))
### Indexing Stage
def log_indexing_process_started(num_links: int):
start_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.index_start_ts = start_ts
print()
print('{black}[*] [{}] Writing {} links to main index...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
**ANSI,
))
def log_indexing_process_finished():
end_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.index_end_ts = end_ts
def log_indexing_started(out_path: str):
if IS_TTY:
sys.stdout.write(f' > ./{Path(out_path).relative_to(OUTPUT_DIR)}')
def log_indexing_finished(out_path: str):
print(f'\r √ ./{Path(out_path).relative_to(OUTPUT_DIR)}')
### Archiving Stage
def log_archiving_started(num_links: int, resume: Optional[float]=None):
start_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.archiving_start_ts = start_ts
print()
if resume:
print('{green}[▶] [{}] Resuming archive updating for {} pages starting from {}...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
resume,
**ANSI,
))
else:
print('{green}[▶] [{}] Starting archiving of {} snapshots in index...{reset}'.format(
start_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
**ANSI,
))
def log_archiving_paused(num_links: int, idx: int, timestamp: str):
end_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.archiving_end_ts = end_ts
print()
print('\n{lightyellow}[X] [{now}] Downloading paused on link {timestamp} ({idx}/{total}){reset}'.format(
**ANSI,
now=end_ts.strftime('%Y-%m-%d %H:%M:%S'),
idx=idx+1,
timestamp=timestamp,
total=num_links,
))
print()
print(' Continue archiving where you left off by running:')
print(' archivebox update --resume={}'.format(timestamp))
def log_archiving_finished(num_links: int):
from core.models import Snapshot
end_ts = datetime.now(timezone.utc)
_LAST_RUN_STATS.archiving_end_ts = end_ts
assert _LAST_RUN_STATS.archiving_start_ts is not None
seconds = end_ts.timestamp() - _LAST_RUN_STATS.archiving_start_ts.timestamp()
if seconds > 60:
duration = '{0:.2f} min'.format(seconds / 60)
else:
duration = '{0:.2f} sec'.format(seconds)
print()
print('{}[√] [{}] Update of {} pages complete ({}){}'.format(
ANSI['green'],
end_ts.strftime('%Y-%m-%d %H:%M:%S'),
num_links,
duration,
ANSI['reset'],
))
print(' - {} links skipped'.format(_LAST_RUN_STATS.skipped))
print(' - {} links updated'.format(_LAST_RUN_STATS.succeeded + _LAST_RUN_STATS.failed))
print(' - {} links had errors'.format(_LAST_RUN_STATS.failed))
if Snapshot.objects.count() < 50:
print()
print(' {lightred}Hint:{reset} To manage your archive in a Web UI, run:'.format(**ANSI))
print(' archivebox server 0.0.0.0:8000')
def log_link_archiving_started(link: "Link", link_dir: str, is_new: bool):
# [*] [2019-03-22 13:46:45] "Log Structured Merge Trees - ben stopford"
# http://www.benstopford.com/2015/02/14/log-structured-merge-trees/
# > output/archive/1478739709
print('\n[{symbol_color}{symbol}{reset}] [{symbol_color}{now}{reset}] "{title}"'.format(
symbol_color=ANSI['green' if is_new else 'black'],
symbol='+' if is_new else '√',
now=datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
title=link.title or link.base_url,
**ANSI,
))
print(' {blue}{url}{reset}'.format(url=link.url, **ANSI))
print(' {} {}'.format(
'>' if is_new else '√',
pretty_path(link_dir),
))
def log_link_archiving_finished(link: "Link", link_dir: str, is_new: bool, stats: dict, start_ts: datetime):
total = sum(stats.values())
if stats['failed'] > 0 :
_LAST_RUN_STATS.failed += 1
elif stats['skipped'] == total:
_LAST_RUN_STATS.skipped += 1
else:
_LAST_RUN_STATS.succeeded += 1
size = get_dir_size(link_dir)
end_ts = datetime.now(timezone.utc)
duration = str(end_ts - start_ts).split('.')[0]
print(' {black}{} files ({}) in {}s {reset}'.format(size[2], printable_filesize(size[0]), duration, **ANSI))
def log_archive_method_started(method: str):
print(' > {}'.format(method))
def log_archive_method_finished(result: "ArchiveResult"):
"""quote the argument with whitespace in a command so the user can
copy-paste the outputted string directly to run the cmd
"""
# Prettify CMD string and make it safe to copy-paste by quoting arguments
quoted_cmd = ' '.join(
'"{}"'.format(arg) if ' ' in arg else arg
for arg in result.cmd
)
if result.status == 'failed':
if result.output.__class__.__name__ == 'TimeoutExpired':
duration = (result.end_ts - result.start_ts).seconds
hint_header = [
'{lightyellow}Extractor timed out after {}s.{reset}'.format(duration, **ANSI),
]
else:
hint_header = [
'{lightyellow}Extractor failed:{reset}'.format(**ANSI),
' {reset}{} {red}{}{reset}'.format(
result.output.__class__.__name__.replace('ArchiveError', ''),
result.output,
**ANSI,
),
]
# Prettify error output hints string and limit to five lines
hints = getattr(result.output, 'hints', None) or ()
if hints:
if isinstance(hints, (list, tuple, type(_ for _ in ()))):
hints = [hint.decode() for hint in hints if isinstance(hint, bytes)]
else:
if isinstance(hints, bytes):
hints = hints.decode()
hints = hints.split('\n')
hints = (
' {}{}{}'.format(ANSI['lightyellow'], line.strip(), ANSI['reset'])
for line in hints[:5] if line.strip()
)
# Collect and prefix output lines with indentation
output_lines = [
*hint_header,
*hints,
'{}Run to see full output:{}'.format(ANSI['lightred'], ANSI['reset']),
*([' cd {};'.format(result.pwd)] if result.pwd else []),
' {}'.format(quoted_cmd),
]
print('\n'.join(
' {}'.format(line)
for line in output_lines
if line
))
print()
def log_list_started(filter_patterns: Optional[List[str]], filter_type: str):
print('{green}[*] Finding links in the archive index matching these {} patterns:{reset}'.format(
filter_type,
**ANSI,
))
print(' {}'.format(' '.join(filter_patterns or ())))
def log_list_finished(links):
from .index.csv import links_to_csv
print()
print('---------------------------------------------------------------------------------------------------')
print(links_to_csv(links, cols=['timestamp', 'is_archived', 'num_outputs', 'url'], header=True, ljust=16, separator=' | '))
print('---------------------------------------------------------------------------------------------------')
print()
def log_removal_started(links: List["Link"], yes: bool, delete: bool):
print('{lightyellow}[i] Found {} matching URLs to remove.{reset}'.format(len(links), **ANSI))
if delete:
file_counts = [link.num_outputs for link in links if Path(link.link_dir).exists()]
print(
f' {len(links)} Links will be de-listed from the main index, and their archived content folders will be deleted from disk.\n'
f' ({len(file_counts)} data folders with {sum(file_counts)} archived files will be deleted!)'
)
else:
print(
' Matching links will be de-listed from the main index, but their archived content folders will remain in place on disk.\n'
' (Pass --delete if you also want to permanently delete the data folders)'
)
if not yes:
print()
print('{lightyellow}[?] Do you want to proceed with removing these {} links?{reset}'.format(len(links), **ANSI))
try:
assert input(' y/[n]: ').lower() == 'y'
except (KeyboardInterrupt, EOFError, AssertionError):
raise SystemExit(0)
def log_removal_finished(all_links: int, to_remove: int):
if all_links == 0:
print()
print('{red}[X] No matching links found.{reset}'.format(**ANSI))
else:
print()
print('{red}[√] Removed {} out of {} links from the archive index.{reset}'.format(
to_remove,
all_links,
**ANSI,
))
print(' Index now contains {} links.'.format(all_links - to_remove))
def log_shell_welcome_msg():
from .cli import list_subcommands
print('{green}# ArchiveBox Imports{reset}'.format(**ANSI))
print('{green}from core.models import Snapshot, User{reset}'.format(**ANSI))
print('{green}from archivebox import *\n {}{reset}'.format("\n ".join(list_subcommands().keys()), **ANSI))
print()
print('[i] Welcome to the ArchiveBox Shell!')
print(' https://github.com/ArchiveBox/ArchiveBox/wiki/Usage#Shell-Usage')
print()
print(' {lightred}Hint:{reset} Example use:'.format(**ANSI))
print(' print(Snapshot.objects.filter(is_archived=True).count())')
print(' Snapshot.objects.get(url="https://example.com").as_json()')
print(' add("https://example.com/some/new/url")')
### Helpers
@enforce_types
def pretty_path(path: Union[Path, str]) -> str:
"""convert paths like .../ArchiveBox/archivebox/../output/abc into output/abc"""
pwd = Path('.').resolve()
# parent = os.path.abspath(os.path.join(pwd, os.path.pardir))
return str(path).replace(str(pwd) + '/', './')
@enforce_types
def printable_filesize(num_bytes: Union[int, float]) -> str:
for count in ['Bytes','KB','MB','GB']:
if num_bytes > -1024.0 and num_bytes < 1024.0:
return '%3.1f %s' % (num_bytes, count)
num_bytes /= 1024.0
return '%3.1f %s' % (num_bytes, 'TB')
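# Illustrative outputs (not executed): printable_filesize(1024) -> '1.0 KB',
# printable_filesize(3_500_000) -> '3.3 MB'.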
@enforce_types
def printable_folders(folders: Dict[str, Optional["Link"]],
with_headers: bool=False) -> str:
return '\n'.join(
f'{folder} {link and link.url} "{link and link.title}"'
for folder, link in folders.items()
)
@enforce_types
def printable_config(config: ConfigDict, prefix: str='') -> str:
return f'\n{prefix}'.join(
f'{key}={val}'
for key, val in config.items()
if not (isinstance(val, dict) or callable(val))
)
@enforce_types
def printable_folder_status(name: str, folder: Dict) -> str:
if folder['enabled']:
if folder['is_valid']:
color, symbol, note, num_files = 'green', '√', 'valid', ''
else:
color, symbol, note, num_files = 'red', 'X', 'invalid', '?'
else:
color, symbol, note, num_files = 'lightyellow', '-', 'disabled', '-'
if folder['path']:
if Path(folder['path']).exists():
num_files = (
f'{len(os.listdir(folder["path"]))} files'
if Path(folder['path']).is_dir() else
printable_filesize(Path(folder['path']).stat().st_size)
)
else:
num_files = 'missing'
if folder.get('is_mount'):
# add symbol @ next to filecount if path is a remote filesystem mount
num_files = f'{num_files} @' if num_files else '@'
path = str(folder['path']).replace(str(OUTPUT_DIR), '.') if folder['path'] else ''
if path and ' ' in path:
path = f'"{path}"'
# if path is just a plain dot, replace it back with the full path for clarity
if path == '.':
path = str(OUTPUT_DIR)
return ' '.join((
ANSI[color],
symbol,
ANSI['reset'],
name.ljust(21),
num_files.ljust(14),
ANSI[color],
note.ljust(8),
ANSI['reset'],
path.ljust(76),
))
@enforce_types
def printable_dependency_version(name: str, dependency: Dict) -> str:
version = None
if dependency['enabled']:
if dependency['is_valid']:
color, symbol, note, version = 'green', '√', 'valid', ''
parsed_version_num = re.search(r'[\d\.]+', dependency['version'])
if parsed_version_num:
version = f'v{parsed_version_num[0]}'
if not version:
color, symbol, note, version = 'red', 'X', 'invalid', '?'
else:
color, symbol, note, version = 'lightyellow', '-', 'disabled', '-'
path = str(dependency["path"]).replace(str(OUTPUT_DIR), '.') if dependency["path"] else ''
if path and ' ' in path:
path = f'"{path}"'
return ' '.join((
ANSI[color],
symbol,
ANSI['reset'],
name.ljust(21),
version.ljust(14),
ANSI[color],
note.ljust(8),
ANSI['reset'],
path.ljust(76),
))
|
idf_monitor.py
|
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make flash" (Ctrl-T Ctrl-F)
# - Run "make app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.0"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
# Windows kludge: because the console.cancel() method doesn't
# seem to work to unblock getkey() on the Windows implementation.
#
# So we only call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix':
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
import fcntl, termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except:
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace(b"\n", b"\r\n"),
"CR": lambda c: c.replace(b"\n", b"\r"),
"LF": lambda c: c.replace(b"\r", b"\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._read_line = b""
self._gdb_buffer = b""
self._output_enabled = True
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
except:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
pass # this shouldn't happen, but sometimes port has closed in serial thread
except UnicodeEncodeError:
pass # this can happen if a non-ascii character was passed, ignoring
def handle_serial_input(self, data):
# this may need to be made more efficient, as it pushes out a byte
# at a time to the console
for b in data:
if self._output_enabled:
self.console.write_bytes(b)
if b == b'\n': # end of line
self.handle_serial_input_line(self._read_line.strip())
self._read_line = b""
else:
self._read_line += b
self.check_gdbstub_trigger(b)
def handle_serial_input_line(self, line):
for m in re.finditer(MATCH_PCADDR, line):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [ CTRL_H, 'h', 'H', '?' ]:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
self.output_enable(True)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
elif c == CTRL_Y: # Toggle output display
self.output_toggle()
elif c == CTRL_P:
yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
# to fast trigger pause without press menu key
self.serial.setDTR(False) # IO0=HIGH
self.serial.setRTS(True) # EN=LOW, chip in reset
time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
self.serial.setDTR(True) # IO0=LOW
self.serial.setRTS(False) # EN=HIGH, chip out of reset
time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
self.serial.setDTR(False) # IO0=HIGH, done
else:
red_print('--- unknown menu character {} --'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {make:7} Run 'make flash' to build & flash
--- {appmake:7} Run 'make app-flash' to build & flash app
--- {output:7} Toggle output display
--- {pause:7} Reset target into bootloader to pause app via RTS line
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
make=key_description(CTRL_F),
appmake=key_description(CTRL_A),
output=key_description(CTRL_Y),
pause=key_description(CTRL_P),
)
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to run 'make flash'.
--- Press {} to run 'make app-flash'.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A)))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [ CTRL_F, CTRL_A ]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
yellow_print("Running make %s..." % target)
p = subprocess.Popen([self.make,
target ])
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
else:
self.output_enable(True)
def lookup_pc_address(self, pc_addr):
translation = subprocess.check_output(
["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr],
cwd=".")
if not "?? ??:0" in translation:
yellow_print(translation)
def check_gdbstub_trigger(self, c):
self._gdb_buffer = self._gdb_buffer[-6:] + c # keep the last 7 characters seen
m = re.match(b"\\$(T..)#(..)", self._gdb_buffer) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(p) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
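    # Worked example (illustrative): for the gdb stop reply b"$T05#b9" the regex
    # matches with group(1) == b"T05" and group(2) == b"b9". The payload checksum is
    # (ord('T') + ord('0') + ord('5')) & 0xFF = (84 + 48 + 53) & 0xFF = 185 = 0xb9,
    # which equals int("b9", 16), so run_gdb() is launched.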
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
process = subprocess.Popen(["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file], cwd=".")
process.wait()
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(["stty", "sane"])
except:
pass # don't care if there's no stty, we tried...
self.prompt_next_action("gdb exited")
def output_enable(self, enable):
self._output_enabled = enable
def output_toggle(self):
self._output_enabled = not self._output_enabled
yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
monitor = Monitor(serial_instance, args.elf_file.name, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
monitor.main_loop()
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [ 0, 4, 2, 6, 1, 5, 3, 7 ]
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
color changes and convert these back to ANSI color codes for MSYS' terminal to display. However this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output):
self.output = output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def _output_write(self, data):
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly fails
# (but usually succeeds afterwards, it seems.)
# Ref https://github.com/espressif/esp-idf/issues/1136
for tries in range(3):
try:
self.output.write(data)
return
except IOError:
pass
def write(self, data):
for b in data:
l = len(self.matched)
if b == '\033': # ESC
self.matched = b
elif (l == 1 and b == '[') or (1 < l < 7):
self.matched += b
if self.matched == ANSI_NORMAL: # reset console
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
SetConsoleTextAttribute(self.handle, color)
else:
self._output_write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self._output_write(b)
self.matched = b''
def flush(self):
self.output.flush()
if __name__ == "__main__":
main()
|
process_connection.py
|
from multiprocessing.connection import Listener, Client
from multiprocessing import Process
from array import array
def server():
address = ('localhost', 6000) # family is deduced to be 'AF_INET'
with Listener(address, authkey=b'secret password') as listener:
with listener.accept() as conn:
print('connection accepted from', listener.last_accepted)
conn.send([2.25, None, 'junk', float])
conn.send_bytes(b'hello')
conn.send_bytes(array('i', [42, 1729]))
def client():
address = ('localhost', 6000)
with Client(address, authkey=b'secret password') as conn:
print(conn.recv()) # => [2.25, None, 'junk', float]
print(conn.recv_bytes()) # => b'hello'
arr = array('i', [0, 0, 0, 0, 0])
print(conn.recv_bytes_into(arr)) # => 8
print(arr) # => array('i', [42, 1729, 0, 0, 0])
if __name__ == "__main__":
my_server = Process(target=server)
my_client = Process(target=client)
my_server.start()
my_client.start()
|
sentence_extractor.py
|
import argparse
import json
import neuralcoref
import wikipedia
import spacy
import sys
import time
import random
import re
from os.path import exists
from multiprocessing import Lock
from SPARQLWrapper import SPARQLWrapper, JSON
from queue import Queue
from threading import Thread
from distant_supervision.es_client import ElasticClient
from distant_supervision.ds_utils import DistantSupervisionUtils
# setup the libraries
# Please replace this with a private DBpedia endpoint
DBPEDIA_SPARQL = 'https://dbpedia.org/sparql'
DBP_RES_PREFIX = 'http://dbpedia.org/resource/'
number_regex = r'\d+\.*\d*'
nlp_coref = spacy.load('en')
neuralcoref.add_to_pipe(nlp_coref)
nlp_sent = spacy.load('en')
nlp_sent.add_pipe(nlp_sent.create_pipe('sentencizer'))
nlp = spacy.load("en")
wikipedia.set_lang("en")
possessive_pronouns = ['my', 'our', 'your', 'his', 'her', 'its', 'their']
wiki_page_cache = dict()
# queues and locks for parallel processing
relation_queue = Queue()
relation_queue_lock, wiki_page_cache_lock, output_lock = Lock(), Lock(), Lock()
num_of_threads, completed_count = 40, 0
es_client = ElasticClient()
rel_contexts = {rel.strip().split('\t')[0]: rel.strip().split('\t')[1:] for rel in open('../data/dbpedia/expanded_terms.tsv')}
for rel in rel_contexts:
terms = rel_contexts[rel]
rel_contexts[rel] = [term.lower().replace('_', ' ') for term in terms]
print('{} rel contexts loaded '.format(len(rel_contexts)))
def get_antecedent(token):
if type(token) != spacy.tokens.token.Token:
return
if not token._.in_coref or token.text.lower() in possessive_pronouns:
return
token_start = token.idx
token_end = token.idx + len(token.text)
for cluster in token._.coref_clusters:
if cluster.main.text == token.text:
return
for mention in cluster.mentions:
if token_start == mention.start_char and token_end == mention.end_char:
return cluster.main.text
def resolve_corefences(sentence):
doc = nlp_coref(sentence)
new_string = ''
for token in doc:
res = get_antecedent(token)
if res:
new_string += res
else:
new_string += token.text
if token.whitespace_:
new_string += token.whitespace_
return new_string
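# Illustrative example (actual output depends on the loaded neuralcoref model):
#   resolve_corefences('Barack Obama was born in Hawaii. He was elected in 2008.')
#   could return 'Barack Obama was born in Hawaii. Barack Obama was elected in 2008.'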
def get_relation_triples(relation, limit=1000, thread_id=0):
answer_records = list()
sparql = SPARQLWrapper(DBPEDIA_SPARQL)
sparql.setQuery(" PREFIX dbo: <http://dbpedia.org/ontology/> " +
" PREFIX dbp: <http://dbpedia.org/property/> " +
" PREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#> " +
" SELECT DISTINCT ?subject ?pageID ?object ?subjectLabel ?objectLabel WHERE { "
" SELECT ?subject ?pageID ?object ?subjectLabel ?objectLabel ?subjectDegree { " +
" ?subject <" + relation + "> ?object . " +
" ?subject dbo:wikiPageID ?pageID . " +
" ?subject rdfs:label ?subjectLabel . " +
" OPTIONAL { ?object rdfs:label ?objectLabel } " +
" ?subject dbo:wikiPageLength ?pageLength . } ORDER BY DESC(?pageLength)}" +
" LIMIT " + str(limit) + " ")
print(sparql.queryString)
sparql.setReturnFormat(JSON)
try:
results = sparql.query().convert()
for result in results["results"]["bindings"]:
subject_label, object_label = None, None
subject = result['subject']['value']
subject_label = result['subjectLabel']['value']
pageID = result['pageID']['value']
if result['object']['type'] == 'typed-literal' and result['object']['datatype'] == 'http://www.w3.org/2001/XMLSchema#date':
object_type = 'date'
object_value = result['object']['value']
elif result['object']['type'] == 'typed-literal' and result['object']['datatype'] == 'http://www.w3.org/2001/XMLSchema#gYear':
object_type = 'year'
object_value = str(result['object']['value']).lstrip("0")
elif result['object']['type'] == 'typed-literal' and re.match(number_regex, result['object']['value']):
object_type = 'number'
object_value = result['object']['value']
elif result['object']['type'] == 'uri':
object_type = 'uri'
object_value = result['object']['value']
else:
object_type = 'other'
object_value = result['object']['value']
if 'objectLabel' in result:
object_label = result['objectLabel']['value']
answer_records.append([subject, pageID, object_value, object_type, subject_label, object_label])
print("t{}: {} - found {} triples!".format(thread_id, relation, len(answer_records)))
except Exception as ex:
print("SPARQL error: {}".format(ex))
return answer_records
def get_page(page_id):
with wiki_page_cache_lock:
if page_id in wiki_page_cache:
return wiki_page_cache[page_id]
try:
page = wikipedia.page(page_id)
except Exception:
try:
page = wikipedia.page(page_id.replace('_', ' '))
except Exception as ex:
print(str(ex))
return list()
paragraphs = page.content.split('\n')
sentences = list()
index = -1
for paragraph in paragraphs:
paragraph = paragraph.strip()
if paragraph == '' or paragraph.startswith('='):
continue
else:
res_para = resolve_corefences(re.sub('\(.*\)', '', paragraph))
doc = nlp_sent(res_para)
for sent in doc.sents:
sentences.append(sent.text)
index += 1
with wiki_page_cache_lock:
wiki_page_cache[page_id] = sentences
return sentences
def get_relation_sentences(relation, relation_triples, limit=1000, thread_id=0):
index = 0
relation_instances = list()
for subject_uri, page_id, object_value, object_type, subject_label, object_label in relation_triples:
try:
if not subject_uri.startswith(DBP_RES_PREFIX):
continue
index += 1
print('t{}: checking:\n\t{}\n\t{}\n\t{}\n\t{}'.format(thread_id, subject_uri, relation, object_value,
subject_uri.replace('http://dbpedia.org/resource/',
'https://en.wikipedia.org/wiki/')))
subj_alt_labels = DistantSupervisionUtils.get_link_text(sparql_endpoint=DBPEDIA_SPARQL,
dbpedia_uri=subject_uri)
subj_alt_labels_scores = CorpusGenUtils.sort_by_similarity(subject_label, subj_alt_labels)
subj_alt_labels = [term[0] for term in subj_alt_labels_scores]
if object_type == 'uri':
if not object_label:
object_label = object_value.replace(DBP_RES_PREFIX, '').replace('_', ' ')
obj_alt_labels = DistantSupervisionUtils.get_link_text(sparql_endpoint=DBPEDIA_SPARQL,
dbpedia_uri=object_value)
obj_alt_labels_scores = DistantSupervisionUtils.sort_by_similarity(object_label, obj_alt_labels)
obj_alt_labels = [term[0] for term in obj_alt_labels_scores]
elif object_type == 'number':
obj_alt_labels = DistantSupervisionUtils.get_all_number_variants(object_value)
elif object_type == 'date':
obj_alt_labels = DistantSupervisionUtils.get_all_date_variants(object_value)
else:
obj_alt_labels = [object_value]
subj_sentences = es_client.query_flexible(page_id, subj_alt_labels)
obj_sentences = es_client.query_flexible(page_id, obj_alt_labels)
final_sentences = ElasticClient.get_best_matching_setence(subj_sentences, obj_sentences, subj_alt_labels,
obj_alt_labels)
if final_sentences:
sentence = final_sentences[0][3]
print('\t {}'.format(sentence))
sent_id = final_sentences[0][5]
spacy_doc = nlp(sentence)
tokenized_sentence = [token.text.lower() for token in spacy_doc]
subject_label = final_sentences[0][1]
object_label = final_sentences[0][2]
num_verbs = 0
for token in spacy_doc:
if token.pos_ == 'VERB':
num_verbs += 1
if num_verbs == 0 or num_verbs > 4:
continue
if len(tokenized_sentence) > 50:
continue
if not sentence.endswith('.'):
continue
subject_tokens = [token.text.lower() for token in nlp(subject_label[0])]
object_tokens = [token.text.lower() for token in nlp(object_label[0])]
try:
subj_start, subj_end = tokenized_sentence.index(subject_tokens[0]), \
tokenized_sentence.index(subject_tokens[-1]) + 1
obj_start, obj_end = tokenized_sentence.index(object_tokens[0]), \
tokenized_sentence.index(object_tokens[-1]) + 1
                    # check if subject and object overlap and ignore those cases
                    if obj_start >= subj_start and obj_end <= subj_end:
                        continue
                    elif subj_start >= obj_start and subj_end <= obj_end:
                        continue
                    # check for incorrect cases where it accidentally finds random start and end tokens
                    if obj_end < obj_start or subj_end < subj_start:
continue
relation_instances.append((' '.join(tokenized_sentence[subj_start:subj_end]), subj_start, subj_end,
' '.join(tokenized_sentence[obj_start:obj_end]), obj_start, obj_end,
subject_label, object_label, object_value, object_type,
tokenized_sentence, sentence, sent_id))
print("t{}: {} ({}) - ({}/{})".format(thread_id, relation, len(relation_triples),
len(relation_instances), index))
except ValueError:
continue
if len(relation_instances) >= limit:
return relation_instances
except Exception as ex:
print("Error {}".format(str(ex)))
return relation_instances
def relation_sent_extractor_worker(thread_id, sparql_limit, sentence_limit):
global completed_count
while True:
try:
with relation_queue_lock:
if relation_queue.qsize() == 0:
break
relation = relation_queue.get()
start_time = time.time()
print('t{}: starting {}'.format(thread_id, relation))
relation_triples = get_relation_triples(relation, sparql_limit, thread_id)
relation_instances = get_relation_sentences(relation, relation_triples, sentence_limit, thread_id)
instances = list()
for inst in relation_instances:
                # index 10 of the instance tuple built in get_relation_sentences is the tokenized sentence
                token = inst[10]
h = dict(name=inst[0], id=inst[0].replace(' ', '_'), pos=[inst[1], inst[2]])
t = dict(name=inst[3], id=inst[3].replace(' ', '_'), pos=[inst[4], inst[5]])
instances.append(dict(token=token, h=h, t=t, relation=relation.replace('http://dbpedia.org/ontology/', 'dbo:')))
with open('{}.txt'.format(relation.replace('http://dbpedia.org/ontology/', 'dbo_').replace('/', '_')), 'w') as relation_file:
for inst in instances:
relation_file.write(json.dumps(inst))
relation_file.write('\n')
print("t{} - COMPLETED {}, took {:.2f} minutes.".format(thread_id, relation, (time.time()-start_time)/60))
except Exception as ex:
print("t{}\tError occurred! {}".format(thread_id, str(ex)))
with relation_queue_lock:
completed_count += 1
def original_sent_extractor_worker(thread_id, sparql_limit, sentence_limit):
global completed_count
while True:
try:
with relation_queue_lock:
if relation_queue.qsize() == 0:
break
relation = relation_queue.get()
file_name = '{}.txt'.format(relation.replace('http://dbpedia.org/ontology/', 'dbo_')
.replace('http://dbpedia.org/property/', 'dbp_')
.replace('/', '_'))
output_file = '/Users/nandana.sampath.mihindukulasooriya@ibm.com/Src/relation-linking/data/lc-qald/sent/' \
+ file_name
if exists(output_file):
continue
if exists("/Users/nandana.sampath.mihindukulasooriya@ibm.com/Src/relation-linking/data/lc-qald/sent_completed/v3/" + file_name):
continue
if exists("/Users/nandana.sampath.mihindukulasooriya@ibm.com/Src/relation-linking/data/lc-qald/sent_amr_done/v3/" + file_name):
continue
start_time = time.time()
print('t{}: starting {}'.format(thread_id, relation))
relation_triples = get_relation_triples(relation, sparql_limit, thread_id)
relation_instances = get_relation_sentences(relation, relation_triples, sentence_limit, thread_id)
output_list = list()
for relation_instance in relation_instances:
rel_ins_data = dict()
rel_ins_data['id'] = relation_instance[12]
rel_ins_data['text'] = relation_instance[11]
rel_ins_data['relation'] = relation
rel_ins_data['subject'] = relation_instance[0]
rel_ins_data['subject_labels'] = relation_instance[6]
rel_ins_data['object'] = relation_instance[8]
rel_ins_data['object_type'] = relation_instance[9]
rel_ins_data['object_labels'] = relation_instance[7]
output_list.append(rel_ins_data)
with open(output_file, 'w', encoding='utf-8') as output_file:
json.dump(output_list, output_file, indent=2)
print("t{} - COMPLETED {}, took {:.2f} minutes.".format(thread_id, relation, (time.time() - start_time) / 60))
except Exception as ex:
print("t{}\tError occurred! {}".format(thread_id, str(ex)))
with relation_queue_lock:
completed_count += 1
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--rel_file')
parser.add_argument('--output_dir')
args = parser.parse_args()
print('reading relations from "{}"'.format(args.rel_file))
# adding all relations in the rel file to a queue to be used by relation sentence extractor workers
    relation_list = [rel.strip().split('\t')[0] for rel in open(args.rel_file)]
    random.shuffle(relation_list)
    for relation in relation_list:
        relation_queue.put(relation)
print('\t {} relations found.'.format(relation_queue.qsize()))
    # limits for the number of SPARQL results (triples) and the number of sentences per relation.
    # Not every triple yields a usable sentence, so the triple limit is set higher than the sentence limit.
sparql_limit = 80000
sentence_limit = 1000
# start all workers and wait for completion
for i in range(num_of_threads):
print('starting thread: {}'.format(i))
worker = Thread(target=original_sent_extractor_worker, args=[i, sparql_limit, sentence_limit])
worker.setDaemon(True)
worker.start()
time.sleep(60)
while completed_count < num_of_threads:
time.sleep(10)
if __name__ == "__main__":
sys.exit(main())
|
HiyaCFW_Helper.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# HiyaCFW Helper
# Version 3.4.1
# Author: mondul <mondul@huyzona.com>
from tkinter import (Tk, Frame, LabelFrame, PhotoImage, Button, Entry, Checkbutton, Radiobutton,
Label, Toplevel, Scrollbar, Text, StringVar, IntVar, RIGHT, W, X, Y, DISABLED, NORMAL, SUNKEN,
END)
from tkinter.messagebox import askokcancel, showerror, showinfo, WARNING
from tkinter.filedialog import askopenfilename, askdirectory
from platform import system
from os import path, remove, chmod, listdir
from sys import exit
from threading import Thread
from queue import Queue, Empty
from hashlib import sha1
from urllib.request import urlopen
from urllib.error import URLError
from json import loads as jsonify
from subprocess import Popen
from struct import unpack_from
from shutil import rmtree, copyfile, copyfileobj
from distutils.dir_util import copy_tree, _path_created
####################################################################################################
# Thread-safe text class
class ThreadSafeText(Text):
def __init__(self, master, **options):
Text.__init__(self, master, **options)
self.queue = Queue()
self.update_me()
def write(self, line):
self.queue.put(line)
def update_me(self):
try:
while 1:
self.insert(END, str(self.queue.get_nowait()) + '\n')
self.see(END)
self.update_idletasks()
except Empty:
pass
self.after(500, self.update_me)
####################################################################################################
# Main application class
class Application(Frame):
def __init__(self, master=None):
super().__init__(master)
self.pack()
# First row
f1 = LabelFrame(self, text='NAND file with No$GBA footer', padx=10, pady=10)
# NAND Button
self.nand_mode = False
nand_icon = PhotoImage(data=('R0lGODlhEAAQAIMAAAAAADMzM2ZmZpmZmczMzP///wAAAAAAAAA'
'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAACH5BAMAAAYALAAAAAAQAB'
'AAAARG0MhJaxU4Y2sECAEgikE1CAFRhGMwSMJwBsU6frIgnR/bv'
'hTPrWUSDnGw3JGU2xmHrsvyU5xGO8ql6+S0AifPW8kCKpcpEQA7'))
self.nand_button = Button(f1, image=nand_icon, command=self.change_mode, state=DISABLED)
self.nand_button.image = nand_icon
self.nand_button.pack(side='left')
self.nand_file = StringVar()
Entry(f1, textvariable=self.nand_file, state='readonly', width=40).pack(side='left')
Button(f1, text='...', command=self.choose_nand).pack(side='left')
f1.pack(padx=10, pady=10, fill=X)
# Second row
f2 = Frame(self)
# Check box
self.twilight = IntVar()
self.twilight.set(1)
self.chk = Checkbutton(f2, text='Install latest TWiLight Menu++ on custom firmware',
variable=self.twilight)
self.chk.pack(padx=10, anchor=W)
# NAND operation frame
self.nand_frame = LabelFrame(f2, text='NAND operation', padx=10, pady=10)
self.nand_operation = IntVar()
self.nand_operation.set(0)
Radiobutton(self.nand_frame, text='Remove No$GBA footer', variable=self.nand_operation,
value=0, command=lambda: self.enable_entries(False)).pack(anchor=W)
Radiobutton(self.nand_frame, text='Add No$GBA footer', variable=self.nand_operation,
value=1, command=lambda: self.enable_entries(True)).pack(anchor=W)
fl = Frame(self.nand_frame)
self.cid_label = Label(fl, text='eMMC CID', state=DISABLED)
self.cid_label.pack(anchor=W, padx=(24, 0))
self.cid = StringVar()
self.cid_entry = Entry(fl, textvariable=self.cid, width=20, state=DISABLED)
self.cid_entry.pack(anchor=W, padx=(24, 0))
fl.pack(side='left')
fr = Frame(self.nand_frame)
self.console_id_label = Label(fr, text='Console ID', state=DISABLED)
self.console_id_label.pack(anchor=W)
self.console_id = StringVar()
self.console_id_entry = Entry(fr, textvariable=self.console_id, width=20, state=DISABLED)
self.console_id_entry.pack(anchor=W)
fr.pack(side='right')
f2.pack(fill=X)
# Third row
f3 = Frame(self)
self.start_button = Button(f3, text='Start', width=16, command=self.hiya, state=DISABLED)
self.start_button.pack(side='left', padx=(0, 5))
Button(f3, text='Quit', command=root.destroy, width=16).pack(side='left', padx=(5, 0))
f3.pack(pady=(10, 20))
self.folders = []
self.files = []
################################################################################################
def change_mode(self):
if (self.nand_mode):
self.nand_frame.pack_forget()
self.chk.pack(padx=10, anchor=W)
self.nand_mode = False
else:
if askokcancel('Warning', ('You are about to enter NAND mode. Do it only if you know '
'what you are doing. Proceed?'), icon=WARNING):
self.chk.pack_forget()
self.nand_frame.pack(padx=10, pady=(0, 10), fill=X)
self.nand_mode = True
################################################################################################
def enable_entries(self, status):
self.cid_label['state'] = (NORMAL if status else DISABLED)
self.cid_entry['state'] = (NORMAL if status else DISABLED)
self.console_id_label['state'] = (NORMAL if status else DISABLED)
self.console_id_entry['state'] = (NORMAL if status else DISABLED)
################################################################################################
def choose_nand(self):
name = askopenfilename(filetypes=( ( 'nand.bin', '*.bin' ), ( 'DSi-1.mmc', '*.mmc' ) ))
self.nand_file.set(name)
self.nand_button['state'] = (NORMAL if name != '' else DISABLED)
self.start_button['state'] = (NORMAL if name != '' else DISABLED)
################################################################################################
def hiya(self):
if not self.nand_mode:
showinfo('Info', 'Now you will be asked to choose the SD card path that will be used '
'for installing the custom firmware (or an output folder).\n\nIn order to avoid '
                'boot errors please ensure it is empty before continuing.')
self.sd_path = askdirectory()
# Exit if no path was selected
if self.sd_path == '':
return
# If adding a No$GBA footer, check if CID and ConsoleID values are OK
elif self.nand_operation.get() == 1:
cid = self.cid.get()
console_id = self.console_id.get()
# Check lengths
if len(cid) != 32:
showerror('Error', 'Bad eMMC CID')
return
elif len(console_id) != 16:
showerror('Error', 'Bad Console ID')
return
# Parse strings to hex
try:
cid = bytearray.fromhex(cid)
except ValueError:
showerror('Error', 'Bad eMMC CID')
return
try:
console_id = bytearray(reversed(bytearray.fromhex(console_id)))
except ValueError:
showerror('Error', 'Bad Console ID')
return
dialog = Toplevel(self)
# Open as dialog (parent disabled)
dialog.grab_set()
dialog.title('Status')
# Disable maximizing
dialog.resizable(0, 0)
frame = Frame(dialog, bd=2, relief=SUNKEN)
scrollbar = Scrollbar(frame)
scrollbar.pack(side=RIGHT, fill=Y)
self.log = ThreadSafeText(frame, bd=0, width=52, height=20,
yscrollcommand=scrollbar.set)
self.log.pack()
scrollbar.config(command=self.log.yview)
frame.pack()
Button(dialog, text='Close', command=dialog.destroy, width=16).pack(pady=10)
# Center in window
dialog.update_idletasks()
width = dialog.winfo_width()
height = dialog.winfo_height()
dialog.geometry('%dx%d+%d+%d' % (width, height, root.winfo_x() + (root.winfo_width() / 2) -
(width / 2), root.winfo_y() + (root.winfo_height() / 2) - (height / 2)))
# Check if we'll be adding a No$GBA footer
if self.nand_mode and self.nand_operation.get() == 1:
Thread(target=self.add_footer, args=(cid, console_id)).start()
else:
Thread(target=self.check_nand).start()
################################################################################################
def check_nand(self):
self.log.write('Checking NAND file...')
# Read the NAND file
try:
with open(self.nand_file.get(), 'rb') as f:
# Go to the No$GBA footer offset
f.seek(-64, 2)
# Read the footer's header :-)
bstr = f.read(0x10)
if bstr == b'DSi eMMC CID/CPU':
# Read the CID
bstr = f.read(0x10)
self.cid.set(bstr.hex().upper())
self.log.write('- eMMC CID: ' + self.cid.get())
# Read the console ID
bstr = f.read(8)
self.console_id.set(bytearray(reversed(bstr)).hex().upper())
self.log.write('- Console ID: ' + self.console_id.get())
# Check we are removing the No$GBA footer
if self.nand_mode:
Thread(target=self.remove_footer).start()
else:
Thread(target=self.get_latest_hiyacfw).start()
else:
self.log.write('ERROR: No$GBA footer not found')
except IOError as e:
print(e)
self.log.write('ERROR: Could not open the file ' +
path.basename(self.nand_file.get()))
################################################################################################
def get_latest_hiyacfw(self):
# Try to use already downloaded HiyaCFW archive
filename = 'HiyaCFW.7z'
try:
if path.isfile(filename):
self.log.write('\nPreparing HiyaCFW...')
else:
self.log.write('\nDownloading latest HiyaCFW release...')
conn = urlopen('https://api.github.com/repos/RocketRobz/hiyaCFW/releases/latest')
latest = jsonify(conn.read().decode('utf-8'))
conn.close()
with urlopen(latest['assets'][0]
['browser_download_url']) as src, open(filename, 'wb') as dst:
copyfileobj(src, dst)
self.log.write('- Extracting HiyaCFW archive...')
proc = Popen([ _7z, 'x', '-bso0', '-y', filename, 'for PC', 'for SDNAND SD card' ])
ret_val = proc.wait()
if ret_val == 0:
self.files.append(filename)
self.folders.append('for PC')
self.folders.append('for SDNAND SD card')
# Got to decrypt NAND if bootloader.nds is present
Thread(target=self.decrypt_nand if path.isfile('bootloader.nds')
else self.extract_bios).start()
else:
self.log.write('ERROR: Extractor failed')
except (URLError, IOError) as e:
print(e)
self.log.write('ERROR: Could not get HiyaCFW')
except OSError as e:
print(e)
            self.log.write('ERROR: Could not execute ' + _7z)
################################################################################################
def extract_bios(self):
self.log.write('\nExtracting ARM7/ARM9 BIOS from NAND...')
try:
proc = Popen([ twltool, 'boot2', '--in', self.nand_file.get() ])
ret_val = proc.wait()
if ret_val == 0:
# Hash arm7.bin
sha1_hash = sha1()
with open('arm7.bin', 'rb') as f:
sha1_hash.update(f.read())
self.log.write('- arm7.bin SHA1:\n ' +
sha1_hash.digest().hex().upper())
# Hash arm9.bin
sha1_hash = sha1()
with open('arm9.bin', 'rb') as f:
sha1_hash.update(f.read())
self.log.write('- arm9.bin SHA1:\n ' +
sha1_hash.digest().hex().upper())
self.files.append('arm7.bin')
self.files.append('arm9.bin')
Thread(target=self.patch_bios).start()
else:
self.log.write('ERROR: Extractor failed')
Thread(target=self.clean, args=(True,)).start()
except OSError as e:
print(e)
            self.log.write('ERROR: Could not execute ' + twltool)
Thread(target=self.clean, args=(True,)).start()
################################################################################################
def patch_bios(self):
self.log.write('\nPatching ARM7/ARM9 BIOS...')
try:
self.patcher(path.join('for PC', 'bootloader files', 'bootloader arm7 patch.ips'),
'arm7.bin')
self.patcher(path.join('for PC', 'bootloader files', 'bootloader arm9 patch.ips'),
'arm9.bin')
# Hash arm7.bin
sha1_hash = sha1()
with open('arm7.bin', 'rb') as f:
sha1_hash.update(f.read())
self.log.write('- Patched arm7.bin SHA1:\n ' +
sha1_hash.digest().hex().upper())
# Hash arm9.bin
sha1_hash = sha1()
with open('arm9.bin', 'rb') as f:
sha1_hash.update(f.read())
self.log.write('- Patched arm9.bin SHA1:\n ' +
sha1_hash.digest().hex().upper())
Thread(target=self.arm9_prepend).start()
except IOError as e:
print(e)
self.log.write('ERROR: Could not patch BIOS')
Thread(target=self.clean, args=(True,)).start()
except Exception as e:
print(e)
self.log.write('ERROR: Invalid patch header')
Thread(target=self.clean, args=(True,)).start()
################################################################################################
def arm9_prepend(self):
self.log.write('\nPrepending data to ARM9 BIOS...')
try:
with open('arm9.bin', 'rb') as f:
data = f.read()
with open('arm9.bin', 'wb') as f:
with open(path.join('for PC', 'bootloader files',
'bootloader arm9 append to start.bin'), 'rb') as pre:
f.write(pre.read())
f.write(data)
# Hash arm9.bin
sha1_hash = sha1()
with open('arm9.bin', 'rb') as f:
sha1_hash.update(f.read())
self.log.write('- Prepended arm9.bin SHA1:\n ' +
sha1_hash.digest().hex().upper())
Thread(target=self.make_bootloader).start()
except IOError as e:
print(e)
self.log.write('ERROR: Could not prepend data to ARM9 BIOS')
Thread(target=self.clean, args=(True,)).start()
################################################################################################
def make_bootloader(self):
self.log.write('\nGenerating new bootloader...')
exe = (path.join('for PC', 'bootloader files', 'ndstool.exe') if sysname == 'Windows' else
path.join(sysname, 'ndsblc'))
try:
proc = Popen([ exe, '-c', 'bootloader.nds', '-9', 'arm9.bin', '-7', 'arm7.bin', '-t',
path.join('for PC', 'bootloader files', 'banner.bin'), '-h',
path.join('for PC', 'bootloader files', 'header.bin') ])
ret_val = proc.wait()
if ret_val == 0:
self.files.append('bootloader.nds')
# Hash bootloader.nds
sha1_hash = sha1()
with open('bootloader.nds', 'rb') as f:
sha1_hash.update(f.read())
self.log.write('- bootloader.nds SHA1:\n ' +
sha1_hash.digest().hex().upper())
Thread(target=self.decrypt_nand).start()
else:
self.log.write('ERROR: Generator failed')
Thread(target=self.clean, args=(True,)).start()
except OSError as e:
print(e)
self.log.write('ERROR: Could not execute ' + exe)
Thread(target=self.clean, args=(True,)).start()
################################################################################################
def decrypt_nand(self):
self.log.write('\nDecrypting NAND...')
try:
proc = Popen([ twltool, 'nandcrypt', '--in', self.nand_file.get(), '--out',
self.console_id.get() + '.img' ])
ret_val = proc.wait()
print("\n")
if ret_val == 0:
self.files.append(self.console_id.get() + '.img')
Thread(target=self.win_extract_nand if sysname == 'Windows'
else self.extract_nand).start()
else:
self.log.write('ERROR: Decryptor failed')
Thread(target=self.clean, args=(True,)).start()
except OSError as e:
print(e)
            self.log.write('ERROR: Could not execute ' + twltool)
Thread(target=self.clean, args=(True,)).start()
################################################################################################
def win_extract_nand(self):
self.log.write('\nExtracting files from NAND...')
try:
proc = Popen([ _7z, 'x', '-bso0', '-y', self.console_id.get() + '.img', '0.fat' ])
ret_val = proc.wait()
if ret_val == 0:
self.files.append('0.fat')
proc = Popen([ _7z, 'x', '-bso0', '-y', '-o' + self.sd_path, '0.fat' ])
ret_val = proc.wait()
if ret_val == 0:
Thread(target=self.get_launcher).start()
else:
self.log.write('ERROR: Extractor failed, please update 7-Zip')
Thread(target=self.clean, args=(True,)).start()
else:
self.log.write('ERROR: Extractor failed')
Thread(target=self.clean, args=(True,)).start()
except OSError as e:
print(e)
            self.log.write('ERROR: Could not execute ' + _7z)
Thread(target=self.clean, args=(True,)).start()
################################################################################################
def extract_nand(self):
self.log.write('\nExtracting files from NAND...')
try:
# DSi first partition offset: 0010EE00h
proc = Popen([ fatcat, '-O', '1109504', '-x', self.sd_path,
self.console_id.get() + '.img' ])
ret_val = proc.wait()
if ret_val == 0:
Thread(target=self.get_launcher).start()
else:
self.log.write('ERROR: Extractor failed')
Thread(target=self.clean, args=(True,)).start()
except OSError as e:
print(e)
            self.log.write('ERROR: Could not execute ' + fatcat)
Thread(target=self.clean, args=(True,)).start()
################################################################################################
def get_launcher(self):
app = self.detect_region()
# Stop if no supported region was found
if not app:
Thread(target=self.clean, args=(True,)).start()
return
# Delete contents of the launcher folder as it will be replaced by the one from HiyaCFW
launcher_folder = path.join(self.sd_path, 'title', '00030017', app, 'content')
# Walk through all files in the launcher content folder
for file in listdir(launcher_folder):
file = path.join(launcher_folder, file)
# Set current file as read/write in case we are in Windows and unlaunch was installed
# in the NAND. For Linux and MacOS fatcat doesn't keep file attributes
if sysname == 'Windows':
chmod(file, 438)
# Delete current file
remove(file)
# Try to use already downloaded launcher
try:
if path.isfile(self.launcher_region):
self.log.write('\nPreparing ' + self.launcher_region + ' launcher...')
else:
self.log.write('\nDownloading ' + self.launcher_region + ' launcher...')
with urlopen('https://raw.githubusercontent.com'
'/mondul/HiyaCFW-Helper/master/launchers/' +
self.launcher_region) as src, open(self.launcher_region, 'wb') as dst:
copyfileobj(src, dst)
self.log.write('- Decrypting launcher...')
proc = Popen([ _7z, 'x', '-bso0', '-y', '-p' + app, self.launcher_region,
'00000002.app' ])
ret_val = proc.wait()
if ret_val == 0:
self.files.append(self.launcher_region)
self.files.append('00000002.app')
# Hash 00000002.app
sha1_hash = sha1()
with open('00000002.app', 'rb') as f:
sha1_hash.update(f.read())
self.log.write('- Patched launcher SHA1:\n ' +
sha1_hash.digest().hex().upper())
Thread(target=self.install_hiyacfw, args=(launcher_folder,)).start()
else:
self.log.write('ERROR: Extractor failed')
Thread(target=self.clean, args=(True,)).start()
except IOError as e:
print(e)
self.log.write('ERROR: Could not download ' + self.launcher_region + ' launcher')
Thread(target=self.clean, args=(True,)).start()
except OSError as e:
print(e)
            self.log.write('ERROR: Could not execute ' + _7z)
Thread(target=self.clean, args=(True,)).start()
################################################################################################
def install_hiyacfw(self, launcher_folder):
self.log.write('\nCopying HiyaCFW files...')
# Reset copied files cache
_path_created.clear()
copy_tree('for SDNAND SD card', self.sd_path, update=1)
copyfile('bootloader.nds', path.join(self.sd_path, 'hiya', 'bootloader.nds'))
copyfile('00000002.app', path.join(launcher_folder, '00000002.app'))
Thread(target=self.get_latest_twilight if self.twilight.get() == 1 else self.clean).start()
################################################################################################
def get_latest_twilight(self):
filename = 'TWiLightMenu.7z'
try:
self.log.write('\nDownloading latest TWiLight Menu++ release...')
#conn = urlopen('https://api.github.com/repos/DS-Homebrew/TWiLightMenu/releases/'
# 'latest')
#latest = jsonify(conn.read().decode('utf-8'))
#conn.close()
with urlopen('https://github.com/DS-Homebrew/TWiLightMenu/releases/latest/download/TWiLightMenu.7z') as src, open(filename, 'wb') as dst:
copyfileobj(src, dst)
self.log.write('- Extracting ' + filename[:-3] + ' archive...')
proc = Popen([ _7z, 'x', '-bso0', '-y', filename, '_nds', 'DSi - CFW users',
'DSi&3DS - SD card users', 'roms' ])
ret_val = proc.wait()
if ret_val == 0:
self.files.append(filename)
self.folders.append('DSi - CFW users')
self.folders.append('_nds')
self.folders.append('DSi&3DS - SD card users')
self.folders.append('roms')
Thread(target=self.install_twilight, args=(filename[:-3],)).start()
else:
self.log.write('ERROR: Extractor failed')
Thread(target=self.clean, args=(True,)).start()
except (URLError, IOError) as e:
print(e)
self.log.write('ERROR: Could not get TWiLight Menu++')
Thread(target=self.clean, args=(True,)).start()
except OSError as e:
print(e)
            self.log.write('ERROR: Could not execute ' + _7z)
Thread(target=self.clean, args=(True,)).start()
################################################################################################
def install_twilight(self, name):
self.log.write('\nCopying ' + name + ' files...')
copy_tree(path.join('DSi - CFW users', 'SDNAND root'), self.sd_path, update=1)
copy_tree('_nds', path.join(self.sd_path, '_nds'))
copy_tree('DSi&3DS - SD card users', self.sd_path, update=1)
copy_tree('roms', path.join(self.sd_path, 'roms'))
Thread(target=self.clean).start()
################################################################################################
def clean(self, err=False):
self.log.write('\nCleaning...')
while len(self.folders) > 0:
rmtree(self.folders.pop(), ignore_errors=True)
while len(self.files) > 0:
try:
remove(self.files.pop())
except:
pass
if err:
self.log.write('Done')
return
self.log.write('Done!\nEject your SD card and insert it into your DSi')
################################################################################################
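    # Applies an IPS patch: after the 5-byte 'PATCH' magic, the file is a sequence of
    # records (3-byte big-endian offset, 2-byte size, then <size> bytes of data; a size
    # of 0 denotes an RLE record: a 2-byte run length followed by one byte to repeat),
    # terminated by the 3-byte 'EOF' marker, optionally followed by a 3-byte truncate length.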
def patcher(self, patchpath, filepath):
patch_size = path.getsize(patchpath)
patchfile = open(patchpath, 'rb')
if patchfile.read(5) != b'PATCH':
patchfile.close()
raise Exception()
target = open(filepath, 'r+b')
# Read First Record
r = patchfile.read(3)
while patchfile.tell() not in [ patch_size, patch_size - 3 ]:
# Unpack 3-byte pointers.
offset = self.unpack_int(r)
# Read size of data chunk
r = patchfile.read(2)
size = self.unpack_int(r)
if size == 0: # RLE Record
r = patchfile.read(2)
rle_size = self.unpack_int(r)
data = patchfile.read(1) * rle_size
else:
data = patchfile.read(size)
# Write to file
target.seek(offset)
target.write(data)
# Read Next Record
r = patchfile.read(3)
if patch_size - 3 == patchfile.tell():
trim_size = self.unpack_int(patchfile.read(3))
target.truncate(trim_size)
# Cleanup
target.close()
patchfile.close()
################################################################################################
def unpack_int(self, bstr):
# Read an n-byte big-endian integer from a byte string
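        # e.g. unpack_int(b'\x01\x00') == 256: the byte string is left-padded with
        # zero bytes to 4 bytes and read as a big-endian unsigned integer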
( ret_val, ) = unpack_from('>I', b'\x00' * (4 - len(bstr)) + bstr)
return ret_val
################################################################################################
def detect_region(self):
REGION_CODES = {
'484e4145': 'USA',
'484e414a': 'JAP',
'484e414b': 'KOR',
'484e4150': 'EUR',
'484e4155': 'AUS'
}
# Autodetect console region
try:
for app in listdir(path.join(self.sd_path, 'title', '00030017')):
for file in listdir(path.join(self.sd_path, 'title', '00030017', app, 'content')):
if file.endswith('.app'):
try:
self.log.write('- Detected ' + REGION_CODES[app] + ' console NAND dump')
self.launcher_region = REGION_CODES[app]
return app
except KeyError:
self.log.write('ERROR: Unsupported console region')
return False
self.log.write('ERROR: Could not detect console region')
except OSError as e:
self.log.write('ERROR: ' + e.strerror + ': ' + e.filename)
return False
################################################################################################
def remove_footer(self):
self.log.write('\nRemoving No$GBA footer...')
file = self.console_id.get() + '-no-footer.bin'
try:
copyfile(self.nand_file.get(), file)
# Back-up footer info
with open(self.console_id.get() + '-info.txt', 'w') as f:
f.write('eMMC CID: ' + self.cid.get() + '\r\n')
f.write('Console ID: ' + self.console_id.get() + '\r\n')
with open(file, 'r+b') as f:
# Go to the No$GBA footer offset
f.seek(-64, 2)
# Remove footer
f.truncate()
self.log.write('\nDone!\nModified NAND stored as\n' + file +
'\nStored footer info in ' + self.console_id.get() + '-info.txt')
except IOError as e:
print(e)
self.log.write('ERROR: Could not open the file ' +
path.basename(self.nand_file.get()))
################################################################################################
def add_footer(self, cid, console_id):
self.log.write('Adding No$GBA footer...')
file = self.console_id.get() + '-footer.bin'
try:
copyfile(self.nand_file.get(), file)
with open(file, 'r+b') as f:
# Go to the No$GBA footer offset
f.seek(-64, 2)
# Read the footer's header :-)
bstr = f.read(0x10)
# Check if it already has a footer
if bstr == b'DSi eMMC CID/CPU':
self.log.write('ERROR: File already has a No$GBA footer')
f.close()
remove(file)
return
# Go to the end of file
f.seek(0, 2)
# Write footer
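                # 64-byte No$GBA footer layout: 16-byte magic string, 16-byte eMMC CID,
                # 8-byte console ID (byte-reversed), then 24 zero bytes of padding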
f.write(b'DSi eMMC CID/CPU')
f.write(cid)
f.write(console_id)
f.write(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
self.log.write('\nDone!\nModified NAND stored as\n' + file)
except IOError as e:
print(e)
self.log.write('ERROR: Could not open the file ' +
path.basename(self.nand_file.get()))
####################################################################################################
# Entry point
print('Opening HiyaCFW Helper...')
sysname = system()
print('Initializing GUI...')
root = Tk()
if sysname == 'Windows':
from winreg import OpenKey, QueryValueEx, HKEY_LOCAL_MACHINE, KEY_READ, KEY_WOW64_64KEY
print('Searching for 7-Zip in the Windows registry...')
try:
with OpenKey(HKEY_LOCAL_MACHINE, 'SOFTWARE\\7-Zip', 0, KEY_READ | KEY_WOW64_64KEY) as hkey:
_7z = path.join(QueryValueEx(hkey, 'Path')[0], '7z.exe')
if not path.exists(_7z):
raise WindowsError
except WindowsError:
print('Searching for 7-Zip in the 32-bit Windows registry...')
try:
with OpenKey(HKEY_LOCAL_MACHINE, 'SOFTWARE\\7-Zip') as hkey:
_7z = path.join(QueryValueEx(hkey, 'Path')[0], '7z.exe')
if not path.exists(_7z):
raise WindowsError
except WindowsError:
root.withdraw()
showerror('Error', 'This script needs 7-Zip to run. Please install it.')
root.destroy()
exit(1)
twltool = path.join('for PC', 'twltool.exe')
else: # Linux and MacOS
twltool = path.join(sysname, 'twltool')
if not path.exists(twltool):
root.withdraw()
showerror('Error', 'TWLTool not found. Please make sure the ' + sysname +
' folder is at the same location as this script, or run it again from the terminal:' +
"\n\n$ ./HiyaCFW_Helper.py")
root.destroy()
exit(1)
fatcat = path.join(sysname, 'fatcat')
_7z = path.join(sysname, '7za')
root.title('HiyaCFW Helper v3.4.1')
# Disable maximizing
root.resizable(0, 0)
# Center in window
root.eval('tk::PlaceWindow %s center' % root.winfo_toplevel())
app = Application(master=root)
app.mainloop()
|
plot_from_pp_interp_p_levs_temp_geop_sp_hum_diff.py
|
"""
Load pp, plot and save
"""
import os, sys
#%matplotlib inline
#%pylab inline
import matplotlib
matplotlib.use('Agg')
# Must be before importing matplotlib.pyplot or pylab!
from matplotlib import rc
from matplotlib.font_manager import FontProperties
from matplotlib import rcParams
from mpl_toolkits.basemap import Basemap
rc('font', family = 'serif', serif = 'cmr10')
rc('text', usetex=True)
rcParams['text.usetex']=True
rcParams['text.latex.unicode']=True
rcParams['font.family']='serif'
rcParams['font.serif']='cmr10'
import matplotlib.pyplot as plt
#from matplotlib import figure
import matplotlib as mpl
import matplotlib.cm as mpl_cm
import numpy as np
import iris
import iris.coords as coords
import iris.quickplot as qplt
import iris.plot as iplt
import iris.coord_categorisation
import iris.unit as unit
import cartopy.crs as ccrs
import cartopy.io.img_tiles as cimgt
import matplotlib.ticker as mticker
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import datetime
from mpl_toolkits.basemap import cm
import imp
from textwrap import wrap
import re
import iris.analysis.cartography
import math
from dateutil import tz
#import multiprocessing as mp
import gc
import types
import pdb
save_path='/nfs/a90/eepdw/Figures/EMBRACE/'
model_name_convert_title = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/model_name_convert_title.py')
unrotate = imp.load_source('util', '/nfs/see-fs-01_users/eepdw/python_scripts/modules/unrotate_pole.py')
#pp_file = ''
plot_diags=['temp', 'sp_hum']
plot_levels = [925, 850, 700, 500]
experiment_ids = ['dklyu']
#experiment_ids = ['djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
difference_id = 'dkmbq'
pp_file_path = '/nfs/a90/eepdw/Data/EMBRACE/'
diffidmin1 = difference_id[:-1]
degs_crop_top = 1.7
degs_crop_bottom = 2.5
from iris.coord_categorisation import add_categorised_coord
def add_hour_of_day(cube, coord, name='hour'):
add_categorised_coord(cube, name, coord,
lambda coord, x: coord.units.num2date(x).hour)
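# Usage sketch (illustrative, nothing is executed here): add_hour_of_day attaches an
# auxiliary 'hour' coordinate derived from the 'time' coordinate, which is what allows
# the hourly extractions further down, e.g.:
#   add_hour_of_day(pcube, pcube.coord('time'))
#   midnight_slice = pcube.extract(iris.Constraint(hour=0))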
figprops = dict(figsize=(8,8), dpi=100)
#cmap=cm.s3pcpn_l
u = unit.Unit('hours since 1970-01-01 00:00:00',calendar='gregorian')
dx, dy = 10, 10
divisor=10 # for lat/lon rounding
lon_high = 101.866
lon_low = 64.115
lat_high = 33.
lat_low =-6.79
lon_low_tick=lon_low -(lon_low%divisor)
lon_high_tick=math.ceil(lon_high/divisor)*divisor
lat_low_tick=lat_low - (lat_low%divisor)
lat_high_tick=math.ceil(lat_high/divisor)*divisor
def main():
#experiment_ids = ['djznw', 'djzny', 'djznq', 'djzns', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkbhu', 'djznu', 'dkhgu' ] # All 12
#experiment_ids = ['djzny', 'djzns', 'djznw', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq' ]
#experiment_ids = ['djzny', 'djzns', 'djznu', 'dkbhu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkhgu']
#experiment_ids = ['djzns', 'djznu', 'dkbhu', 'dkjxq', 'dklyu', 'dkmbq', 'dklwu', 'dklzq', 'dkhgu']
#experiment_ids = ['dklzq', 'dkmbq', 'dkjxq', 'dklwu', 'dklyu', 'djzns']
#experiment_ids = ['djzns' ]
#experiment_ids = ['dkhgu','dkjxq']
for p_level in plot_levels:
# Set pressure height contour min/max
if p_level == 925:
clevgh_min = -24.
clevgh_max = 24.
elif p_level == 850:
clevgh_min = -24.
            clevgh_max = 24.
elif p_level == 700:
clevgh_min = -24.
            clevgh_max = 24.
elif p_level == 500:
clevgh_min = -24.
clevgh_max = 24.
else:
print 'Contour min/max not set for this pressure level'
# Set potential temperature min/max
if p_level == 925:
clevpt_min = -3.
clevpt_max = 100.
elif p_level == 850:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 700:
clevpt_min = -3.
clevpt_max = 3.
elif p_level == 500:
clevpt_min = -3.
clevpt_max = 3.
else:
print 'Potential temperature min/max not set for this pressure level'
# Set specific humidity min/max
if p_level == 925:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 850:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 700:
clevsh_min = -0.0025
clevsh_max = 0.0025
elif p_level == 500:
clevsh_min = -0.0025
clevsh_max = 0.0025
else:
print 'Specific humidity min/max not set for this pressure level'
clevs_lin = np.linspace(clevgh_min, clevgh_max, num=24)
p_level_constraint = iris.Constraint(pressure=p_level)
height_pp_diff = '%s_408_on_p_levs_mean_by_hour.pp' % difference_id
height_pfile_diff = '%s%s/%s/%s' % (pp_file_path, diffidmin1, difference_id, height_pp_diff)
height_cube_diff = iris.load_cube(height_pfile_diff, p_level_constraint)
for plot_diag in plot_diags:
pp_file_diff = '%s_%s_on_p_levs_mean_by_hour.pp' % (difference_id, plot_diag)
pfile_diff = '%s%s/%s/%s' % (pp_file_path, diffidmin1, difference_id, pp_file_diff)
cube_diff = iris.load_cube(pfile_diff, p_level_constraint )
for experiment_id in experiment_ids:
expmin1 = experiment_id[:-1]
pp_file = '%s_%s_on_p_levs_mean_by_hour.pp' % (experiment_id, plot_diag)
pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, pp_file)
pcube = iris.load_cube(pfile, p_level_constraint)
#cube=iris.analysis.maths.multiply(pcube,3600)
# For each hour in cube
height_pp_file = '%s_408_on_p_levs_mean_by_hour.pp' % (experiment_id)
height_pfile = '%s%s/%s/%s' % (pp_file_path, expmin1, experiment_id, height_pp_file)
height_cube = iris.load_cube(height_pfile, p_level_constraint)
#time_coords = cube_f.coord('time')
add_hour_of_day(pcube, pcube.coord('time'))
add_hour_of_day(cube_diff, cube_diff.coord('time'))
add_hour_of_day(height_cube, height_cube.coord('time'))
add_hour_of_day(height_cube_diff, height_cube_diff.coord('time'))
#pcube.remove_coord('time')
#cube_diff.remove_coord('time')
#height_cube.remove_coord('time')
#height_cube_diff.remove_coord('time')
#p_cube_difference = iris.analysis.maths.subtract(pcube, cube_diff, dim='hour')
#height_cube_difference = iris.analysis.maths.subtract(height_cube, height_cube_diff, dim='hour')
#pdb.set_trace()
#del height_cube, pcube, height_cube_diff, cube_diff
for t, time_cube in enumerate(pcube.slices(['grid_latitude', 'grid_longitude'])):
#pdb.set_trace()
cube_diff_slice = cube_diff.extract(iris.Constraint(hour=time_cube.coord('hour').points))
p_cube_difference = time_cube - cube_diff_slice
#pdb.set_trace()
print time_cube
time_cube_408 = height_cube.extract(iris.Constraint(hour=time_cube.coord('hour').points))
height_cube_diff_slice = height_cube_diff.extract(iris.Constraint(hour=time_cube.coord('hour').points))
height_cube_difference = time_cube_408 - height_cube_diff_slice
# Get time of averagesfor plot title
h = u.num2date(np.array(time_cube.coord('hour').points, dtype=float)[0]).strftime('%H%M')
#Convert to India time
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('Asia/Kolkata')
h_utc = u.num2date(np.array(time_cube.coord('hour').points, dtype=float)[0]).replace(tzinfo=from_zone)
h_local = h_utc.astimezone(to_zone).strftime('%H%M')
fig = plt.figure(**figprops)
cmap=plt.cm.RdBu_r
ax = plt.axes(projection=ccrs.PlateCarree(), extent=(lon_low,lon_high,lat_low+degs_crop_bottom,lat_high-degs_crop_top))
m =\
Basemap(llcrnrlon=lon_low,llcrnrlat=lat_low,urcrnrlon=lon_high,urcrnrlat=lat_high, rsphere = 6371229)
#pdb.set_trace()
lat = p_cube_difference.coord('grid_latitude').points
lon = p_cube_difference.coord('grid_longitude').points
cs = p_cube_difference.coord_system('CoordSystem')
lons, lats = np.meshgrid(lon, lat)
lons, lats = iris.analysis.cartography.unrotate_pole\
(lons,lats, cs.grid_north_pole_longitude, cs.grid_north_pole_latitude)
x,y = m(lons,lats)
if plot_diag=='temp':
min_contour = clevpt_min
max_contour = clevpt_max
cb_label='K'
main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), potential temperature (colours),\
and wind (vectors) %s UTC %s IST' % (h, h_local)
tick_interval=1
elif plot_diag=='sp_hum':
min_contour = clevsh_min
max_contour = clevsh_max
cb_label='kg/kg'
                        main_title='8km Explicit model (dklyu) minus 8km parametrised model geopotential height (grey contours), specific humidity (colours),\
 and wind (vectors) %s UTC %s IST' % (h, h_local)
tick_interval=0.0005
clevs = np.linspace(min_contour, max_contour, 32)
clevs = np.linspace(-3, 3, 32)
cont = plt.contourf(x,y,p_cube_difference.data, clevs, cmap=cmap, extend='both')
#cont = iplt.contourf(time_cube, cmap=cmap, extend='both')
cs_lin = iplt.contour(height_cube_difference, clevs_lin,colors='#262626',linewidths=1.)
plt.clabel(cs_lin, fontsize=14, fmt='%d', color='black')
#del time_cube
#plt.clabel(cont, fmt='%d')
#ax.stock_img()
ax.coastlines(resolution='110m', color='#262626')
gl = ax.gridlines(draw_labels=True,linewidth=0.5, color='#262626', alpha=0.5, linestyle='--')
gl.xlabels_top = False
gl.ylabels_right = False
#gl.xlines = False
dx, dy = 10, 10
gl.xlocator = mticker.FixedLocator(range(int(lon_low_tick),int(lon_high_tick)+dx,dx))
gl.ylocator = mticker.FixedLocator(range(int(lat_low_tick),int(lat_high_tick)+dy,dy))
gl.xformatter = LONGITUDE_FORMATTER
gl.yformatter = LATITUDE_FORMATTER
gl.xlabel_style = {'size': 12, 'color':'#262626'}
#gl.xlabel_style = {'color': '#262626', 'weight': 'bold'}
gl.ylabel_style = {'size': 12, 'color':'#262626'}
cbar = fig.colorbar(cont, orientation='horizontal', pad=0.05, extend='both')
cbar.set_label('%s' % cb_label, fontsize=10, color='#262626')
#cbar.set_label(time_cube.units, fontsize=10, color='#262626')
cbar.set_ticks(np.arange(min_contour, max_contour+tick_interval,tick_interval))
ticks = (np.arange(min_contour, max_contour+tick_interval,tick_interval))
cbar.set_ticklabels(['${%.1f}$' % i for i in ticks])
cbar.ax.tick_params(labelsize=10, color='#262626')
#main_title='Mean Rainfall for EMBRACE Period -%s UTC (%s IST)' % (h, h_local)
#main_title=time_cube.standard_name.title().replace('_',' ')
#model_info = re.sub(r'[(\']', ' ', model_info)
#model_info = re.sub(r'[\',)]', ' ', model_info)
#print model_info
file_save_name = '%s_%s_%s_diff_from_%s_%s' % (experiment_id, plot_diag, p_level, difference_id, h)
save_dir = '%s%s/%s' % (save_path, experiment_id, plot_diag)
if not os.path.exists('%s' % save_dir): os.makedirs('%s' % (save_dir))
#plt.show()
fig.savefig('%s/%s_notitle.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
plt.title('%s UTC %s IST' % (h, h_local))
fig.savefig('%s/%s_short_title.png' % (save_dir, file_save_name) , format='png', bbox_inches='tight')
model_info=re.sub('(.{68} )', '\\1\n', str(model_name_convert_title.main(experiment_id)), 0, re.DOTALL)
plt.title('\n'.join(wrap('%s\n%s' % (main_title, model_info), 1000,replace_whitespace=False)), fontsize=16)
fig.savefig('%s/%s.png' % (save_dir, file_save_name), format='png', bbox_inches='tight')
fig.clf()
plt.close()
#del time_cube
gc.collect()
if __name__ == '__main__':
main()
#proc=mp.Process(target=worker)
#proc.daemon=True
#proc.start()
#proc.join()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
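# bufreverse() byte-swaps each 32-bit word in place, while wordreverse() reverses the
# order of the 32-bit words; the miner uses them to reorder the getwork hex data and the
# resulting hashes before comparing against the target.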
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
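	# CONFIG-FILE is a plain text file of key=value lines; lines starting with '#'
	# are ignored. Recognized keys (see the defaults below): host, port, threads,
	# hashmeter, scantime, plus the required rpcuser and rpcpass, e.g.:
	#   host=127.0.0.1
	#   port=8816
	#   rpcuser=someuser
	#   rpcpass=somepassword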
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8816
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
stats_store.py
|
#!/usr/bin/env python
"""Storage implementation for gathered statistics.
Statistics collected by StatsCollector (see lib/stats.py) are stored in AFF4
space. Statistics data for different parts of the system are separated by
process ids. For example, the process id may be "frontend" for the frontend,
"worker" for a worker, etc.
In AFF4, statistics data are stored under aff4:/stats_store.
aff4:/stats_store itself is a URN of a StatsStore object that can be used
for querying stored data and saving new stats.
For every process id, aff4:/stats_store/<process id> object of type
StatsStoreProcessData is created. This object stores metadata of all
the metrics in the METRICS_METADATA field. All the collected statistics
data are written as aff4:stats_store/<metric name> attributes to the
aff4:/stats_store/<process id> row. This way we can easily and efficiently
query statistics data for a given set of metrics for a given process id
for a given time range.
Metrics metadata are stored separately from the values themselves for
efficiency reasons. Metadata objects are created when metrics are registered.
They carry extensive information about the metrics, like metric name and
docstring, metric type, etc. This information does not change (unless GRR's
source code changes) and so it doesn't make sense to duplicate it
every time we write a new set of statistics data to the datastore. Therefore
metadata for all the metrics is stored in
StatsStoreProcessData.METRICS_METADATA. Metrics' values themselves are
stored as datastore row attributes.
Statistics are written to the data store by StatsStoreWorker. It periodically
fetches values for all the metrics and writes them to the corresponding
objects in AFF4.
"""
from __future__ import division
import logging
import re
import threading
import time
from future.utils import iteritems
from future.utils import itervalues
from grr_response_core import config
from grr_response_core.lib import rdfvalue
from grr_response_core.lib import registry
from grr_response_core.lib import stats
from grr_response_server import access_control
from grr_response_server import aff4
from grr_response_server import data_store
from grr_response_server import stats_values
from grr_response_server import timeseries
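# Illustrative sketch (not part of the original GRR module): how the classes defined
# below are typically combined. `stats_store` is assumed to be an already opened
# StatsStore AFF4 object and `process_id` / `metric_name` are plain strings; obtaining
# the StatsStore object itself is outside the scope of this sketch.
def _example_write_and_read(stats_store, process_id, metric_name):
  """Write current metric values, then read back the history of one metric."""
  # Persist the current in-memory metric values under the given process id.
  stats_store.WriteStats(process_id=process_id)
  # Read back everything recorded for this metric and this process id.
  stats_data = stats_store.MultiReadStats(
      process_ids=[process_id], metric_name=metric_name)
  # Narrow the nested result down with the chaining query API defined further below.
  query = StatsStoreDataQuery(stats_data)
  return query.In(process_id).In(metric_name).current_dicts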
class StatsStoreProcessData(aff4.AFF4Object):
"""Stores stats data for a particular process."""
class SchemaCls(aff4.AFF4Object.SchemaCls):
"""Schema for StatsStoreProcessData."""
METRICS_METADATA = aff4.Attribute(
"aff4:stats_store_process_data/metrics_metadata",
stats_values.StatsStoreMetricsMetadata,
creates_new_object_version=False,
versioned=False)
def WriteMetadataDescriptors(self, metrics_metadata, timestamp=None):
current_metadata = self.Get(
self.Schema.METRICS_METADATA,
default=stats_values.StatsStoreMetricsMetadata())
if current_metadata.AsDict() != metrics_metadata:
store_metadata = stats_values.StatsStoreMetricsMetadata(
metrics=list(itervalues(metrics_metadata)))
self.AddAttribute(
self.Schema.METRICS_METADATA, store_metadata, age=timestamp)
self.Flush()
def WriteStats(self, timestamp=None):
metrics_metadata = stats.STATS.GetAllMetricsMetadata()
self.WriteMetadataDescriptors(metrics_metadata, timestamp=timestamp)
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.StatsWriteMetrics(
self.urn, metrics_metadata, timestamp=timestamp)
def DeleteStats(self, timestamp=data_store.DataStore.ALL_TIMESTAMPS):
"""Deletes all stats in the given time range."""
with data_store.DB.GetMutationPool() as mutation_pool:
mutation_pool.StatsDeleteStatsInRange(self.urn, timestamp)
class StatsStore(aff4.AFF4Volume):
"""Implementation of the long-term storage of collected stats data.
  This class allows writing current stats data to the data store, as well as
  reading and deleting them. StatsStore uses data_store to store the data.
All historical stats data are stored in a single data store subject per
process. By process we mean, for example: "admin UI", "worker #1",
"worker #3", etc. Stats data are stored as subject's attributes.
"""
DATA_STORE_ROOT = rdfvalue.RDFURN("aff4:/stats_store")
ALL_TIMESTAMPS = data_store.DataStore.ALL_TIMESTAMPS
NEWEST_TIMESTAMP = data_store.DataStore.NEWEST_TIMESTAMP
def Initialize(self):
super(StatsStore, self).Initialize()
if self.urn is None:
self.urn = self.DATA_STORE_ROOT
def WriteStats(self, process_id=None, timestamp=None):
"""Writes current stats values to the data store with a given timestamp."""
if not process_id:
raise ValueError("process_id can't be None")
process_data = aff4.FACTORY.Create(
self.urn.Add(process_id),
StatsStoreProcessData,
mode="rw",
token=self.token)
process_data.WriteStats(timestamp=timestamp)
def ListUsedProcessIds(self):
"""List process ids that were used when saving data to stats store."""
return [urn.Basename() for urn in self.ListChildren()]
def ReadMetadata(self, process_id=None):
"""Reads metadata of stored values for the given process."""
if not process_id:
raise ValueError("process_id can't be None")
results = self.MultiReadMetadata(process_ids=[process_id])
try:
return results[process_id]
except KeyError:
return {}
def MultiReadMetadata(self, process_ids=None):
"""Reads metadata of stored values for multiple given processes."""
if not process_ids:
process_ids = self.ListUsedProcessIds()
subjects = [
self.DATA_STORE_ROOT.Add(process_id) for process_id in process_ids
]
subjects_data = aff4.FACTORY.MultiOpen(
subjects, mode="r", token=self.token, aff4_type=StatsStoreProcessData)
results = {}
for subject_data in subjects_data:
results[subject_data.urn.Basename()] = subject_data.Get(
subject_data.Schema.METRICS_METADATA)
for process_id in process_ids:
results.setdefault(process_id, stats_values.StatsStoreMetricsMetadata())
return results
def ReadStats(self,
process_id=None,
metric_name=None,
timestamp=ALL_TIMESTAMPS,
limit=10000):
"""Reads stats values from the data store for the current process."""
if not process_id:
raise ValueError("process_id can't be None")
results = self.MultiReadStats(
process_ids=[process_id],
metric_name=metric_name,
timestamp=timestamp,
limit=limit)
try:
return results[process_id]
except KeyError:
return {}
def MultiReadStats(self,
process_ids=None,
metric_name=None,
timestamp=ALL_TIMESTAMPS,
limit=10000):
"""Reads historical data for multiple process ids at once."""
if not process_ids:
process_ids = self.ListUsedProcessIds()
multi_metadata = self.MultiReadMetadata(process_ids=process_ids)
subjects = [
self.DATA_STORE_ROOT.Add(process_id) for process_id in process_ids
]
return data_store.DB.StatsReadDataForProcesses(
subjects, metric_name, multi_metadata, timestamp=timestamp, limit=limit)
def DeleteStats(self, process_id=None, timestamp=ALL_TIMESTAMPS):
"""Deletes all stats in the given time range."""
if not process_id:
raise ValueError("process_id can't be None")
process_data = aff4.FACTORY.Create(
self.urn.Add(process_id),
StatsStoreProcessData,
mode="w",
token=self.token)
process_data.DeleteStats(timestamp=timestamp)
class StatsStoreDataQuery(object):
"""Query class used to results from StatsStore.ReadStats/MultiReadStats.
NOTE: this class is mutable. Although it's designed with call-chaining in
mind, you have to create new query object for every new query.
I.e. - this *will not* work:
query = stats_store.StatsStoreDataQuery(stats_data)
counter1 = query.In("pid1").In("counter").SeriesCount()
counter2 = query.In("pidw").In("counter").SeriesCount()
But this *will* work:
query = stats_store.StatsStoreDataQuery(stats_data)
counter1 = query.In("pid1").In("counter").SeriesCount()
query = stats_store.StatsStoreDataQuery(stats_data)
counter2 = query.In("pidw").In("counter").SeriesCount()
"""
VALUE_QUERY = "value"
DISTRIBUTION_SUM_QUERY = "distribution_sum"
DISTRIBUTION_COUNT_QUERY = "distribution_count"
def __init__(self, stats_data):
super(StatsStoreDataQuery, self).__init__()
self.current_dicts = [stats_data]
self.time_series = None
self.path = []
self.query_type = None
self.aggregate_via = None
self.sample_interval = None
def _TimeSeriesFromData(self, data, attr=None):
"""Build time series from StatsStore data."""
series = timeseries.Timeseries()
for value, timestamp in data:
if attr:
try:
series.Append(getattr(value, attr), timestamp)
except AttributeError:
raise ValueError("Can't find attribute %s in value %s." % (attr,
value))
else:
if hasattr(value, "sum") or hasattr(value, "count"):
raise ValueError(
"Can't treat complext type as simple value: %s" % value)
series.Append(value, timestamp)
return series
@property
def ts(self):
"""Return single timeseries.Timeseries built by this query."""
if self.time_series is None:
raise RuntimeError("Time series weren't built yet.")
if not self.time_series:
return timeseries.Timeseries()
return self.time_series[0]
def In(self, regex):
"""Narrow query's scope."""
self.path.append(regex)
new_current_dicts = []
for current_dict in self.current_dicts:
for key, value in iteritems(current_dict):
m = re.match(regex, key)
if m and m.string == m.group(0):
new_current_dicts.append(value)
self.current_dicts = new_current_dicts
return self
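# Note on In(): the regex has to match the whole key (m.string == m.group(0)).
# As a sketch, query.In(r"pid\d+") would select "pid1" but not "pid1_extra".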
def _GetNestedValues(self, dicts):
"""Get all values nested in the given dictionaries.
Args:
dicts: List of dictionaries to go through.
Returns:
([nested values], status) where status is True if nested values are
dictionaries and False otherwise.
Raises:
RuntimeError: if some nested values are dictionaries and some are not.
"""
new_dicts = []
for current_dict in dicts:
for _, value in iteritems(current_dict):
new_dicts.append(value)
sub_dicts = [x for x in new_dicts if hasattr(x, "iteritems")]
if not sub_dicts:
return (new_dicts, False)
elif len(sub_dicts) == len(new_dicts):
return (new_dicts, True)
else:
raise RuntimeError("Inconsistent values hierarchy.")
def InAll(self):
"""Use all metrics in the current scope."""
self.path.append(":all")
while True:
self.current_dicts, status = self._GetNestedValues(self.current_dicts)
if not status:
break
return self
def MakeIncreasing(self):
"""Fixes the time series so that it does not decrement."""
if self.time_series is None:
raise RuntimeError("MakeIncreasing must be called after Take*().")
for time_serie in self.time_series:
time_serie.MakeIncreasing()
return self
def Normalize(self, period, start_time, stop_time, **kwargs):
"""Resample the query with given sampling interval."""
if self.time_series is None:
raise RuntimeError("Normalize must be called after Take*().")
self.sample_interval = period
self.start_time = start_time
self.stop_time = stop_time
for time_serie in self.time_series:
time_serie.Normalize(period, start_time, stop_time, **kwargs)
return self
def InTimeRange(self, range_start, range_end):
"""Only use data points withing given time range."""
if self.time_series is None:
raise RuntimeError("InTimeRange must be called after Take*().")
if range_start is None:
raise ValueError("range_start can't be None")
if range_end is None:
raise ValueError("range_end can't be None")
for time_serie in self.time_series:
time_serie.FilterRange(start_time=range_start, stop_time=range_end)
return self
def TakeValue(self):
"""Assume metrics in this query are plain values."""
self.query_type = self.VALUE_QUERY
self.time_series = []
for current_dict in self.current_dicts:
self.time_series.append(self._TimeSeriesFromData(current_dict))
return self
def TakeDistributionSum(self):
"""Assume metrics in this query are distributions. Use their sums."""
self.query_type = self.DISTRIBUTION_SUM_QUERY
self.time_series = []
for current_dict in self.current_dicts:
self.time_series.append(self._TimeSeriesFromData(current_dict, "sum"))
return self
def TakeDistributionCount(self):
"""Assume metrics in this query are distributions. Use their counts."""
self.query_type = self.DISTRIBUTION_COUNT_QUERY
self.time_series = []
for current_dict in self.current_dicts:
self.time_series.append(self._TimeSeriesFromData(current_dict, "count"))
return self
def AggregateViaSum(self):
"""Aggregate multiple time series into one by summing them."""
if self.time_series is None:
raise RuntimeError("AggregateViaSum must be called after Take*().")
if self.sample_interval is None:
raise RuntimeError("Resample() must be called prior to "
"AggregateViaSum().")
if not self.time_series:
return self
if len(self.time_series) == 1:
return self
current_serie = self.time_series[0]
for serie in self.time_series[1:]:
current_serie.Add(serie)
self.time_series = [current_serie]
return self
def AggregateViaMean(self):
"""Aggregate multiple time series into one by calculating mean value."""
num_time_series = len(self.time_series)
self.AggregateViaSum()
self.ts.Rescale(1.0 / num_time_series)
return self
def SeriesCount(self):
"""Return number of time series the query was narrowed to."""
if not self.time_series:
if not self.current_dicts:
return 0
else:
return len(self.current_dicts)
else:
return len(self.time_series)
def Rate(self):
"""Apply rate function to all time series in this query."""
if self.time_series is None:
raise RuntimeError("Rate must be called after Take*().")
if self.sample_interval is None:
raise RuntimeError("Normalize() must be called prior to Rate().")
for time_serie in self.time_series:
time_serie.ToDeltas()
time_serie.Rescale(1.0 / self.sample_interval.seconds)
return self
def Scale(self, multiplier):
"""Scale value in all time series in this query."""
if self.time_series is None:
raise RuntimeError("Scale must be called after Take*().")
for time_serie in self.time_series:
time_serie.Rescale(multiplier)
return self
def Mean(self):
"""Calculate mean value of a single time serie in this query."""
if self.time_series is None:
raise RuntimeError("Mean must be called after Take*().")
if not self.time_series:
return 0
if len(self.time_series) != 1:
raise RuntimeError("Can only return mean for a single time serie.")
return self.time_series[0].Mean()
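# A chained-query sketch (the process id, metric name, and 30s period are
# illustrative; remember to build a fresh query object per query):
#   query = StatsStoreDataQuery(stats_data)
#   mean_rate = (query.In("worker_1").In("requests_count")
#                .TakeValue()
#                .Normalize(rdfvalue.Duration("30s"), start_time, stop_time)
#                .Rate()
#                .Mean())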
# Global StatsStore object
STATS_STORE = None
class StatsStoreWorker(object):
"""StatsStoreWorker periodically dumps stats data into the stats store."""
def __init__(self,
stats_store,
process_id,
thread_name="grr_stats_saver",
sleep=None):
super(StatsStoreWorker, self).__init__()
self.stats_store = stats_store
self.process_id = process_id
self.thread_name = thread_name
self.sleep = sleep or config.CONFIG["StatsStore.write_interval"]
def _RunLoop(self):
while True:
logging.debug("Writing stats to stats store.")
try:
self.stats_store.WriteStats(process_id=self.process_id)
except Exception as e: # pylint: disable=broad-except
logging.exception("StatsStore exception caught during WriteStats(): %s",
e)
logging.debug("Removing old stats from stats store." "")
# Maximum time we keep stats store data is three days.
stats_store_ttl = 60 * 60 * 24 * 3
try:
now = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
self.stats_store.DeleteStats(
process_id=self.process_id,
timestamp=(0, now - stats_store_ttl * 1000000))
except Exception as e: # pylint: disable=broad-except
logging.exception(
"StatsStore exception caught during DeleteStats(): %s", e)
time.sleep(self.sleep)
def Run(self):
self.RunAsync().join()
def RunAsync(self):
self.running_thread = threading.Thread(
name=self.thread_name, target=self._RunLoop)
self.running_thread.daemon = True
self.running_thread.start()
return self.running_thread
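# Sketch of running the worker standalone ("worker_1" and the 60s interval
# are illustrative; StatsStoreInit below wires this up from the config):
#   worker = StatsStoreWorker(STATS_STORE, "worker_1", sleep=60)
#   worker.RunAsync()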
class StatsStoreInit(registry.InitHook):
"""Hook that inits global STATS_STORE object and stats store worker."""
pre = [aff4.AFF4InitHook]
def RunOnce(self):
"""Initializes StatsStore and StatsStoreWorker."""
# SetUID is required to create and write to aff4:/stats_store
token = access_control.ACLToken(username="GRRStatsStore").SetUID()
global STATS_STORE
STATS_STORE = aff4.FACTORY.Create(None, StatsStore, mode="w", token=token)
try:
STATS_STORE.Flush()
except access_control.UnauthorizedAccess:
logging.info("Not writing aff4:/stats_store due to lack of permissions.")
# We don't need StatsStoreWorker if there's no StatsStore.process_id in
# the config.
stats_process_id = config.CONFIG["StatsStore.process_id"]
if not stats_process_id:
return
stats_store_worker = StatsStoreWorker(STATS_STORE, stats_process_id)
stats_store_worker.RunAsync()
|
custom_widgets.py
|
import logging
import os
import subprocess
import time
import warnings
from functools import partial
from typing import Union
import numpy as np
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import Signal, Slot
from deepethogram.file_io import VideoReader
# these define the parameters of the deepethogram colormap below
from deepethogram.viz import Mapper
log = logging.getLogger(__name__)
def numpy_to_qpixmap(image: np.ndarray) -> QtGui.QPixmap:
if image.dtype == np.float:
image = float_to_uint8(image)
H, W, C = int(image.shape[0]), int(image.shape[1]), int(image.shape[2])
if C == 4:
format = QtGui.QImage.Format_RGBA8888
elif C == 3:
format = QtGui.QImage.Format_RGB888
else:
raise ValueError('Aberrant number of channels: {}'.format(C))
qpixmap = QtGui.QPixmap(QtGui.QImage(image, W,
H, image.strides[0],
format))
# print(type(qpixmap))
return (qpixmap)
def float_to_uint8(image: np.ndarray) -> np.ndarray:
if image.dtype == np.float:
image = (image * 255).clip(min=0, max=255).astype(np.uint8)
# print(image)
return image
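# Sketch of the conversion path (the frame shape is assumed): float images are
# rescaled to uint8 before being wrapped in a QImage/QPixmap.
#   frame = np.zeros((480, 640, 3))  # float64 RGB frame
#   pixmap = numpy_to_qpixmap(frame)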
def initializer(nframes: int):
print('initialized with {}'.format(nframes))
class VideoFrame(QtWidgets.QGraphicsView):
frameNum = Signal(int)
initialized = Signal(int)
def __init__(self, videoFile: Union[str, os.PathLike] = None, *args, **kwargs):
super().__init__(*args, **kwargs)
# self.videoView = QtWidgets.QGraphicsView()
self._scene = QtWidgets.QGraphicsScene(self)
self._photo = QtWidgets.QGraphicsPixmapItem()
self._scene.addItem(self._photo)
# self.videoView.setScene(self._scene)
self.setScene(self._scene)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.sizePolicy().hasHeightForWidth())
self.setSizePolicy(sizePolicy)
self.setMinimumSize(QtCore.QSize(640, 480))
# self.setObjectName("videoView")
if videoFile is not None:
self.initialize_video(videoFile)
self.update()
self.setStyleSheet("background:transparent;")
# print(self.palette())
def initialize_video(self, videofile: Union[str, os.PathLike]):
if hasattr(self, 'vid'):
self.vid.close()
# if hasattr(self.vid, 'cap'):
# self.vid.cap.release()
self.videofile = videofile
self.vid = VideoReader(videofile)
# self.frame = next(self.vid)
self.initialized.emit(len(self.vid))
self.update_frame(0)
def update_frame(self, value, force: bool=False):
# print('updating')
# print('update to: {}'.format(value))
# print(self.current_fnum)
# previous_frame = self.current_fnum
if not hasattr(self, 'vid'):
return
value = int(value)
if hasattr(self, 'current_fnum'):
if self.current_fnum == value and not force:
# print('already there')
return
if value < 0:
# warnings.warn('Desired frame less than 0: {}'.format(value))
value = 0
if value >= self.vid.nframes:
# warnings.warn('Desired frame beyond maximum: {}'.format(self.vid.nframes))
value = self.vid.nframes - 1
self.frame = self.vid[value]
# the frame in the videoreader is the position of the reader. If you've read frame 0, the current reader
# position is 1. This makes cv2.CAP_PROP_POS_FRAMES match vid.fnum. However, we want to keep track of our
# currently displayed image, which is fnum - 1
self.current_fnum = self.vid.fnum - 1
# print('new fnum: {}'.format(self.current_fnum))
self.show_image(self.frame)
self.frameNum.emit(self.current_fnum)
def fitInView(self, scale=True):
rect = QtCore.QRectF(self._photo.pixmap().rect())
if not rect.isNull():
self.setSceneRect(rect)
# if self.hasPhoto():
unity = self.transform().mapRect(QtCore.QRectF(0, 0, 1, 1))
self.scale(1 / unity.width(), 1 / unity.height())
viewrect = self.viewport().rect()
scenerect = self.transform().mapRect(rect)
factor = min(viewrect.width() / scenerect.width(),
viewrect.height() / scenerect.height())
# print(factor, viewrect, scenerect)
self.scale(factor, factor)
self._zoom = 0
def adjust_aspect_ratio(self):
if not hasattr(self, 'vid'):
raise ValueError('Trying to set GraphicsView aspect ratio before video loaded.')
if not hasattr(self.vid, 'width'):
self.vid.width, self.vid.height = self.frame.shape[1], self.frame.shape[0]
video_aspect = self.vid.width / self.vid.height
H, W = self.height(), self.width()
new_width = video_aspect * H
if new_width < W:
self.setFixedWidth(new_width)
new_height = W / self.vid.width * self.vid.height
if new_height < H:
self.setFixedHeight(new_height)
def show_image(self, array):
qpixmap = numpy_to_qpixmap(array)
self._photo.setPixmap(qpixmap)
self.fitInView()
self.update()
# self.show()
def resizeEvent(self, event):
if hasattr(self, 'vid'):
self.fitInView()
class ScrollbarWithText(QtWidgets.QWidget):
position = Signal(int)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.horizontalWidget = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.horizontalWidget.sizePolicy().hasHeightForWidth())
self.horizontalWidget.setSizePolicy(sizePolicy)
self.horizontalWidget.setMaximumSize(QtCore.QSize(16777215, 25))
self.horizontalWidget.setObjectName("horizontalWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalWidget)
self.horizontalLayout.setSizeConstraint(QtWidgets.QLayout.SetMaximumSize)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.horizontalScrollBar = QtWidgets.QScrollBar(self.horizontalWidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.horizontalScrollBar.sizePolicy().hasHeightForWidth())
self.horizontalScrollBar.setSizePolicy(sizePolicy)
self.horizontalScrollBar.setMaximumSize(QtCore.QSize(16777215, 25))
self.horizontalScrollBar.setOrientation(QtCore.Qt.Horizontal)
self.horizontalScrollBar.setObjectName("horizontalScrollBar")
self.horizontalLayout.addWidget(self.horizontalScrollBar)
self.plainTextEdit = QtWidgets.QPlainTextEdit(self.horizontalWidget)
self.plainTextEdit.setEnabled(True)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Maximum)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.plainTextEdit.sizePolicy().hasHeightForWidth())
self.plainTextEdit.setSizePolicy(sizePolicy)
self.plainTextEdit.setMaximumSize(QtCore.QSize(100, 25))
font = QtGui.QFont()
font.setPointSize(8)
self.plainTextEdit.setFont(font)
self.plainTextEdit.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.plainTextEdit.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.plainTextEdit.setObjectName("plainTextEdit")
self.horizontalLayout.addWidget(self.plainTextEdit)
self.setLayout(self.horizontalLayout)
# self.ui.plainTextEdit.textChanged.connect
self.plainTextEdit.textChanged.connect(self.text_change)
self.horizontalScrollBar.sliderMoved.connect(self.scrollbar_change)
self.horizontalScrollBar.valueChanged.connect(self.scrollbar_change)
self.update()
# self.show()
def sizeHint(self):
return QtCore.QSize(480, 25)
def text_change(self):
value = self.plainTextEdit.document().toPlainText()
value = int(value)
self.position.emit(value)
def scrollbar_change(self):
value = self.horizontalScrollBar.value()
self.position.emit(value)
@Slot(int)
def update_state(self, value: int):
if self.plainTextEdit.document().toPlainText() != '{}'.format(value):
self.plainTextEdit.setPlainText('{}'.format(value))
if self.horizontalScrollBar.value() != value:
self.horizontalScrollBar.setValue(value)
@Slot(int)
def initialize_state(self, value: int):
# print('nframes: ', value)
self.horizontalScrollBar.setMaximum(value - 1)
self.horizontalScrollBar.setMinimum(0)
# self.horizontalScrollBar.sliderMoved.connect(self.scrollbar_change)
# self.horizontalScrollBar.valueChanged.connect(self.scrollbar_change)
self.horizontalScrollBar.setValue(0)
self.plainTextEdit.setPlainText('{}'.format(0))
# self.plainTextEdit.textChanged.connect(self.text_change)
# self.update()
class VideoPlayer(QtWidgets.QWidget):
# added parent here because python-uic, which turns Qt Creator files into python files, always adds the parent
# widget. so instead of just saying self.videoPlayer = VideoPlayer(), it does
# self.videoPlayer = VideoPlayer(self.centralWidget)
# this just means you are required to pass videoFile as a kwarg
def __init__(self, parent=None, videoFile: Union[str, os.PathLike] = None, *args, **kwargs):
super().__init__(*args, **kwargs)
layout = QtWidgets.QVBoxLayout()
# initialize both widgets and add it to the vertical layout
self.videoView = VideoFrame(videoFile)
layout.addWidget(self.videoView)
self.scrollbartext = ScrollbarWithText()
layout.addWidget(self.scrollbartext)
self.setLayout(layout)
# if you use the scrollbar or the text box, update the video frame
# self.scrollbartext.horizontalScrollBar.sliderMoved.connect(self.videoView.update_frame)
# self.scrollbartext.horizontalScrollBar.valueChanged.connect(self.videoView.update_frame)
# self.scrollbartext.plainTextEdit.textChanged.connect(self.videoView.update_frame)
self.scrollbartext.position.connect(self.videoView.update_frame)
self.scrollbartext.position.connect(self.scrollbartext.update_state)
# if you move the video by any method, update the frame text
self.videoView.initialized.connect(self.scrollbartext.initialize_state)
# self.videoView.initialized.connect(initializer)
self.videoView.frameNum.connect(self.scrollbartext.update_state)
# I have to do this here because I think emitting a signal doesn't work from within the widget's constructor
if hasattr(self.videoView, 'vid'):
self.videoView.initialized.emit(len(self.videoView.vid))
self.update()
# class LabelImage(QtWidgets.QScrollArea):
# def __init__(self, parent=None, *args, **kwargs):
# super().__init__(*args, **kwargs)
#
# layout = QtWidgets.QHBoxLayout()
# self.widget = QtWidgets.QWidget()
#
# buttonlayout = QtWidgets.QVBoxLayout()
# self.labels = []
# sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Maximum)
# sizePolicy.setHorizontalStretch(0)
# sizePolicy.setVerticalStretch(0)
# for i in range(100):
# self.labels.append(QtWidgets.QLabel('testing{}'.format(i)))
# self.labels[i].setMinimumHeight(25)
# buttonlayout.addWidget(self.labels[i])
# # self.labels[i].setLayout(buttonlayout)
#
# self.widget.setLayout(buttonlayout)
# self.setWidget(self.widget)
#
# self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
# self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
#
# self.update()
#
# def sizeHint(self):
# return (QtCore.QSize(720, 250))
# https://stackoverflow.com/questions/29643352/converting-hex-to-rgb-value-in-python
# start = np.array([232,232,232])
class LabelViewer(QtWidgets.QGraphicsView):
X = Signal(int)
saved = Signal(bool)
just_toggled = Signal(bool)
num_changed = Signal(int)
def __init__(self, fixed: bool = False, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self._scene = QtWidgets.QGraphicsScene(self)
self._photo = QtWidgets.QGraphicsPixmapItem()
self._scene.addItem(self._photo)
# self.videoView.setScene(self._scene)
self.setScene(self._scene)
color = QtGui.QColor(45, 45, 45)
self.pen = QtGui.QPen(color, 0)
self.setAlignment(QtCore.Qt.AlignTop | QtCore.Qt.AlignLeft)
# self.setAlignment(QtCore.Qt.AlignCenter)
# self.setStyleSheet("background:transparent;")
self.initialized = False
self.fixed = fixed
if self.fixed:
self.fixed_settings()
def initialize(self, n: int = 1, n_timepoints: int = 31, debug: bool = False,
colormap: str = 'Reds', unlabeled_alpha: float = 0.1, desired_pixel_size: int = 25,
array: np.ndarray = None, fixed: bool = False, opacity: np.ndarray = None):
if self.initialized:
raise ValueError('only initialize once!')
if array is not None:
# print(array.shape)
self.n_timepoints = array.shape[0]
self.n = array.shape[1]
# if our input array is -1s, assume that this has not been labeled yet
self.changed = np.any(array != -1, axis=1).astype(np.uint8)
array[array == -1] = 0
self.array = array
self.debug = False
else:
self.n = n
self.array = np.zeros((n_timepoints, self.n), dtype=np.uint8)
self.changed = np.zeros((n_timepoints,), dtype=np.uint8)
self.n_timepoints = n_timepoints
self.debug = debug
self.shape = self.array.shape
self.label_toggled = np.array([False for i in range(n)])
self.desired_pixel_size = desired_pixel_size
try:
self.cmap = Mapper(colormap)
except ValueError as e:
raise ValueError("Colormap not in matplotlib's defaults! {}".format(colormap))
if self.debug:
self.make_debug()
self.unlabeled_alpha = unlabeled_alpha
self.opacity = opacity
self.recreate_label_image()
pos_colors = self.cmap(np.ones((self.n, 1)) * 255)
neg_colors = self.cmap(np.zeros((self.n, 1)))
# print('N: {}'.format(self.n))
self.pos_color = np.array([pos_colors[i].squeeze() for i in range(self.n)])
self.neg_color = np.array([neg_colors[i].squeeze() for i in range(self.n)])
# print('pos, neg: {}, {}'.format(self.pos_color, self.neg_color))
draw_rect = QtCore.QRectF(0, 0, 1, self.n)
# print(dir(self.draw_rect))
self.item_rect = self._scene.addRect(draw_rect, self.pen)
self.change_view_x(0)
self.fixed = fixed # initialization overrides constructor
if self.fixed:
self.fixed_settings()
self.initialized = True
self.update()
self.num_changed.emit(np.sum(self.changed))
def mousePressEvent(self, event):
if not self.initialized:
return
# print(dir(event))
scene_pos = self.mapToScene(event.pos())
x, y = scene_pos.x(), scene_pos.y()
# print('X: {} Y: {}'.format(x,y))
x, y = int(x), int(y)
value = self.array[x, y]
if value == 0:
self._add_behavior([y], x, x)
else:
self._add_behavior([y], x + 1, x)
self.initial_row = y
self.initial_column = x
super().mousePressEvent(event)
def mouseMoveEvent(self, event):
if not self.initialized:
return
scene_pos = self.mapToScene(event.pos())
x, _ = scene_pos.x(), scene_pos.y()
y = self.initial_row
# print('X: {} Y: {}'.format(x,y))
x, y = int(x), int(y)
# value = self.array[x, y]
if x > self.initial_column:
self._add_behavior([y], x, x)
else:
self._add_behavior([y], x + 1, x)
self.last_column = x
super().mouseMoveEvent(event)
def change_rectangle(self, rect):
if not hasattr(self, 'item_rect'):
return
self.item_rect.setRect(rect)
def _fit_label_photo(self):
if not hasattr(self, 'x'):
self.x = 0
if not hasattr(self, 'view_x'):
self.view_x = 0
# gets the bounding rectangle (in pixels) for the image of the label array
geometry = self.geometry()
# print(geometry)
widget_width, widget_height = geometry.width(), geometry.height()
num_pix_high = widget_height / self.desired_pixel_size
aspect = widget_width / widget_height
new_height = num_pix_high
new_width = new_height * aspect
# print('W: {} H: {}'.format(new_width, new_height))
rect = QtCore.QRectF(self.view_x, 0, new_width, new_height)
self.fitInView(rect)
# self.fitInView(rect, aspectRatioMode=QtCore.Qt.KeepAspectRatio)
self.view_height = new_height
self.view_width = new_width
self.update()
def resizeEvent(self, event: QtGui.QResizeEvent):
self._fit_label_photo()
return
@Slot(int)
def change_view_x(self, x: int):
if x < 0 or x >= self.n_timepoints:
# print('return 1')
return
if not hasattr(self, 'view_width'):
self._fit_label_photo()
if not hasattr(self, 'n'):
# print('return 2')
return
view_x = x - self.view_width // 2
if view_x < 0:
# print('desired view x: {} LEFT SIDE'.format(view_x))
new_x = 0
elif view_x >= self.n_timepoints:
# print('desired view x: {} RIGHT SIDE'.format(view_x))
new_x = self.n_timepoints - 1
else:
new_x = view_x
# new_x = max(view_x, 0)
# new_x = min(new_x, self.n_timepoints - 1)
old_x = self.x
self.view_x = new_x
self.x = x
position = QtCore.QPointF(x, 0)
# print('view width: {}'.format(self.view_width))
# print('new_x: {}'.format(new_x))
# print('rec_x: {}'.format(position))
self.item_rect.setPos(position)
self.X.emit(self.x)
rect = QtCore.QRectF(self.view_x, 0, self.view_width, self.view_height)
# print('View rectangle: {}'.format(rect))
self.fitInView(rect)
behaviors = []
for i, v in enumerate(self.label_toggled):
if v:
behaviors.append(i)
if len(behaviors) > 0:
self._add_behavior(behaviors, old_x, x)
# self._fit_label_photo()
self.update()
# self.show()
def fixed_settings(self):
if not hasattr(self, 'changed'):
return
self.changed = np.ones(self.changed.shape)
self.recreate_label_image()
def _add_behavior(self, behaviors: Union[int, np.ndarray, list], fstart: int, fend: int):
# print('adding')
if self.fixed:
return
if not hasattr(self, 'array'):
return
n_behaviors = self.image.shape[0]
if type(behaviors) != np.ndarray:
behaviors = np.array(behaviors)
if max(behaviors) > n_behaviors:
raise ValueError('Not enough behaviors for number: {}'.format(behaviors))
if fstart < 0:
raise ValueError('Behavior start frame must be >= 0: {}'.format(fstart))
if fend > self.n_timepoints:
raise ValueError('Behavior end frame must be <= nframes: {}'.format(fend))
# log.debug('Behaviors: {} fstart: {} fend: {}'.format(behaviors, fstart, fend))
# go backwards to erase
if fstart <= fend:
value = 1
time_indices = np.arange(fstart, fend + 1) # want it to be
color = self.pos_color
# print('value = 1')
elif fstart - fend == 1:
value = 0
time_indices = np.array([fend, fstart])
color = self.neg_color
else:
# print('value = 0')
value = 0
time_indices = np.arange(fstart, fend, -1)
color = self.neg_color
# log.debug('time indices: {} value: {}'.format(time_indices, value))
# handle background specifically
if len(behaviors) == 1 and behaviors[0] == 0:
# print('0')
self.array[time_indices, 0] = 1
self.array[time_indices, 1:] = 0
# print('l shape: {}'.format(self.image[1:, time_indices, :].shape))
# print('r_shape: {}'.format(np.tile(self.neg_color[1:], [1, len(time_indices), 1]).shape))
self.image[0, time_indices, :] = self.pos_color[0]
self.image[1:, time_indices, :] = np.dstack([self.neg_color[1:] for _
in range(len(time_indices))]).swapaxes(1, 2)
else:
xv, yv = np.meshgrid(time_indices, behaviors, indexing='ij')
xv = xv.flatten()
yv = yv.flatten()
# log.debug('xv: {} yv: {}'.format(xv, yv))
# print('yv: {}'.format(yv))
self.array[xv, yv] = value
# change color
self.image[yv, xv, :] = color[yv]
# if there are any changes to rows 1+, make sure background is false
self.array[time_indices, 0] = np.logical_not(np.any(self.array[time_indices, 1:], axis=1))
# remap the color for the background column just in case
self.image[0, time_indices, :] = self.cmap(self.array[time_indices, 0:1].T*255).squeeze()
# mapped = self.cmap(self.array[time_indices, 0] * 255)
# print('mapped in add behavior: {}'.format(mapped.shape))
# self.image[0, time_indices, :] = mapped
# print(self.label.image[0,time_indices])
# change opacity
self.image[:, time_indices, 3] = 1
self.changed[time_indices] = 1
# change opacity
# self.label.image[:, indices, 3] = 1
self.saved.emit(False)
# self.label.image = im
self.update_image()
self.num_changed.emit(self.changed.sum())
def change_view_dx(self, dx: int):
self.change_view_x(self.x + dx)
def _array_to_image(self, array: np.ndarray, alpha: Union[float, int, np.ndarray] = None):
image = self.cmap(array.T * 255)
image[..., 3] = alpha
return (image)
def _add_row(self):
self.array = np.concatenate((self.array, np.zeros((self.array.shape[0], 1), dtype=self.array.dtype)),
axis=1)
alpha_vector = self.image[0, :, 3:4]
alpha_array = np.tile(alpha_vector, (1, self.array.shape[1]))
self.image = self._array_to_image(self.array, alpha_array.T)
self.n += 1
self.label_toggled = np.append(self.label_toggled, [False])
rect = QtCore.QRectF(self.x, 0, 1, self.n)
self.change_rectangle(rect)
self._fit_label_photo()
def _change_n_timepoints(self, n_timepoints: int):
warnings.warn('Changing number of timepoints will erase any labels!')
self.array = np.zeros((n_timepoints, self.n), dtype=np.uint8)
self.changed = np.zeros((n_timepoints,), dtype=np.uint8)
self.n_timepoints = n_timepoints
self.shape = self.array.shape
self.image = self._array_to_image(self.array, alpha=self.unlabeled_alpha)
def make_debug(self, num_rows: int = 15000):
print('debug')
assert (hasattr(self, 'array'))
rows, cols = self.shape
# print(rows, cols)
# behav = 0
for i in range(rows):
behav = (i % cols)
self.array[i, behav] = 1
# self.array = self.array[:num_rows,:]
# print(self.array)
def calculate_background_class(self, array: np.ndarray):
array[:, 0] = np.logical_not(np.any(array[:, 1:], axis=1))
return (array)
def update_background_class(self):
# import pdb
# pdb.set_trace()
self.array = self.calculate_background_class(self.array)
def update_image(self):
qpixmap = numpy_to_qpixmap(self.image)
self.qpixmap = qpixmap
self._photo.setPixmap(self.qpixmap)
self.update()
def recreate_label_image(self):
# print('array input shape, will be transposed: {}'.format(self.array.shape))
self.image = self.cmap(self.array.T * 255)
if self.opacity is None:
opacity = np.ones((self.image.shape[0], self.image.shape[1])) * self.unlabeled_alpha
opacity[:, np.where(self.changed)[0]] = 1
else:
opacity = self.opacity.copy()
# print('image: {}'.format(self.image))
# print('image shape in recreate label image: {}'.format(self.image.shape))
# print('opacity: {}'.format(opacity))
# print('opacity shape in recreate label image: {}'.format(opacity.shape))
# print('chang: {}'.format(self.changed.shape))
self.image[..., 3] = opacity
self.update_image()
@Slot(int)
def toggle_behavior(self, index: int):
if not hasattr(self, 'array') or self.array is None or self.fixed:
return
n_behaviors = self.image.shape[0]
if index > n_behaviors:
raise ValueError('Not enough behaviors for number: {}'.format(index))
if index < 0:
raise ValueError('Behavior index cannot be below 0')
self.label_toggled[index] = ~ self.label_toggled[index]
if self.label_toggled[index]:
# if background is selected, deselect all others
if index == 0:
self.label_toggled[1:] = False
self.array[self.x, 1:] = 0
self.array[self.x, index] = 1
self.changed[self.x] = 1
self.update_background_class()
self.recreate_label_image()
self.change_view_x(self.x)
# print(self.changed)
self.just_toggled.emit(index)
self.update()
class LabelButtons(QtWidgets.QWidget):
def __init__(self, parent=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.reset()
def reset(self):
self.layout = None
self.buttons = None
self.behaviors = None
self.enabled = None
self.minimum_height = None
def initialize(self, behaviors: Union[list, np.ndarray] = ['background'], enabled: bool = True,
minimum_height: int = 25):
assert (len(behaviors) > 0)
layout = QtWidgets.QVBoxLayout()
self.buttons = []
self.behaviors = behaviors
self.enabled = enabled
self.minimum_height = minimum_height
for i, behavior in enumerate(behaviors):
# not sure if I need the str call but I don't want a weird single-element numpy object
behavior = str(behavior)
button = self._make_button(behavior, i)
self.buttons.append(button)
layout.addWidget(button, 0, alignment=QtCore.Qt.AlignTop)
layout.setMargin(0)
layout.setSpacing(0)
self.layout = layout
self.setLayout(self.layout)
def _make_button(self, behavior: str, index: int):
string = str(behavior)
if index < 10:
string = '[{:01d}] '.format(index) + string
button = QtWidgets.QPushButton(string, parent=self)
button.setEnabled(self.enabled)
button.setMinimumHeight(self.minimum_height)
button.setCheckable(True)
button.setStyleSheet("QPushButton { text-align: left; }"
"QPushButton:checked { background-color: rgb(30, 30, 30)}")
return button
def add_behavior(self, behavior: str):
if behavior in self.behaviors:
warnings.warn('behavior {} already in list'.format(behavior))
else:
self.behaviors.append(behavior)
button = self._make_button(behavior, len(self.behaviors))
self.buttons.append(button)
self.layout.addWidget(button, 0, alignment=QtCore.Qt.AlignTop)
self.update()
def fix(self):
for button in self.buttons:
button.setEnabled(False)
class LabelImg(QtWidgets.QScrollArea):
def __init__(self, parent=None, *args, **kwargs):
super().__init__(parent=parent, *args, **kwargs)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
self.setSizePolicy(sizePolicy)
self.setMaximumSize(QtCore.QSize(16777215, 200))
self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
self.setWidgetResizable(True)
self.reset()
def reset(self):
self.label = None
self.behaviors = None
self.n = None
self.buttons = None
self.toggle_shortcuts = None
self.widget = None
self.layout = None
def update_buttons(self):
if self.label is None or self.buttons is None:
return
toggled = self.label.label_toggled
for toggle, button in zip(toggled, self.buttons.buttons):
if toggle != button.isChecked():
button.setChecked(toggle)
self.update()
def initialize(self, behaviors: Union[list, np.ndarray] = ['background'],
n_timepoints: int = 31, debug: bool = False, colormap: str = 'Reds', unlabeled_alpha: float = 0.1,
desired_pixel_size: int = 25, array: np.ndarray = None, fixed: bool = False,
opacity: np.ndarray = None):
layout = QtWidgets.QHBoxLayout()
# assert (n == len(behaviors))
assert (behaviors[0] == 'background')
self.label = LabelViewer()
# print(behaviors)
self.behaviors = behaviors
self.n = len(self.behaviors)
self.label.initialize(len(self.behaviors), n_timepoints, debug, colormap, unlabeled_alpha, desired_pixel_size,
array, fixed, opacity=opacity)
self.buttons = LabelButtons()
enabled = not fixed
self.buttons.initialize(self.behaviors, enabled, desired_pixel_size)
if not fixed:
tmp_buttons = []
for i, button in enumerate(self.buttons.buttons):
button.clicked.connect(partial(self.label.toggle_behavior, i))
tmp_buttons.append(button)
# self.toggle_shortcuts[i].activated.connect(partial(self.label.toggle_behavior, i))
self.label.just_toggled.connect(self.update_buttons)
self.setMinimumHeight(desired_pixel_size)
# this syntax figured out from here
# https://www.learnpyqt.com/courses/adanced-ui-features/qscrollarea/
self.widget = QtWidgets.QWidget()
layout.addWidget(self.buttons, alignment=QtCore.Qt.AlignTop)
layout.addWidget(self.label, alignment=QtCore.Qt.AlignTop)
self.widget.setLayout(layout)
self.setWidget(self.widget)
self.update()
def add_behavior(self, behavior: str):
print('1: ', self.behaviors, behavior)
if behavior in self.behaviors:
warnings.warn('behavior {} already in list'.format(behavior))
# add a button
self.buttons.add_behavior(behavior)
print('2: {}'.format(self.behaviors))
print('2 buttons: {}'.format(self.buttons.behaviors))
# add to our list of behaviors
# self.behaviors.append(behavior)
print('3: {}'.format(self.behaviors))
# hook up button to toggling behavior
i = len(self.behaviors) - 1
print(self.behaviors)
print(len(self.behaviors))
print(len(self.buttons.buttons))
self.buttons.buttons[i].clicked.connect(partial(self.label.toggle_behavior, i))
self.label._add_row()
if i < 10:
self.toggle_shortcuts.append(QtWidgets.QShortcut(QtGui.QKeySequence(str(i)), self))
self.toggle_shortcuts[i].activated.connect(self.buttons.buttons[i].click)
class ListenForPipeCompletion(QtCore.QThread):
has_finished = Signal(bool)
def __init__(self, pipe):
QtCore.QThread.__init__(self)
# super().__init__(self)
self.pipe = pipe
self.should_continue = True  # polled in run() so the loop can be stopped cleanly
def __del__(self):
self.should_continue = False
def run(self):
while self.should_continue:
time.sleep(1)
if self.pipe.poll() is None:
pass
# print('still running...')
else:
self.has_finished.emit(True)
break
class SubprocessChainer(QtCore.QThread):
def __init__(self, calls: list):
QtCore.QThread.__init__(self)
for call in calls:
assert type(call) == list
self.calls = calls
self.should_continue = True
def stop(self):
self.should_continue = False
# self.pipe.terminate()
def run(self):
for call in self.calls:
if self.should_continue:
self.pipe = subprocess.Popen(call)
while True:
time.sleep(1)
if self.pipe.poll() is not None or not self.should_continue:
self.pipe.terminate()
self.pipe.wait()
break
# def chained_subprocess_calls(calls: list) -> None:
# def _run(calls):
# for call in calls:
# assert type(call) == list
#
# for call in calls:
# print(call)
# pipe = subprocess.run(call, shell=True)
# thread = threading.Thread(target=_run, args=(calls,))
# thread.start()
# return thread
class UnclickButtonOnPipeCompletion(QtCore.QThread):
def __init__(self, button, pipe):
QtCore.QThread.__init__(self)
# super().__init__(self)
self.button = button
self.pipe = pipe
self.should_continue = True
self.has_been_clicked = False
self.button.clicked.connect(self.get_click)
def __del__(self):
self.should_continue = False
@Slot(bool)
def get_click(self, value):
print('clicked')
self.has_been_clicked = True
def run(self):
while self.should_continue:
time.sleep(1)
if self.pipe.poll() is None:
pass
# print('still running...')
else:
if not self.has_been_clicked:
# print('ischecked: ', self.button.isChecked())
if self.button.isChecked():
# print('listener clicking button')
self.button.click()
break
class MainWindow(QtWidgets.QMainWindow):
def __init__(self):
super().__init__()
self.label = LabelImg(self)
self.label.initialize(behaviors=['background', 'itch', 'lick', 'scratch', 'shit', 'fuck', 'ass', 'bitch'],
n_timepoints=500, debug=True, fixed=False)
# self.label = LabelViewer()
# self.label.initialize(n=4, n_timepoints=40, debug=True, fixed=True)
# # self.labelImg = DebuggingDrawing()
#
next_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence('Right'), self)
next_shortcut.activated.connect(partial(self.label.label.change_view_dx, 1))
# next_shortcut.activated.connect(partial(self.label.change_view_dx, 1))
back_shortcut = QtWidgets.QShortcut(QtGui.QKeySequence('Left'), self)
back_shortcut.activated.connect(partial(self.label.label.change_view_dx, -1))
#
# if hasattr(self, 'label'):
# n = self.label.n
# else:
# n = 1
# self.toggle_shortcuts = []
# for i in range(n):
# self.toggle_shortcuts.append(QtWidgets.QShortcut(QtGui.QKeySequence(str(i)), self))
# self.toggle_shortcuts[i].activated.connect(partial(self.label.toggle_behavior, i))
# self.buttons = LabelButtons(behaviors = ['background', 'itch', 'scratch', 'poop'])
# back_shortcut.activated.connect(partial(self.labelImg.move_rect, -1))
# self.labelImg.make_debug(10)
self.setCentralWidget(self.label)
self.setMaximumHeight(480)
self.update()
def sizeHint(self):
return (QtCore.QSize(600, 600))
if __name__ == '__main__':
app = QtWidgets.QApplication([])
# volume = VideoPlayer(r'C:\DATA\mouse_reach_processed\M134_20141203_v001.h5')
testing = LabelImg()
testing.initialize(behaviors=['background', 'a', 'b', 'c', 'd', 'e'], n_timepoints=15000, debug=True)
# testing = ShouldRunInference(['M134_20141203_v001',
# 'M134_20141203_v002',
# 'M134_20141203_v004'],
# [True, True, False])
# testing = MainWindow()
# testing.setMaximumHeight(250)
testing.update()
testing.show()
app.exec_()
|
cluster.py
|
# Copyright (c) 2015-2020 Avere Systems, Inc. All Rights Reserved.
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in the project root for license information.
'''vFXT Cluster management
Cookbook/examples:
# A cluster is built with a service object (aws or gce)
service = vFXT.aws.Service() | vFXT.gce.Service()
# create a cluster
cluster = Cluster.create(service, ...)
# load from an existing, online cluster (queries xmlrpc)
cluster = Cluster.load(service, mgmt_ip='xxx', admin_password='xxx')
# offline with node instance ids provided
cluster = Cluster(service=service,
nodes=['node-1', 'node-2', 'node-3'],
admin_password='password',
mgmt_ip='10.10.10.10')
serializeme = cluster.export()
cluster = Cluster(service, **serializeme)
cluster.start()
cluster.stop()
cluster.restart()
cluster.destroy()
cluster.shelve()
cluster.unshelve()
cluster.is_on()
cluster.is_off()
cluster.is_shelved()
cluster.status()
cluster.wait_for_healthcheck()
cluster.wait_for_service_checks()
cluster.wait_for_cluster_activity()
cluster.wait_for_nodes_to_join()
cluster_cfg = cluster.cluster_config()
joincfg = cluster.cluster_config(joining=True)
cluster.in_use_addresses()
rpc = cluster.xmlrpc()
cluster.verify_license()
cluster.upgrade('http://path/to/armada.pkg')
# buckets
cluster.make_test_bucket(bucketname='unique_bucket', corefiler='cloudfiler')
# or
service.create_bucket('unique_bucket')
cluster.attach_bucket('cloudfiler', 'mypassword', 'unique_bucket')
cluster.add_vserver('vserver')
cluster.add_vserver_junction('vserver','cloudfiler')
# NFS filer
cluster.attach_corefiler('grapnel', 'grapnel.lab.avere.net')
cluster.add_vserver_junction('vserver', 'grapnel', path='/nfs', export='/vol/woodwardj')
# maint
cluster.enable_ha()
cluster.rebalance_directory_managers()
cluster.refresh()
cluster.reload()
# Full AWS example
cluster = Cluster.create(aws, 'r3.2xlarge', 'mycluster', 'PLACEHOLDER',
subnet='subnet-f99a618e',
placement_group='perf1',
wait_for_state='yellow')
try:
cluster.make_test_bucket(bucketname='mycluster-bucket', corefiler='aws')
cluster.add_vserver('vserver')
cluster.add_vserver_junction('vserver', 'aws')
except Exception as e:
cluster.destroy()
raise
'''
from builtins import range #pylint: disable=redefined-builtin
from future.utils import raise_from
import base64
import threading
import queue as Queue
import time
import logging
import uuid
import re
import socket
from xmlrpc.client import Fault as xmlrpclib_Fault
import math
import itertools
import vFXT.xmlrpcClt
from vFXT.serviceInstance import ServiceInstance
from vFXT.service import vFXTServiceFailure, vFXTConfigurationException, vFXTCreateFailure, vFXTStatusFailure, vFXTConnectionFailure, ServiceBase, validate_proxy
from vFXT.cidr import Cidr
log = logging.getLogger(__name__)
class Cluster(object): #pylint: disable=useless-object-inheritance
'''Cluster representation
Cluster composes the backend service object and performs all
operations through it or the XMLRPC client.
'''
CONFIGURATION_EXPIRATION = 1800
JOIN_CONFIGURATION_EXPIRATION = 7200
LICENSE_TIMEOUT = 120
def __init__(self, service, **options):
'''Constructor
The only required argument is the service backend.
To create a cluster, use Cluster.create()
To load a cluster, use Cluster.load()
Arguments:
service: the backend service
nodes ([], optional): optional list of node IDs
mgmt_ip (str, optional): management address
admin_password (str, optional): administration password
name (str, optional): cluster name
machine_type (str, optional): machine type of nodes in the cluster
mgmt_netmask (str, optional): netmask of management network
proxy_uri (str, optional): URI of proxy resource (e.g. http://user:pass@172.16.16.20:8080)
If called with mgmt_ip and admin_password, the cluster object will
query the management address and fill in all of the details required.
If called with just a list of node IDs, the cluster will lookup the
service instance backing objects associated with the node IDs.
This is handy for offline clusters.
'''
self.service = service
self.nodes = options.get('nodes', [])
self.mgmt_ip = options.get('mgmt_ip', None)
self.admin_password = options.get('admin_password', None)
self.name = options.get('name', None)
self.machine_type = options.get('machine_type', None)
self.mgmt_netmask = options.get('mgmt_netmask', None)
self.cluster_ip_start = options.get('cluster_ip_start', None)
self.cluster_ip_end = options.get('cluster_ip_end', None)
self.proxy = options.get('proxy_uri', None)
self.join_mgmt = True
self.trace_level = None
self.node_rename = True
self.first_node_error = None
self.timezone = None
self.instance_addresses = []
if self.proxy:
self.proxy = validate_proxy(self.proxy) # imported from vFXT.service
# we may be passed a list of instance IDs for offline clusters that we
# can't query
if self.service and self.nodes and all([not isinstance(i, ServiceInstance) for i in self.nodes]):
instances = []
for node_id in self.nodes:
log.debug("Loading node {}".format(node_id))
instance = service.get_instance(node_id)
if not instance:
raise vFXTConfigurationException("Unable to find instance {}".format(node_id))
instances.append(ServiceInstance(service=self.service, instance=instance))
self.nodes = instances
if self.mgmt_ip and self.admin_password and self.nodes and self.is_on():
# might as well if we can, otherwise use the load() constructor
self.load_cluster_information()
@classmethod
def create(cls, service, machine_type, name, admin_password, **options):
'''Create a cluster
Arguments:
service: the backend service
machine_type (str): service specific machine type
name (str): cluster name (used for all subsequent resource naming)
admin_password (str): administration password to assign to the cluster
wait_for_state (str, optional): red, yellow, green cluster state (defaults to yellow)
wait_for_state_duration (int, optional): number of seconds state must be maintained, defaults to 30
proxy_uri (str, optional): URI of proxy resource (e.g. http://user:pass@172.16.16.20:8080)
skip_cleanup (bool, optional): do not clean up on failure
management_address (str, optional): management address for the cluster
trace_level (str, optional): trace configuration
timezone (str, optional): Set cluster timezone
join_instance_address (bool, optional): Join cluster using instance rather than management address (defaults to True)
skip_node_renaming (bool, optional): Do not automatically configure and enforce node naming convention (defaults to False)
size (int, optional): size of cluster (node count), defaults to 3
root_image (str, optional): root disk image name
address_range_start (str, optional): The first of a custom range of addresses to use for the cluster
address_range_end (str, optional): The last of a custom range of addresses to use for the cluster
address_range_netmask (str, optional): cluster address range netmask
instance_addresses ([str], optional): list of instance IP addresses to assign to the cluster nodes
**options: passed to Service.create_cluster()
'''
c = cls(service)
c.admin_password = admin_password or '' # could be empty
c.machine_type = machine_type
c.name = name
c.proxy = options.get('proxy_uri', None)
c.trace_level = options.get('trace_level', None)
c.timezone = options.get('timezone', None)
c.join_mgmt = not options.get('join_instance_address', True)
if c.proxy:
c.proxy = validate_proxy(c.proxy) # imported from vFXT.service
if options.get('skip_node_renaming'):
c.node_rename = False
if not options.get('size'):
options['size'] = 3
cluster_size = int(options['size'])
if not name:
raise vFXTConfigurationException("A cluster name is required")
if not cls.valid_cluster_name(name):
raise vFXTConfigurationException("{} is not a valid cluster name".format(name))
if options.get('management_address'):
c.mgmt_ip = options.get('management_address')
if service.in_use_addresses('{}/32'.format(c.mgmt_ip)):
raise vFXTConfigurationException("The requested management address {} is already in use".format(c.mgmt_ip))
# Need to validate if instance_addresses passed in are already in use before creating the cluster
if options.get('instance_addresses'):
try:
already_in_use = []
for address in options['instance_addresses']:
if service.in_use_addresses('{}/32'.format(address)):
already_in_use.append(address)
if already_in_use:
raise vFXTConfigurationException("The requested instance addresses are already in use: {}".format(', '.join(already_in_use)))
if len(options['instance_addresses']) != cluster_size:
raise vFXTConfigurationException("Not enough instance addresses provided, require {}".format(cluster_size))
except vFXTConfigurationException:
raise
except Exception as e:
log.debug(e)
raise_from(vFXTConfigurationException("Invalid instance addresses: {}".format(options['instance_addresses'])), e)
c.instance_addresses = options['instance_addresses']
# determine how many addresses we need
instance_count = cluster_size if (service.ALLOCATE_INSTANCE_ADDRESSES and not c.instance_addresses) else 0
management_count = 0 if options.get('management_address') else 1
ip_count = cluster_size + instance_count + management_count
if all([options.get(_) for _ in ['address_range_start', 'address_range_end', 'address_range_netmask']]):
try:
already_in_use = []
cluster_range = Cidr.expand_address_range(options.get('address_range_start'), options.get('address_range_end'))
for address in cluster_range:
if c.service.in_use_addresses('{}/32'.format(address)):
already_in_use.append(address)
if already_in_use:
raise vFXTConfigurationException("The requested instance addresses are already in use: {}".format(', '.join(already_in_use)))
if len(cluster_range) < ip_count:
raise vFXTConfigurationException("Not enough addresses provided, require {}".format(ip_count))
log.debug("Using overrides for cluster management and address range")
if management_count:
c.mgmt_ip = cluster_range[0]
if instance_count:
c.instance_addresses = cluster_range[management_count:instance_count + management_count]
c.cluster_ip_start = cluster_range[management_count + instance_count]
c.cluster_ip_end = cluster_range[-1]
c.mgmt_netmask = options['address_range_netmask']
except vFXTConfigurationException:
raise
except Exception as e:
log.debug(e)
raise_from(vFXTConfigurationException("Invalid instance addresses: {}".format(options['instance_addresses'])), e)
else:
in_use_addresses = []
if c.mgmt_ip:
in_use_addresses.append(c.mgmt_ip)
if c.instance_addresses:
in_use_addresses.extend(c.instance_addresses)
avail, mask = service.get_available_addresses(count=ip_count, contiguous=True, in_use=in_use_addresses)
if management_count:
c.mgmt_ip = avail[0]
if instance_count:
c.instance_addresses = avail[management_count:instance_count + management_count]
c.cluster_ip_start = avail[management_count + instance_count]
c.cluster_ip_end = avail[-1]
c.mgmt_netmask = mask
# machine type is validated by service create_cluster
try:
service.create_cluster(c, **options)
if options.get('skip_configuration'):
return c
except KeyboardInterrupt:
if not options.get('skip_cleanup', False):
c.destroy(quick_destroy=True)
raise
try:
# any service specific instance checks should happen here... the checks
# might have to restart the nodes
c.wait_for_service_checks()
xmlrpc = c.xmlrpc()
retries = int(options.get('join_wait', 500 + (500 * math.log(len(c.nodes)))))
# should get all the nodes joined by now
c.allow_node_join(retries=retries, xmlrpc=xmlrpc)
c.wait_for_nodes_to_join(retries=retries, xmlrpc=xmlrpc)
c.allow_node_join(enable=False, retries=retries, xmlrpc=xmlrpc)
c.set_node_naming_policy(xmlrpc=xmlrpc)
if len(c.nodes) > 1:
c.enable_ha(xmlrpc=xmlrpc)
c.verify_license(xmlrpc=xmlrpc)
log.info("Waiting for cluster healthcheck")
c.wait_for_healthcheck(state=options.get('wait_for_state', 'yellow'),
duration=int(options.get('wait_for_state_duration', 30)), xmlrpc=xmlrpc)
except (KeyboardInterrupt, Exception) as e:
log.error("Cluster configuration failed: {}".format(e))
if not options.get('skip_cleanup', False):
c.destroy(quick_destroy=True)
else:
try:
c.telemetry()
except Exception as te:
log.debug(te)
raise_from(vFXTCreateFailure(e), e)
return c
def wait_for_healthcheck(self, state='green', retries=ServiceBase.WAIT_FOR_HEALTH_CHECKS, duration=1, conn_retries=1, xmlrpc=None):
'''Poll for cluster maxConditions
This requires the cluster to be on and be accessible via RPC
Arguments:
state (str='green'): red, yellow, green
retries (int, optional): number of retries
duration (int, optional): number of consecutive seconds condition was observed
conn_retries (int, optional): number of connection retries
xmlrpc (xmlrpcClt, optional): xmlrpc client
Sleeps Service.POLLTIME between each retry.
'''
retries = int(retries)
conn_retries = int(conn_retries)
duration = int(duration)
log.info("Waiting for healthcheck")
xmlrpc = self.xmlrpc(conn_retries) if xmlrpc is None else xmlrpc
start_time = int(time.time())
observed = 0 # observed time in the requested state
# cluster health check
acceptable_states = [state, 'green']
if state == 'red':
acceptable_states.append('yellow')
while True:
alertstats = {}
try:
alertstats = xmlrpc.cluster.maxActiveAlertSeverity()
except Exception as e:
log.debug("Ignoring cluster.maxActiveAlertSeverity() failure: {}".format(e))
xmlrpc = self.xmlrpc(conn_retries)
if 'maxCondition' in alertstats and alertstats['maxCondition'] in acceptable_states:
observed = int(time.time()) - start_time
if observed >= duration:
log.debug("{} for {}s({})... alertStats: {}".format(state, duration, observed, alertstats))
break
else:
observed = 0
start_time = int(time.time())
if retries % 10 == 0:
self._log_conditions(xmlrpc)
log.debug("Not {} for {}s({})... alertStats: {}".format(state, duration, observed, alertstats))
retries -= 1
if retries == 0:
alert_codes = []
try:
conditions = xmlrpc.alert.conditions()
alert_codes = [c['name'] for c in conditions if c['severity'] != state]
except Exception as e:
log.debug("Failed to get alert conditions: {}".format(e))
xmlrpc = self.xmlrpc(conn_retries)
if alert_codes:
raise vFXTStatusFailure("Healthcheck for state {} failed: {}".format(state, alert_codes))
raise vFXTStatusFailure("Healthcheck for state {} failed".format(state))
self._sleep()
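# Sketch: block until the cluster has reported green for 30 consecutive
# seconds (values illustrative; Cluster.create uses the same pattern):
#   cluster.wait_for_healthcheck(state='green', duration=30)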
@classmethod
def load(cls, service, mgmt_ip, admin_password):
'''Load an existing cluster over RPC
Arguments:
mgmt_ip (str): management address
admin_password (str): administration password
'''
cluster = cls(service)
cluster.mgmt_ip = mgmt_ip
cluster.admin_password = admin_password
cluster.load_cluster_information()
return cluster
def load_cluster_information(self):
'''Load cluster information through XMLRPC and the service backend
Raises: vFXTConfigurationException
'''
log.debug("Connecting to {} to load cluster data".format(self.mgmt_ip))
xmlrpc = self.xmlrpc()
cluster_data = self._xmlrpc_do(xmlrpc.cluster.get)
self.name = cluster_data['name']
self.mgmt_netmask = cluster_data['mgmtIP']['netmask']
expected_count = len(self._xmlrpc_do(xmlrpc.node.list))
log.debug("Loading {} nodes".format(self.name))
self.service.load_cluster_information(self)
if not self.nodes:
raise vFXTConfigurationException("No nodes found for cluster")
found_count = len(self.nodes)
if expected_count != found_count:
raise vFXTStatusFailure("Failed to load all {} nodes (found {})".format(expected_count, found_count))
def cluster_config(self, joining=False, expiration=CONFIGURATION_EXPIRATION, joining_expiration=JOIN_CONFIGURATION_EXPIRATION):
'''Return cluster configuration for master and slave nodes
Arguments:
joining (bool, optional): configuration for a joining node
expiration (int, optional): configuration expiration for a joining node
Raises: vFXTConfigurationException
'''
if joining:
expiry = str(int(time.time()) + (joining_expiration or self.JOIN_CONFIGURATION_EXPIRATION))
mgmt_ip = (self.nodes[0].ip() if self.nodes and not self.join_mgmt else self.mgmt_ip)
return '# cluster.cfg\n[basic]\njoin cluster={}\nexpiration={}\n'.format(mgmt_ip, expiry)
expiry = str(int(time.time()) + (expiration or self.CONFIGURATION_EXPIRATION))
dns_servs = self.service.get_dns_servers()
ntp_servs = self.service.get_ntp_servers()
router = self.service.get_default_router()
if not all([self.mgmt_ip, self.mgmt_netmask, self.cluster_ip_start, self.cluster_ip_end]):
raise vFXTConfigurationException("Management IP/Mask and the cluster IP range is required")
# generate config
config = '''# cluster.cfg''' \
'''\n[basic]''' \
'''\ncluster name={}''' \
'''\npassword={}''' \
'''\nexpiration={}''' \
'''\n[management network]''' \
'''\naddress={}''' \
'''\nnetmask={}''' \
'''\ndefault router={}''' \
'''\n[cluster network]''' \
'''\nfirst address={}''' \
'''\nlast address={}''' \
.format(self.name,
self.admin_password,
expiry,
self.mgmt_ip,
self.mgmt_netmask,
router,
self.cluster_ip_start,
self.cluster_ip_end)
config += '\n[dns]\n'
dns_count = len(dns_servs)
for idx in range(3):
v = dns_servs[idx] if idx < dns_count else ''
config += 'server{}={}\n'.format(idx + 1, v)
config += 'domain=\n'
config += '\n[ntp]\n'
ntp_count = len(ntp_servs)
for idx in range(3):
v = ntp_servs[idx] if idx < ntp_count else ''
config += 'server{}={}\n'.format(idx + 1, v)
return config
def verify_license(self, wait=LICENSE_TIMEOUT, xmlrpc=None):
'''Verify a license has been provisioned for the cluster
Arguments:
wait (int): time to wait in seconds for the license provisioning (default 60)
xmlrpc (xmlrpcClt, optional): xmlrpc client
Raises: vFXTConfigurationException
'''
if self.service.AUTO_LICENSE:
return
log.info('Waiting for FlashCloud licensing feature')
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
while wait > 0:
try:
licenses = xmlrpc.cluster.listLicenses()
if 'FlashCloud' in licenses['features']:
log.info('Feature FlashCloud enabled.')
return
except Exception as e:
log.debug(e)
if wait % 10 == 0:
log.debug('Waiting for the FlashCloud license feature to become enabled')
wait -= 1
self._sleep()
raise vFXTConfigurationException("Unable to verify cluster licensing")
def xmlrpc(self, retries=1, password=None):
'''Connect and return a new RPC connection object
Arguments:
retries (int, optional): number of retries
password (str, optional): defaults to the cluster admin_password
Raises: vFXTConnectionFailure
'''
addrs = []
if self.join_mgmt:
addrs.append(self.mgmt_ip)
if self.nodes:
addrs.append(self.nodes[0].ip())
if not addrs:
raise vFXTConfigurationException("No usable connection address for xmlrpc calls")
password = password or self.admin_password
if not password:
raise vFXTConnectionFailure("Unable to make remote API connection without a password")
while True:
# try our mgmt address or the first nodes instance address
for addr in addrs:
try:
xmlrpc = vFXT.xmlrpcClt.getXmlrpcClient("https://{}/cgi-bin/rpc2.py".format(addr), do_cert_checks=False)
xmlrpc('transport').user_agent = 'vFXT/{}'.format(vFXT.__version__)
xmlrpc.system.login(base64.b64encode('admin'.encode('utf-8')).decode(), base64.b64encode(password.encode('utf-8')).decode())
if addr != self.mgmt_ip and self.join_mgmt:
log.warning("Connected via instance address {} instead of management address {}".format(addr, self.mgmt_ip))
self._log_conditions(xmlrpc)
return xmlrpc
except Exception as e:
log.debug("Retrying failed XMLRPC connection to {}: {}".format(addr, e))
if retries == 0:
raise_from(vFXTConnectionFailure("Failed to make remote API connection: {}".format(e)), e)
retries -= 1
self._sleep()
def _xmlrpc_do(self, f, *args, **kwargs):
'''Run an xmlrpc function, retrying depending on the xmlrpc Fault
Arguments:
f (callable): rpc proxy function to call
*args: rpc arg list
**kwargs: rpc arg keywords
_xmlrpc_do_retries kwarg is special, defaults to XMLRPC_RETRIES
Retry errors include
100 AVERE_ERROR
102 AVERE_ENOENT
109 AVERE_EBUSY
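Example: self._xmlrpc_do(xmlrpc.cluster.get)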
'''
retry_errors = [100, 102, 109]
retries = kwargs.pop('_xmlrpc_do_retries', self.service.XMLRPC_RETRIES)
while True:
try:
return f(*args, **kwargs)
except xmlrpclib_Fault as e:
log.debug("avere xmlrpc failure: {}".format(e))
if retries == 0 or int(e.faultCode) not in retry_errors:
raise
except Exception as e:
log.debug("avere xmlrpc failure: {}".format(e))
if retries == 0:
raise
retries -= 1
self._sleep()
def _xmlrpc_wait_for_activity(self, activity, error_msg, retries=None):
'''Wait for a xmlrpc activity to complete
Arguments:
activity (str): cluster activity UUID
error_msg (str): Exception text on error
retries (int, optional): max retries, otherwise loops indefinitely
'''
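# some RPC calls return the string 'success' directly instead of an activity UUID; nothing to wait for in that case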
if activity == 'success':
return
xmlrpc = self.xmlrpc()
tries = 0
while True:
response = {}
try:
if xmlrpc is None:
xmlrpc = self.xmlrpc()
response = xmlrpc.cluster.getActivity(activity)
log.debug(response)
except Exception as e:
log.exception("Failed to get activity {}: {}".format(activity, e))
xmlrpc = None
if 'state' in response:
if response['state'] == 'success':
break
if response['state'] == 'failure':
err = '{}: {}'.format(error_msg, response.get('status', 'Unknown'))
raise vFXTConfigurationException(err)
if retries is not None:
if retries == 0:
err = '{}: Timed out while {}'.format(error_msg, response['status'])
raise vFXTConfigurationException(err)
retries -= 1
if tries % 10 == 0 and 'status' in response:
log.info(response['status'])
self._log_conditions(xmlrpc)
self._sleep()
tries += 1
def _enable_maintenance_api(self, xmlrpc):
response = self._xmlrpc_do(xmlrpc.system.enableAPI, 'maintenance')
if response != 'success':
raise vFXTConfigurationException('Failed to enable maintenance API')
@classmethod
def _log_conditions(cls, xmlrpc):
'''Debug log the conditions
This is useful when we are polling and want to show what is going
on with the cluster while we wait.
Arguments:
xmlrpc (xmlrpcClt): xmlrpc client
'''
if not log.isEnabledFor(logging.DEBUG):
return
try:
conditions = xmlrpc.alert.conditions()
log.debug("Current conditions: {}".format(conditions))
except Exception as e:
log.debug("Failed to get condition list: {}".format(e))
def telemetry(self, wait=True, retries=ServiceBase.WAIT_FOR_TELEMETRY, mode='gsimin'):
'''Kick off a minimal telemetry reporting
Arguments:
wait (bool, optional): wait until complete
retries (int, optional): number of retries to wait (if wait is disabled)
mode (str, optional): telemetry mode (valid from support.listNormalModes)
Raises vFXTStatusFailure on failure while waiting.
'''
if mode not in self.xmlrpc().support.listNormalModes()[0]:
raise vFXTConfigurationException("Invalid support mode {}".format(mode))
try:
log.info("Kicking off {} telemetry reporting.".format(mode))
response = self.xmlrpc().support.executeNormalMode('cluster', mode)
log.debug('{} response {}'.format(mode, response))
if not wait:
return
if response != 'success':
while True:
try:
is_done = self.xmlrpc().support.taskIsDone(response) # returns bool
if is_done:
break
except Exception as e:
log.debug("Error while checking for telemetry status: {}".format(e))
if retries % 10 == 0:
log.debug('Waiting for {} to complete'.format(response))
retries -= 1
if retries == 0:
raise vFXTConfigurationException("Time out waiting for telemetry upload to finish")
self._sleep()
except Exception as e:
log.debug("Telemetry failed: {}".format(e))
raise_from(vFXTStatusFailure('Telemetry failed: {}'.format(e)), e)
def upgrade_alternate_image(self, upgrade_url, retries=None):
'''Upgrade the cluster alternate image
Arguments:
upgrade_url (str): URL for armada package
retries (int, optional): retry count for switching active images
'''
retries = retries or int(500 + (500 * math.log(len(self.nodes))))
xmlrpc = self.xmlrpc()
cluster = self._xmlrpc_do(xmlrpc.cluster.get)
alt_image = cluster['alternateImage']
upgrade_status = self._xmlrpc_do(xmlrpc.cluster.upgradeStatus)
if not upgrade_status.get('allowDownload', False):
raise vFXTConfigurationException("Upgrade downloads are not allowed at this time")
# note any existing activities to skip
existing_activities = [a['id'] for a in self._xmlrpc_do(xmlrpc.cluster.listActivities)]
log.info("Fetching alternate image from {}".format(upgrade_url))
response = self._xmlrpc_do(xmlrpc.cluster.upgrade, upgrade_url)
if response != 'success':
raise vFXTConfigurationException("Failed to start upgrade download: {}".format(response))
op_retries = retries
while cluster['alternateImage'] == alt_image:
self._sleep()
try:
cluster = self._xmlrpc_do(xmlrpc.cluster.get)
activities = [act for act in self._xmlrpc_do(xmlrpc.cluster.listActivities)
if act['id'] not in existing_activities # skip existing
if act['process'] == 'Cluster upgrade' # look for cluster upgrade or download
or 'software download' in act['process']]
failures = [_ for _ in activities if 'failure' in _['state']]
if failures:
errmsg = ', '.join([': '.join([_['process'], _['status']]) for _ in failures])
raise vFXTConfigurationException("Failed to download upgrade image: {}".format(errmsg))
if op_retries % 10 == 0:
log.debug('Current activities: {}'.format(', '.join([act['status'] for act in activities])))
# check for double+ upgrade to same version
existing_ver_msg = 'Download {} complete'.format(alt_image)
if existing_ver_msg in [act['status'] for act in activities]:
log.debug("Redownloaded existing version")
break
except vFXTConfigurationException as e:
log.debug(e)
raise
except Exception as e:
if op_retries % 10 == 0:
log.debug("Retrying install check: {}".format(e))
op_retries -= 1
if op_retries == 0:
raise vFXTConnectionFailure("Timeout waiting for alternate image")
log.info("Updated alternate image to {}".format(cluster['alternateImage']))
def activate_alternate_image(self, retries=None, ha=True):
'''Activate the alternate image
Arguments:
retries (int, optional): retry count for switching active images, default is no retries
ha (bool, optional): do an HA upgrade, True
'''
cluster = self._xmlrpc_do(self.xmlrpc().cluster.get)
if cluster['alternateImage'] == cluster['activeImage']:
log.info("Skipping upgrade since this version is active")
return
alt_image = cluster['alternateImage']
if not ha: # if not HA, at least suspend the vservers
vservers = self._xmlrpc_do(self.xmlrpc().vserver.list)
for vserver in vservers:
log.info("Suspending vserver {} on cluster {}".format(vserver, cluster['name']))
activity = self._xmlrpc_do(self.xmlrpc().vserver.suspend, vserver)
self._xmlrpc_wait_for_activity(activity, "Failed to suspend vserver {}".format(vserver))
log.debug("Waiting for alternateImage to settle (FIXME)...")
self._sleep(15) # time to settle?
upgrade_status = self._xmlrpc_do(self.xmlrpc().cluster.upgradeStatus)
if not upgrade_status.get('allowActivate', False):
raise vFXTConfigurationException("Alternate image activation is not allowed at this time")
log.info("Activating alternate image")
response = self._xmlrpc_do(self.xmlrpc().cluster.activateAltImage, ha)
log.debug("activateAltImage response: {}".format(response))
existing_activities = [a['id'] for a in self._xmlrpc_do(self.xmlrpc().cluster.listActivities)]
log.debug("existing activities prior to upgrade: {}".format(existing_activities))
tries = 0
while cluster['activeImage'] != alt_image:
self._sleep()
try:
# we may end up with hung connections as our VIFs move...
def signal_handler(signum, stack):
log.debug("Signal handler for sig {}: {}".format(signum, stack))
raise vFXTConnectionFailure("Connection alarm raised")
import signal #pylint: disable=import-outside-toplevel
if hasattr(signal, 'alarm') and hasattr(signal, 'SIGALRM'):
signal.signal(signal.SIGALRM, signal_handler)
signal.alarm(60)
cluster = self._xmlrpc_do(self.xmlrpc().cluster.get)
activities = [act for act in self._xmlrpc_do(self.xmlrpc().cluster.listActivities)
if act['id'] not in existing_activities # skip existing
if act['process'] == 'Cluster upgrade' # look for cluster upgrade or activate
or 'software activate' in act['process']]
if 'failed' in [a['state'] for a in activities]:
raise vFXTConfigurationException("Failed to activate alternate image")
if tries % 10 == 0:
log.info('Waiting for active image to switch to {}'.format(alt_image))
activity_status = ', '.join([act['status'] for act in activities])
if activity_status:
log.debug('Current activities: {}'.format(activity_status))
tries += 1
except vFXTConfigurationException as e:
log.debug(e)
raise
except Exception as e:
log.debug("Retrying upgrade check: {}".format(e))
finally:
# reset SIGALRM handler
if hasattr(signal, 'alarm') and hasattr(signal, 'SIGALRM'):
signal.alarm(0)
signal.signal(signal.SIGALRM, signal.SIG_DFL)
if retries is not None:
retries -= 1
if retries == 0:
raise vFXTConnectionFailure("Timeout waiting for active image")
if not ha: # if not HA, we suspended the vservers.... undo here
vservers = self._xmlrpc_do(self.xmlrpc().vserver.list)
for vserver in vservers:
log.info("Unsuspending vserver {} on cluster {}".format(vserver, cluster['name']))
activity = self._xmlrpc_do(self.xmlrpc().vserver.unsuspend, vserver)
self._xmlrpc_wait_for_activity(activity, "Failed to unsuspend vserver {}".format(vserver))
log.info("Upgrade to {} complete".format(alt_image))
def upgrade(self, upgrade_url, retries=None, ha=True):
'''Upgrade a cluster from the provided URL
Arguments:
upgrade_url (str): URL for armada package
retries (int, optional): retry count for switching active images
ha (bool, optional): do an HA upgrade, True
Raises: vFXTConnectionFailure
'''
self.upgrade_alternate_image(upgrade_url, retries=retries)
self.activate_alternate_image(ha=ha, retries=retries)
def add_nodes(self, count=1, **options):
'''Add nodes to the cluster
This extends the address ranges of the cluster and all configured
vservers (if required) to accommodate the new nodes.
Arguments:
count (int, optional): number of nodes to add
skip_cleanup (bool, optional): do not clean up on failure
join_wait (int, optional): join wait time (defaults to wait_for_nodes_to_join default)
skip_node_renaming (bool optional): Do not automatically configure and enforce node naming convention (defaults to False)
address_range_start (str, optional): Specify the first of a custom range of addresses to use
address_range_end (str, optional): Specify the last of a custom range of addresses to use
address_range_netmask (str, optional): Specify the netmask of the custom address range to use
vserver_home_addresses (bool, optional): Update address home configuration for all vservers
**options: options to pass to the service backend
Raises: vFXTCreateFailure
On failure, undoes cluster and vserver configuration changes.
'''
self.reload()
log.info("Extending cluster {} by {}".format(self.name, count))
node_count = len(self.nodes)
if not node_count:
raise vFXTConfigurationException("Cannot add a node to an empty cluster")
self.service._add_cluster_nodes_setup(self, count, **options)
# check to see if we can add nodes with the current licensing information
xmlrpc = self.xmlrpc()
license_data = self._xmlrpc_do(xmlrpc.cluster.listLicenses)
licensed_count = int(license_data['maxNodes'])
if (node_count + count) > licensed_count:
msg = "Cannot expand cluster to {} nodes as the current licensed maximum is {}"
raise vFXTConfigurationException(msg.format(node_count + count, licensed_count))
cluster_data = self._xmlrpc_do(xmlrpc.cluster.get)
cluster_ips_per_node = int(cluster_data['clusterIPNumPerNode'])
vserver_count = len(self._xmlrpc_do(xmlrpc.vserver.list))
existing_vserver = self.in_use_addresses('vserver', xmlrpc=xmlrpc)
existing_cluster = self.in_use_addresses('cluster', xmlrpc=xmlrpc)
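# each node needs one client-facing address per vserver plus clusterIPNumPerNode cluster addresses;
# compute how many more of each we must allocate beyond what is already configured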
need_vserver = ((node_count + count) * vserver_count) - len(existing_vserver)
need_cluster = ((node_count + count) * cluster_ips_per_node) - len(existing_cluster)
need_cluster = need_cluster if need_cluster > 0 else 0
need_vserver = need_vserver if need_vserver > 0 else 0
need_instance = count if self.service.ALLOCATE_INSTANCE_ADDRESSES else 0
in_use_addrs = self.in_use_addresses(xmlrpc=xmlrpc)
if options.get('instance_addresses'):
# check that the instance addresses are not already used by the cluster
try:
existing = []
for address in options['instance_addresses']:
if address in in_use_addrs:
existing.append(address)
else:
# otherwise we should note our intent to use it
in_use_addrs.append(address)
# also check if another instance is using the address
if self.service.in_use_addresses('{}/32'.format(address)):
existing.append(address)
if existing:
raise vFXTConfigurationException("Instance addresses are already in use: {}".format(existing))
if len(options['instance_addresses']) < count:
raise vFXTConfigurationException("Not enough instance addresses provided, require {}".format(count))
except vFXTConfigurationException:
raise
except Exception as e:
log.debug(e)
raise_from(vFXTConfigurationException("Invalid instance addresses: {}".format(options['instance_addresses'])), e)
need_instance = 0
added = [] # cluster and vserver extensions (for undo)
ip_count = need_vserver + need_cluster + need_instance
if ip_count > 0: # if we need more, extend ourselves
custom_ip_config_reqs = ['address_range_start', 'address_range_end', 'address_range_netmask']
if all([options.get(_) for _ in custom_ip_config_reqs]):
avail_ips = Cidr.expand_address_range(options.get('address_range_start'), options.get('address_range_end'))
mask = options.get('address_range_netmask')
if len(avail_ips) < ip_count:
raise vFXTConfigurationException("Not enough addresses provided, require {}".format(ip_count))
if any([_ in in_use_addrs for _ in avail_ips]):
raise vFXTConfigurationException("Specified address range conflicts with existing cluster addresses")
existing = []
for address in avail_ips:
if self.service.in_use_addresses('{}/32'.format(address)):
existing.append(address)
if existing:
raise vFXTConfigurationException("Cluster addresses are already in use: {}".format(existing))
else:
avail_ips, mask = self.service.get_available_addresses(count=ip_count, contiguous=True, in_use=in_use_addrs)
if need_instance:
options['instance_addresses'] = avail_ips[0:need_instance]
del avail_ips[0:need_instance]
if need_cluster > 0:
addresses = avail_ips[0:need_cluster]
del avail_ips[0:need_cluster]
body = {'firstIP': addresses[0], 'netmask': mask, 'lastIP': addresses[-1]}
log.info("Extending cluster address range by {}".format(need_cluster))
log.debug("{}".format(body))
activity = self._xmlrpc_do(xmlrpc.cluster.addClusterIPs, body)
self._xmlrpc_wait_for_activity(activity, "Failed to extend cluster addresses")
added.append({'cluster': body})
if need_vserver > 0:
for vserver in self._xmlrpc_do(xmlrpc.vserver.list):
v_len = len([a for r in self._xmlrpc_do(xmlrpc.vserver.get, vserver)[vserver]['clientFacingIPs']
for a in range(Cidr.from_address(r['firstIP']), Cidr.from_address(r['lastIP']) + 1)])
to_add = (node_count + count) - v_len
if to_add < 1:
continue
addresses = avail_ips[0:to_add]
del avail_ips[0:to_add]
body = {'firstIP': addresses[0], 'netmask': mask, 'lastIP': addresses[-1]}
log.info("Extending vserver {} address range by {}".format(vserver, need_vserver))
log.debug("{}".format(body))
activity = self._xmlrpc_do(xmlrpc.vserver.addClientIPs, vserver, body)
self._xmlrpc_wait_for_activity(activity, "Failed to extend vserver {} addresses".format(vserver))
added.append({'vserver': body})
# now add the node(s)
try:
self.service.add_cluster_nodes(self, count, **options)
self.wait_for_service_checks()
# bookkeeping... may have to wait for a node to update image
wait = int(options.get('join_wait', 500 + (500 * math.log(count))))
self.allow_node_join(retries=wait)
self.wait_for_nodes_to_join(retries=wait)
self.allow_node_join(enable=False, retries=wait)
self.refresh()
self.enable_ha()
if not options.get('skip_node_renaming'):
self.set_node_naming_policy()
if options.get('vserver_home_addresses'):
self.vserver_home_addresses()
except (KeyboardInterrupt, Exception) as e:
log.error(e)
if options.get('skip_cleanup', False):
try:
self.telemetry()
except Exception as te:
log.debug(te)
raise_from(vFXTCreateFailure(e), e)
log.info("Undoing configuration changes for node addition")
# our current list
expected_nodes = [n.id() for n in self.nodes]
# refresh and get what the cluster sees
self.service.load_cluster_information(self)
joined_nodes = [n.id() for n in self.nodes]
# find the difference
unjoined = list(set(expected_nodes) ^ set(joined_nodes))
unjoined_nodes = [ServiceInstance(self.service, i) for i in unjoined]
# exclude those in the middle of joining
joining_node_addresses = [_['address'] for _ in self._xmlrpc_do(self.xmlrpc().node.listUnconfiguredNodes) if 'joining' in _['status']]
unjoined_nodes = [_ for _ in unjoined_nodes if _.ip() not in joining_node_addresses]
# destroy the difference
if unjoined_nodes:
try:
self.parallel_call(unjoined_nodes, 'destroy')
except Exception as destroy_e:
log.error('Failed to undo configuration: {}'.format(destroy_e))
# if we added no nodes successfully, clean up addresses added
none_joined = len(unjoined) == count
nothing_created = node_count == len(joined_nodes)
if none_joined or nothing_created:
for a in added:
if 'vserver' in a:
a = a['vserver']
for vserver in self._xmlrpc_do(self.xmlrpc().vserver.list):
for r in self._xmlrpc_do(self.xmlrpc().vserver.get, vserver)[vserver]['clientFacingIPs']:
if r['firstIP'] == a['firstIP'] and r['lastIP'] == a['lastIP']:
log.debug("Removing vserver range {}".format(r))
activity = self._xmlrpc_do(self.xmlrpc().vserver.removeClientIPs, vserver, r['name'])
try:
self._xmlrpc_wait_for_activity(activity, "Failed to undo vserver extension")
except Exception as e:
log.error(e)
if 'cluster' in a:
a = a['cluster']
for r in self._xmlrpc_do(self.xmlrpc().cluster.get)['clusterIPs']:
if r['firstIP'] == a['firstIP'] and r['lastIP'] == a['lastIP']:
log.debug("Removing cluster range {}".format(r))
try:
activity = self._xmlrpc_do(self.xmlrpc().cluster.removeClusterIPs, r['name'])
self._xmlrpc_wait_for_activity(activity, "Failed to undo cluster extension")
except Exception as e:
log.error(e)
raise_from(vFXTCreateFailure(e), e)
def parallel_call(self, serviceinstances, method, **options):
'''Run the named method across all nodes
A thread is spawned to run the method for each instance.
Arguments:
serviceinstances [ServiceInstance]: list of ServiceInstance objects
method (str): method to call on each ServiceInstance
Raises: vFXTServiceFailure
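Example (from this module): self.parallel_call(self.nodes, 'stop')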
'''
threads = []
failq = Queue.Queue()
def thread_cb(service, instance_id, q):
'''thread callback'''
try:
# create the instance within the thread, retry initial load prior to calling the method
retries = service.CLOUD_API_RETRIES
while True:
try:
instance = ServiceInstance(service=service, instance_id=instance_id)
break
except Exception:
if retries == 0:
raise
retries -= 1
instance.__getattribute__(method)(**options)
except Exception as e:
log.error("Failed to {} {}: {}".format(method, instance_id, e))
if log.isEnabledFor(logging.DEBUG):
log.exception(e)
q.put(("Failed to {} instance {}".format(method, instance_id), e))
for si in serviceinstances:
t = threading.Thread(target=thread_cb, args=(si.service, si.instance_id, failq,))
t.daemon = True
t.start()
threads.append(t)
for t in threads:
t.join()
failed = []
while True:
try:
failed.append(failq.get_nowait())
except Queue.Empty:
break
if failed:
raise vFXTServiceFailure(failed)
def start(self):
'''Start all nodes in the cluster'''
self.parallel_call(self.nodes, 'start')
self.refresh()
def can_stop(self):
'''Some configurations cannot be stopped. Check if this is one.
'''
return all([_.can_stop() for _ in self.nodes])
def stop(self, clean_stop=True, retries=ServiceBase.WAIT_FOR_STOP):
'''Stop all nodes in the cluster
Arguments:
clean_stop (bool, optional): Issues cluster powerdown first (defaults to True)
retries (int, optional): number of retries (default 600)
'''
# we might be only a collection of nodes... make sure we have mgmt ip,
# password, etc... if so we power down the cluster before calling the
# service backend stop.
if clean_stop and (self.admin_password and self.nodes and self.is_on()):
# if we don't have the mgmt ip, use node1
if not self.mgmt_ip:
self.mgmt_ip = self.nodes[0].ip()
if not all([_.can_stop() for _ in self.nodes]):
raise vFXTConfigurationException("Node configuration prevents them from being stopped")
log.info("Powering down the cluster")
response = self._xmlrpc_do(self.xmlrpc().cluster.powerdown)
if response != 'success':
raise vFXTStatusFailure("Failed to power down the cluster: {}".format(response))
log.info("Waiting for cluster to go offline")
while self.is_on():
self._sleep()
self.refresh()
retries -= 1
if retries == 0:
raise vFXTStatusFailure("Timed out waiting for the cluster to go offline")
self.parallel_call(self.nodes, 'stop')
self.refresh()
def restart(self):
'''Calls stop and then start'''
self.stop()
self.start()
def destroy(self, **options):
'''Destroy the cluster
Arguments:
quick_destroy (bool, optional) skip cleanup steps that prevent data loss (defaults to False)
**options: passed to ServiceInstance.destroy()
'''
quick_destroy = options.pop('quick_destroy', False)
if not quick_destroy and self.is_on() and self.admin_password:
xmlrpc = self.xmlrpc()
cluster_name = self.name or 'unknown'
corefilers = {k: v for _ in self._xmlrpc_do(xmlrpc.corefiler.list) for k, v in self._xmlrpc_do(xmlrpc.corefiler.get, _).items()}
if corefilers:
# remove all junctions
for vserver in self._xmlrpc_do(xmlrpc.vserver.list):
log.info("Suspending vserver {} on cluster {}".format(vserver, cluster_name))
activity = self._xmlrpc_do(xmlrpc.vserver.suspend, vserver)
self._xmlrpc_wait_for_activity(activity, "Failed to suspend vserver {}".format(vserver))
for junction in self._xmlrpc_do(xmlrpc.vserver.listJunctions, vserver):
log.info("Removing junction {} from vserver {} on cluster {}".format(junction['path'], vserver, cluster_name))
activity = self._xmlrpc_do(xmlrpc.vserver.removeJunction, vserver, junction['path'])
self._xmlrpc_wait_for_activity(activity, "Failed to remove junction {} from vserver {}".format(junction['path'], vserver))
for corefiler, data in corefilers.items():
# try and call corefiler.flush, note this will raise vFXTConfigurationException
# on error... That will bubble up and prevent the rest of the destroy from
# completing
if data['type'] == 'cloud':
self.flush_corefiler(corefiler)
# otherwise remove corefilers to force a flush
log.info("Removing corefiler {} on cluster {}".format(corefiler, cluster_name))
self.remove_corefiler(corefiler)
if self.service.STOP_BEFORE_DELETE and not quick_destroy:
self.stop()
self.parallel_call(self.nodes, 'destroy', **options)
# any post destroy cleanup activities that may be remaining
self.service.post_destroy_cluster(self)
def shelve(self, **options):
'''Shelve all nodes in the cluster'''
# if we can make rpc calls, try to use maint.setShelve()
if not self.admin_password or not (self.nodes and self.is_on()):
raise vFXTConfigurationException('Unable to shelve cluster without xmlrpc connectivity')
# if we don't have the mgmt ip, use node1
if not self.mgmt_ip:
self.mgmt_ip = self.nodes[0].ip()
if not all([_.can_shelve() for _ in self.nodes]):
raise vFXTConfigurationException("Node configuration prevents them from being shelved")
try:
xmlrpc = self.xmlrpc()
corefilers = xmlrpc.corefiler.list()
if corefilers:
self._enable_maintenance_api(xmlrpc)
activity = self._xmlrpc_do(xmlrpc.maint.suspendAccess)
self._xmlrpc_wait_for_activity(activity, "Failed to suspend access", retries=self.service.WAIT_FOR_SUCCESS)
for corefiler in corefilers:
log.debug("Flushing corefiler {}".format(corefiler))
self.flush_corefiler(corefiler)
except xmlrpclib_Fault as e:
if int(e.faultCode) != 108: # Method not supported
log.debug("Failed to flush corefilers: {}".format(e))
raise vFXTConfigurationException(e)
except Exception as e:
log.debug("Failed to flush corefilers: {}".format(e))
raise
try:
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
response = self._xmlrpc_do(xmlrpc.maint.setShelve)
if response != 'success':
raise vFXTConfigurationException('Failed to notify cluster of intent to shelve')
log.debug('Called maint.setShelve()')
except xmlrpclib_Fault as e:
if int(e.faultCode) != 108: # Method maint.setShelve not supported
raise
log.debug('maint.setShelve not supported in this release')
self.stop(clean_stop=options.get('clean_stop', True))
self.parallel_call(self.nodes, 'shelve', **options)
self.refresh()
def unshelve(self, **options):
'''Unshelve all nodes in the cluster'''
self.parallel_call(self.nodes, 'unshelve', **options)
self.refresh()
# we might be only a collection of nodes... make sure we have mgmt ip,
# password, etc... if so we wait at least until we have api connectivity
if self.mgmt_ip and self.admin_password and self.nodes and self.is_on():
self.wait_for_healthcheck(state='red', duration=1, conn_retries=ServiceBase.WAIT_FOR_SUCCESS)
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
activity = self._xmlrpc_do(xmlrpc.maint.unsuspendAccess)
self._xmlrpc_wait_for_activity(activity, "Failed to unsuspend access", retries=self.service.WAIT_FOR_SUCCESS)
def is_on(self):
'''Returns true if all nodes are on'''
if self.nodes:
return all(i.is_on() for i in self.nodes)
return False
def is_off(self):
'''Returns true if all nodes are off'''
if self.nodes:
return all(i.is_off() for i in self.nodes)
return False
def is_shelved(self):
'''Returns true if all nodes are shelved'''
if self.is_off():
return all([n.is_shelved() for n in self.nodes])
else:
return False
def status(self):
'''Returns a list of node id:status'''
return [{n.id(): n.status()} for n in self.nodes]
def wait_for_service_checks(self):
'''Wait for Service checks to complete for all nodes
This may not be available for all backends and thus may be a noop.
'''
self.parallel_call(self.nodes, 'wait_for_service_checks')
def make_test_bucket(self, bucketname=None, corefiler=None, proxy=None, remove_on_fail=False, **options):
'''Create a test bucket for the cluster
Convenience wrapper function for testing. Calls create_bucket()
and then attach_bucket().
Arguments:
bucketname (str, optional): name of bucket or one is generated
corefiler (str, optional): name of corefiler or bucketname
proxy (str, optional): proxy configuration to use
remove_on_fail (bool, optional): remove the corefiler if the configuration does not finish
tags (dict, optional): tags with key/value labels to apply to the bucket (if supported)
**options: passed through to service.create_bucket and cluster.attach_bucket
Returns:
key (dict): encryption key for the bucket as returned from attach_bucket
'''
bucketname = bucketname or "{}-{}".format(self.name, str(uuid.uuid4()).lower().replace('-', ''))[0:63]
corefiler = corefiler or bucketname
self.service.create_bucket(bucketname, **options)
log.info("Created cloud storage {} ".format(bucketname))
return self.attach_bucket(corefiler, bucketname, proxy=proxy, remove_on_fail=remove_on_fail, **options)
def attach_bucket(self, corefiler, bucketname, master_password=None, credential=None, proxy=None, **options):
'''Attach a named bucket as core filer
Arguments:
corefiler (str): name of the corefiler to create
bucketname (str): name of existing bucket to attach
master_password (str, optional): otherwise cluster admin password is used
credential (str, optional): cloud credential or one is created or reused by the backing service
proxy (str, optional): proxy configuration to use
type (str, optional): type of corefiler (default 'cloud')
cloud_type (str, optional): cloud type (default 's3')
s3_type (str, optional): S3 type (default Service.S3TYPE_NAME)
https (str, optional): 'yes' or 'no' to use HTTPS (default 'yes')
crypto_mode (str, optional): crypto mode (default CBC-AES-256-HMAC-SHA-512)
compress_mode (str, optional): compression mode (default LZ4)
https_verify_mode (str, optional): DISABLED, OCSP, CRL, or OCSP_CRL
remove_on_fail (bool, optional): remove the corefiler if the configuration does not finish
existing_data (bool, optional): the bucket has existing data in it (defaults to False)
Returns:
key (dict): encryption key for the bucket if encryption is enabled
Raises: vFXTConfigurationException
'''
xmlrpc = self.xmlrpc()
if corefiler in self._xmlrpc_do(xmlrpc.corefiler.list):
raise vFXTConfigurationException("Corefiler {} exists".format(corefiler))
if not credential:
log.debug("Looking up credential as none was specified")
credential = self.service.authorize_bucket(self, bucketname, xmlrpc=xmlrpc)
log.debug("Using credential {}".format(credential))
# set proxy if provided
if not proxy:
if self.proxy:
proxy = self.proxy.hostname
data = {
'type': options.get('type') or 'cloud',
'cloudType': options.get('cloud_type') or self.service.COREFILER_TYPE,
'bucket': bucketname,
'cloudCredential': credential,
'https': options.get('https') or 'yes',
'sslVerifyMode': options.get('https_verify_mode') or 'OCSP_CRL',
'compressMode': options.get('compress_mode') or 'LZ4',
'cryptoMode': options.get('crypto_mode') or 'CBC-AES-256-HMAC-SHA-512',
'proxy': proxy or '',
'bucketContents': 'used' if options.get('existing_data', False) else 'empty',
}
if options.get('serverName'):
data['serverName'] = options.get('serverName')
if data['cloudType'] == 's3':
data['s3Type'] = options.get('s3_type') or self.service.S3TYPE_NAME
log.info("Creating corefiler {}".format(corefiler))
log.debug("corefiler.createCloudFiler options {}".format(data))
activity = None
retries = self.LICENSE_TIMEOUT
while True:
try:
activity = xmlrpc.corefiler.createCloudFiler(corefiler, data)
break
except xmlrpclib_Fault as e:
# These errors are non-fatal:
# This cluster is not licensed for cloud core filers. A FlashCloud license is required.
# Cannot modify while a group of nodes is joining
allowed_errors = ['a group of nodes is joining', 'A FlashCloud license is required']
if not any([_ in e.faultString for _ in allowed_errors]):
raise
log.debug("Waiting for error to clear: {}".format(e))
if retries == 0:
raise
retries -= 1
self._sleep()
self._xmlrpc_wait_for_activity(activity, "Failed to create corefiler {}".format(corefiler), retries=self.service.WAIT_FOR_SUCCESS)
def _cleanup():
# try and remove it
if options.get('remove_on_fail'):
try:
self.remove_corefiler(corefiler)
except Exception as e:
log.error("Failed to remove corefiler {}: {}".format(corefiler, e))
# we have to wait for the corefiler to show up... may be blocked by other things
# going on after corefiler.createCloudFiler completes.
retries = self.service.WAIT_FOR_SUCCESS
while True:
try:
if corefiler in xmlrpc.corefiler.list():
break
except xmlrpclib_Fault as xfe:
log.debug(xfe)
xmlrpc = self.xmlrpc()
log.debug("Waiting for corefiler to show up")
if retries == 0:
_cleanup()
raise vFXTConfigurationException('Failed to create corefiler {}: Not found'.format(corefiler))
if retries % 10 == 0:
self._log_conditions(xmlrpc)
retries -= 1
self._sleep()
if options.get('crypto_mode') != 'DISABLED':
if not master_password:
log.info("Generating master key for {} using the admin pass phrase".format(corefiler))
master_password = self.admin_password
else:
log.info("Generating master key for {} using the specified pass phrase".format(corefiler))
retries = self.service.XMLRPC_RETRIES
while True:
try:
key = xmlrpc.corefiler.generateMasterKey(corefiler, master_password)
if 'keyId' in key and 'recoveryFile' in key:
break
except Exception as e:
log.debug(e)
if retries == 0:
_cleanup()
raise vFXTConfigurationException('Failed to generate master key for {}: {}'.format(corefiler, e))
retries -= 1
self._sleep()
log.info("Activating master key {} (signature {}) for {}".format(key['keyId'], key['signature'], corefiler))
response = self._xmlrpc_do(xmlrpc.corefiler.activateMasterKey, corefiler, key['keyId'], key['recoveryFile'])
if response != 'success':
_cleanup()
raise vFXTConfigurationException('Failed to activate master key for {}: {}'.format(corefiler, response))
return key
def attach_corefiler(self, corefiler, networkname, **options):
'''Attach a Corefiler
Arguments:
corefiler (str): name of the corefiler to create
networkname (str): network reachable name/address of the filer
retries (int, optional): defaults to ServiceBase.WAIT_FOR_SUCCESS
remove_on_fail (bool, optional): remove if any post create check fails
ignore_warnings (bool, optional): ignore warnings during create, defaults to False
nfs_type (str, optional): specify the type of the NFS server
nfs_type can be one of:
NetappNonClustered
NetappClustered
EmcIsilon
Other (default)
Raises: vFXTConfigurationException
'''
if corefiler in self._xmlrpc_do(self.xmlrpc().corefiler.list):
raise vFXTConfigurationException("Corefiler {} exists".format(corefiler))
try:
socket.gethostbyname(networkname)
except Exception as e:
raise vFXTConfigurationException("Unknown host {}: {}".format(corefiler, e))
ignore_warnings = options.get('ignore_warnings') or False
create_options = {
'filerClass': options.get('nfs_type') or 'Other'
}
log.info("Creating corefiler {}".format(corefiler))
activity = self._xmlrpc_do(self.xmlrpc().corefiler.create, corefiler, networkname, ignore_warnings, create_options)
self._xmlrpc_wait_for_activity(activity, "Failed to create corefiler {}".format(corefiler), retries=self.service.WAIT_FOR_SUCCESS)
# we have to wait for the corefiler to show up... may be blocked by other things
# going on after corefiler.create completes.
retries = options.get('retries') or self.service.WAIT_FOR_SUCCESS
xmlrpc = self.xmlrpc()
while True:
try:
if corefiler in xmlrpc.corefiler.list():
break
except Exception: pass
log.debug("Waiting for corefiler to show up")
if retries == 0:
if options.get('remove_on_fail'):
try:
self.remove_corefiler(corefiler)
except Exception as e:
log.error("Failed to remove corefiler {}: {}".format(corefiler, e))
raise vFXTConfigurationException('Failed to create corefiler {}'.format(corefiler))
if retries % 10 == 0:
self._log_conditions(xmlrpc)
retries -= 1
self._sleep()
def remove_corefiler(self, corefiler):
'''Remove a corefiler
Arguments:
corefiler (str): the name of the corefiler
Raises vFXTConfigurationException
'''
try:
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
activity = self._xmlrpc_do(xmlrpc.corefiler.remove, corefiler)
self._xmlrpc_wait_for_activity(activity, "Failed to remove corefiler {}".format(corefiler))
except vFXTConfigurationException as e:
log.debug(e)
raise
except Exception as e:
raise vFXTConfigurationException(e)
def flush_corefiler(self, corefiler):
'''Flush a corefiler
Arguments:
corefiler (str): the name of the corefiler
Raises vFXTConfigurationException
'''
try:
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
activity = self._xmlrpc_do(xmlrpc.corefiler.flush, corefiler)
self._xmlrpc_wait_for_activity(activity, "Failed to flush corefiler {}".format(corefiler))
except xmlrpclib_Fault as e:
if int(e.faultCode) != 108: # Method not supported
raise vFXTConfigurationException(e)
except Exception as e:
raise vFXTConfigurationException(e)
def add_vserver(self, name, size=0, netmask=None, start_address=None, end_address=None, home_addresses=False, retries=ServiceBase.WAIT_FOR_OPERATION):
'''Add a Vserver
Arguments:
name (str): name of the vserver
size (int, optional): size of the vserver address range (defaults to cluster size)
netmask (str, optional): Network mask for the vserver range
start_address (str, optional): Starting network address for the vserver range
end_address (str, optional): Ending network address for the vserver range
retries (int, optional): number of retries
Calling with netmask, start_address, and end_address will define the vserver with
those values.
Otherwise, calling with or without a size leads to the addresses being determined via
get_available_addresses().
'''
if name in self._xmlrpc_do(self.xmlrpc().vserver.list):
raise vFXTConfigurationException("Vserver '{}' exists".format(name))
if not all([netmask, start_address, end_address]):
if any([netmask, start_address, end_address]):
log.warning("Ignoring address configuration because missing one of {}(start), {}(end), or {}(netmask)".format(start_address, end_address, netmask))
in_use_addrs = self.in_use_addresses()
vserver_ips, netmask = self.service.get_available_addresses(count=size or len(self.nodes), contiguous=True, in_use=in_use_addrs)
start_address = vserver_ips[0]
end_address = vserver_ips[-1]
else:
# Validate
vserver_ips = Cidr.expand_address_range(start_address, end_address)
if len(vserver_ips) < len(self.nodes):
log.warning("Adding vserver address range without enough addresses for all nodes")
log.info("Creating vserver {} ({}-{}/{})".format(name, start_address, end_address, netmask))
activity = self._xmlrpc_do(self.xmlrpc().vserver.create, name, {'firstIP': start_address, 'lastIP': end_address, 'netmask': netmask})
self._xmlrpc_wait_for_activity(activity, "Failed to create vserver {}".format(name), retries=retries)
# wait for vserver to become available
vserver_retries = retries
log.debug("Waiting for vserver '{}' to show up".format(name))
while True:
try:
if name in self._xmlrpc_do(self.xmlrpc().vserver.list):
break
if vserver_retries % 10 == 0:
log.debug("{} not yet configured".format(name))
except Exception as e:
log.debug(e)
vserver_retries -= 1
if vserver_retries == 0:
raise vFXTConfigurationException("Timed out waiting for vserver '{}' to show up.".format(name))
self._sleep()
if home_addresses:
self.vserver_home_addresses(name)
def add_vserver_junction(self, vserver, corefiler, path=None, export='/', subdir=None, retries=ServiceBase.EXTENDED_XMLRPC_RETRIES):
'''Add a Junction to a Vserver
Arguments:
vserver (str): name of the vserver
corefiler (str): name of the corefiler
path (str, optional): path of the junction (default /{corefiler})
export (str, optional): export path (default /)
subdir (str, optional): subdirectory within the export
retries (int, optional): number of retries
Raises: vFXTConfigurationException
'''
if not path:
path = '/{}'.format(corefiler)
if not path.startswith('/'):
#raise vFXTConfigurationException("Junction path must start with /: {}".format(path))
path = '/{}'.format(path)
advanced = {}
if subdir:
advanced['subdir'] = subdir
log.info("Waiting for corefiler exports to show up")
op_retries = self.service.WAIT_FOR_SUCCESS
while True:
try:
exports = self._xmlrpc_do(self.xmlrpc().nfs.listExports, vserver, corefiler)
if exports:
break
except Exception as e:
log.debug(e)
if op_retries == 0:
raise vFXTConfigurationException("Timed out waiting for {} exports".format(corefiler))
if op_retries % 10 == 0:
self._log_conditions(self.xmlrpc())
op_retries -= 1
self._sleep()
log.info("Creating junction {} to {} for vserver {}".format(path, corefiler, vserver))
try:
activity = self._xmlrpc_do(self.xmlrpc().vserver.addJunction, vserver, path, corefiler, export, advanced, _xmlrpc_do_retries=retries)
self._xmlrpc_wait_for_activity(activity, "Failed to add junction to {}".format(vserver))
except Exception as e:
raise vFXTConfigurationException("Failed to add junction to {}: {}".format(vserver, e))
log.debug("Junctioned vserver {} with corefiler {} (path {}, export {})".format(vserver, corefiler, path, export))
def wait_for_nodes_to_join(self, retries=ServiceBase.WAIT_FOR_HEALTH_CHECKS, xmlrpc=None):
'''Check that the cluster configuration includes all of the nodes tracked
by this object, waiting until they have all joined
Arguments:
retries (int): number of retries (default 600)
xmlrpc (xmlrpcClt, optional): xmlrpc client
Raises: vFXTConfigurationException
'''
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
expected = len(self.nodes)
if expected > len(self._xmlrpc_do(xmlrpc.node.list)):
log.info("Waiting for all nodes to join")
start_time = int(time.time())
node_addresses = [n.ip() for n in self.nodes]
while True:
found = 1 # have to find at least one node
try:
found = len(self._xmlrpc_do(xmlrpc.node.list))
if expected == found:
log.debug("Found {}".format(found))
break
except Exception as e:
log.debug("Error getting node list: {}".format(e))
try:
# if nodes are upgrading, delay the retries.. unjoined node status include:
# 'joining: started'
# 'joining: almost done'
# 'joining: upgrade the image'
# 'joining: switch to the new image'
unjoined_status = [_['status'] for _ in self._xmlrpc_do(xmlrpc.node.listUnconfiguredNodes) if _['address'] in node_addresses]
if any(['image' in _ for _ in unjoined_status]):
log.debug("Waiting for image upgrade to finish: {}".format(unjoined_status))
start_time = int(time.time())
continue
except Exception as e:
log.debug("Failed to check unconfigured node status: {}".format(e))
# for connectivity problems... we end up waiting a long time for
# timeouts on the xmlrpc connection... so if we are taking too long
# we should bail
duration = int(time.time()) - start_time
taking_too_long = duration > int(retries * 1.5)
if retries == 0 or taking_too_long:
diff = expected - found
raise vFXTConfigurationException("Timed out waiting for {} node(s) to join.".format(diff))
retries -= 1
if retries % 10 == 0:
log.debug("Found {}, expected {}".format(found, expected))
self._log_conditions(xmlrpc=xmlrpc)
self._sleep()
log.info("All nodes have joined the cluster.")
def enable_ha(self, retries=ServiceBase.XMLRPC_RETRIES, xmlrpc=None):
'''Enable HA on the cluster
Arguments:
retries (int, optional): number of retries
xmlrpc (xmlrpcClt, optional): xmlrpc client
Raises: vFXTConfigurationException
'''
log.info("Enabling HA mode")
try:
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
status = self._xmlrpc_do(xmlrpc.cluster.enableHA, _xmlrpc_do_retries=retries)
if status != 'success':
raise vFXTConfigurationException(status)
except Exception as ha_e:
raise vFXTConfigurationException("Failed to enable HA: {}".format(ha_e))
def rebalance_directory_managers(self, retries=ServiceBase.XMLRPC_RETRIES):
'''Call rebalanceDirManagers via XMLRPC
Arguments:
retries (int): number of retries
Raises: vFXTConfigurationException
'''
xmlrpc = self.xmlrpc()
self._enable_maintenance_api(xmlrpc)
log.info("Rebalancing directory managers")
try:
status = self._xmlrpc_do(xmlrpc.maint.rebalanceDirManagers, _xmlrpc_do_retries=retries)
if status != 'success':
raise vFXTConfigurationException(status)
except xmlrpclib_Fault as e:
# AVERE_EINVAL, not needed or already in progress
if int(e.faultCode) == 103: #pylint: disable=no-member
return
raise vFXTStatusFailure("Waiting for cluster rebalance failed: {}".format(e))
except Exception as e:
raise vFXTStatusFailure("Waiting for cluster rebalance failed: {}".format(e))
def first_node_configuration(self):
'''Basic configuration for the first cluster node
'''
if not self.mgmt_ip:
raise vFXTConfigurationException("Cannot configure a cluster without a management address")
log.info("Waiting for remote API connectivity")
xmlrpc = None
try:
xmlrpc = self.xmlrpc(retries=ServiceBase.WAIT_FOR_INITIAL_CONNECTION) #pylint: disable=unused-variable
except Exception as e:
self.first_node_error = e
raise
self.set_default_proxy(xmlrpc=xmlrpc)
if self.trace_level:
log.info("Setting trace {}".format(self.trace_level))
support_opts = {'rollingTrace': 'yes', 'traceLevel': self.trace_level}
try:
response = self._xmlrpc_do(xmlrpc.support.modify, support_opts)
if response[0] != 'success':
self.first_node_error = vFXTConfigurationException(response)
raise self.first_node_error #pylint: disable=raising-bad-type
except Exception as e:
log.error("Failed to configure trace options: {}".format(e))
if self.timezone:
log.info("Setting timezone to {}".format(self.timezone))
response = self._xmlrpc_do(xmlrpc.cluster.modify, {'timezone': self.timezone})
if response != 'success':
self.first_node_error = vFXTConfigurationException(response)
raise self.first_node_error #pylint: disable=raising-bad-type
# try and enable HA early if we have support in the AvereOS release for single node
try:
try:
self.enable_ha(xmlrpc=xmlrpc)
except Exception as e:
log.debug("Failed to enable early HA, will retry later: {}".format(e))
except Exception as e:
log.debug("Failed during final first node configuration: {}".format(e))
self.first_node_error = vFXTConfigurationException(e)
raise self.first_node_error #pylint: disable=raising-bad-type
def set_default_proxy(self, name=None, xmlrpc=None):
'''Set the default cluster proxy configuration
Arguments:
name (str, optional): proxy name (defaults to proxy hostname)
xmlrpc (xmlrpcClt, optional): xmlrpc client
'''
if not self.proxy:
log.debug("Skipping proxy configuration")
return
name = name or self.proxy.hostname
if not name or not self.proxy.geturl():
raise vFXTConfigurationException("Unable to create proxy configuration: Bad proxy host")
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
body = {'url': self.proxy.geturl(), 'user': self.proxy.username or '', 'password': self.proxy.password or ''}
if name not in self._xmlrpc_do(xmlrpc.cluster.listProxyConfigs):
log.info("Setting proxy configuration")
try:
response = self._xmlrpc_do(xmlrpc.cluster.createProxyConfig, name, body)
if response != 'success':
raise vFXTConfigurationException(response)
except Exception as e:
raise vFXTConfigurationException("Unable to create proxy configuration: {}".format(e))
try:
response = self._xmlrpc_do(xmlrpc.cluster.modify, {'proxy': name})
if response != 'success':
raise vFXTConfigurationException(response)
except Exception as e:
raise vFXTConfigurationException("Unable to configure cluster proxy configuration: {}".format(e))
def allow_node_join(self, enable=True, retries=ServiceBase.WAIT_FOR_HEALTH_CHECKS, xmlrpc=None): #pylint: disable=unused-argument
'''Enable created nodes to join
Arguments:
enable (bool, optional): Allow nodes to join
retries (int): number of retries (default 600)
xmlrpc (xmlrpcClt, optional): xmlrpc client
'''
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
def _compat_allow_node_join(enable, xmlrpc):
setting = 'yes' if enable else 'no'
log.debug("_compat_allow_node_join setting allowAllNodesToJoin to {}".format(setting))
response = self._xmlrpc_do(xmlrpc.cluster.modify, {'allowAllNodesToJoin': setting})
if response != 'success':
raise vFXTConfigurationException("Failed to update allow node join configuration: {}".format(response))
if not enable:
_compat_allow_node_join(enable, xmlrpc)
return
# we have to accumulate all of the nodes we expect to see in node.listUnconfiguredNodes
node_addresses = [_.ip() for _ in self.nodes]
node_count = len(node_addresses)
joined_count = len(self._xmlrpc_do(xmlrpc.node.list))
expected_unjoined_count = node_count - joined_count
unjoined = []
if not expected_unjoined_count:
log.debug("Nodes joined on their own")
return
log.info("Waiting for {} nodes to show up and ask to join cluster".format(expected_unjoined_count))
start_time = int(time.time())
op_retries = retries
while True:
unjoined_count = 0
try:
unjoined = [_ for _ in self._xmlrpc_do(xmlrpc.node.listUnconfiguredNodes) if _['address'] in node_addresses]
unjoined_count = len(unjoined)
if unjoined_count == expected_unjoined_count:
break
except Exception as e:
log.debug("Failed to check unconfigured node status: {}".format(e))
try:
if len(self._xmlrpc_do(xmlrpc.node.list)) == node_count:
log.debug("Nodes joined on their own")
return
except Exception as e:
log.debug("Failed to check joined node status: {}".format(e))
# either we run out of retries or we take too long
duration = int(time.time()) - start_time
taking_too_long = duration > int(retries * 1.5)
if op_retries == 0 or taking_too_long:
diff = expected_unjoined_count - unjoined_count
raise vFXTConfigurationException("Timed out waiting for {} node(s) to come up.".format(diff))
if op_retries % 10 == 0:
unjoined_names = ', '.join([_['name'] for _ in unjoined])
log.debug("Found {} ({}), expected {}".format(unjoined_count, unjoined_names, expected_unjoined_count))
self._log_conditions(xmlrpc=xmlrpc)
op_retries -= 1
self._sleep()
# once we have them, call node.allowToJoin with our nodes in one group
node_names = [_['name'] for _ in unjoined]
log.info("Setting allow join for {} nodes".format(expected_unjoined_count))
log.debug(','.join(node_names))
try:
activity = self._xmlrpc_do(xmlrpc.node.allowToJoin, ','.join(node_names), False)
self._xmlrpc_wait_for_activity(activity, 'Failed to allow multiple node joins', retries=retries)
return
except xmlrpclib_Fault as e:
# older releases cannot accept comma delimited node names
if not any([_ in e.faultString for _ in ['Cannot find node', 'Cannot join the node']]):
raise
# try old way
log.info("Setting node join policy")
_compat_allow_node_join(enable, xmlrpc)
def refresh(self):
'''Refresh instance data of cluster nodes from the backend service'''
for n in self.nodes:
n.refresh()
def reload(self):
'''Reload all cluster information'''
if self.is_on(): # reread configuration, uses xmlrpc so must be on
self.load_cluster_information()
else:
self.refresh()
def export(self):
'''Export the cluster object in an easy to serialize format'''
return {
'name': self.name,
'mgmt_ip': self.mgmt_ip,
'admin_password': self.admin_password,
'nodes': [n.instance_id for n in self.nodes]
}
def _sleep(self, duration=None):
'''General sleep handling'''
time.sleep(duration or self.service.POLLTIME)
@classmethod
def valid_cluster_name(cls, name):
'''Validate the cluster name
Returns: bool
'''
name_len = len(name)
if name_len < 1 or name_len > 128:
return False
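# must start with a lowercase letter, contain only lowercase letters, digits, and hyphens, and end with a letter or digit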
if re.search('^[a-z]([-a-z0-9]*[a-z0-9])?$', name):
return True
return False
def in_use_addresses(self, category='all', xmlrpc=None):
'''Get in use addresses from the cluster
Arguments:
category (str): all (default), mgmt, vserver, cluster
xmlrpc (xmlrpcClt, optional): xmlrpc client
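Returns: list of IP address strings in use for the selected category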
'''
addresses = set()
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
if category in ['all', 'mgmt']:
addresses.update([self._xmlrpc_do(xmlrpc.cluster.get)['mgmtIP']['IP']])
if category in ['all', 'vserver']:
for vs in self._xmlrpc_do(xmlrpc.vserver.list):
data = self._xmlrpc_do(xmlrpc.vserver.get, vs)
for client_range in data[vs]['clientFacingIPs']:
first = client_range['firstIP']
last = client_range['lastIP']
range_addrs = Cidr.expand_address_range(first, last)
addresses.update(range_addrs)
if category in ['all', 'cluster']:
data = self._xmlrpc_do(xmlrpc.cluster.get)
for cluster_range in data['clusterIPs']:
first = cluster_range['firstIP']
last = cluster_range['lastIP']
range_addrs = Cidr.expand_address_range(first, last)
addresses.update(range_addrs)
return list(addresses)
def set_node_naming_policy(self, xmlrpc=None):
'''Rename nodes internally and set the default node prefix
This sets the node names internally to match the service instance
names. This also sets the node prefix to be the cluster name.
Arguments:
xmlrpc (xmlrpcClt, optional): xmlrpc client
'''
if not self.nodes:
log.debug("No nodes to rename, skipping")
return
if not self.node_rename:
log.debug("Skipping node naming configuration")
return
node_ip_map = {ip: n.name() for n in self.nodes for ip in n.in_use_addresses()}
# rename nodes with cluster prefix
log.info("Setting node naming policy")
# first pass, rename new mismatched nodes to their node id
retries = ServiceBase.XMLRPC_RETRIES
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
while True:
try:
node_names = self._xmlrpc_do(xmlrpc.node.list)
nodes = [list(self._xmlrpc_do(xmlrpc.node.get, _).values())[0] for _ in node_names]
for node in nodes:
node_name = node_ip_map.get(node['primaryClusterIP']['IP'], None)
if node_name and node_name != node['name'] and node_name in node_names:
log.debug("Renaming new node {} -> {}".format(node['name'], node['id']))
self._xmlrpc_do(xmlrpc.node.rename, node['name'], node['id'])
break
except Exception as e:
log.debug(e)
if retries == 0:
log.error("Failed to rename nodes: {}".format(e))
break
retries -= 1
# second pass, rename all nodes to their instance names
retries = ServiceBase.XMLRPC_RETRIES
while True:
try:
node_names = self._xmlrpc_do(xmlrpc.node.list)
nodes = [list(self._xmlrpc_do(xmlrpc.node.get, _).values())[0] for _ in node_names]
for node in nodes:
node_name = node_ip_map.get(node['primaryClusterIP']['IP'], None)
if node_name and node_name != node['name'] and node_name not in node_names:
log.debug("Renaming node {} -> {}".format(node['name'], node_name))
self._xmlrpc_do(xmlrpc.node.rename, node['name'], node_name)
break
except Exception as e:
log.debug(e)
if retries == 0:
log.error("Failed to rename nodes: {}".format(e))
break
retries -= 1
def vserver_home_addresses(self, vservers=None, xmlrpc=None):
'''Home the addresses of the vserver across the nodes
Arguments:
vservers (list, optional): list of vservers to home (otherwise all vservers)
xmlrpc (xmlrpcClt, optional): xmlrpc client
'''
xmlrpc = self.xmlrpc() if xmlrpc is None else xmlrpc
vservers = vservers or xmlrpc.vserver.list()
if not isinstance(vservers, list):
vservers = [vservers]
nodes = itertools.cycle(sorted(xmlrpc.node.list()))
for vserver in vservers:
home_cfg = self._xmlrpc_do(xmlrpc.vserver.listClientIPHomes, vserver)
# if all addresses are already homed, bail
if [_ for _ in home_cfg if _['home'] != 'None']:
log.debug("Refusing to override existing home configuration")
continue
# get the address ranges from our vserver
vserver_data = xmlrpc.vserver.get(vserver)[vserver]
vifs = set()
for address_range in vserver_data['clientFacingIPs']:
vifs.update(Cidr.expand_address_range(address_range['firstIP'], address_range['lastIP']))
# sort numerically
vifs = [Cidr.to_address(_) for _ in sorted([Cidr.from_address(_) for _ in vifs])]
# build mapping table
mappings = {vif: next(nodes) for vif in vifs}
old_mappings = {_['ip']: _['current'] for _ in home_cfg}
if not [_ for _ in list(mappings.keys()) if mappings[_] != old_mappings.get(_)]:
log.debug("Address home configuration is up to date for vserver '{}'".format(vserver))
continue
log.debug("Setting up addresses home configuration for vserver '{}': {}".format(vserver, mappings))
retries = self.service.EXTENDED_XMLRPC_RETRIES
while True:
try:
activity = self._xmlrpc_do(xmlrpc.vserver.modifyClientIPHomes, vserver, mappings)
self._xmlrpc_wait_for_activity(activity, "Failed to rebalance vserver {} addresses".format(vserver))
break
except Exception as e:
log.debug(e)
if retries == 0:
raise
retries -= 1
|
objdetector.py
|
"""
Object detector facade module. Run object detection in NN
"""
# pylint: disable=C0103,C0301,W0703,R0903
#import numpy as np
from threading import Thread, Event, Lock
from typing import List
import datetime as dt
import copy
import queue as q
import entities as e
class QueueSeparator:
"""
Separator in the queue indicating that a block of images for a time slice has completed
"""
class ObjectDetector:
"""
Facade to real object detection neural network
"""
def __init__(self, nnclass, logger):
"""
nnclass argument should implement following interface
* __init__(logger)
* detectObjects(img) -> List[e.DetectedObject]
* stop()
"""
self._realnn = nnclass
self._logger = logger
self._frames = q.Queue()
self._stopSignal = False
self._imgSetEnded = Event()
self._lock = Lock()
self._detectedObjectSets: List[e.DetectedObjectSet] = []
Thread(target=self._detectObjectsLoop, name='objdetector', args=()).start()
self._logger.info('ObjectDetector started')
def stop(self):
"""
stops detection module
"""
self._logger.info('ObjectDetector stopping...')
self._stopSignal = True
self._imgSetEnded.set()
self._realnn.stop()
def _detectObjectsLoop(self):
while not self._stopSignal:
try:
frame = self._frames.get(block=False)
if isinstance(frame, QueueSeparator):
self._imgSetEnded.set()
continue
self._imgSetEnded.clear()
self._logger.debug(f'Infer from vsid:{frame.vsid}')
rgbimg = frame.img[:, :, [2, 1, 0]] # BGR2RGB
dobjs = self._realnn.detectObjects(rgbimg)
doset = e.DetectedObjectSet(frame.vsid, frame.timestamp, dobjs)
self._lock.acquire()
self._detectedObjectSets.append(doset)
self._lock.release()
except q.Empty:
continue
except Exception as exc:
self._logger.error(exc)
self._logger.info('ObjectDetector stopped')
def pushImage(self, frame: e.CapturedFrame):
"""
push image into processing queue
"""
self._frames.put(frame)
def getDetectedObjectsFrame(self) -> e.DetectedObjectsFrame:
"""
returns current list of all detected objects in DetectedObjectsFrame
"""
self._frames.put(QueueSeparator())
self._imgSetEnded.wait()
self._lock.acquire()
doframe = e.DetectedObjectsFrame("", dt.datetime.now(), copy.deepcopy(self._detectedObjectSets))
self._detectedObjectSets = []
self._lock.release()
return doframe
@staticmethod
def getDetectedObjectsCollection(nnout, hscale, wscale, threshold, tlbr=True) -> List[e.DetectedObject]:
"""
Static helper
Transforms network output to DetectedObject list
nnout should be an iterable of (class, score, bbox) triples, e.g. zip(classes, scores, bboxes)
NOTE! by default boxes have to be in (t,l,b,r) coordinate sequence
if they're not, set tlbr=False and they will be parsed as (l,t,r,b)
"""
dobjs: List[e.DetectedObject] = []
for c, s, bb in nnout:
if s < threshold:
break
if tlbr:
# input is (t,l,b,r): reorder the corners before building the BoundingBox
bbox = e.BoundingBox(int(bb[1]*hscale), int(bb[0]*wscale), int(bb[3]*hscale), int(bb[2]*wscale))
else:
# input is already (l,t,r,b): pass the corners through in order
bbox = e.BoundingBox(int(bb[0]*wscale), int(bb[1]*hscale), int(bb[2]*wscale), int(bb[3]*hscale))
dobjs.append(e.DetectedObject(int(c), round(float(s), 2), bbox))
return dobjs
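# Illustrative usage sketch (added; the variable names and numbers below are assumptions,
# not part of the original module). The helper above expects one (class, score, bbox)
# triple per detection, e.g.:
#   dobjs = ObjectDetector.getDetectedObjectsCollection(
#       nnout=zip(classes, scores, boxes),
#       hscale=720, wscale=1280, threshold=0.5)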
|
nessus.py
|
import nessrest
from nessrest import ness6rest
import threading
import Queue
from threading import Semaphore
import time
from output import *
#Dependencies:
#git clone https://github.com/golismero/openvas_lib.git
#python setup.by build
#python setup.py install
#or:
#pip install openvas_lib
class AutoNessus():
def __init__(self):
self.anessus = ness6rest.Scanner(url='https://127.0.0.1:8834', login='admin', password='dinimere', insecure=True)
self.policy = False
self.username = ''
self.pw = ''
self.scanning_queue = Queue.Queue()
self.targets = []
def add_host(self, target):
self.scanning_queue.put(target)
self.targets.append(target)
def start_scan(self, parallelScans):
print_info("starting nessus scans..")
if self.policy:
t = threading.Thread(target=self._nessus_worker)
t.start()
t.join() #finish thread
else:
print "[-] You must define a nessus policy BEFORE you can run a scan. Use method: set_policy first!"
def set_policy(self, name):
self.anessus.policy_set(name)
self.policy = True
def _nessus_worker(self):
targets = ','.join(map(str, self.targets))
self.anessus.scan_add(targets=targets)
self.anessus.scan_run()
scan_results = self.anessus.scan_results()
#test cases
#targets = ["192.168.0.1", "192.168.1.1", "192.168.1.202"]
#nessus = AutoNessus()
#nessus.set_policy("nw_full_scan_slow") #scans host by host.
#for target in targets:
# print "adding host: %s" %target
# nessus.add_host(target)
#nessus.start_scan(1)
|
TFLite_cam_edge_gd.py
|
#Main_Author: Evan Juras
# This code is based off the TensorFlow Lite image classification example at:
# https://github.com/tensorflow/tensorflow/blob/master/tensorflow/lite/examples/python/label_image.py
# Modifications have been made to implement our project.
# Import packages
from tflite_runtime.interpreter import Interpreter
from tflite_runtime.interpreter import load_delegate
import os
import argparse
import cv2
import numpy as np
import sys
import time
from threading import Thread
import importlib.util
import RPi.GPIO as GPIO
import time
# Define VideoStream class to handle streaming of video from webcam in separate processing thread
# Source - Adrian Rosebrock, PyImageSearch: https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/
class VideoStream:
"""Camera object that controls video streaming from the Picamera"""
def __init__(self,resolution=(320,320),framerate=30):
# Initialize the PiCamera and the camera image stream
self.stream = cv2.VideoCapture(0)
ret = self.stream.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*'MJPG'))
ret = self.stream.set(3,resolution[0])
ret = self.stream.set(4,resolution[1])
# Read first frame from the stream
(self.grabbed, self.frame) = self.stream.read()
# Variable to control when the camera is stopped
self.stopped = False
def start(self):
# Start the thread that reads frames from the video stream
Thread(target=self.update,args=()).start()
return self
def update(self):
# Keep looping indefinitely until the thread is stopped
while True:
# If the camera is stopped, stop the thread
if self.stopped:
# Close camera resources
self.stream.release()
return
# Otherwise, grab the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# Return the most recent frame
return self.frame
def stop(self):
# Indicate that the camera and thread should be stopped
self.stopped = True
#servo motor setting
Servo_pin = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(Servo_pin, GPIO.OUT)
Servo = GPIO.PWM(Servo_pin, 50)
Servo.start(0)
def servo_angle(angle):
duty = 2.5 + (12.0 - 2.5) * (angle + 90) / 180
Servo.ChangeDutyCycle(duty)
time.sleep(0.3)
#For rotating the servo when it detects PS and PET
def switch():
option = object_name
def PS():
servo_angle(-60)
servo_angle(-30)
servo_angle(0)
servo_angle(30)
servo_angle(60)
servo_angle(90)
time.sleep(2)
def METAL():
pass
def PET():
servo_angle(-60)
servo_angle(-30)
servo_angle(0)
servo_angle(30)
servo_angle(60)
servo_angle(90)
time.sleep(2)
def GLASS():
pass
def default():
pass
actions = {
'ps' : PS,
'metal' : METAL,
'pet' : PET,
'glass' : GLASS,
}
actions.get(option, default)()
# Define and parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--modeldir', help='Folder the .tflite file is located in',
default='edge')
parser.add_argument('--graph', help='Name of the .tflite file, if different than detect.tflite',
default='edgetpu.tflite')
parser.add_argument('--labels', help='Name of the labelmap file, if different than labelmap.txt',
default='labelmap.txt')
parser.add_argument('--threshold', help='Minimum confidence threshold for displaying detected objects',
default=0.8)
parser.add_argument('--resolution', help='Desired webcam resolution in WxH. If the webcam does not support the resolution entered, errors may occur.',
default='1280x720')
args = parser.parse_args()
MODEL_NAME = args.modeldir
GRAPH_NAME = args.graph
LABELMAP_NAME = args.labels
min_conf_threshold = float(args.threshold)
resW, resH = args.resolution.split('x')
imW, imH = int(resW), int(resH)
# Get path to current working directory
CWD_PATH = os.getcwd()
# Path to .tflite file, which contains the model that is used for object detection
PATH_TO_CKPT = os.path.join(CWD_PATH,MODEL_NAME,GRAPH_NAME)
# Path to label map file
PATH_TO_LABELS = os.path.join(CWD_PATH,MODEL_NAME,LABELMAP_NAME)
# Load the label map
with open(PATH_TO_LABELS, 'r') as f:
labels = [line.strip() for line in f.readlines()]
# If using Edge TPU, use special load_delegate argument
interpreter = Interpreter(model_path=PATH_TO_CKPT,
experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
print(PATH_TO_CKPT)
interpreter.allocate_tensors()
# Get model details
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
height = input_details[0]['shape'][1]
width = input_details[0]['shape'][2]
floating_model = (input_details[0]['dtype'] == np.float32)
input_mean = 127.5
input_std = 127.5
# Initialize frame rate calculation
frame_rate_calc = 1
freq = cv2.getTickFrequency()
# Initialize video stream
videostream = VideoStream(resolution=(imW,imH),framerate=30).start()
time.sleep(1)
# Track the last object class acted on so the servo only triggers when a new object appears
current_object = ''
object_name = ''
#for frame1 in camera.capture_continuous(rawCapture, format="bgr",use_video_port=True):
while True:
# Start timer (for calculating frame rate)
t1 = cv2.getTickCount()
# Grab frame from video stream
frame1 = videostream.read()
# Acquire frame and resize to expected shape [1xHxWx3]
frame = frame1.copy()
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame_resized = cv2.resize(frame_rgb, (width, height))
input_data = np.expand_dims(frame_resized, axis=0)
# Perform the actual detection by running the model with the image as input
interpreter.set_tensor(input_details[0]['index'],input_data)
interpreter.invoke()
# Retrieve detection results
boxes = interpreter.get_tensor(output_details[0]['index'])[0] # Bounding box coordinates of detected objects
classes = interpreter.get_tensor(output_details[1]['index'])[0] # Class index of detected objects
scores = interpreter.get_tensor(output_details[2]['index'])[0] # Confidence of detected objects
#num = interpreter.get_tensor(output_details[3]['index'])[0] # Total number of detected objects (inaccurate and not needed)
# Loop over all detections and draw detection box if confidence is above minimum threshold
for i in range(len(scores)):
if ((scores[i] > min_conf_threshold) and (scores[i] <= 1.0)):
# Get bounding box coordinates and draw box
# Interpreter can return coordinates that are outside of image dimensions, need to force them to be within image using max() and min()
ymin = int(max(1,(boxes[i][0] * imH)))
xmin = int(max(1,(boxes[i][1] * imW)))
ymax = int(min(imH,(boxes[i][2] * imH)))
xmax = int(min(imW,(boxes[i][3] * imW)))
cv2.rectangle(frame, (xmin,ymin), (xmax,ymax), (10, 255, 0), 2)
# Draw label
object_name = labels[int(classes[i])] # Look up object name from "labels" array using class index
label = '%s: %d%%' % (object_name, int(scores[i]*100)) # Example: 'person: 72%'
labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2) # Get font size
label_ymin = max(ymin, labelSize[1] + 10) # Make sure not to draw label too close to top of window
cv2.rectangle(frame, (xmin, label_ymin-labelSize[1]-10), (xmin+labelSize[0], label_ymin+baseLine-10), (255, 255, 255), cv2.FILLED) # Draw white box to put label text in
cv2.putText(frame, label, (xmin, label_ymin-7), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2) # Draw label text
# Draw framerate in corner of frame
cv2.putText(frame,'FPS: {0:.2f}'.format(frame_rate_calc),(30,50),cv2.FONT_HERSHEY_SIMPLEX,1,(255,255,0),2,cv2.LINE_AA)
# All the results have been drawn on the frame, so it's time to display it.
cv2.imshow('Object detector', frame)
#roi line and object
# Calculate framerate
t2 = cv2.getTickCount()
time1 = (t2-t1)/freq
frame_rate_calc= 1/time1
# check whether a new object class has appeared since the last servo action
if current_object == object_name:
pass
else:
current_object = object_name
switch()
servo_angle(-90)
if cv2.waitKey(1) == ord('q'):
break
# Clean up
cv2.destroyAllWindows()
videostream.stop()
Servo.stop()
GPIO.cleanup()
print("Done")
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
PANDA_OUTPUT_VOLTAGE = 5.28
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
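# Added note: with K = 0.091, dt/tau = K / (1 - K) ~= 0.1, i.e. roughly a 0.5 s update
# period against the 5 s time constant (inferred from the constant above).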
# A C2 uses about 1W while idling, and 30h seems like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
def panda_current_to_actual_current(panda_current):
# From white/grey panda schematic
return (3.3 - (panda_current * 3.3 / 4096)) / 8.25
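# Illustrative check (added): a raw ADC reading of 2048 maps to
# (3.3 - 2048 * 3.3 / 4096) / 8.25 = 1.65 / 8.25 = 0.2 A.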
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of pandaState voltage
self.car_voltage_instant_mV = 12e3 # Last value of pandaState voltage
self.integration_lock = threading.Lock()
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, pandaState):
try:
now = sec_since_boot()
# If pandaState is None, we're probably not in a car, so we don't care
if pandaState is None or pandaState.pandaState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
is_old_panda = False
if pandaState.pandaState.pandaType in [log.PandaState.PandaType.whitePanda, log.PandaState.PandaType.greyPanda]:
is_old_panda = True
# Low-pass battery voltage
self.car_voltage_instant_mV = pandaState.pandaState.voltage
self.car_voltage_mV = ((pandaState.pandaState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if (pandaState.pandaState.ignitionLine or pandaState.pandaState.ignitionCan):
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = pandaState.pandaState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif HARDWARE.get_battery_status() == 'Discharging':
# If the battery is discharging, we can use this measurement
# On C2: this is low by about 10-15%, probably mostly due to UNO draw not being factored in
current_power = ((HARDWARE.get_battery_voltage() / 1000000) * (HARDWARE.get_battery_current() / 1000000))
elif is_old_panda and (pandaState.pandaState.current > 1):
# If white/grey panda, use the integrated current measurements if the measurement is not 0
# If the measurement is 0, the current is 400mA or greater, and out of the measurement range of the panda
# This seems to be accurate to about 5%
current_power = (PANDA_OUTPUT_VOLTAGE * panda_current_to_actual_current(pandaState.pandaState.current))
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR, is_old_panda)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power, is_old_panda)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t, current_power, is_old_panda):
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if not is_old_panda and power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self):
return int(self.power_used_uWh)
def get_car_battery_capacity(self):
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, pandaState, offroad_timestamp):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= (not pandaState.pandaState.ignitionLine and not pandaState.pandaState.ignitionCan)
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, pandaState, offroad_timestamp, started_seen, LEON):
if pandaState is None or offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (pandaState.pandaState.usbPowerMode != log.PandaState.UsbPowerMode.client)
BATT_PERC_OFF = 10 if LEON else 3
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(pandaState, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 60))
should_shutdown &= started_seen
return should_shutdown
|
spectrum_dump.py
|
### GStreamer Spectrum Dump ####
# Modified for hyperion effect and Gstreamer 1.0 by RanzQ
# ranzq87 [(at)] gmail.com
#
# Original:
# https://github.com/Wintervenom/gst-spectrumdump
# V20111005-1 by Scott Garrett
# Wintervenom [(at)] gmail.com
################################
# Dependencies:
# PyGI (python-gi)
#
#################################
import sys
import json
import re
import math
from threading import Thread
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst, GLib
GObject.threads_init()
Gst.init(None)
def stdout(message):
"""
Writes a message to STDOUT.
"""
sys.stdout.write("{0}\n".format(message))
sys.stdout.flush()
def stderr(message):
"""
Writes a message to STDERR.
"""
sys.stderr.write("{0}\n".format(message))
sys.stderr.flush()
def fatal(error):
"""
Output an error message to STDERR and exit with status 1.
"""
stderr("Error: {0}".format(error))
sys.exit(1)
class GstSpectrumDump(object):
"""
Dumps the spectrum magnitudes of incoming audio as volume units per band.
Optional arguments:
<source> Source of the audio (default: alsasrc or gconf setting).
<precision> How many decimal places to round the magnitudes to
(default: 16).
<bands> How many frequency bands to output (default: 128).
<amplify> Amplify output by this much (default: 1).
<logamplify> Amplify magnitude values logarithmically to compensate for
softer higher frequencies. (default: False)
<autoamp> Automatically control amplification levels when they are
too loud.
<threshold> Minimal magnitude of a band in decibels (default: 70).
<cutoff> Cut off magnitudes at this value after amplification has
been applied (default: 100).
<scale> Scale magnitudes to this value (default: 100).
<raw> Don't clip or logarithmically upscale the output.
(default: True).
<db> Return output in decibels instead of a percentage.
<logamplify> is ignored (default: False).
<iec> Convert decibels to percentages with IEC 60268-18 scaling
(default: False).
<vumeter> Return VU meter output instead of spectrum. <bands>
controls how many channels to output here. <threshold> is
ignored.
<interval> Milliseconds to wait between polls (default: 50).
<multichannel> Spectrum from multiple channels? (default: False)
<quiet> Don't output to STDERR (default: False if no callback).
<callback> Return the magnitude list to this function (default: None).
"""
def __init__(self, **opts):
self.running = False
self.source = opts.get('source')
self.precision = opts.get('precision')
self.bands = opts.get('bands', 128)
self.amplify = opts.get('amplify', 1)
self.logamplify = opts.get('logamplify', False)
self.autoamp = opts.get('autoamp', False)
self.threshold = opts.get('threshold', 70)
self.cutoff = opts.get('cutoff', 100)
self.scaleto = opts.get('scale', 100)
self.raw = opts.get('raw', True)
self.db = opts.get('db', False)
self.iec = opts.get('iec', False)
self.vumeter = opts.get('vumeter', False)
self.interval = opts.get('interval', 50)
self.callback = opts.get('callback')
self.multichannel = opts.get('multichannel', False)
self.bands_cutoff = opts.get('cutoff', 96)
self.quiet = opts.get('quiet', self.callback is not None)
self.pipeline = None
self.gainhits = 0
self.origamp = self.amplify
self.bus = None
self.conn = None
self.loop = None
self.loop_thread = None
if not self.source:
self.source = 'autoaudiosrc'
# defaultsrc = 'alsasrc'
# try:
# conf = gconf.client_get_default()
# source = conf.get('/system/gstreamer/%d.%d/default/audiosrc' %
# Gst.gst_version[:-1])
# if source:
# self.source = source.get_string()
# else:
# self.source = defaultsrc
# except NameError:
# stderr('Python2 GConf module not installed; using default source.')
# self.source = defaultsrc
elif self.source.startswith('mpd'):
fifo = self.source.split(' ', 1)
fifo = fifo[1] if len(fifo) > 1 else '/tmp/mpd.fifo'
pipeline = 'filesrc location={} ! audio/x-raw-int, ' \
'rate=44100, channels=2, endianness=1234, width=16, ' \
'depth=16, signed=true ! audioconvert'
self.source = pipeline.format(fifo)
# From: https://github.com/Roadmaster/audio_test/blob/master/minimal_gstreamer_messages.py
def parse_spectrum_structure(self, text):
# First let's jsonize this
# This is the message name, which we don't need
text = text.replace("spectrum, ", "")
# name/value separator in json is : and not =
text = text.replace("=", ": ")
# Mutate the {} array notation from the structure to
# [] notation for json.
# Sometimes arrays are notated using < >
text = text.replace("{", "[")
text = text.replace("}", "]")
text = text.replace("<", "[")
text = text.replace(">", "]")
# Remove a few stray semicolons that aren't needed
text = text.replace(";", "")
# Remove the data type fields, as json doesn't need them
text = re.sub(r"\(.+?\)", "", text)
# double-quote the identifiers
text = re.sub(r"([\w-]+):", r'"\1":', text)
# Wrap the whole thing in brackets
text = ("{"+text+"}")
# Try to parse and return something sensible here, even if
# the data was unparsable.
try:
return json.loads(text)
except ValueError:
return None
def parse_magnitude(self, s):
# Faster way to parse magnitudes
return map(float, s[s.find('{')+1:-3].split(','))
def round(self, n):
if self.precision:
return round(n, self.precision)
elif self.precision == 0:
return int(n)
return n
def dbtopct(self, db, index=None):
indexamp = 1
if self.iec:
pct = 0.0
if db < -70.0:
pct = 0.0
elif db < -60.0:
pct = (db + 70.0) * 0.25
elif db < -50.0:
pct = (db + 60.0) * 0.5 + 2.5
elif db < -40.0:
pct = (db + 50.0) * 0.75 + 7.5
elif db < -30.0:
pct = (db + 40.0) * 1.5 + 15.0
elif db < -20.0:
pct = (db + 30.0) * 2.0 + 30.0
elif db < 0.0:
pct = (db + 20.0) * 2.5 + 50.0
else:
pct = 100.0
else:
pct = (self.threshold + db) / float(self.threshold) * 100
if index and index > 0:
indexamp += math.log10(index)
pct = min(self.cutoff, self.amplify * (indexamp * pct))
if self.autoamp:
if pct == 100:
self.gainhits += 1
if self.amplify > 0:
self.amplify -= 0.1
elif pct == 0:
self.gainhits -= 1
if self.gainhits < -100:
if self.amplify < self.origamp:
self.amplify += 0.01
self.gainhits = 0
return pct * (self.scaleto / 100.0)
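# Worked example (added, illustrative only): with the default settings and iec=True,
# dbtopct(-45) falls in the (-50, -40) band, giving (-45 + 50) * 0.75 + 7.5 = 11.25 percent.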
def interpolate(self, a, b, points):
points = round(points, 0) + 1.0
return [a + ((b - a) / points) * x for x in range(0, int(points))]
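# Worked example (added, illustrative only): interpolate(0, 10, 4) takes 5 steps of
# (10 - 0) / 5 = 2.0 and returns [0.0, 2.0, 4.0, 6.0, 8.0]; the endpoint b is not included.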
def scale(self, floats, maxlen=None):
if len(floats) < 2:
return floats
scaled = []
for i in range(1, len(floats)):
length = 1 + math.log10(i - 0)
scaled += self.interpolate(floats[i-1], floats[i], length)[:-1]
scaled.append(floats[-1])
if maxlen and len(scaled) > maxlen:
downscaled = []
incr = len(scaled) / float(maxlen)
index = 0
# TODO: Figure out why v is not used
# pylint: disable=unused-variable
for v in range(0, maxlen):
downscaled.append(scaled[int(round(index, 0))])
index += incr
return downscaled
else:
return scaled
# pylint: disable=unused-argument
def on_message(self, bus, message):
# We should return false if the pipeline has stopped
if not self.running:
return False
try:
# s = message.structure
s = message.get_structure()
if not s:
return
name = s.get_name()
if name == 'spectrum' and s.has_field('magnitude'):
# mags = s.get_value('magnitude')
# PyGI doesn't fully support spectrum yet:
# https://bugzilla.gnome.org/show_bug.cgi?id=693168
if self.multichannel:
mags = self.parse_spectrum_structure(s.to_string())['magnitude']
magnitudes = mags[0][:self.bands_cutoff] # We use only the first channel for now
else:
mags = self.parse_magnitude(s.to_string())
magnitudes = mags[:self.bands_cutoff]
if not self.db:
if self.logamplify:
magnitudes = [self.dbtopct(db, i) for i, db
in enumerate(magnitudes)]
else:
magnitudes = [self.dbtopct(db) for i, db
in enumerate(magnitudes)]
if not self.raw:
magnitudes = self.scale(magnitudes, self.bands)
magnitudes = [self.round(m) for m in magnitudes]
elif name == 'level' and s.has_field('peak') and s.has_field('decay'):
magnitudes = []
peaks = s.get_value('peak')
decays = s.get_value('decay')
for channel in range(0, min(self.bands, len(peaks))):
peak = max(-self.threshold, min(0, peaks[channel]))
decay = max(-self.threshold, min(0, decays[channel]))
if not self.db:
if self.logamplify:
peak = self.dbtopct(peak, peak)
decay = self.dbtopct(decay, decay)
else:
peak = self.dbtopct(peak)
decay = self.dbtopct(decay)
magnitudes.append(self.round(peak))
magnitudes.append(self.round(decay))
else:
return True
if not self.quiet:
try:
print ' | '.join(('%.3f' % m for m in magnitudes))
except IOError:
self.loop.quit()
if self.callback:
self.callback(magnitudes)
except KeyboardInterrupt:
self.loop.quit()
return True
def start_pipeline(self):
self.running = True
pipeline = [self.source]
interval = 'interval={0}'.format(1000000 * self.interval)
if self.vumeter:
pipeline.append('level message=true {}'.format(interval))
else:
spectrum = 'spectrum {} bands={} threshold=-{}'
spectrum = spectrum.format(interval, self.bands, self.threshold)
if self.multichannel:
spectrum += ' multi-channel=true'
pipeline.append(spectrum)
pipeline.append('fakesink')
# stdout('Launching pipeline: ' + ' ! '.join(pipeline))
self.pipeline = Gst.parse_launch(' ! '.join(pipeline))
# self.pipeline = Gst.parse_launch('alsasrc ! level message=true in ! fakesink')
# self.pipeline = Gst.Pipeline()
# for element in pipeline:
# self.pipeline.add(element)
self.bus = self.pipeline.get_bus()
self.bus.enable_sync_message_emission()
self.bus.add_signal_watch()
# self.bus.add_signal_watch_full(GLib.PRIORITY_DEFAULT)
self.conn = self.bus.connect('message', self.on_message)
# self.source_id = self.bus.add_watch(GLib.PRIORITY_DEFAULT, self.on_message, None)
# stdout("Bus connected.")
self.pipeline.set_state(Gst.State.PLAYING)
# stdout("Pipeline STATE_PLAYING set.")
def stop_pipeline(self):
self.running = False
if self.pipeline:
self.bus.disconnect(self.conn)
# GLib.Source.remove(self.source_id) # Not working?
# stdout("Bus disconnected.")
self.bus.remove_signal_watch()
# stdout("Signal watch removed.")
self.pipeline.set_state(Gst.State.NULL)
# stdout("Pipeline STATE_NULL set.")
def start(self):
# stdout(Gst.version())
self.start_pipeline()
self.loop = GLib.MainLoop()
# self.loop_context = self.loop.get_context()
self.loop_thread = Thread(target=self.loop.run)
self.loop_thread.daemon = True
self.loop_thread.start()
# stdout("Pipeline initialized.")
def stop(self):
# stdout("Stopping pipeline...")
self.stop_pipeline()
# Quit the MainLoop
self.loop.quit()
# Wait for the thread
self.loop_thread.join()
# stdout("Done.")
|
UdpChat.py
|
import logging
from logging.config import fileConfig
from os import path
import os
import sys
import socket
import threading
from termcolor import cprint
from time import sleep
import datetime
class Server(object):
"""docstring for Server."""
def __init__(self, port):
"""Constructor of the function
Arguments:
port {[int]} -- [On which the server will start]
"""
super(Server, self).__init__()
self.port = port
self.client_table = []
def handle_dereg(self, username, address):
"""this function handles the dereg of client
Arguments:
username {[string]} -- [The name which has to update]
address {[socket]} -- [The socket where response/ ACK needs to be sent]
"""
logging.info("Deregging received for " + username + "|")
for i in range(len(self.client_table)):
v = self.client_table[i]
if (v[0] == username):
v[4] = "OFFLINE"
logging.info("User found and unregisterded it")
self.client_table_broadcast()
# sleep(5.0)
self.server_socket.sendto("You are Offline. Bye.".encode(),
address)
def handle_reg(self, username, address):
"""this function handles the reg of client
Arguments:
username {[string]} -- [The name which has to update]
address {[socket]} -- [The socket where response/ ACK needs to be sent]
"""
logging.info("Regging received for " + username)
for i in range(len(self.client_table)):
v = self.client_table[i]
if (v[0] == username):
v[4] = "ONLINE"
logging.info("User found and registered it")
self.client_table_broadcast()
# sleep(5.0)
delayed_messages = self.get_file_messages(username)
self.server_socket.sendto(delayed_messages.encode(), address)
logging.info("offline message sent back that are " +
delayed_messages)
# self.server_socket.sendto(
# "You are online. Welcome again.".encode(), address)
def check_already_exist(self, username):
"""Check if the user is already in the table.
this helps when a new reg or new messages come in
Arguments:
username {[string]} -- [The user which needs to be check]
Returns:
[type] -- [boolen]
"""
for i in range(len(self.client_table)):
v = self.client_table[i]
if (v[0] == username):
return True
return False
def start(self):
"""This is the starting point of the server
"""
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.server_socket.bind(('', int(self.port)))
logging.info("Server started")
while True:
message, address = self.server_socket.recvfrom(1024)
request = message.decode("utf-8")
logging.info(request)
to_do = request.split(" ")
logging.info("To do is ")
logging.info(to_do[0])
if to_do[0] == "dereg":
self.handle_dereg(to_do[1], address)
continue
elif to_do[0] == "reg":
self.handle_reg(to_do[1], address)
continue
elif to_do[0] == "savem":
logging.info("Offline message received " +
message.decode("utf-8"))
if (self.check_offline_status(to_do[1])):
self.server_socket.sendto(
"Message received by the server and saved".encode(),
address)
message_body_after_header = message.decode("utf-8")[
len(to_do[1]) + len("savem") + 2:]
logging.info("Message body is " + message_body_after_header)
self.handle_offline_message(to_do[1],
message_body_after_header)
else:
exist_message = "Client " + to_do[1] + " exists!!"
self.server_socket.sendto(exist_message.encode(), address)
self.client_table_broadcast()
continue
# client information received from client
client_data = message.decode("utf-8").split(" ")
client_data.append("ONLINE")
# Check if the user do not already exists
if (not self.check_already_exist(client_data[0])):
self.server_socket.sendto(
"Welcome, You are registered.".encode(), address)
# appending to the client table
self.client_table.append(client_data)
logging.info("Client Port is " + client_data[1])
logging.info(client_data)
self.client_table_broadcast()
else:
self.server_socket.sendto(
"Sorry user is already registered.".encode(), address)
def handle_offline_message(self, filename, message):
"""When an offline message is reached to server.
This method handles the saving of the messages to the file
Arguments:
filename {[string]} -- [File name which is same as username]
message {[string]} -- [The message needs to be saved]
"""
logging.info("Apending started")
with open(filename, "a") as myfile:
myfile.write(str(datetime.datetime.now()) + " " + message + "\n")
logging.info("File appended")
def get_file_messages(self, username):
"""When user regs again. It will get the messages from the file
Arguments:
username {[string]} -- [file name is same as user name]
Returns:
[string] -- [file content]
"""
if (os.path.exists(username)):
logging.info(username + " exists")
file = open(username, "r")
lines = file.readlines()
content = ""
for V in lines:
content = content + V + "\n"
os.remove(username)
return content
else:
logging.info(username + " does not exist")
return ""
def check_offline_status(self, username):
"""Check if a user is offline in table
Arguments:
username {[string]} -- [User to check]
Returns:
[type] -- [boolean]
"""
logging.info("Checking status for " + username)
for v in self.client_table:
if (v[0] == username):
if (v[4] == "OFFLINE"):
return True
return False
def client_table_broadcast(self):
"""This method broadcast the table to all of the clients' broadcasting port
"""
for v in self.client_table:
self.send_table_to_client(v[2], int(v[1]))
def send_table_to_client(self, client_ip, client_port):
"""To send the client table on the client's port
Arguments:
client_ip {[str]} -- [Client Ip]
client_port {[int]} -- [Client Port]
"""
broadcast_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
b_addr = (client_ip, client_port)
logging.info("Sent to " + client_ip + " " + str(client_port))
logging.info(self.table_to_string().encode())
broadcast_socket.sendto(self.table_to_string().encode(), b_addr)
def table_to_string(self):
"""Converting the list form of the client table to string
Returns:
[type] -- [string]
"""
logging.info("Sending table")
send = "table "
for v in self.client_table:
send = send + v[0] + " " + v[1] + " " + \
v[2] + " " + v[3] + " " + v[4] + "\n"
return send
class Client(object):
"""This class handles the chat functions for the user."""
def __init__(self, nickname, server_ip, server_port, client_port):
super(Client, self).__init__()
self.nick_name = nickname.lower()
self.server_ip = server_ip
self.server_port = server_port
self.client_port = client_port
self.client_table = []
def start(self):
"""This is the starting point for the Client
"""
self.broadcast_thread = threading.Thread(
group=None,
target=self.client_table_broadcast_message_service,
name="Broadcast Service")
# starting broadcast thread to receive client table from server
self.broadcast_thread.start()
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.client_socket.settimeout(1.0)
self.addr = (self.server_ip, int(self.server_port))
# starting client initial registration
self.do_registration()
# to start the thread to receive the table
self.input_thread = threading.Thread(
group=None, target=self.client_actions, name="Broadcast Service")
self.input_thread.start()
def print_client_table(self):
"""Printing the client table on the command of list from the console
"""
header = "{:^10} {:^20} {:^10} {:^10}".format('NAME', 'IP', 'PORT',
'STATUS')
cprint(header, "red")
logging.info(len(self.client_table))
for i in range(len(self.client_table) - 1):
v = self.client_table[i]
line = "{:^10} {:^20} {:^10} {:^10}".format(v[0], v[2], v[1], v[4])
cprint(str(line), "red")
def client_actions(self):
"""This will run in a thread to take inputs from the user
"""
while (1):
cprint(
"Multiple options available\n>>>> send <name> <message>\n>>>> list\n>>>> reg <nick-name>\n>>>> dereg <nick-name> \n>>>> deregA <any one>\n>>>> exit\n",
"red")
command = input()
choice = command.split(" ")[0]
logging.info("Choice is ")
logging.info(choice)
if (choice == "send"):
logging.info("Sending to the client")
self.handle_message_sending(command)
elif (choice == "list"):
logging.info("Listing table")
self.print_client_table()
elif choice == "dereg":
self.perform_dereg(command)
elif choice == "deregA":
self.perform_deregon_all(command)
elif choice == "reg":
self.perform_reg(command)
elif choice == "exit":
self.perform_exit()
else:
cprint("You chose wrong option\n", "red")
def perform_exit(self):
self.perform_dereg("dereg "+self.nick_name)
os._exit(1)
def perform_reg(self, command):
"""On the reg input after dereg this will process the reg to the server
Arguments:
command {[str]} -- [The command given e.g reg x]
"""
logging.info("Regging inititate")
reg_client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
reg_client_socket.sendto(command.lower().encode(), self.addr)
data, server = reg_client_socket.recvfrom(1024)
cprint(data.decode("utf-8"), "green")
logging.info("reg again")
def handle_message_sending(self, command):
"""If user select to send the message, this method will forward the message
Arguments:
command {[str]} -- [The command given for send e.g send x this is good]
"""
logging.info("Sending message " + command[4:])
send_name = command.split(" ")[1].lower()
logging.info("Username is ")
logging.info(send_name)
message = "msage " + self.nick_name + ": " + command[len(send_name) +
6:]
logging.info("Sending message |" + message + "|")
send = "savem " + send_name + " " + self.nick_name + ": " + command[len(
send_name) + 6:]
for i in range(len(self.client_table)):
v = self.client_table[i]
if (v[0] == send_name):
if (v[4] == "ONLINE"):
logging.info("User found and it's port: " + v[1])
addr = ("127.0.0.1", int(v[1]))
message_client_socket = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM)
message_client_socket.settimeout(0.5)
message_client_socket.sendto(message.encode(), addr)
try:
ack, user = message_client_socket.recvfrom(1024)
cprint(ack.decode("utf-8"), "green")
logging.info("Message received")
except socket.timeout:
cprint(
"[No ACK from " + send_name +
", message sent to server]", "green")
logging.info(
"Message not received when the client is closed")
self.save_message_request(send)
else:
logging.info("Offline message request to be sent!")
self.save_message_request(send)
def save_message_request(self, message):
"""When there is no response from the client or it is offline
then this method sends the message to the server to be saved
Arguments:
message {[str]} -- [Offline message to be saved on server]
Returns:
[type] -- [None]
"""
logging.info("Deregging inititate")
save_message_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
save_message_socket.settimeout(0.5)
save_message_socket.sendto(message.encode(), self.addr)
retry = 0
while (retry < 5):
try:
ack, server = save_message_socket.recvfrom(1024)
cprint(ack.decode("utf-8"), "green")
return None
except socket.timeout:
logging.info("ACK not received on saving offline message")
retry = retry + 1
cprint("[Server not responding]\n[Exiting]", "red")
os._exit(1)
def perform_deregon_all(self, command):
"""According to buisniness logic. A person can dereg himself but
if he wants to dereg somone else this method will proceed it
Arguments:
command {[str]} -- [The commnand for this e.g deregA x]
Returns:
[type] -- [None]
"""
username = command.lower().split(" ")[1]
logging.info("User is ")
logging.info(username)
logging.info("Deregging inititate")
dereg_client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
dereg_client_socket.settimeout(0.5)
dereg_client_socket.sendto(command.lower().encode(), self.addr)
retry = 0
while (retry < 5):
try:
data, server = dereg_client_socket.recvfrom(1024)
cprint(data.decode("utf-8"), "green")
return None
except socket.timeout:
logging.info(str(retry) + ": ACK not received on registration")
retry = retry + 1
cprint("[Server not responding]\n[Exiting]", "red")
os._exit(1)
def perform_dereg(self, command):
"""This performs the dereg on a server
Arguments:
command {[str]} -- [Command e.g dereg x]
Returns:
[type] -- [description]
"""
username = command.lower().split(" ")[1]
logging.info("User is ")
logging.info(username)
if (username == self.nick_name):
logging.info("Deregging inititate")
dereg_client_socket = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
dereg_client_socket.settimeout(0.5)
dereg_client_socket.sendto(command.lower().encode(), self.addr)
retry = 0
while (retry < 5):
try:
data, server = dereg_client_socket.recvfrom(1024)
cprint(data.decode("utf-8"), "green")
return None
except socket.timeout:
logging.info(
str(retry) + ": ACK not received on registration")
retry = retry + 1
cprint("[Server not responding]\n[Exiting]", "red")
os._exit(1)
else:
cprint("You can not de-register someone else.\n", "red")
def client_table_broadcast_message_service(self):
"""This method starts another socket on which it receives the updated client table and
receive messages it will distinguish and handle it later
"""
logging.info("Client table service and message started at " +
self.client_port)
self.broadcast_socket = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM)
self.broadcast_socket.bind(('', int(self.client_port)))
while True:
message, address = self.broadcast_socket.recvfrom(1024)
ack = "[Message received by " + self.nick_name + "]"
self.broadcast_socket.sendto(ack.encode(), address)
message_str = message.decode("utf-8")
header = message_str[:6]
logging.info("Header tag: " + header + "|")
if (header == "table "):
print("[Client table received]\n")
logging.info("Client table service received at " +
self.client_port)
self.update_client_table(message_str[6:])
elif (header == "msage "):
# sender_name = message_str.split(" ")[1]
# logging.info("Sender is ")
# logging.info(sender_name)
# message_rsv = message_str[6+len(sender_name):]
logging.info("Message received after the header is " +
message_str[6:])
cprint(message_str[6:], "green")
elif (header == "headr "):
logging.info("Offline message received")
detail = message_str[6:]
logging.info(detail)
def update_client_table(self, table):
"""ON receiving the table from the server.
This method will convert the string response of
the tabel from server to the list
Arguments:
table {[string]} -- [response from server on update table]
"""
logging.info("Table string is")
logging.info(table)
# clearing the list
self.client_table[:] = []
client_line = table.split("\n")
logging.info("Client line is")
logging.info(client_line)
logging.info("length of data is " + str(len(client_line)))
for v in client_line:
client_data = v.split(" ")
logging.info("1 client length is " + str(len(client_data)))
self.client_table.append(client_data)
logging.info("Table Updated: \n")
logging.info(self.client_table)
def do_registration(self):
"""When the program first starts it handles
theinformation sharing between client and server
"""
reg = self.nick_name + " " + self.client_port + \
" " + self.server_ip + " " + self.server_port
self.client_socket.sendto(reg.encode(), self.addr)
logging.info("Client reg send")
data, server = self.client_socket.recvfrom(1024)
data_str = data.decode("utf-8")
if (data_str == "Sorry user is already registered."):
cprint(data_str, "red")
os._exit(1)
logging.info("[First message received]")
class UdpChat(object):
"""docstring for UdpChat."""
def __init__(self, mode, port, nick_name, server_ip, server_port,
client_port):
"""__init__
Arguments:
mode {[str]} -- [-c or -s]
port {[str]} -- ["Server port"]
nick_name {[str]} -- [User name]
server_ip {[str]} -- [server ip]
server_port {[str]} -- [Port on which the server will be running]
client_port {[str]} -- [Client port on which it will receive messages and updated tables]
"""
super(UdpChat, self).__init__()
logging.info("Mode " + mode)
logging.info("Sever port" + str(port))
logging.info("Nick name " + str(nick_name))
logging.info("Server IP: " + str(server_ip))
logging.info("Sever Port: " + str(server_port))
logging.info("Client Port:" + str(client_port))
self.mode = mode
if (mode == "-c"):
logging.info("Client Called")
self.instance = Client(nick_name, server_ip, server_port,
client_port)
elif (mode == "-s"):
logging.info("Server Called")
self.instance = Server(port)
if __name__ == "__main__":
"""starting point of the table
"""
fileConfig('log.conf')
logger = logging.getLogger()
logging.info(len(sys.argv))
# Calling server
if (len(sys.argv) == 3):
U = UdpChat(sys.argv[1], sys.argv[2], None, None, None, None)
# Calling client
elif (len(sys.argv) == 6):
U = UdpChat(sys.argv[1], None, sys.argv[2], sys.argv[3], sys.argv[4],
sys.argv[5])
else:
logging.critical("Invalid format!")
print("Exiting")
sys.exit(0)
U.instance.start()
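# Usage sketch (added; inferred from the sys.argv handling above, the values are examples only):
#   server: python UdpChat.py -s 5000
#   client: python UdpChat.py -c alice 127.0.0.1 5000 6000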
|
serve.py
|
import http.server
import json
import logging
import queue
import threading
import numpy as np
import tensorflow as tf
from .estimator import def_estimator
from .flag import FLAGS, add_flag, add_output_dir_flag
def def_serve():
"""Define `serve()` function.
See also `help(def_serve())`.
- Returns
- `serve()` function.
"""
add_output_dir_flag()
add_flag('ip_address', default='')
add_flag('port', type=int, default=80)
estimator = def_estimator(distributed=False)
def serve(model_fn, preprocess_fn=None, postprocess_fn=None):
"""Serve as a HTTP server.
- Args
- `model_fn`: Same as `train_and_evaluate()`'s.
- `preprocess_fn`: A function to preprocess server request bodies
in JSON. Its first argument is a function which returns the
JSON input. You may need to use `tf.py_func` to create this
function.
- `postprocess_fn`: A function to postprocess server responses of
JSON serializable objects.
"""
server = EstimatorServer(
estimator(model_fn, FLAGS.output_dir),
preprocess_fn,
postprocess_fn)
class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
inputs = json.loads(self.rfile.read(
int(self.headers['Content-Length'])))
outputs = server.predict(inputs)
logging.info('Prediction results: {}'.format(outputs))
self.wfile.write(json.dumps(outputs).encode())
http.server.HTTPServer((FLAGS.ip_address, FLAGS.port), Handler) \
.serve_forever()
return serve
def _make_json_serializable(x):
if isinstance(x, np.ndarray):
return x.tolist()
elif isinstance(x, dict):
return {key: _make_json_serializable(value)
for key, value in x.items()}
elif isinstance(x, list):
return [_make_json_serializable(value) for value in x]
return x
class EstimatorServer:
def __init__(self, estimator, preprocess_fn=None, postprocess_fn=None):
self._input_queue = queue.Queue()
self._output_queue = queue.Queue()
def input_fn():
return (tf.train.batch(preprocess_fn(self._input_queue.get),
1,
dynamic_pad=True),
None)
def target():
for output in estimator.predict(input_fn=input_fn):
self._output_queue.put(postprocess_fn(output))
thread = threading.Thread(target=target, daemon=True)
thread.start()
def predict(self, inputs):
self._input_queue.put(inputs)
return self._output_queue.get()
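# Usage sketch (added; `my_model_fn` and `my_preprocess_fn` are placeholders, not from this module):
#   serve = def_serve()
#   serve(my_model_fn, preprocess_fn=my_preprocess_fn, postprocess_fn=_make_json_serializable)
# A client then POSTs JSON inputs to http://<ip_address>:<port>/ and receives JSON predictions back.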
|
cam.py
|
## this captures the mjpeg stream from my panasonic camera and converts it into a video that is pushed to youtube.
## It will a
# http://192.168.1.222/cam.cgi?mode=camcmd&value=capture this takes a photo
# http://192.168.1.222/cam.cgi?mode=startstream&value=5555
## you will need python, numpy, opencv, and pygi
## for windows users, the following links provide the files needed; things are relatively easy to install if you spend a few minutes on google
## https://www.python.org/download/releases/2.7/
## http://www.lfd.uci.edu/~gohlke/pythonlibs/
## https://sourceforge.net/projects/pygobjectwin32/files/
## Install pygi-aio-xxx ; install anything to do with gst-plugins or gstreamer
## the following also needs to be updated. figure it out on your own.
MY_IP = "192.168.1.166" ## THIS IS YOUR LOCAL IP ADDRESS
THEIR_IP = "192.168.1.222" ## THIS IS THE IP ADDRESS OF THE CAMERA
RTMP_OUT = "rtmp://a.rtmp.youtube.com/live2/x/steve.4txa-37t5-jk1e-e5bx"
import gi
gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
import numpy as np
import socket
import cv2
import binascii
import threading
import time
import sys
GObject.threads_init()
Gst.init(None)
UDP_PORT = 5555
start = binascii.unhexlify(''.join('FF D8'.split()))
end = binascii.unhexlify(''.join('FF D9'.split()))
sock = socket.socket(socket.AF_INET, # Internet
socket.SOCK_DGRAM) # UDP
sock.bind((MY_IP, UDP_PORT))
tcpsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
## ## 4K Video Mode = 640x360, but photo mode is 640x480 .. adust the below line accordingly. It's currently set for 640x360, but will scale the output up to 1280x720.
## min-threshold-time=1130000000 can be adjusted to get the audio/video sink more aligned. It should be pretty close as is.
CLI='appsrc name=mysource format=TIME do-timestamp=TRUE is-live=TRUE caps="video/x-raw,format=BGR,width=640,height=360,framerate=(fraction)30/1,pixel-aspect-ratio=(fraction)1/1" ! videoconvert ! videoscale ! videorate ! capsfilter caps="video/x-raw,format=I420,width=1280,height=720,framerate=(fraction)30/1" ! queue max-size-time=0 max-size-bytes=0 max-size-buffers=0 ! tee name=RAW RAW. ! queue max-size-time=0 max-size-bytes=0 max-size-buffers=0 ! autovideosink sync=false RAW. ! queue max-size-time=0 max-size-bytes=0 max-size-buffers=0 ! x264enc cabac=true aud=true tune=zerolatency byte-stream=false sliced-threads=true threads=4 speed-preset=1 bitrate=2000 key-int-max=20 bframes=0 ! h264parse ! video/x-h264,profile=main ! mux. autoaudiosrc ! audioconvert ! voaacenc bitrate=128000 ! queue max-size-time=0 max-size-bytes=0 max-size-buffers=0 ! aacparse ! audio/mpeg,mpegversion=4,stream-format=raw ! queue max-size-buffers=1 max-size-time=0 max-size-bytes=0 min-threshold-time=1140000000 ! flvmux streamable=true name=mux ! queue max-size-buffers=3 max-size-time=0 max-size-bytes=0 ! rtmpsink location="'+RTMP_OUT+'" sync=false'
pipline=Gst.parse_launch(CLI)
appsrc=pipline.get_by_name("mysource")
#appsink=pipline.get_by_name("sink")
appsrc.set_property('emit-signals',True) #tell sink to emit signals
pipline.set_state(Gst.State.PLAYING)
def keepalive(MY_IP, THEIR_IP):
while True:
try:
tcpsock.sendto("GET /cam.cgi?mode=startstream&value=5555 HTTP/1.1\nHost: "+MY_IP+"\n\nUser-Agent:Mozilla 5.0\n", (MY_IP, 80))
response = tcpsock.recv(1024)
time.sleep( 8 )
print "keep alive"
except:
tcpsock.connect((THEIR_IP, 80))
thread = threading.Thread(target=keepalive, args=(MY_IP,THEIR_IP,))
thread.daemon = True
thread.start()
total=0
while (1==1):
#begin = time.time()
data, addr = sock.recvfrom(999999) # buffer size is 1024 bytes
data = data.split(start)[1].split(end)[0]
data = start+data+end
data = np.frombuffer(data, np.uint8) # to array
data = cv2.imdecode(np.array(data),cv2.CV_LOAD_IMAGE_COLOR) #
#print np.shape(data) ## uncomment to see resolution of video
#cv2.imshow("img",data) ## 4K Video Mode = 640x360, but photo mode is 640x480 ..
#cv2.waitKey(1)
frame = data.tostring()
buf = Gst.Buffer.new_allocate(None, len(frame), None)
buf.fill(0,frame)
appsrc.emit("push-buffer", buf)
#final = time.time()
#total = total*0.8 + (final - begin)*.2
#print "time",str(1.0/total)
print "EXIT"
sys.exit()
|
benchmark.py
|
import argparse
import sys,os,time
import subprocess
import collect_gpu_power as cgp
from threading import Thread
# Parse arguments
parser = argparse.ArgumentParser(description='Benchmark deep learning tools')
parser.add_argument('-config', type=str, help='Path to the config file')
parser.add_argument('-post', type=bool, default=False, help='Post to our server. You should keep it False')
parser.add_argument('-debug', type=bool, default=False, help='Debug benchmark.py')
args = parser.parse_args()
if args.debug: print "[DEBUG] args:" + str(args)
# Parse config file
config_experiments = False
experiments = ''
host_file = None
flag = ''
tools = ''
cpu_name = ''
gpu_name = ''
cuda_driver = ''
cudnn = ''
cuda = ''
cpu_count = '1'
if args.config is not None:
with open(args.config) as f:
content = f.readlines()
#print content
for line in content:
line = line.split('#')[0].replace('\t','').replace('\n','').replace(' ', '')
if len(line) < 1 or "None" in line:
continue
if not config_experiments:
if "flag:" in line:
flag = line.split(':')[1]
elif "tools:" in line:
tools = line.split(':')[1].split(',')
elif "{" in line:
config_experiments = True
elif "host_file:" in line:
host_file = line.split(':')[1]
elif "cpu_name:" in line:
cpu_name = line.split(':')[1]
elif "gpu_name:" in line:
gpu_name = line.split(':')[1]
elif "cuda_driver:" in line:
cuda_driver = line.split(':')[1]
elif "cudnn:" in line:
cudnn = line.split(':')[1]
elif "cuda:" in line:
cuda = line.split(':')[1]
else:
if "}" in line:
config_experiments = False
experiments = experiments[:len(experiments)-1].replace('\t','').replace(' ','').split(':')
else:
experiments += line + ':'
else:
print("Please add -config <path to your config file>")
sys.exit(0)
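# Example config layout (added; the field order inside '{ }' is inferred from the exp_args[...]
# usage below, and all values here are placeholders only):
#   flag: my_test_run
#   tools: caffe,tensorflow
#   cpu_name: E5-2630
#   gpu_name: K80
#   {
#     fc;ffn;0;1;64;2;60000;0.05    # netType;network;devId;count;batchSize;numEpochs;epochSize;lr
#   }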
post_flags = " -f " + flag + " -P " + cpu_name + " -r " + cuda_driver + " -C " + cuda + " -D " + cudnn
if args.debug:
print "[DEBUG] Defalut post flags:" + str(post_flags)
print "[DEBUG] Tool(s):" + str(tools)
print "[DEBUG] Experiments:" + str(experiments)
# Benchmark each tool
root_path = os.path.dirname(os.path.abspath(__file__))
host_name = subprocess.check_output("hostname", shell=True).strip().split('\n')[0]
if not os.path.exists(root_path + "/logs/"):
os.system("rm -rf logs")
print "Creating log directory... " + root_path + "/logs/"
os.system("mkdir logs")
if args.debug:
print "[DEBUG] Benchmark running on: " + host_name
print "[DEBUG] Root path:" + root_path
for tool in tools:
work_dir = root_path + "/tools/" + tool
for experiment in experiments:
os.chdir(work_dir)
exp_args = experiment.split(";")
device_name = ''
device_count = exp_args[3]
log_file = ''
if "-1" in exp_args[2]:
device_name = cpu_name
log_file = tool + "-" + exp_args[0] + "-" + exp_args[1] + "-" + device_name + "-c" + exp_args[3] + "-" +"b"+ exp_args[4] + "-"
else:
device_name = gpu_name
log_file = tool + "-" + exp_args[0] + "-" + exp_args[1] + "-" + device_name + "-devId" + exp_args[2] + "-c" + exp_args[3] + "-" +"b"+ exp_args[4] + "-"
print "\n-------Benchmarking " + tool + " " + exp_args[1] + "-------"
log_file += time.ctime()+ "-" + host_name + ".log"
log_file = log_file.replace(" ","_")
power_log_file = '%s/logs/power_%s' % (root_path, log_file)
bm_script = "python " + tool + "bm.py"
bm_script += " -netType " + exp_args[0] + " -log "+log_file+" -batchSize "+exp_args[4]+" -network "+exp_args[1]+" -lr "+exp_args[7]
if "-1" in exp_args[2]:
bm_script += " -devId " + exp_args[2] + " -numEpochs " + exp_args[5] + " -epochSize " + exp_args[6] + " -cpuCount " + exp_args[3]
post_flags += " -c " + cpu_name
else:
bm_script += " -devId " + exp_args[2] + " -numEpochs " + exp_args[5] + " -epochSize " + exp_args[6] + " -gpuCount " + exp_args[3]
post_flags += " -d " + gpu_name
if host_file is not None and len(host_file) > 4:
bm_script += " -hostFile " + host_file
print bm_script
try:
thread = Thread(target = cgp.start_collecting_gpu_power, args = (bm_script, power_log_file))
thread.start()
result_args = subprocess.check_output(bm_script, shell=True).strip().split('\n')[0]
except Exception as e:
print "Benchmark failed with " + bm_script
os.system("cat " + root_path + "/logs/" + log_file)
continue
power, mem = cgp.get_average_gpu_power_and_mem(gpu_name, power_log_file)
post_flags += " " + result_args + " -b " + exp_args[4] + " -g " + exp_args[3] + " -e " + exp_args[6] + " -E " + exp_args[5]
post_flags += " -l " + log_file + " -T " + tool + " -n " + exp_args[1]
os.chdir(root_path)
if args.post is True:
post_script = "python post_record.py " + post_flags
print post_script
print(subprocess.check_output(post_script, shell=True).strip().split('\n')[0])
post_flags = " -f " + flag + " -d " + device_name + " -P " + cpu_name + " -A " + str(mem) + " -r " + cuda_driver + " -C " + cuda + " -D " + cudnn + " -p " + str(power)
post_script = ''
else:
print "Result:"
print result_args
print "Done!"
|
flaskwebgui.py
|
__version__ = "0.3.2"
import os
import sys
import time
from datetime import datetime
import logging
import tempfile
import socketserver
import subprocess as sps
from inspect import isfunction
from threading import Lock, Thread
logging.basicConfig(level=logging.INFO, format='flaskwebgui - [%(levelname)s] - %(message)s')
# UTILS
def find_chrome_mac():
default_dir = r'/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
if os.path.exists(default_dir):
return default_dir
# use mdfind ci to locate Chrome in alternate locations and return the first one
name = 'Google Chrome.app'
alternate_dirs = [x for x in sps.check_output(["mdfind", name]).decode().split('\n') if x.endswith(name)]
if len(alternate_dirs):
return alternate_dirs[0] + '/Contents/MacOS/Google Chrome'
return None
def find_chrome_linux():
try:
import whichcraft as wch
except Exception as e:
raise Exception("whichcraft module is not installed/found \
please fill browser_path parameter or install whichcraft!") from e
chrome_names = ['chromium-browser',
'chromium',
'google-chrome',
'google-chrome-stable']
for name in chrome_names:
chrome = wch.which(name)
if chrome is not None:
return chrome
return None
def find_chrome_win():
# using Edge by default since it's built on Chromium
edge_path = r"C:\Program Files (x86)\Microsoft\Edge\Application\msedge.exe"
if os.path.exists(edge_path):
return edge_path
import winreg as reg
reg_path = r'SOFTWARE\Microsoft\Windows\CurrentVersion\App Paths\chrome.exe'
chrome_path = None
last_exception = None
for install_type in reg.HKEY_CURRENT_USER, reg.HKEY_LOCAL_MACHINE:
try:
reg_key = reg.OpenKey(install_type, reg_path, 0, reg.KEY_READ)
chrome_path = reg.QueryValue(reg_key, None)
reg_key.Close()
except WindowsError as e:
last_exception = e
else:
if chrome_path and len(chrome_path) > 0:
break
# Only log some debug info if we failed completely to find chrome
if not chrome_path:
logging.exception(last_exception)
logging.error("Failed to detect chrome location from registry")
else:
logging.info(f"Chrome path detected as: {chrome_path}")
return chrome_path
def get_default_chrome_path():
"""
Credits for get_instance_path, find_chrome_mac, find_chrome_linux, find_chrome_win funcs
got from: https://github.com/ChrisKnott/Eel/blob/master/eel/chrome.py
"""
if sys.platform in ['win32', 'win64']:
return find_chrome_win()
elif sys.platform in ['darwin']:
return find_chrome_mac()
elif sys.platform.startswith('linux'):
return find_chrome_linux()
# class FlaskwebguiDjangoMiddleware:
# def __init__(self, get_response=None):
# self.get_response = get_response
# def __call__(self, request):
# response = self.get_response(request)
# return response
current_timestamp = None
class FlaskUI:
def __init__(self,
app,
start_server='flask',
width=800,
height=600,
maximized=False,
fullscreen=False,
browser_path=None,
socketio=None,
on_exit=None,
idle_interval=5
) -> None:
self.app = app
self.start_server = str(start_server).lower()
self.width = str(width)
self.height= str(height)
self.fullscreen = fullscreen
self.maximized = maximized
self.browser_path = browser_path if browser_path else get_default_chrome_path()
self.socketio = socketio
self.on_exit = on_exit
self.idle_interval = idle_interval
self.set_url()
self.webserver_dispacher = {
"flask": self.start_flask,
"flask-socketio": self.start_flask_socketio,
"django": self.start_django,
"fastapi": self.start_fastapi
}
self.supported_frameworks = list(self.webserver_dispacher.keys())
self.lock = Lock()
def update_timestamp(self):
self.lock.acquire()
global current_timestamp
current_timestamp = datetime.now()
self.lock.release()
def run(self):
"""
Starts 3 threads: the web framework server, the browser GUI, and a watcher that stops the server once it has been idle for idle_interval seconds
"""
self.update_timestamp()
t_start_webserver = Thread(target=self.start_webserver)
t_open_chromium = Thread(target=self.open_chromium)
t_stop_webserver = Thread(target=self.stop_webserver)
threads = [t_start_webserver, t_open_chromium, t_stop_webserver]
for t in threads: t.start()
for t in threads: t.join()
def set_url(self):
# Bind a throwaway TCPServer to port 0 so the OS hands back a free ephemeral port;
# the handler class is None because this server never actually handles a request.
with socketserver.TCPServer(("localhost", 0), None) as s:
free_port = s.server_address[1]
self.host = '127.0.0.1'
self.port = free_port
self.localhost = f"http://{self.host}:{self.port}"
def start_webserver(self):
if isfunction(self.start_server):
self.start_server()
if self.start_server not in self.supported_frameworks:
raise Exception(f"'start_server'({self.start_server}) not in {','.join(self.supported_frameworks)} and also not a function which starts the webframework")
self.webserver_dispacher[self.start_server]()
def add_flask_middleware(self):
@self.app.after_request
def keep_alive_after_request(response):
self.keep_server_running()
return response
@self.app.route("/flaskwebgui-keep-server-alive")
def keep_alive_pooling():
self.keep_server_running()
return "ok"
def start_flask(self):
self.add_flask_middleware()
try:
import waitress
waitress.serve(self.app, host=self.host, port=self.port)
except:
self.app.run(host=self.host, port=self.port)
def start_flask_socketio(self):
self.add_flask_middleware()
self.socketio.run(self.app, host=self.host, port=self.port)
def start_django(self):
try:
import waitress
waitress.serve(self.app, host=self.host, port=self.port)
except:
try:#linux and mac
os.system(f"python3 manage.py runserver {self.port}")
except:#windows
os.system(f"python manage.py runserver {self.port}")
def add_fastapi_middleware(self):
@self.app.middleware("http")
async def keep_alive_after_request(request, call_next):
response = await call_next(request)
self.keep_server_running()
return response
@self.app.route("/flaskwebgui-keep-server-alive")
async def keep_alive_pooling():
self.keep_server_running()
return "ok"
def start_fastapi(self):
import uvicorn
self.add_fastapi_middleware()
uvicorn.run(self.app, host=self.host, port=self.port, log_level="warning")
def open_chromium(self):
"""
Open the browser selected (by default it looks for chrome)
# https://peter.sh/experiments/chromium-command-line-switches/
"""
logging.info(f"Opening browser at {self.localhost}")
temp_profile_dir = os.path.join(tempfile.gettempdir(), "flaskwebgui")
if self.browser_path:
launch_options = None
if self.fullscreen:
launch_options = ["--start-fullscreen"]
elif self.maximized:
launch_options = ["--start-maximized"]
else:
launch_options = [f"--window-size={self.width},{self.height}"]
options = [
self.browser_path,
f"--user-data-dir={temp_profile_dir}",
"--new-window",
"--no-sandbox",
"--no-first-run",
# "--window-position=0,0"
] + launch_options + [f'--app={self.localhost}']
sps.Popen(options, stdout=sps.PIPE, stderr=sps.PIPE, stdin=sps.PIPE)
else:
import webbrowser
webbrowser.open_new(self.localhost)
def stop_webserver(self):
#TODO add middleware for Django
if self.start_server == 'django':
logging.info("Middleware not implemented (yet) for Django.")
return
while True:
self.lock.acquire()
global current_timestamp
delta_seconds = (datetime.now() - current_timestamp).total_seconds()
self.lock.release()
if delta_seconds > self.idle_interval:
logging.info("App closed")
break
time.sleep(self.idle_interval)
if isfunction(self.on_exit):
logging.info(f"Executing {self.on_exit.__name__} function...")
self.on_exit()
logging.info("Closing connections...")
os.kill(os.getpid(), 9)
def keep_server_running(self):
self.update_timestamp()
return "Ok"
|
TestPersistentDB.py
|
"""Test the PersistentDB module.
Note:
We don't test performance here, so the test does not indicate
whether PersistentDB will actually help to improve performance.
We also assume that the underlying SteadyDB connections are tested.
Copyright and credit info:
* This test was contributed by Christoph Zwerschke
"""
import sys
import unittest
# The TestSteadyDB module serves as a mock object for the DB-API 2 module:
sys.path.insert(1, '../..')
from DBUtils.Tests import TestSteadyDB as dbapi
from DBUtils.PersistentDB import PersistentDB, local
__version__ = '1.2'
class TestPersistentDB(unittest.TestCase):
def setUp(self):
dbapi.threadsafety = 1
def test0_CheckVersion(self):
from DBUtils import __version__ as DBUtilsVersion
self.assertEqual(DBUtilsVersion, __version__)
from DBUtils.PersistentDB import __version__ as PersistentDBVersion
self.assertEqual(PersistentDBVersion, __version__)
self.assertEqual(PersistentDB.version, __version__)
def test1_NoThreadsafety(self):
from DBUtils.PersistentDB import NotSupportedError
for dbapi.threadsafety in (None, 0):
self.assertRaises(NotSupportedError, PersistentDB, dbapi)
def test2_Close(self):
for closeable in (False, True):
persist = PersistentDB(dbapi, closeable=closeable)
db = persist.connection()
self.assertTrue(db._con.valid)
db.close()
self.assertTrue(closeable ^ db._con.valid)
db.close()
self.assertTrue(closeable ^ db._con.valid)
db._close()
self.assertTrue(not db._con.valid)
db._close()
self.assertTrue(not db._con.valid)
def test3_Connection(self):
persist = PersistentDB(dbapi)
db = persist.connection()
db_con = db._con
self.assertTrue(db_con.database is None)
self.assertTrue(db_con.user is None)
db2 = persist.connection()
self.assertEqual(db, db2)
db3 = persist.dedicated_connection()
self.assertEqual(db, db3)
db3.close()
db2.close()
db.close()
def test4_Threads(self):
numThreads = 3
persist = PersistentDB(dbapi, closeable=True)
try:
from Queue import Queue, Empty
except ImportError: # Python 3
from queue import Queue, Empty
queryQueue, resultQueue = [], []
for i in range(numThreads):
queryQueue.append(Queue(1))
resultQueue.append(Queue(1))
def runQueries(i):
this_db = persist.connection()
while 1:
try:
try:
q = queryQueue[i].get(1, 1)
except TypeError:
q = queryQueue[i].get(1)
except Empty:
q = None
if not q:
break
db = persist.connection()
if db != this_db:
r = 'error - not persistent'
else:
if q == 'ping':
r = 'ok - thread alive'
elif q == 'close':
db.close()
r = 'ok - connection closed'
else:
cursor = db.cursor()
cursor.execute(q)
r = cursor.fetchone()
cursor.close()
r = '%d(%d): %s' % (i, db._usage, r)
try:
resultQueue[i].put(r, 1, 1)
except TypeError:
resultQueue[i].put(r, 1)
db.close()
from threading import Thread
threads = []
for i in range(numThreads):
thread = Thread(target=runQueries, args=(i,))
threads.append(thread)
thread.start()
for i in range(numThreads):
try:
queryQueue[i].put('ping', 1, 1)
except TypeError:
queryQueue[i].put('ping', 1)
for i in range(numThreads):
try:
r = resultQueue[i].get(1, 1)
except TypeError:
r = resultQueue[i].get(1)
self.assertEqual(r, '%d(0): ok - thread alive' % i)
self.assertTrue(threads[i].isAlive())
for i in range(numThreads):
for j in range(i + 1):
try:
queryQueue[i].put('select test%d' % j, 1, 1)
r = resultQueue[i].get(1, 1)
except TypeError:
queryQueue[i].put('select test%d' % j, 1)
r = resultQueue[i].get(1)
self.assertEqual(r, '%d(%d): test%d' % (i, j + 1, j))
try:
queryQueue[1].put('select test4', 1, 1)
except TypeError:
queryQueue[1].put('select test4', 1)
try:
r = resultQueue[1].get(1, 1)
except TypeError:
r = resultQueue[1].get(1)
self.assertEqual(r, '1(3): test4')
try:
queryQueue[1].put('close', 1, 1)
r = resultQueue[1].get(1, 1)
except TypeError:
queryQueue[1].put('close', 1)
r = resultQueue[1].get(1)
self.assertEqual(r, '1(3): ok - connection closed')
for j in range(2):
try:
queryQueue[1].put('select test%d' % j, 1, 1)
r = resultQueue[1].get(1, 1)
except TypeError:
queryQueue[1].put('select test%d' % j, 1)
r = resultQueue[1].get(1)
self.assertEqual(r, '1(%d): test%d' % (j + 1, j))
for i in range(numThreads):
self.assertTrue(threads[i].isAlive())
try:
queryQueue[i].put('ping', 1, 1)
except TypeError:
queryQueue[i].put('ping', 1)
for i in range(numThreads):
try:
r = resultQueue[i].get(1, 1)
except TypeError:
r = resultQueue[i].get(1)
self.assertEqual(r, '%d(%d): ok - thread alive' % (i, i + 1))
self.assertTrue(threads[i].isAlive())
for i in range(numThreads):
try:
queryQueue[i].put(None, 1, 1)
except TypeError:
queryQueue[i].put(None, 1)
def test5_MaxUsage(self):
persist = PersistentDB(dbapi, 20)
db = persist.connection()
self.assertEqual(db._maxusage, 20)
for i in range(100):
cursor = db.cursor()
cursor.execute('select test%d' % i)
r = cursor.fetchone()
cursor.close()
self.assertEqual(r, 'test%d' % i)
self.assertTrue(db._con.valid)
j = i % 20 + 1
self.assertEqual(db._usage, j)
self.assertEqual(db._con.num_uses, j)
self.assertEqual(db._con.num_queries, j)
def test6_SetSession(self):
persist = PersistentDB(dbapi, 3, ('set datestyle',))
db = persist.connection()
self.assertEqual(db._maxusage, 3)
self.assertEqual(db._setsession_sql, ('set datestyle',))
self.assertEqual(db._con.session, ['datestyle'])
cursor = db.cursor()
cursor.execute('set test')
cursor.fetchone()
cursor.close()
for i in range(3):
self.assertEqual(db._con.session, ['datestyle', 'test'])
cursor = db.cursor()
cursor.execute('select test')
cursor.fetchone()
cursor.close()
self.assertEqual(db._con.session, ['datestyle'])
def test7_ThreadLocal(self):
persist = PersistentDB(dbapi)
self.assertTrue(isinstance(persist.thread, local))
class threadlocal:
pass
persist = PersistentDB(dbapi, threadlocal=threadlocal)
self.assertTrue(isinstance(persist.thread, threadlocal))
def test8_PingCheck(self):
Connection = dbapi.Connection
Connection.has_ping = True
Connection.num_pings = 0
persist = PersistentDB(dbapi, 0, None, None, 0, True)
db = persist.connection()
self.assertTrue(db._con.valid)
self.assertEqual(Connection.num_pings, 0)
db.close()
db = persist.connection()
self.assertTrue(not db._con.valid)
self.assertEqual(Connection.num_pings, 0)
persist = PersistentDB(dbapi, 0, None, None, 1, True)
db = persist.connection()
self.assertTrue(db._con.valid)
self.assertEqual(Connection.num_pings, 1)
db.close()
db = persist.connection()
self.assertTrue(db._con.valid)
self.assertEqual(Connection.num_pings, 2)
persist = PersistentDB(dbapi, 0, None, None, 2, True)
db = persist.connection()
self.assertTrue(db._con.valid)
self.assertEqual(Connection.num_pings, 2)
db.close()
db = persist.connection()
self.assertTrue(not db._con.valid)
self.assertEqual(Connection.num_pings, 2)
cursor = db.cursor()
self.assertTrue(db._con.valid)
self.assertEqual(Connection.num_pings, 3)
cursor.execute('select test')
self.assertTrue(db._con.valid)
self.assertEqual(Connection.num_pings, 3)
persist = PersistentDB(dbapi, 0, None, None, 4, True)
db = persist.connection()
self.assertTrue(db._con.valid)
self.assertEqual(Connection.num_pings, 3)
db.close()
db = persist.connection()
self.assertTrue(not db._con.valid)
self.assertEqual(Connection.num_pings, 3)
cursor = db.cursor()
db._con.close()
self.assertTrue(not db._con.valid)
self.assertEqual(Connection.num_pings, 3)
cursor.execute('select test')
self.assertTrue(db._con.valid)
self.assertEqual(Connection.num_pings, 4)
Connection.has_ping = False
Connection.num_pings = 0
def test9_FailedTransaction(self):
persist = PersistentDB(dbapi)
db = persist.connection()
cursor = db.cursor()
db._con.close()
cursor.execute('select test')
db.begin()
db._con.close()
self.assertRaises(dbapi.InternalError, cursor.execute, 'select test')
cursor.execute('select test')
db.begin()
db.cancel()
db._con.close()
cursor.execute('select test')
if __name__ == '__main__':
unittest.main()
|
stockPredictor.py
|
'''
TO DO:
• add open
• add high
• add low
• make it a function that returns a data frame of open, close, low, high, and the growth rate of each since the day before
•
• fix the calendar so you can add more data //DONE
'''
DEBUG = True
from time import *
from sklearn.svm import SVR
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from yahoo_fin.stock_info import get_live_price, get_data
import requests, bitfinex, datetime, threading, subprocess
from pymongo import MongoClient
from os import path
''' ''' ''' '''
SYMBOL='btc'
SYMBOL+='usd'
''' ''' ''' '''
print('\n'*100)
plt.style.use('seaborn-darkgrid')
#....................................................................................
def writer(predicted):
with open(SYMBOL+'previouslyPredicted.txt', 'w') as db:
db.truncate(0)
db.write(str(predicted))
#....................................................................................
def reader():
with open(SYMBOL+'previouslyPredicted.txt') as db:
prePredict = list(db)[0]
return float(prePredict)
#....................................................................................
try: reader()
except: writer('0')
'''....................................................................................'''
def growthRate(pre, curr): return ((100*curr)/pre)-100
'''....................................................................................'''
def predict(SYMBOL, graph=False, graphTag='close'):
TIMER=time()
#....................................................................................
# '''
api_v2 = bitfinex.bitfinex_v2.api_v2()
pair = SYMBOL
bin_size = '1m'
limit = 1000
# t_start = datetime.datetime(2018, 4, 1, 0, 0)
# t_start = mktime(t_start.timetuple()) * 1000
#t_stop = datetime.datetime(2018, 4, 2, 0, 0)
#t_stop = mktime(t_stop.timetuple()) * 1000
result = api_v2.candles(symbol=pair, interval=bin_size,limit=limit)#, start=t_start, end=t_stop)
tags = ['time', 'open', 'close', 'high', 'low', 'volume']
df = pd.DataFrame(result, columns=tags)
df.drop_duplicates(inplace=True)
df['time'] = pd.to_datetime(df['time'], unit='ms')
df.set_index('time', inplace=True)
df.sort_index(inplace=True)
#print(df)
#....................................................................................
'''
df = get_data(SYMBOL)#, start_date='08-1-2020')
if len(df)>=1260: df = df.iloc[-1260:]
# '''
#....................................................................................
actual_price5 = df.tail(1)
# print(actual_price, get_live_price(SYMBOL))
#df = df.head(len(df)-1) #<-------- get rid of the last day's data
days = []
dates = []
df_days = df.index.values
for index, day in enumerate(df_days):
dates.append( str(day).replace('T00:00:00.000000000','') )
days.append([index])
'''
print(df.columns.tolist())
'''
df_adj_close5 = df.loc[:, 'close']
# df_open = df.loc[:, 'open']
# df_high = df.loc[:, 'high']
# df_low = df.loc[:, 'low']
adj_close_prices5 = [float(adj_close_price5) for adj_close_price5 in df_adj_close5]
# open_prices = [float(open_price) for open_price in df_open]
# high_prices = [float(high_price) for high_price in df_high]
# low_prices = [float(low_price) for low_price in df_low]
'''Models:'''
#....................................................................................
#lin_svr = SVR(kernel='linear', C=1000.0)
#lin_svr.fit(days, adj_close_prices)
''''''
#poly_svr = SVR(kernel='poly', C=1000.0, degree=2)
#poly_svr.fit(days, adj_close_prices)
''''''
rbf_svr5 = SVR(kernel='rbf', C=1000.0, gamma=0.15)
rbf_svr5.fit(days, adj_close_prices5)
#....................................................................................
# '''
api_v2 = bitfinex.bitfinex_v2.api_v2()
pair = SYMBOL
bin_size = '1m'
limit = 1000
# t_start = datetime.datetime(2018, 4, 1, 0, 0)
# t_start = mktime(t_start.timetuple()) * 1000
#t_stop = datetime.datetime(2018, 4, 2, 0, 0)
#t_stop = mktime(t_stop.timetuple()) * 1000
result = api_v2.candles(symbol=pair, interval=bin_size,limit=limit)#, start=t_start, end=t_stop)
tags = ['time', 'open', 'close', 'high', 'low', 'volume']
df = pd.DataFrame(result, columns=tags)
df.drop_duplicates(inplace=True)
df['time'] = pd.to_datetime(df['time'], unit='ms')
df.set_index('time', inplace=True)
df.sort_index(inplace=True)
#print(df)
#....................................................................................
'''
df = get_data(SYMBOL)#, start_date='08-1-2020')
if len(df)>=1260: df = df.iloc[-1260:]
# '''
#....................................................................................
actual_price = df.tail(1)
# print(actual_price, get_live_price(SYMBOL))
#df = df.head(len(df)-1) #<-------- get rid of the last day's data
days = []
dates = []
df_days = df.index.values
for index, day in enumerate(df_days):
dates.append( str(day).replace('T00:00:00.000000000','') )
days.append([index])
'''
print(df.columns.tolist())
'''
df_adj_close = df.loc[:, 'close']
# df_open = df.loc[:, 'open']
# df_high = df.loc[:, 'high']
# df_low = df.loc[:, 'low']
adj_close_prices = [float(adj_close_price) for adj_close_price in df_adj_close]
# open_prices = [float(open_price) for open_price in df_open]
# high_prices = [float(high_price) for high_price in df_high]
# low_prices = [float(low_price) for low_price in df_low]
'''Models:'''
#....................................................................................
#lin_svr = SVR(kernel='linear', C=1000.0)
#lin_svr.fit(days, adj_close_prices)
''''''
#poly_svr = SVR(kernel='poly', C=1000.0, degree=2)
#poly_svr.fit(days, adj_close_prices)
''''''
rbf_svr = SVR(kernel='rbf', C=1000.0, gamma=0.15)
rbf_svr.fit(days, adj_close_prices)
# with open(SYMBOL+'previouslyPredicted.txt') as db:
# for data in db: prePredict = data
# with open(SYMBOL+'previouslyPredicted.txt', 'a') as db: db.write(str(*rbf_svr.predict([[len(df)]]))+'\n')
# rbf_svr_open = SVR(kernel='rbf', C=1000.0, gamma=0.15)
# rbf_svr_open.fit(days, open_prices)
#
# rbf_svr_high = SVR(kernel='rbf', C=1000.0, gamma=0.15)
# rbf_svr_high.fit(days, high_prices)
#
# rbf_svr_low = SVR(kernel='rbf', C=1000.0, gamma=0.15)
# rbf_svr_low.fit(days, low_prices)
#....................................................................................
'''Outputs:'''
'''
print(SYMBOL.upper()+':')
day = len(df)-1
print(day)
print('RBF predict today: $', *rbf_svr.predict([[day]]))
print('RBF predict tomorrow: $', *rbf_svr.predict([[day+1]]))
print('Projected growthRate today-tomorrow: %', growthRate(*rbf_svr.predict([[day]]), *rbf_svr.predict([[day+1]])))
#print('Linear predict: ', *lin_svr.predict(day))
#print('Poly predict: ', *poly_svr.predict(day))
print('Totays closing price: $', str(actual_price[TAG]).split(' ')[1])
'''
# print(SYMBOL+':\nTime:', time()-TIMER)
#....................................................................................
'''Graph:'''
if graph:
plt.figure(figsize=(8,5))
totalDays = -59
pricePlots=adj_close_prices[totalDays:]
predictPlot=list(rbf_svr.predict(days))[totalDays:]
totalDays *= -1
totalDays += 1
plt.plot(np.arange(1,totalDays), pricePlots, color='red', label='Original '+graphTag)
# '''
print(pricePlots[-1], '<-- Actual price')
print(pricePlots[-2], '<-- Previous price')
# '''
plt.plot(np.arange(1,totalDays), predictPlot, color='blue', label='Predicted '+graphTag)
totalDays -= 1
# prePredict = reader()
# toBeSaved = str(*rbf_svr.predict([[len(df)]]))
# writer(toBeSaved)
# '''
print(*rbf_svr.predict([[len(df)]]))
print(*rbf_svr.predict([[len(df)-1]]))
print(dates[-1])
# '''
plt.plot( [1,totalDays], [*rbf_svr.predict([[len(df)]]), *rbf_svr.predict([[len(df)]])], '--', label='Predicted' )
# plt.plot( [1,totalDays], [*rbf_svr.predict([[len(df)-1]]), *rbf_svr.predict([[len(df)-1]])], '--', label='Previously Predicted' )
plt.plot( [1,totalDays], [pricePlots[-1], pricePlots[-1]], '--', label='Current Close' )
# plt.plot( [1,totalDays], [pricePlots[-2], pricePlots[-2]], '--', label='Previous Close' )
plt.plot( [1,totalDays], [*rbf_svr5.predict([[len(df)]]), *rbf_svr5.predict([[len(df)]])], '--', label='Predicted next 5 minutes' )
# plt.plot([1,7], [*rbf_svr_open.predict([[len(df)]]), *rbf_svr_open.predict([[len(df)]])], '--', label='Predicted to OPEN at $'+'{:.5}'.format(float(*rbf_svr_open.predict([[len(df)]])))+' for the next trading day')
# plt.plot([1,7], [*rbf_svr_low.predict([[len(df)]]), *rbf_svr_low.predict([[len(df)]])], '--', label='Predicted the LOWEST at $'+'{:.5}'.format(float(*rbf_svr_low.predict([[len(df)]])))+' for the next trading day')
# plt.plot([1,7], [*rbf_svr_high.predict([[len(df)]]), *rbf_svr_high.predict([[len(df)]])], '--', label='Predicted the HIGHEST at $'+'{:.5}'.format(float(*rbf_svr_high.predict([[len(df)]])))+' for the next trading day')
#plt.scatter(days, rbf_svr.predict(days), color='black', label='Tomorrows Predicted Close')
#plt.plot(days, lin_svr.predict(days), color='red', label='Linear model')
#plt.plot(days, poly_svr.predict(days), color='blue', label='Poly model')
# plt.legend()
plt.legend(loc='upper center', bbox_to_anchor=(0.5, -0.05),fancybox=True, shadow=True, ncol=5, prop={'size': 6})
plt.title(SYMBOL.upper()+'\nPredicted to '+graphTag.upper()+' at %'+str( growthRate(*rbf_svr.predict([[len(df)-1]]), *rbf_svr.predict([[len(df)]]) ) ) +'\n Predicted to '+graphTag.upper()+' at %'+str(growthRate(*rbf_svr5.predict([[len(df)-1]]), *rbf_svr5.predict([[len(df)]]) ))+' over the next 5 minutes')
# plt.show()
plt.savefig(SYMBOL+'.pdf')
graphAddress = '/Users/iliyadehsarvi/Desktop/summer2020Project/'+SYMBOL+'.pdf'
subprocess.call(['osascript', '-e', 'tell application "Preview" to quit'])
subprocess.call(['open', graphAddress])
#....................................................................................
# return ['Last Trading Day', *rbf_svr.predict([[len(df)-1]])],\
# ['Next Trading Day', *rbf_svr.predict([[len(df)]])],\
# ['Last-Next growthRate', growthRate( *rbf_svr.predict([[len(df)-1]]),*rbf_svr.predict([[len(df)]]) )]
print(SYMBOL, growthRate( *rbf_svr.predict([[len(df)-1]]),*rbf_svr.predict([[len(df)]]) ), pricePlots[-1])
return [SYMBOL, growthRate( *rbf_svr.predict([[len(df)-1]]),*rbf_svr.predict([[len(df)]]) ), pricePlots[-1]]
# return ['Last Trading Day',\
# *rbf_svr_open.predict([[len(df)-1]]), *rbf_svr_high.predict([[len(df)-1]]),\
# *rbf_svr_low.predict([[len(df)-1]]), *rbf_svr.predict([[len(df)-1]])],\
# ['Next Trading Day',\
# *rbf_svr_open.predict([[len(df)]]), *rbf_svr_high.predict([[len(df)]]),\
# *rbf_svr_low.predict([[len(df)]]), *rbf_svr.predict([[len(df)]])],\
# ['Last-Next growthRate',\
# growthRate( *rbf_svr_open.predict([[len(df)-1]]),*rbf_svr_open.predict([[len(df)]]) ),\
# growthRate( *rbf_svr_high.predict([[len(df)-1]]),*rbf_svr_high.predict([[len(df)]]) ),\
# growthRate( *rbf_svr_low.predict([[len(df)-1]]),*rbf_svr_low.predict([[len(df)]]) ),\
# growthRate( *rbf_svr.predict([[len(df)-1]]),*rbf_svr.predict([[len(df)]]) )]
#
#return ['open', 'high', 'low', 'close'], ['open', 'high', 'low', 'close'], [Last-Next growthRate,'%open', '%high', '%low', '%close']
'''....................................................................................'''
s = time()
symbolList = ['btcusd', 'ethusd', 'bchusd', 'ltcusd', 'etcusd', 'bsvusd', 'xrpusd']#, 'qtumusd']
#print(predict('ethusd',True, 'close'))
#'''
while True:
# try:
pr = predict('btcusd', True)
# if pr[1] >= 0.1:
print(*pr)
sleep(30)
# except:
#
# break
#'''
#threads = []
#for i, data in enumerate(symbolList):
# try:
# threads.append(threading.Thread(target=predict, args=(data,True,'close')))
# threads[i].start()
# except: continue
#for thread in threads: thread.join()
#print(time()-s)
|
main.py
|
import pydlbot_ui as Ui
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
import time
import threading
def main():
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui.Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
def f(id):
    print("thread function", id)
    return
if __name__ == '__main__':
    # the worker functions above must exist before the threads that target them are started
    p = threading.Thread(target=main)
    p.start()
    for i in range(3):
        t = threading.Thread(target=f, args=(i,))
        t.start()
|
hypothesis_test.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
from functools import partial, reduce
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import unittest
from caffe2.python import core, workspace, tt_core, dyndep
import caffe2.python.hypothesis_test_util as hu
from caffe2.proto.caffe2_pb2 import TensorProto
dyndep.InitOpsLibrary('@/caffe2/caffe2/fb/optimizers:sgd_simd_ops')
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
@st.composite
def _tensor_and_prefix(draw, dtype, elements, min_dim=1, max_dim=4, **kwargs):
dims_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
extra_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
return (draw(hu.arrays(dims_ + extra_, dtype, elements)),
draw(hu.arrays(extra_, dtype, elements)))
def _tensor_and_indices(min_dim=1, max_dim=4, dtype=np.float32,
elements=None, **kwargs):
""" generates a tensor and a list of indices of larger tensor of same dim"""
data_dims_ = st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim)
original_dim = st.integers(min_value=2, max_value=10)
return st.tuples(data_dims_, original_dim).flatmap(lambda pair: st.tuples(
st.just(pair[1]), # original dimension
hu.arrays(pair[0], dtype, elements), # data tensor
hu.arrays(pair[0][0], dtype=np.int64, elements=st.integers(
min_value=0, max_value=pair[1] - 1)),
))
_NUMPY_TYPE_TO_ENUM = {
np.float32: core.DataType.FLOAT,
np.int32: core.DataType.INT32,
np.bool: core.DataType.BOOL,
np.uint8: core.DataType.UINT8,
np.int8: core.DataType.INT8,
np.uint16: core.DataType.UINT16,
np.int16: core.DataType.INT16,
np.int64: core.DataType.INT64,
np.float64: core.DataType.DOUBLE,
}
def _dtypes(dtypes=None):
dtypes = dtypes if dtypes else [np.int32, np.int64, np.float32]
return st.sampled_from(dtypes)
def _test_binary(name, ref, filter_=None, gcs=hu.gcs,
test_gradient=False, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(
lambda dtype: hu.tensors(
n=2, dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
out=st.sampled_from(('Y', 'X1', 'X2') if allow_inplace else ('Y',)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary(self, inputs, out, gc, dc):
op = core.CreateOperator(name, ["X1", "X2"], [out])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
# We only do gradient check with float32 types.
if test_gradient and X1.dtype == np.float32:
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
return test_binary
def _test_binary_broadcast(name, ref, filter_=None,
gcs=hu.gcs, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(lambda dtype: _tensor_and_prefix(
dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
in_place=(st.booleans() if allow_inplace else st.just(False)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary_broadcast(self, inputs, in_place, gc, dc):
op = core.CreateOperator(
name, ["X1", "X2"], ["X1" if in_place else "Y"], broadcast=1)
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
def cast_ref(x, y):
return (np.array(ref(x, y)[0], dtype=x.dtype), )
# gradient not implemented yet
# self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], cast_ref)
return test_binary_broadcast
class TestOperators(hu.HypothesisTestCase):
def test_comparison_ops(self):
ops = {"LT": lambda x1, x2: [x1 < x2],
"LE": lambda x1, x2: [x1 <= x2],
"GT": lambda x1, x2: [x1 > x2],
"GE": lambda x1, x2: [x1 >= x2]}
for name, ref in ops.items():
_test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
_test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
def test_sum(self, inputs, in_place, gc, dc):
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_row_mul(self, inputs, gc, dc):
op = core.CreateOperator("RowMul", ["X1", "X2"], ["Y"])
X1, Xtmp = inputs
X2 = Xtmp[:, 0]
def ref(x, y):
ret = np.zeros(shape=x.shape, dtype=x.dtype)
for i in range(y.size):
ret[i, ] = x[i, ] * y[i]
return [ret]
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
@given(inputs=hu.tensors(n=2), **hu.gcs_cpu_only)
def test_max(self, inputs, gc, dc):
op = core.CreateOperator("Max", ["X1", "X2"], ["Y"])
X1, X2 = inputs
# Make X1 and X2 far from each other, since X1=X2 is not differentiable
# and the step size of gradient checker is 0.05
X1[np.logical_and(X1 >= X2 - 0.05, X1 <= X2)] -= 0.05
X1[np.logical_and(X1 <= X2 + 0.05, X1 >= X2)] += 0.05
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
def elementwise_max(X, Y):
return [np.maximum(X, Y)]
self.assertReferenceChecks(gc, op, [X1, X2], elementwise_max)
def test_add(self):
def ref(x, y):
return (x + y, )
_test_binary("Add", ref, test_gradient=True)(self)
_test_binary_broadcast("Add", ref)(self)
def test_sub(self):
def ref(x, y):
return (x - y, )
# TODO(jiayq): enable gradient test when implemented.
_test_binary("Sub", ref, test_gradient=True)(self)
_test_binary_broadcast("Sub", ref)(self)
def test_mul(self):
def ref(x, y):
return (x * y, )
_test_binary("Mul", ref, test_gradient=True)(self)
_test_binary_broadcast("Mul", ref)(self)
def test_div(self):
def ref(x, y):
return (x / y, )
def non_zero(x):
return abs(x) > 10e-5
def div_dtypes():
return st.sampled_from([np.float32, np.float64])
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=True,
dtypes=div_dtypes, gcs=hu.gcs_cpu_only
)(self)
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=False,
dtypes=div_dtypes
)(self)
_test_binary_broadcast(
"Div", ref, filter_=non_zero, dtypes=div_dtypes)(self)
@given(X=hu.tensor(), in_place=st.booleans(), **hu.gcs)
def test_negative(self, X, in_place, gc, dc):
op = core.CreateOperator("Negative", ["X"],
["Y" if not in_place else "X"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_tanh(self, X, gc, dc):
op = core.CreateOperator("Tanh", "X", "Y")
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_averaged_loss(self, X, gc, dc):
op = core.CreateOperator("AveragedLoss", ["X"], ["loss"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
def test_softsign(self, X, inplace, gc, dc):
op = core.CreateOperator("Softsign", ["X"], ["X" if inplace else "Y"])
def softsign(X):
return (X / (1 + np.abs(X)),)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertReferenceChecks(gc, op, [X], softsign)
if inplace:
with self.assertRaises(Exception):
self.assertGradientChecks(gc, op, [X], 0, [0])
else:
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(
device_options=st.lists(
min_size=2,
max_size=4,
elements=st.sampled_from(hu.expanded_device_options)),
set_seed=st.booleans())
def test_random_seed_behaviour(self, device_options, set_seed):
# Assume we are always operating on CUDA or CPU, since RNG is
# inconsistent between CPU and GPU.
device_options = copy.deepcopy(device_options)
assume(len({do.device_type for do in device_options}) == 1)
if set_seed:
for do in device_options:
do.random_seed = 1000
def run(do):
# Reset each time because 'Y' may already exist in the workspace
# on a different device
workspace.ResetWorkspace()
ws = workspace.C.Workspace()
op = core.CreateOperator(
"XavierFill", [], ["Y"],
device_option=do,
shape=[2])
ws.run(op)
return ws.blobs["Y"].fetch()
ys = [run(do) for do in device_options]
for y in ys[1:]:
if set_seed:
np.testing.assert_array_equal(ys[0], y)
else:
with self.assertRaises(AssertionError):
np.testing.assert_array_equal(ys[0], y)
@given(axis=st.integers(min_value=1, max_value=4),
num_output=st.integers(min_value=4, max_value=8),
engine=st.sampled_from(["", "PACKED"]),
**hu.gcs)
def test_fully_connected_axis(self, axis, num_output, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
def prod(xs):
p = 1
for x in xs:
p *= x
return p
K = prod(list(X.shape)[axis:])
N = num_output
W = np.random.randn(N, K).astype(np.float32)
b = np.random.randn(N).astype(np.float32)
op = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
engine=engine,
axis=axis)
for name, param in [("X", X), ("W", W), ("b", b)]:
self.ws.create_blob(name).feed(param)
self.ws.run(op)
Y = self.ws.blobs["Y"].fetch()
self.assertEqual(list(Y.shape), list(X.shape)[:axis] + [N])
inputs = [X, W, b]
self.assertDeviceChecks(dc, op, inputs, [0])
for param, _ in enumerate(inputs):
self.assertGradientChecks(gc, op, inputs, param, [0])
@unittest.skipIf(not workspace.has_gpu_support,
"Skipping test due to no gpu present.")
@given(hidden_size=st.integers(min_value=1, max_value=3),
num_layers=st.integers(min_value=1, max_value=3),
bidirectional=st.booleans(),
rnn_mode=st.sampled_from(["lstm"]), # TODO: "gru"
input_mode=st.sampled_from(["linear"]),
dropout=st.floats(min_value=1.0, max_value=1.0),
T=st.integers(min_value=2, max_value=6),
N=st.integers(min_value=1, max_value=4),
D=st.integers(min_value=1, max_value=4))
def test_recurrent(self, hidden_size, num_layers, bidirectional, rnn_mode,
input_mode, dropout, T, N, D):
# Random seed, this one happens to pass
seed = 1234
np.random.seed(seed)
input_weight_size = hidden_size * D
recurrent_weight_size = hidden_size * hidden_size
input_bias_size = hidden_size
recurrent_bias_size = hidden_size
num_directions = 2 if bidirectional else 1
total_sz = 4 * (input_weight_size + recurrent_weight_size +
input_bias_size + recurrent_bias_size) * num_layers
total_sz *= num_directions
W = np.random.rand(total_sz).astype(np.float32)
self.ws.create_blob("WEIGHT").feed(W, device_option=hu.gpu_do)
op = core.CreateOperator(
"Recurrent",
["INPUT", "HIDDEN_INPUT", "CELL_INPUT", "WEIGHT"],
["OUTPUT", "HIDDEN_OUTPUT", "CELL_OUTPUT",
"RNN_SCRATCH", "DROPOUT_STATES"],
hidden_size=hidden_size,
bidirectional=bidirectional,
rnn_mode=rnn_mode,
dropout=dropout,
input_mode=input_mode,
num_layers=num_layers,
seed=seed,
engine="CUDNN")
X = np.random.randn(T, N, D).astype(np.float32)
self.ws.create_blob("INPUT").feed(X, device_option=hu.gpu_do)
W = self.ws.blobs["WEIGHT"].fetch()
H = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32)
C = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32) if rnn_mode == "lstm" else \
np.empty((1,)).astype(np.float32) # unused in GRU
inputs = [X, H, C, W]
input_idxs = [i for (i, _) in enumerate(inputs)] \
if rnn_mode == "lstm" else [0, 1, 3] # ignore C
for input_idx in input_idxs:
self.assertGradientChecks(
hu.gpu_do, op, inputs, input_idx, [0])
@given(ndim=st.integers(1, 4),
axis=st.integers(0, 3),
num_inputs=st.integers(2, 4), **hu.gcs)
def test_depth_concat(self, ndim, axis, num_inputs, gc, dc):
assume(axis < ndim)
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7][:ndim]
individual_dims = [1, 2, 3, 4, 5][:num_inputs]
inputs = []
for i in range(num_inputs):
# Sets a unique dim and create the input.
shape[axis] = individual_dims[i]
inputs.append(np.random.randn(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
axis=axis)
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
@given(num_inputs=st.integers(2, 4),
order=st.sampled_from([("NCHW", 1), ("NHWC", 3)]),
**hu.gcs)
def test_depth_concat_with_order(self, num_inputs, order, gc, dc):
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7]
individual_dims = [1, 2, 3, 4][:num_inputs]
inputs = []
for i in range(num_inputs):
# Sets a unique dim and create the input.
shape[order[1]] = individual_dims[i]
inputs.append(np.random.rand(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
order=order[0])
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
@given(X=hu.arrays(dims=[5, 2],
elements=st.floats(min_value=0.0, max_value=10.0)),
**hu.gcs_cpu_only)
def test_last_n_windows(self, X, gc, dc):
workspace.FeedBlob('input', X)
collect_net = core.Net('collect_net')
collect_net.LastNWindowCollector(
['input'],
['output'],
num_to_collect=7,
)
plan = core.Plan('collect_data')
plan.AddStep(core.execution_step('collect_data',
[collect_net], num_iter=2))
workspace.RunPlan(plan)
output = workspace.FetchBlob('output')
inputs = workspace.FetchBlob('input')
new_output = np.zeros([7, inputs.shape[1]])
for i in range(inputs.shape[0] * 2):
new_output[i % 7] = inputs[i % inputs.shape[0]]
import numpy.testing as npt
npt.assert_almost_equal(output, new_output, decimal=5)
@given(batch_size=st.integers(1, 3),
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
size, channels, gc, dc):
dkernel = (dilation * (kernel - 1) + 1)
assume(size >= dkernel)
NCHW_TO_NHWC = (0, 2, 3, 1)
NHWC_TO_NCHW = (0, 3, 1, 2)
COL_NHWC_TO_NCHW = (4, 2, 3, 0, 1)
N = batch_size
C = channels
H = size
W = size
out_h = int((H + (2 * pad) - dkernel) / stride + 1)
out_w = int((W + (2 * pad) - dkernel) / stride + 1)
im_nchw = np.random.rand(N, C, H, W).astype(np.float32) - 0.5
im_nhwc = im_nchw.transpose(NCHW_TO_NHWC)
op_im2col_nchw = core.CreateOperator(
"Im2Col",
["im_nchw"], ["col_nchw"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NCHW",
device_option=gc)
op_im2col_nhwc = core.CreateOperator(
"Im2Col",
["im_nhwc"], ["col_nhwc"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NHWC",
device_option=gc)
self.ws.create_blob("im_nchw").feed(im_nchw, device_option=gc)
self.ws.create_blob("im_nhwc").feed(im_nhwc, device_option=gc)
self.ws.run(op_im2col_nchw)
self.ws.run(op_im2col_nhwc)
# there is probably a clever way to spell this in np
col_nchw = self.ws.blobs["col_nchw"].fetch()
col_nhwc = self.ws.blobs["col_nhwc"].fetch()
col_nchw_ = col_nchw.reshape(N, C, kernel, kernel, out_h, out_w)
col_nhwc_ = col_nhwc.reshape(N, out_h, out_w, kernel, kernel, C)
for i in range(0, N):
np.testing.assert_allclose(
col_nchw_[i],
col_nhwc_[i].transpose(COL_NHWC_TO_NCHW),
atol=1e-4,
rtol=1e-4)
op_col2im_nchw = core.CreateOperator(
"Col2Im",
["col_nchw", "im_nchw"],
["out_nchw"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NCHW",
device_option=gc)
op_col2im_nhwc = core.CreateOperator(
"Col2Im",
["col_nhwc", "im_nhwc"],
["out_nhwc"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NHWC",
device_option=gc)
self.ws.run(op_col2im_nchw)
self.ws.run(op_col2im_nhwc)
out_nchw = self.ws.blobs["out_nchw"].fetch()
out_nhwc = self.ws.blobs["out_nhwc"].fetch()
np.testing.assert_allclose(
out_nchw,
out_nhwc.transpose(NHWC_TO_NCHW),
atol=1e-4,
rtol=1e-4)
@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, np.bool]))
def test_print(self, dtype):
data = np.random.permutation(6).astype(dtype)
self.ws.create_blob("data").feed(data)
op = core.CreateOperator("Print", "data", [])
self.ws.run(op)
@given(inputs=hu.tensors(n=2),
in_place=st.booleans(),
momentum=st.floats(min_value=0.1, max_value=0.9),
nesterov=st.booleans(),
lr=st.floats(min_value=0.1, max_value=0.9),
**hu.gcs)
def test_momentum_sgd(
self, inputs, in_place, momentum, nesterov, lr, gc, dc):
grad, m = inputs
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"MomentumSGD",
["grad", "m", "lr"],
["grad" if in_place else "grad_o",
"m" if in_place else "m_o"],
momentum=momentum,
nesterov=int(nesterov),
device_option=gc)
self.assertDeviceChecks(
dc, op, [grad, m, lr], [0])
# Reference
def momentum_sgd(grad, m, lr):
lr = lr[0]
if not nesterov:
adjusted_gradient = lr * grad + momentum * m
return (adjusted_gradient, adjusted_gradient)
else:
m_new = momentum * m + lr * grad
return ((1 + momentum) * m_new - momentum * m, m_new)
self.assertReferenceChecks(gc, op, [grad, m, lr], momentum_sgd)
@given(inputs=hu.tensors(n=3),
in_place=st.booleans(),
decay=st.floats(min_value=0.1, max_value=0.9),
momentum=st.floats(min_value=0.1, max_value=0.9),
lr=st.floats(min_value=0.1, max_value=0.9),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs)
def test_rmsprop_sgd(self, inputs, in_place, decay, momentum, lr, epsilon,
gc, dc):
grad, ms, mom = inputs
ms = np.abs(ms) + 0.01
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"RmsProp",
["grad", "ms", "mom", "lr"],
["grad" if in_place else "grad_o",
"ms" if in_place else "ms_o",
"mom" if in_place else "mom_o"],
momentum=momentum, decay=decay, epsilon=epsilon, device_option=gc)
self.assertDeviceChecks(dc, op, [grad, ms, mom, lr], [0])
def rmsprop(grad, ms, mom, lr):
lr = lr[0]
ms_o = ms + (1. - decay) * (np.square(grad) - ms)
mom_o = momentum * mom + lr * grad / np.sqrt(epsilon + ms_o)
grad_o = mom_o
return (grad_o, ms_o, mom_o)
self.assertReferenceChecks(gc, op, [grad, ms, mom, lr], rmsprop)
# Reference
@staticmethod
def _dense_adagrad(epsilon, w, h, grad, lr):
lr = lr[0]
h_o = h + np.square(grad)
grad_o = lr * grad / (np.sqrt(h_o) + epsilon)
w_o = w + grad_o
return (w_o, h_o)
# Reference
@staticmethod
def _dense_adam(epsilon, beta1, beta2, w, m1, m2, grad, lr, iters):
lr = lr[0]
iters = iters[0]
t = iters + 1
corrected_local_rate = lr * np.sqrt(1. - np.power(beta2, t)) / \
(1. - np.power(beta1, t))
m1_o = (beta1 * m1) + (1. - beta1) * grad
m2_o = (beta2 * m2) + (1. - beta2) * np.square(grad)
grad_o = corrected_local_rate * m1_o / \
(np.sqrt(m2_o) + epsilon)
w_o = w + grad_o
return (w_o, m1_o, m2_o)
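# Note on the reference above: Adam's bias correction is folded into the step size.
# With m1_o = beta1*m1 + (1-beta1)*g and m2_o = beta2*m2 + (1-beta2)*g^2, the step
#   lr * sqrt(1 - beta2^t) / (1 - beta1^t) * m1_o / (sqrt(m2_o) + epsilon)
# matches the textbook form lr * m1_hat / (sqrt(m2_hat) + epsilon) with
# m1_hat = m1_o / (1 - beta1^t) and m2_hat = m2_o / (1 - beta2^t), differing only in
# where epsilon enters relative to the bias correction.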
@given(inputs=hu.tensors(n=3),
in_place=st.booleans(),
lr=st.floats(min_value=0.1, max_value=0.9),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_adagrad_sgd(self, inputs, in_place, lr, epsilon, engine,
gc, dc):
w, grad, h = inputs
h = np.abs(h) + 0.01
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"Adagrad",
["w", "h", "grad", "lr"],
["w" if in_place else "grad_o",
"h" if in_place else "h_o"],
epsilon=epsilon, engine=engine, device_option=gc)
self.assertDeviceChecks(dc, op, [w, h, grad, lr], [0])
self.assertReferenceChecks(gc, op, [w, h, grad, lr],
partial(self._dense_adagrad, epsilon))
@given(inputs=hu.tensors(n=3),
lr=st.floats(min_value=0.1, max_value=0.9),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_adagrad_sgd(self, inputs, lr, epsilon,
engine, gc, dc):
w, grad, h = inputs
indices = np.arange(h.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
h = np.abs(h)
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"SparseAdagrad",
["param", "h", "indices", "grad", "lr"],
["param", "h"],
epsilon=epsilon,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [w, h, indices, grad, lr], [0])
def adagrad(param, h, i, grad, lr):
sw, sh = self._dense_adagrad(epsilon, param[i], h[i], grad, lr)
h[i] = sh
param[i] = sw
return (param, h)
self.assertReferenceChecks(gc, op, [w, h, indices, grad, lr], adagrad)
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
beta1=st.floats(min_value=0.1, max_value=0.9),
beta2=st.floats(min_value=0.1, max_value=0.9),
lr=st.floats(min_value=0.1, max_value=0.9),
iters=st.integers(min_value=1, max_value=10000),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs_cpu_only)
def test_adam_sgd(self, inputs, in_place, beta1, beta2, lr, iters, epsilon,
gc, dc):
w, grad, m1, m2 = inputs
m2 += np.abs(m2) + 0.01
lr = np.asarray([lr], dtype=np.float32)
iters = np.asarray([iters], dtype=np.int64)
op = core.CreateOperator(
"Adam",
["w", "m1", "m2", "grad", "lr", "iters"],
["w" if in_place else "w_o",
"m1" if in_place else "m1_o",
"m2" if in_place else "m2_o"],
beta1=beta1, beta2=beta2, epsilon=epsilon,
device_option=gc)
input_device_options = {"iters": hu.cpu_do}
inputs = [w, m1, m2, grad, lr, iters]
self.assertDeviceChecks(
dc, op, inputs, [0], input_device_options=input_device_options)
self.assertReferenceChecks(gc, op, inputs, partial(self._dense_adam,
epsilon, beta1, beta2),
input_device_options=input_device_options)
@given(inputs=hu.tensors(n=4),
beta1=st.floats(min_value=0.1, max_value=0.9),
beta2=st.floats(min_value=0.1, max_value=0.9),
lr=st.floats(min_value=0.1, max_value=0.9),
iters=st.integers(min_value=1, max_value=10000),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs_cpu_only)
def test_sparse_adam_sgd(self, inputs, beta1, beta2, lr, iters,
epsilon, gc, dc):
w, grad, m1, m2 = inputs
indices = np.arange(m1.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
m2 += np.abs(m2) + 0.01
lr = np.asarray([lr], dtype=np.float32)
iters = np.asarray([iters], dtype=np.int64)
op = core.CreateOperator(
"SparseAdam",
["w", "m1", "m2", "indices", "grad", "lr", "iters"],
["w", "m1", "m2"],
beta1=beta1, beta2=beta2, epsilon=epsilon,
device_option=gc)
input_device_options = {"iters": hu.cpu_do}
inputs = [w, m1, m2, indices, grad, lr, iters]
self.assertDeviceChecks(
dc, op, inputs, [0], input_device_options=input_device_options)
def adam(w, m1, m2, i, grad, lr, iters):
nw, nm1, nm2 = self._dense_adam(epsilon, beta1, beta2, w[i],
m1[i], m2[i], grad, lr, iters)
w[i] = nw
m1[i] = nm1
m2[i] = nm2
return (w, m1, m2)
self.assertReferenceChecks(gc, op, inputs, adam)
# Reference
@staticmethod
def _dense_ftrl(alpha, beta, lambda1, lambda2, w, nz, g):
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
# python port of Sigrid's implementation
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
w = (np.sign(z) * lambda1 - z) / (
(beta + np.sqrt(n)) / alpha + lambda2)
w[np.abs(z) <= lambda1] = 0
return (w, np.stack([n, z], axis=-1))
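# Note on the reference above: this is the per-coordinate FTRL-Proximal update
# (McMahan et al.), where sigma tracks the change in the per-coordinate learning rate,
# z accumulates g - sigma*w, n accumulates g^2, and the closed-form weight is 0
# whenever |z| <= lambda1 (L1-induced sparsity), otherwise
# (sign(z)*lambda1 - z) / ((beta + sqrt(n))/alpha + lambda2).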
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_ftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_ftrl, alpha, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_ftrl_sgd(self, inputs, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad"],
["var", "nz"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad], [0])
# Reference
def ftrl(w, nz, i, g):
sw, snz = self._dense_ftrl(alpha, beta, lambda1, lambda2,
w[i], nz[i], g)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad], ftrl)
@given(input=hu.tensor(max_value=20,
max_dim=1,
dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
with_remapping=st.booleans(),
**hu.gcs_cpu_only)
def test_unique(self, input, with_remapping, gc, dc):
op = core.CreateOperator(
"Unique",
["input"],
["unique"] + (["remapping"] if with_remapping else []),
device_option=gc)
self.assertDeviceChecks(dc, op, [input], [0])
# Validator
def unique_valid(input, unique, remapping=None):
self.assertEqual(unique.size, len(set(input)))
self.assertEqual(sorted(unique), sorted(set(input)))
if with_remapping:
self.assertEqual(remapping.shape, input.shape)
remapped = [unique[remapping[i]] for i in range(len(input))]
np.testing.assert_array_equal(remapped, input)
self.assertValidationChecks(gc, op, [input], unique_valid)
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
top_k=st.integers(min_value=1, max_value=3),
**hu.gcs)
def test_accuracy(self, prediction, labels, top_k, gc, dc):
if(top_k > 1):
gc = hu.cpu_do
op = core.CreateOperator(
"Accuracy",
["prediction", "labels"],
["accuracy"],
top_k=top_k,
device_option=gc
)
def op_ref(prediction, labels, top_k):
N = prediction.shape[0]
correct = 0
for i in range(0, len(prediction)):
# cmp= is gone in Python 3, so sort descending by probability with key=/reverse=
pred_sorted = sorted(
[[item, j] for j, item in enumerate(prediction[i])],
key=lambda x: x[0], reverse=True)
max_ids = [x[1] for x in pred_sorted[0:top_k]]
for m in max_ids:
if m == labels[i]:
correct += 1
accuracy = correct / N
return (accuracy,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels, top_k],
reference=op_ref)
@given(target_probabilities=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0.01,
max_value=1)),
**hu.gcs)
def test_perplexity(self, target_probabilities, gc, dc):
op = core.CreateOperator(
"Perplexity",
["target_probabilities"],
["perplexity"]
)
def op_ref(target_probabilities):
N = target_probabilities.shape[0]
perplexities = np.power(target_probabilities, -1.0 / N)
perplexity = reduce(lambda x, y: x * y, perplexities)
return (perplexity,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[target_probabilities],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_segment_ids(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToSegmentIds",
["lengths"],
["segment_ids"])
def op_ref(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_range_fill(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsRangeFill",
["lengths"],
["increasing_seq"])
def op_ref(lengths):
sids = []
for _, l in enumerate(lengths):
sids.extend(range(l))
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(**hu.gcs_cpu_only)
def test_segment_ids_to_ranges(self, gc, dc):
lengths = [4, 6, 3, 2, 0, 4]
op = core.CreateOperator(
"SegmentIdsToRanges",
["segment_ids"],
["ranges"])
def op_ref(segment_ids):
ranges = [np.array([0, 0], dtype=np.int32)]
prev = 0
for i, sid in enumerate(segment_ids):
while sid != prev:
prev += 1
ranges.append(np.array([i, 0], dtype=np.int32))
ranges[-1][1] += 1
return (np.array(ranges, dtype=np.int32), )
def lengths_to_segment_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=np.array(lengths_to_segment_ids(lengths), dtype=np.int32),
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_ranges(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToRanges",
["lengths"],
["ranges"])
def op_ref(x):
if not x.size:
return (x.reshape((0, 2)), )
return (np.column_stack((np.concatenate(([0], np.cumsum(x)[:-1])),
x)), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
**hu.gcs)
def test_multi_class_accuracy(self, prediction, labels, gc, dc):
op = core.CreateOperator(
"MultiClassAccuracy",
["prediction", "labels"],
["accuracies", "amounts"]
)
def op_ref(prediction, labels):
N = prediction.shape[0]
D = prediction.shape[1]
accuracies = np.empty(D, dtype=float)
accuracies.fill(0)
amounts = np.empty(D, dtype=int)
amounts.fill(0)
max_ids = np.argmax(prediction, axis=1)
for i in range(0, N):
max_id = max_ids[i]
label_id = labels[i]
if max_id == label_id:
accuracies[label_id] += 1
amounts[label_id] += 1
for i in range(0, D):
amount = amounts[i]
if amount:
accuracies[i] /= amount
return (accuracies, amounts,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_segment_ids_to_lengths(self, lengths, gc, dc):
op = core.CreateOperator(
"SegmentIdsToLengths",
["segment_ids"],
["lengths"])
def lengths_to_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return sids
segment_ids = lengths_to_ids(lengths)
def ids_to_lengths(ids):
ids_length = len(ids)
if ids_length == 0:
return (np.array([], dtype=np.int32),)
lengths = []
# segment id starts with 0
prev_id = -1
tmp_length = 0
for idx in range(ids_length):
cur_id = ids[idx]
if cur_id != prev_id:
if idx != 0:
lengths.append(tmp_length)
while prev_id + 1 != cur_id:
lengths.append(0)
prev_id += 1
prev_id = cur_id
tmp_length = 0
tmp_length += 1
lengths.append(tmp_length)
return (np.array(lengths, dtype=np.int32),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(segment_ids, dtype=np.int32)],
reference=ids_to_lengths)
@given(lengths=st.lists(st.integers(min_value=1, max_value=10),
min_size=0,
max_size=10),
power=st.sampled_from([0.5, 1.0, 1.5, 2.0]),
**hu.gcs_cpu_only)
def test_lengths_to_weights(self, lengths, power, gc, dc):
op = core.CreateOperator(
"LengthsToWeights",
["lengths"],
["weights"],
power=power)
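        # Each element of a segment of length l is given weight 1 / l ** power.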
def lengths_to_weights(lengths):
weighted_length = []
for l in lengths:
weighted_length.extend(l * [1 / pow(l, power)])
return (np.array(weighted_length, dtype=float),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=lengths_to_weights)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_exp(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Exp",
["input"],
["output"]
)
def exp_ref(input_tensor):
return (np.exp(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=exp_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=1,
max_value=10000)),
**hu.gcs_cpu_only)
def test_log(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Log",
["input"],
["output"]
)
def log_ref(input_tensor):
return (np.log(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=log_ref)
self.assertGradientChecks(gc, op, [input_tensor], 0, [0])
@given(num_threads=st.integers(1, 10), # noqa
num_elements=st.integers(1, 100),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_blobs_queue_threading(self, num_threads, num_elements,
capacity, num_blobs, do):
"""
- Construct matrices of size N x D
- Start K threads
- Push all N rows into the queue of capacity C
- Pull all N rows out of the queue.
- Verify that the output matrices are permutation of the rows of the
original matrices.
"""
import threading
import Queue
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=capacity,
num_blobs=num_blobs,
device_option=do)
self.ws.run(op)
xs = [np.random.randn(num_elements, 5).astype(np.float32)
for _ in range(num_blobs)]
q = Queue.Queue()
for i in range(num_elements):
q.put([x[i] for x in xs])
def enqueue(t):
while True:
feed_blobs = ["x_{}_{}".format(i, t) for i in range(num_blobs)]
op = core.CreateOperator(
"EnqueueBlobs",
["queue"] + feed_blobs,
feed_blobs,
device_option=do)
try:
elems = q.get_nowait()
for elem, feed_blob in zip(elems, feed_blobs):
self.ws.create_blob(feed_blob).feed(
elem, device_option=do)
self.ws.run(op)
except Queue.Empty:
return
# Create all blobs before racing on multiple threads
# (blob creation is not threadsafe)
for t in range(num_threads):
for i in range(num_blobs):
self.ws.create_blob("x_{}_{}".format(i, t))
threads = [threading.Thread(target=enqueue, args=(t,))
for t in range(num_threads)]
for thread in threads:
thread.start()
for n in range(num_elements):
dequeue_blobs = ["y_{}_{}".format(i, n) for i in range(num_blobs)]
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
dequeue_blobs,
device_option=do)
self.ws.run(op)
for thread in threads:
thread.join()
op = core.CreateOperator("CloseBlobsQueue", ["queue"], [])
self.ws.run(op)
ys = [np.vstack([self.ws.blobs["y_{}_{}".format(i, n)].fetch()
for n in range(num_elements)])
for i in range(num_blobs)]
for i in range(num_blobs):
self.assertEqual(ys[i].shape, xs[i].shape)
for j in range(num_elements):
# Verify that the rows of the returned blob are a
# permutation. The order may be different due to
# different threads racing.
self.assertTrue(
any(np.array_equal(xs[i][j], ys[i][k])
for k in range(num_elements)))
@given(num_producers=st.integers(1, 10),
num_consumers=st.integers(1, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_safe_blobs_queue(self, num_producers, num_consumers,
capacity, num_blobs, do):
init_net = core.Net('init_net')
queue = init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs)
producer_steps = []
truth = 0
for i in range(num_producers):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for times in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queue] + blobs, blobs + [status])
count = (i + 1) * 10
step = core.execution_step(name, net, num_iter=count)
truth += count
producer_steps.append(step)
producer_exit_net = core.Net('producer_exit_net')
producer_exit_net.CloseBlobsQueue([queue], 0)
producer_step = core.execution_step('producer', [
core.execution_step(
'producers', producer_steps, concurrent_substeps=True),
core.execution_step('producer_exit', producer_exit_net)]
)
consumer_steps = []
counters = []
const_1 = init_net.ConstantFill([], 1, value=1.0)
for i in range(num_consumers):
name = 'consumer_%d' % i
net1 = core.Net(name)
blobs = net1.SafeDequeueBlobs([queue], num_blobs + 1)
status = blobs[-1]
net2 = core.Net(name + '_counter')
counter = init_net.ConstantFill([], 1, value=0.0)
counters.append(counter)
net2.Add([counter, const_1], counter)
consumer_steps.append(core.execution_step(
name, [net1, net2], should_stop_blob=status))
consumer_step = core.execution_step(
'consumer', consumer_steps, concurrent_substeps=True)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [consumer_step, producer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
v = 0
for counter in counters:
v += self.ws.blobs[str(counter)].fetch().tolist()
self.assertEqual(v, truth)
@given(num_queues=st.integers(1, 5),
num_iter=st.integers(5, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3))
def test_weighted_sample_blobs_queue(
self, num_queues, num_iter, capacity, num_blobs
):
# Create BlobsQueue for each input queue
print("num_queues", num_queues)
init_net = core.Net('init_net')
queues = [
init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs
) for _ in range(num_queues)
]
        # Create a producer net and a producer exit net for each input queue
producer_steps = []
producer_exit_nets = []
for i in range(num_queues):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for _ in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queues[i]] + blobs, blobs + [status])
exit_net = core.Net('producer_exit_%d' % i)
exit_net.CloseBlobsQueue(queues[i], 0)
producer_exit_nets.append(exit_net)
step = core.execution_step(
name, [
core.execution_step(
'producer_%d' % i, [net], num_iter=num_iter
),
core.execution_step('producer_exit_%d' % i, [exit_net]),
]
)
producer_steps.append(step)
producer_step = core.execution_step(
'producer', [
core.execution_step(
'producers',
producer_steps,
concurrent_substeps=True,
),
]
)
status_lst = []
def append(ins, outs):
status_lst.append(ins)
        # Create one consumer dequeue net and one consumer exit net
consumer_net = core.Net('weight_sample_dequeue_net')
blobs = consumer_net.WeightedSampleDequeueBlobs(
queues,
num_blobs + 1,
weights=np.random.uniform(low=0.0, high=1.0, size=(num_queues,))
)
status = blobs[-1]
consumer_net.Python(append)(status)
consumer_step = core.execution_step(
'consumer',
[
core.execution_step(
'consumer', [consumer_net], should_stop_blob=status
),
core.execution_step('producer_exit', producer_exit_nets)
]
)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [producer_step, consumer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
assert len(status_lst) >= num_iter + 1
assert len(status_lst) <= num_iter * num_queues + 1
@given(
data=hu.tensor(),
**hu.gcs_cpu_only)
def test_squeeze_expand_dims(self, data, gc, dc):
dims = [0, 0]
if len(data.shape) > 2:
dims.append(2)
op = core.CreateOperator(
"ExpandDims",
["data"],
["expanded"],
dims=dims)
def expand_dims_ref(data, *args, **kw):
inc_dims = list(set(dims))
inc_dims.sort()
r = data
for dim in inc_dims:
r = np.expand_dims(r, axis=dim)
return (r, )
def squeeze_ref(data, *args, **kw):
dec_dims = list(set(dims))
dec_dims.sort(reverse=True)
r = data
for dim in dec_dims:
r = np.squeeze(r, axis=dim)
return (r, )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data],
reference=expand_dims_ref,
output_to_grad='expanded',
grad_reference=squeeze_ref)
@given(**hu.gcs_cpu_only)
def test_tt_layer(self, gc, dc):
seed = 1234
np.random.seed(seed)
inp_sizes = [2, 2, 2, 2]
out_sizes = [2, 2, 2, 2]
tt_ranks = [1, 3, 3, 3, 1]
op = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=tt_ranks,
)
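        # The TT (tensor-train) layer factorizes a fully-connected weight matrix
        # into a chain of small cores; inp_sizes/out_sizes are the factorized
        # input/output dimensions and tt_ranks the ranks of the decomposition.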
X = np.expand_dims(
np.random.rand(16).astype(np.float32), axis=0)
b = np.array([0] * 16).astype(np.float32)
cores = tt_core.init_tt_cores(inp_sizes, out_sizes, tt_ranks)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("b").feed(b)
self.ws.create_blob("cores").feed(cores)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
Y = Y.reshape([16])
golden = np.array([-9.51763490e-07, -1.28442286e-06,
-2.86281141e-07, 2.28865644e-07,
-1.96180017e-06, -1.78920531e-06,
9.31094666e-07, -2.04273989e-07,
1.70017107e-06, 1.64845711e-06,
-1.06099132e-06, -4.69111137e-07,
6.57552358e-08, -1.28942040e-08,
-2.29114004e-07, -1.04262714e-06])
# This golden array is dependent on the specified inp_sizes, out_sizes,
# tt_ranks, and seed. Changing these will cause the test to fail.
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=1e-10)
@given(num_workers=st.integers(1, 10),
net_type=st.sampled_from(
["simple", "dag"] +
(["async_dag"] if workspace.has_gpu_support else [])),
do=st.sampled_from(hu.device_options))
def test_dag_net_forking(self, net_type, num_workers, do):
from caffe2.python.cnn import CNNModelHelper
m = CNNModelHelper()
n = 10
d = 2
depth = 2
iters = 5
np.random.seed(1701)
# Build a binary tree of FC layers, summing at each node.
for i in reversed(range(depth)):
for j in range(2 ** i):
bottom_1 = "{}_{}".format(i + 1, 2 * j)
bottom_2 = "{}_{}".format(i + 1, 2 * j + 1)
mid_1 = "{}_{}_m".format(i + 1, 2 * j)
mid_2 = "{}_{}_m".format(i + 1, 2 * j + 1)
top = "{}_{}".format(i, j)
m.FC(
bottom_1, mid_1,
dim_in=d, dim_out=d,
weight_init=m.ConstantInit(np.random.randn()),
bias_init=m.ConstantInit(np.random.randn()))
m.FC(
bottom_2, mid_2,
dim_in=d, dim_out=d,
weight_init=m.ConstantInit(np.random.randn()),
bias_init=m.ConstantInit(np.random.randn()))
m.net.Sum([mid_1, mid_2], top)
m.net.SquaredL2Distance(["0_0", "label"], "xent")
m.net.AveragedLoss("xent", "loss")
input_to_grad = m.AddGradientOperators(["loss"])
m.Proto().device_option.CopyFrom(do)
m.param_init_net.Proto().device_option.CopyFrom(do)
m.Proto().type = net_type
m.Proto().num_workers = num_workers
self.ws.run(m.param_init_net)
print(str(m.Proto()))
def run():
import numpy as np
np.random.seed(1701)
input_blobs = ["{}_{}".format(depth, j) for j in range(2 ** depth)]
for input_blob in input_blobs:
self.ws.create_blob(input_blob).feed(
np.random.randn(n, d).astype(np.float32),
device_option=do)
self.ws.create_blob("label").feed(
np.random.randn(n, d).astype(np.float32),
device_option=do)
self.ws.run(m.net)
gradients = [
self.ws.blobs[str(input_to_grad[input_blob])].fetch()
for input_blob in input_blobs]
return gradients
outputs = [run() for _ in range(iters)]
for output in outputs[1:]:
np.testing.assert_array_equal(outputs[0], output)
self.assertAlmostEqual(np.sum(np.square(output)), 91.81752,
delta=1e-2)
@given(input=hu.tensor(min_dim=2, max_dim=6, dtype=np.int32,
elements=st.integers(min_value=0,
max_value=2**32 - 1)),
slice_dim=st.integers(),
a=st.integers(),
b=st.integers(),
is_empty=st.booleans(),
**hu.gcs_cpu_only)
def test_slice(self, input, slice_dim, a, b, is_empty, gc, dc):
slice_dim = slice_dim % len(input.shape)
        if is_empty:
input = np.random.rand(*([0] + list(input.shape))).astype(np.int32)
slice_dim += 1
a = a % input.shape[slice_dim]
b = b % input.shape[slice_dim] + 1
start_vec = np.zeros(len(input.shape), dtype=np.int32)
end_vec = np.ones(len(input.shape), dtype=np.int32) * -1
start_vec[slice_dim] = min(a, b)
end_vec[slice_dim] = max(a, b)
op = core.CreateOperator(
"Slice",
["input", "start", "end"],
["output"])
def slice_ref(x, s, e):
if len(s.shape) == 0:
return x
slc = [slice(si, None if ei == -1 else ei) for si, ei in zip(s, e)]
return (x[slc], )
self.assertReferenceChecks(gc, op, [input, start_vec, end_vec],
slice_ref)
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_shape(self, data, gc, dc):
op = core.CreateOperator("Shape", ["data"], ["shape"])
self.assertReferenceChecks(gc, op, [data], lambda x: (x.shape, ))
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_has_elements(self, data, gc, dc):
op = core.CreateOperator("HasElements", ["data"], ["has_elements"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) > 0, ))
op = core.CreateOperator("IsEmpty", ["data"], ["is_empty"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) == 0, ))
@given(initial_iters=st.integers(0, 100),
max_iters=st.integers(0, 100))
def test_should_stop_as_criteria_net_execution_step(
self, initial_iters, max_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
self.ws.create_blob("num_iters").feed(
np.asarray([max_iters]).astype(np.int64))
criteria_net = core.Net("criteria")
criteria_net.GE(["iter", "num_iters"], ["stop"])
criteria_net.Proto().external_output.extend(["stop"])
plan = core.Plan('plan')
plan.AddStep(core.execution_step(
'step', [criteria_net, net],
should_stop_blob=core.BlobReference("stop")))
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], max(initial_iters, max_iters))
def test_disabled_execution_step(self):
def createNets(i, disabled):
should_stop = 'should_stop_{}'.format(i)
output = 'output_{}'.format(i)
# init content and stop signal
init = core.Net("init_{}".format(i))
init.ConstantFill(
[],
[output],
shape=[1],
value=0.0
)
init.Cast([output], [should_stop], to='bool')
# decide if disabled or not
criterion = core.Net("criterion_{}".format(i))
tmp = criterion.ConstantFill(
[],
shape=[1],
value=1.0 if disabled else 0.0
)
criterion.Cast([tmp], [should_stop], to='bool')
criterion.Proto().external_output.extend([should_stop])
# the body net is just to turn a 0 blob to 1
net = core.Net("net_{}".format(i))
net.ConstantFill(
[],
[output],
shape=[1],
value=1.0
)
# always end the loop
ender = core.Net("ender_{}".format(i))
tmp = ender.ConstantFill(
[],
shape=[1],
value=1.0
)
ender.Cast([tmp], [should_stop], to='bool')
ender.Proto().external_output.extend([should_stop])
return [init, criterion, net, ender]
nets = [createNets(1, False),
createNets(2, True),
createNets(3, False)]
steps = [
core.execution_step(
'step_1', nets[0],
should_stop_blob=core.BlobReference('should_stop_1')),
core.execution_step(
'step_2', nets[1],
should_stop_blob=core.BlobReference('should_stop_2')),
core.execution_step('step_3', nets[2])
]
expected = [1.0, 0.0, 1.0]
plan = core.Plan('plan')
plan.AddStep(core.execution_step('all_steps', steps, num_iter=3))
self.ws.run(plan)
for i, _ in enumerate(nets):
self.assertEqual(
self.ws.blobs['output_{}'.format(i + 1)].fetch()[0],
expected[i])
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100))
def test_iter_count_with_execution_step(self, initial_iters, num_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
step = core.ExecutionStep("step", [net])
step.SetIter(num_iters)
plan = core.Plan("plan")
plan.AddStep(step)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters)
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100),
num_nets=st.integers(0, 5))
def test_atomic_iter_with_concurrent_steps(self, initial_iters, num_iters,
num_nets):
init_net = core.Net("init_net")
iter_mutex = init_net.CreateMutex([], ["iter_mutex"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
concurrent_steps = core.ExecutionStep("concurrent_steps",
num_iter=num_iters)
for i in range(num_nets):
net = core.Net("net_{}".format(i))
net.AtomicIter([iter_mutex, "iter"], ["iter"])
step = core.ExecutionStep("step", [net])
concurrent_steps.AddSubstep(step)
concurrent_steps.SetConcurrentSubsteps(True)
plan = core.Plan("plan")
plan.AddStep(concurrent_steps)
self.ws.run(init_net)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters * num_nets)
@given(a=hu.tensor(),
src=st.sampled_from(_NUMPY_TYPE_TO_ENUM.keys()),
dst=st.sampled_from(_NUMPY_TYPE_TO_ENUM.keys()),
use_name=st.booleans(),
**hu.gcs)
def test_cast(self, a, src, dst, use_name, gc, dc):
a = a.astype(src)
# Casting from a float type outside the range of the integral
# type is UB.
ftypes = [np.float32, np.float64]
if src in ftypes and dst not in ftypes and dst is not np.bool:
info = np.iinfo(dst)
a = np.clip(a, info.min, info.max)
def ref(data):
return [data.astype(dst)]
to = _NUMPY_TYPE_TO_ENUM[dst]
if use_name:
to = TensorProto.DataType.Name(to).lower()
op = core.CreateOperator('Cast', ["X"], ["Y"], to=to)
self.assertDeviceChecks(dc, op, [a], [0])
out, = self.assertReferenceChecks(gc, op, [a], ref)
self.assertEqual(dst, out.dtype)
@given(a=hu.tensor(),
eps=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_logit(self, a, eps, gc, dc):
def ref(data):
data = np.clip(data, eps, 1.0 - eps)
return (np.log(data / (1 - data)), )
op = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(a=hu.tensor(elements=st.floats(allow_nan=True)),
value=st.floats(min_value=-10, max_value=10),
**hu.gcs_cpu_only)
def test_replace_nan(self, a, value, gc, dc):
def ref(data):
out = np.copy(data)
out[np.isnan(data)] = value
return (out, )
op = core.CreateOperator('ReplaceNaN', ["X"], ["Y"], value=value)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
has_input=st.booleans(),
has_extra_shape=st.booleans(),
extra_shape=st.lists(
min_size=1, max_size=5, elements=st.integers(1, 5)),
**hu.gcs)
def test_constant_fill(self, data, has_input, has_extra_shape, extra_shape,
gc, dc):
dtype = data.dtype.type
# in opt mode, np.bool is converted into np.bool_
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = data.item(0)
gt_shape = data.shape
inputs = [data]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
if has_input:
if has_extra_shape:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
extra_shape=extra_shape,
value=value)
gt_shape += tuple(extra_shape)
else:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
value=value)
else:
op = core.CreateOperator('ConstantFill', [], ["Y"],
dtype=enum_type,
value=value,
shape=list(gt_shape))
inputs = []
def ref(inputs=None):
outputs = np.full(shape=gt_shape, fill_value=value, dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
@given(t=st.integers(1, 5),
n=st.integers(1, 5),
d=st.integers(1, 5))
def test_elman_recurrent_network(self, t, n, d):
from caffe2.python import cnn
np.random.seed(1701)
step_net = cnn.CNNModelHelper(name="Elman")
# TODO: name scope external inputs and outputs
step_net.Proto().external_input.extend(
["input_t", "seq_lengths", "timestep",
"hidden_t_prev", "gates_t_w", "gates_t_b"])
step_net.Proto().type = "simple"
step_net.Proto().external_output.extend(["hidden_t", "gates_t"])
step_net.FC("hidden_t_prev", "gates_t", dim_in=d, dim_out=d, axis=2)
step_net.net.Sum(["gates_t", "input_t"], ["gates_t"])
step_net.net.Sigmoid(["gates_t"], ["hidden_t"])
# Initialize params for step net in the parent net
for op in step_net.param_init_net.Proto().op:
workspace.RunOperatorOnce(op)
backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
step_net.Proto().op, {"hidden_t": "hidden_t_grad"})
backward_mapping = {str(k): str(v) for k, v
in backward_mapping.items()}
backward_step_net = core.Net("ElmanBackward")
del backward_step_net.Proto().op[:]
backward_step_net.Proto().op.extend(backward_ops)
assert backward_mapping["input_t"] == "gates_t_grad"
links = [
("hidden_t_prev", "hidden", 0),
("hidden_t", "hidden", 1),
("input_t", "input", 0),
]
link_internal, link_external, link_offset = zip(*links)
backward_links = [
("hidden_t_prev_grad", "hidden_grad", 0),
("hidden_t_grad", "hidden_grad", 1),
("gates_t_grad", "input_grad", 0),
]
backward_link_internal, backward_link_external, backward_link_offset = \
zip(*backward_links)
backward_step_net.Proto().external_input.extend(["hidden_t_grad"])
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_input)
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_output)
inputs = ["input", "seq_lengths", "gates_t_w", "gates_t_b", "hidden_input"]
recurrent_inputs = ["hidden_input"]
op = core.CreateOperator(
"RecurrentNetwork",
inputs,
["output", "hidden", "hidden_output", "step_workspaces"],
alias_src=["hidden", "hidden"],
alias_dst=["output", "hidden_output"],
alias_offset=[1, -1],
recurrent_states=["hidden"],
initial_recurrent_state_ids=map(inputs.index, recurrent_inputs),
link_internal=link_internal,
link_external=link_external,
link_offset=link_offset,
backward_link_internal=backward_link_internal,
backward_link_external=backward_link_external,
backward_link_offset=backward_link_offset,
param=map(inputs.index, step_net.params),
step_net=str(step_net.Proto()),
backward_step_net=str(backward_step_net.Proto()),
outputs_with_grads=[0],
)
workspace.FeedBlob(
"input", np.random.randn(t, n, d).astype(np.float32))
workspace.FeedBlob(
"hidden_input", np.random.randn(1, n, d).astype(np.float32))
workspace.FeedBlob(
"seq_lengths", np.random.randint(0, t, size=(n,)).astype(np.int32))
def reference(input, seq_lengths, gates_w, gates_b, hidden_input):
T = input.shape[0]
N = input.shape[1]
D = input.shape[2]
hidden = np.zeros(shape=(T + 1, N, D))
assert hidden.shape[0] == T + 1
assert hidden.shape[1] == N
assert hidden.shape[2] == D
hidden[0, :, :] = hidden_input
for t in range(T):
input_t = input[t].reshape(1, N, D)
hidden_t_prev = hidden[t].reshape(1, N, D)
gates = np.dot(hidden_t_prev, gates_w.T)
gates = gates.reshape(1, N, D) + input_t.reshape(1, N, D)
hidden[t + 1] = sigmoid(gates)
return hidden[1:], hidden, hidden[-1].reshape(1, N, D)
self.assertReferenceChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
reference,
outputs_to_check=[0, 1, 2])
for param in [0, 2, 3]:
self.assertGradientChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
param,
[0])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_space_to_batch(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
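        # SpaceToBatch rearranges block_size x block_size spatial tiles of the
        # (padded) input into the batch dimension; BatchToSpace below is the
        # inverse transform.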
X = np.random.randn(n, c, h, w).astype(np.float32)
op = core.CreateOperator("SpaceToBatch", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_batch_to_space(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(
n * block_size * block_size,
c,
(h + 2 * pad) / block_size,
(w + 2 * pad) / block_size).astype(np.float32)
op = core.CreateOperator("BatchToSpace", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(),
in_place=st.booleans(),
scale=st.floats(min_value=-2.0, max_value=2.0),
**hu.gcs)
def test_scale(self, X, in_place, scale, gc, dc):
op = core.CreateOperator(
"Scale", ["X"], ["Y" if not in_place else "X"],
scale=scale)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(s=st.text())
def test_string_serde(self, s):
s = s.encode('ascii', 'ignore')
self.ws.create_blob("a").feed(s)
serialized = self.ws.blobs["a"].serialize("a")
self.ws.create_blob("b").deserialize(serialized)
self.assertEqual(s, self.ws.blobs[("a")].fetch())
self.assertEqual(s, self.ws.blobs[("b")].fetch())
@given(n=st.integers(1, 3),
dim=st.integers(4, 16),
**hu.gcs)
def test_distances(self, n, dim, gc, dc):
X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("Y").feed(Y)
def check_grad(op):
self.assertGradientChecks(gc, op, [X, Y], 0, [0],
stepsize=1e-2, threshold=1e-2)
self.assertGradientChecks(gc, op, [X, Y], 1, [0],
stepsize=1e-2, threshold=1e-2)
l2_op = core.CreateOperator("SquaredL2Distance",
["X", "Y"], ["l2_dist"])
self.ws.run(l2_op)
np.testing.assert_allclose(self.ws.blobs[("l2_dist")].fetch(),
np.square(X - Y).sum(axis=1) * 0.5,
rtol=1e-4, atol=1e-4)
check_grad(l2_op)
if gc.device_type == 1:
# Only SquaredL2Distance has CUDA implementation
return
dot_op = core.CreateOperator("DotProduct", ["X", "Y"], ["dot"])
self.ws.run(dot_op)
np.testing.assert_allclose(self.ws.blobs[("dot")].fetch(),
np.multiply(X, Y).sum(axis=1),
rtol=1e-4, atol=1e-4)
check_grad(dot_op)
kEps = 1e-12
cos_op = core.CreateOperator("CosineSimilarity", ["X", "Y"], ["cos"])
self.ws.run(cos_op)
cos = np.divide(np.multiply(X, Y).sum(axis=1),
np.multiply(np.linalg.norm(X, axis=1) + kEps,
np.linalg.norm(Y, axis=1) + kEps))
np.testing.assert_allclose(self.ws.blobs[("cos")].fetch(), cos,
rtol=1e-4, atol=1e-4)
check_grad(cos_op)
@given(pad=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_same_pad_image(self, pad, size, input_channels, batch_size, order,
mode, gc, dc):
assume(size > pad)
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad=pad,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(pad_t=st.integers(0, 3),
pad_l=st.integers(0, 3),
pad_b=st.integers(0, 3),
pad_r=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_pad_image(self, pad_t, pad_l, pad_b, pad_r, size, input_channels,
batch_size, order, mode, gc, dc):
assume(size > max(pad_b, pad_r, pad_t, pad_l))
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad_t=pad_t,
pad_l=pad_l,
pad_b=pad_b,
pad_r=pad_r,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)),
mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)),
mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_instance_norm(self, size, input_channels, batch_size, order,
epsilon, gc, dc):
op = core.CreateOperator(
"InstanceNorm",
["X", "scale", "bias"],
["Y"],
order=order,
epsilon=epsilon,
)
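        # InstanceNorm normalizes each (sample, channel) spatial plane on its
        # own: y = (x - mean) / sqrt(var + epsilon) * scale + bias, as in the
        # references below.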
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
def ref_nchw(x, scale, bias):
x = x.reshape(batch_size * input_channels, size * size)
y = (x - x.mean(1)[:, np.newaxis])
y /= np.sqrt(x.var(1) + epsilon)[:, np.newaxis]
y = y.reshape(batch_size, input_channels, size, size)
y = y * scale.reshape(1, input_channels, 1, 1)
y = y + bias.reshape(1, input_channels, 1, 1)
return (y, )
def ref_nhwc(x, scale, bias):
x = x.swapaxes(2, 3).swapaxes(1, 2)
y = ref_nchw(x, scale, bias)[0]
return (y.swapaxes(1, 2).swapaxes(2, 3), )
self.assertReferenceChecks(
gc, op, [X, scale, bias],
ref_nchw if order == "NCHW" else ref_nhwc)
# TODO(jiayq): when there are backward and GPU implementations, enable
# these two.
# self.assertDeviceChecks(dc, op, [X, scale, bias], [0])
# self.assertGradientChecks(gc, op, [X, scale, bias], 0, [0])
ws = workspace.C.Workspace()
feeds = [("X", X), ("scale", scale), ("bias", bias)]
for blob, arr in feeds:
ws.create_blob(blob).feed(arr)
for _ in range(100):
ws.run(op)
for blob, arr in feeds:
np.testing.assert_array_equal(ws.blobs[blob].fetch(), arr)
@given(sizes=st.lists(st.integers(1, 100), min_size=1),
in_place=st.booleans(),
**hu.gcs)
def test_unsafe_coalesce(self, sizes, in_place, gc, dc):
gAlignment = 32
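        # Each input is viewed as raw bytes and padded up to a 32-byte boundary
        # before being concatenated into the single "coalesced" output blob.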
Xs = [np.random.randn(size)
.astype(np.random.choice([np.float32, np.float64, np.uint8]))
for size in sizes]
op = core.CreateOperator(
"UnsafeCoalesce",
["X_{}".format(i) for i, _ in enumerate(sizes)],
[("X_{}" if in_place else "Y_{}").format(i)
for i, _ in enumerate(sizes)] + ["coalesced"])
self.assertDeviceChecks(dc, op, Xs, list(range(len(sizes) + 1)))
def unsafe_coalesce(*xs):
def to_uint8(x):
x_aligned_bytes = ((x.nbytes + gAlignment - 1) // gAlignment) \
* gAlignment
x_aligned = np.zeros(
shape=(x_aligned_bytes // x.dtype.itemsize, ),
dtype=x.dtype)
x_aligned[:x.size] = x
x_cast = np.fromstring(x_aligned.tobytes(), dtype='<u1')
return x_cast
flat = [to_uint8(x) for x in xs]
coalesced = np.concatenate(flat)
return list(xs) + [coalesced]
self.assertReferenceChecks(gc, op, Xs, unsafe_coalesce)
@given(inp=_dtypes().flatmap(lambda dt: _tensor_and_indices(
elements=st.floats(min_value=0.5, max_value=10), dtype=dt)),
**hu.gcs_cpu_only)
def test_sparse_to_dense(self, inp, gc, dc):
first_dim, X, I = inp
# values don't matter
D = np.random.uniform(0, 1, size=(first_dim,) + X.shape[1:])
op = core.CreateOperator("SparseToDense", ["I", "X", "D"], ["Y"])
def sparse_to_dense(I, X, D):
O = np.zeros(D.shape)
for i, p in enumerate(I):
O[p] += X[i]
return [O]
self.assertReferenceChecks(gc, op, [I, X, D], sparse_to_dense)
self.assertDeviceChecks(dc, op, [I, X, D], [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_dot_product(self, inputs, gc, dc):
X, Y = inputs
op = core.CreateOperator("DotProduct", ["X", "Y"], 'out')
def dotproduct(X, Y):
return (np.sum(X * Y, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
K=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_padding(self, N, M, K, pad_value, gc, dc):
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
pad_value=pad_value)
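        # The narrower of X / Y is padded with pad_value up to the wider width
        # before taking the row-wise dot product (see reference below).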
def dotproduct(X, Y):
Z = np.ones((N, max(M, K))).astype(np.float32) * pad_value
if M < K:
Z[:, :M] = X
return (np.sum(Z * Y, axis=1), )
else:
Z[:, :K] = Y
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_rep_padding(self, N, M, pad_value, gc, dc):
K = 2 * M
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
replicate=True,
pad_value=pad_value)
def dotproduct(X, Y):
import numpy.matlib as npm
if M < K:
Z = npm.repmat(X, 1, K // M)
return (np.sum(Z * Y, axis=1), )
else:
Z = npm.repmat(Y, 1, M // K)
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10), **hu.gcs_cpu_only)
def test_ensure_dense(self, N, M, gc, dc):
# in place
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "X")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
# or not
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "out")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
@given(N=st.integers(min_value=10, max_value=100),
M=st.integers(min_value=2, max_value=10),
num_buckets=st.integers(min_value=1, max_value=5),
**hu.gcs_cpu_only)
def test_accumulate_histogram_op(self, N, M, num_buckets, gc, dc):
X = np.random.rand(N, M).astype(np.float32)
lower_bound, upper_bound = 0.1, 0.9
op = core.CreateOperator("AccumulateHistogram", ["X"],
['cur_hist', 'acc_hist'],
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
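        # Bucket layout: index 0 collects values below lower_bound, index
        # num_buckets + 1 collects values >= upper_bound, and the interval
        # [lower_bound, upper_bound) is split into num_buckets equal buckets.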
def histogram(X):
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
cur_hist, acc_hist = hist, hist
return [cur_hist, acc_hist]
self.assertDeviceChecks(dc, op, [X], [0, 1])
self.assertReferenceChecks(gc, op, [X], histogram)
if __name__ == "__main__":
unittest.main()
test_logging.py
#!/usr/bin/env python
#
# Copyright 2001-2010 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Test harness for the logging module. Run all tests.
Copyright (C) 2001-2010 Vinay Sajip. All Rights Reserved.
"""
import logging
import logging.handlers
import logging.config
import codecs
import cPickle
import cStringIO
import gc
import json
import os
import re
import select
import socket
from SocketServer import ThreadingTCPServer, StreamRequestHandler
import struct
import sys
import tempfile
from test.test_support import captured_stdout, run_with_locale, run_unittest
import textwrap
import unittest
import warnings
import weakref
try:
import threading
except ImportError:
threading = None
class BaseTest(unittest.TestCase):
"""Base class for logging tests."""
log_format = "%(name)s -> %(levelname)s: %(message)s"
expected_log_pat = r"^([\w.]+) -> ([\w]+): ([\d]+)$"
message_num = 0
def setUp(self):
"""Setup the default logging stream to an internal StringIO instance,
so that we can examine log output as we want."""
logger_dict = logging.getLogger().manager.loggerDict
logging._acquireLock()
try:
self.saved_handlers = logging._handlers.copy()
self.saved_handler_list = logging._handlerList[:]
self.saved_loggers = logger_dict.copy()
self.saved_level_names = logging._levelNames.copy()
finally:
logging._releaseLock()
# Set two unused loggers: one non-ASCII and one Unicode.
# This is to test correct operation when sorting existing
# loggers in the configuration code. See issue 8201.
logging.getLogger("\xab\xd7\xbb")
logging.getLogger(u"\u013f\u00d6\u0047")
self.root_logger = logging.getLogger("")
self.original_logging_level = self.root_logger.getEffectiveLevel()
self.stream = cStringIO.StringIO()
self.root_logger.setLevel(logging.DEBUG)
self.root_hdlr = logging.StreamHandler(self.stream)
self.root_formatter = logging.Formatter(self.log_format)
self.root_hdlr.setFormatter(self.root_formatter)
self.root_logger.addHandler(self.root_hdlr)
def tearDown(self):
"""Remove our logging stream, and restore the original logging
level."""
self.stream.close()
self.root_logger.removeHandler(self.root_hdlr)
while self.root_logger.handlers:
h = self.root_logger.handlers[0]
self.root_logger.removeHandler(h)
h.close()
self.root_logger.setLevel(self.original_logging_level)
logging._acquireLock()
try:
logging._levelNames.clear()
logging._levelNames.update(self.saved_level_names)
logging._handlers.clear()
logging._handlers.update(self.saved_handlers)
logging._handlerList[:] = self.saved_handler_list
loggerDict = logging.getLogger().manager.loggerDict
loggerDict.clear()
loggerDict.update(self.saved_loggers)
finally:
logging._releaseLock()
def assert_log_lines(self, expected_values, stream=None):
"""Match the collected log lines against the regular expression
self.expected_log_pat, and compare the extracted group values to
the expected_values list of tuples."""
stream = stream or self.stream
pat = re.compile(self.expected_log_pat)
try:
stream.reset()
actual_lines = stream.readlines()
except AttributeError:
# StringIO.StringIO lacks a reset() method.
actual_lines = stream.getvalue().splitlines()
self.assertEqual(len(actual_lines), len(expected_values))
for actual, expected in zip(actual_lines, expected_values):
match = pat.search(actual)
if not match:
self.fail("Log line does not match expected pattern:\n" +
actual)
self.assertEqual(tuple(match.groups()), expected)
s = stream.read()
if s:
self.fail("Remaining output at end of log stream:\n" + s)
def next_message(self):
"""Generate a message consisting solely of an auto-incrementing
integer."""
self.message_num += 1
return "%d" % self.message_num
class BuiltinLevelsTest(BaseTest):
"""Test builtin levels and their inheritance."""
def test_flat(self):
#Logging levels in a flat logger namespace.
m = self.next_message
ERR = logging.getLogger("ERR")
ERR.setLevel(logging.ERROR)
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
DEB = logging.getLogger("DEB")
DEB.setLevel(logging.DEBUG)
# These should log.
ERR.log(logging.CRITICAL, m())
ERR.error(m())
INF.log(logging.CRITICAL, m())
INF.error(m())
INF.warn(m())
INF.info(m())
DEB.log(logging.CRITICAL, m())
DEB.error(m())
DEB.warn (m())
DEB.info (m())
DEB.debug(m())
# These should not log.
ERR.warn(m())
ERR.info(m())
ERR.debug(m())
INF.debug(m())
self.assert_log_lines([
('ERR', 'CRITICAL', '1'),
('ERR', 'ERROR', '2'),
('INF', 'CRITICAL', '3'),
('INF', 'ERROR', '4'),
('INF', 'WARNING', '5'),
('INF', 'INFO', '6'),
('DEB', 'CRITICAL', '7'),
('DEB', 'ERROR', '8'),
('DEB', 'WARNING', '9'),
('DEB', 'INFO', '10'),
('DEB', 'DEBUG', '11'),
])
def test_nested_explicit(self):
# Logging levels in a nested namespace, all explicitly set.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
# These should log.
INF_ERR.log(logging.CRITICAL, m())
INF_ERR.error(m())
# These should not log.
INF_ERR.warn(m())
INF_ERR.info(m())
INF_ERR.debug(m())
self.assert_log_lines([
('INF.ERR', 'CRITICAL', '1'),
('INF.ERR', 'ERROR', '2'),
])
def test_nested_inherited(self):
#Logging levels in a nested namespace, inherited from parent loggers.
m = self.next_message
INF = logging.getLogger("INF")
INF.setLevel(logging.INFO)
INF_ERR = logging.getLogger("INF.ERR")
INF_ERR.setLevel(logging.ERROR)
INF_UNDEF = logging.getLogger("INF.UNDEF")
INF_ERR_UNDEF = logging.getLogger("INF.ERR.UNDEF")
UNDEF = logging.getLogger("UNDEF")
# These should log.
INF_UNDEF.log(logging.CRITICAL, m())
INF_UNDEF.error(m())
INF_UNDEF.warn(m())
INF_UNDEF.info(m())
INF_ERR_UNDEF.log(logging.CRITICAL, m())
INF_ERR_UNDEF.error(m())
# These should not log.
INF_UNDEF.debug(m())
INF_ERR_UNDEF.warn(m())
INF_ERR_UNDEF.info(m())
INF_ERR_UNDEF.debug(m())
self.assert_log_lines([
('INF.UNDEF', 'CRITICAL', '1'),
('INF.UNDEF', 'ERROR', '2'),
('INF.UNDEF', 'WARNING', '3'),
('INF.UNDEF', 'INFO', '4'),
('INF.ERR.UNDEF', 'CRITICAL', '5'),
('INF.ERR.UNDEF', 'ERROR', '6'),
])
def test_nested_with_virtual_parent(self):
# Logging levels when some parent does not exist yet.
m = self.next_message
INF = logging.getLogger("INF")
GRANDCHILD = logging.getLogger("INF.BADPARENT.UNDEF")
CHILD = logging.getLogger("INF.BADPARENT")
INF.setLevel(logging.INFO)
# These should log.
GRANDCHILD.log(logging.FATAL, m())
GRANDCHILD.info(m())
CHILD.log(logging.FATAL, m())
CHILD.info(m())
# These should not log.
GRANDCHILD.debug(m())
CHILD.debug(m())
self.assert_log_lines([
('INF.BADPARENT.UNDEF', 'CRITICAL', '1'),
('INF.BADPARENT.UNDEF', 'INFO', '2'),
('INF.BADPARENT', 'CRITICAL', '3'),
('INF.BADPARENT', 'INFO', '4'),
])
class BasicFilterTest(BaseTest):
"""Test the bundled Filter class."""
def test_filter(self):
# Only messages satisfying the specified criteria pass through the
# filter.
filter_ = logging.Filter("spam.eggs")
handler = self.root_logger.handlers[0]
try:
handler.addFilter(filter_)
spam = logging.getLogger("spam")
spam_eggs = logging.getLogger("spam.eggs")
spam_eggs_fish = logging.getLogger("spam.eggs.fish")
spam_bakedbeans = logging.getLogger("spam.bakedbeans")
spam.info(self.next_message())
spam_eggs.info(self.next_message()) # Good.
spam_eggs_fish.info(self.next_message()) # Good.
spam_bakedbeans.info(self.next_message())
self.assert_log_lines([
('spam.eggs', 'INFO', '2'),
('spam.eggs.fish', 'INFO', '3'),
])
finally:
handler.removeFilter(filter_)
#
# First, we define our levels. There can be as many as you want - the only
# limitations are that they should be integers, the lowest should be > 0 and
# larger values mean less information being logged. If you need specific
# level values which do not fit into these limitations, you can use a
# mapping dictionary to convert between your application levels and the
# logging system.
#
SILENT = 120
TACITURN = 119
TERSE = 118
EFFUSIVE = 117
SOCIABLE = 116
VERBOSE = 115
TALKATIVE = 114
GARRULOUS = 113
CHATTERBOX = 112
BORING = 111
LEVEL_RANGE = range(BORING, SILENT + 1)
#
# Next, we define names for our levels. You don't need to do this - in which
# case the system will use "Level n" to denote the text for the level.
#
my_logging_levels = {
SILENT : 'Silent',
TACITURN : 'Taciturn',
TERSE : 'Terse',
EFFUSIVE : 'Effusive',
SOCIABLE : 'Sociable',
VERBOSE : 'Verbose',
TALKATIVE : 'Talkative',
GARRULOUS : 'Garrulous',
CHATTERBOX : 'Chatterbox',
BORING : 'Boring',
}
class GarrulousFilter(logging.Filter):
"""A filter which blocks garrulous messages."""
def filter(self, record):
return record.levelno != GARRULOUS
class VerySpecificFilter(logging.Filter):
"""A filter which blocks sociable and taciturn messages."""
def filter(self, record):
return record.levelno not in [SOCIABLE, TACITURN]
class CustomLevelsAndFiltersTest(BaseTest):
"""Test various filtering possibilities with custom logging levels."""
# Skip the logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
for k, v in my_logging_levels.items():
logging.addLevelName(k, v)
def log_at_all_levels(self, logger):
for lvl in LEVEL_RANGE:
logger.log(lvl, self.next_message())
def test_logger_filter(self):
# Filter at logger level.
self.root_logger.setLevel(VERBOSE)
# Levels >= 'Verbose' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
def test_handler_filter(self):
# Filter at handler level.
self.root_logger.handlers[0].setLevel(SOCIABLE)
try:
# Levels >= 'Sociable' are good.
self.log_at_all_levels(self.root_logger)
self.assert_log_lines([
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
])
finally:
self.root_logger.handlers[0].setLevel(logging.NOTSET)
def test_specific_filters(self):
# Set a specific filter object on the handler, and then add another
# filter object on the logger itself.
handler = self.root_logger.handlers[0]
specific_filter = None
garr = GarrulousFilter()
handler.addFilter(garr)
try:
self.log_at_all_levels(self.root_logger)
first_lines = [
# Notice how 'Garrulous' is missing
('Boring', '1'),
('Chatterbox', '2'),
('Talkative', '4'),
('Verbose', '5'),
('Sociable', '6'),
('Effusive', '7'),
('Terse', '8'),
('Taciturn', '9'),
('Silent', '10'),
]
self.assert_log_lines(first_lines)
specific_filter = VerySpecificFilter()
self.root_logger.addFilter(specific_filter)
self.log_at_all_levels(self.root_logger)
self.assert_log_lines(first_lines + [
                # Not only is 'Garrulous' still missing, but so are 'Sociable'
                # and 'Taciturn'.
('Boring', '11'),
('Chatterbox', '12'),
('Talkative', '14'),
('Verbose', '15'),
('Effusive', '17'),
('Terse', '18'),
('Silent', '20'),
])
finally:
if specific_filter:
self.root_logger.removeFilter(specific_filter)
handler.removeFilter(garr)
class MemoryHandlerTest(BaseTest):
"""Tests for the MemoryHandler."""
# Do not bother with a logger name group.
expected_log_pat = r"^[\w.]+ -> ([\w]+): ([\d]+)$"
def setUp(self):
BaseTest.setUp(self)
self.mem_hdlr = logging.handlers.MemoryHandler(10, logging.WARNING,
self.root_hdlr)
self.mem_logger = logging.getLogger('mem')
self.mem_logger.propagate = 0
self.mem_logger.addHandler(self.mem_hdlr)
def tearDown(self):
self.mem_hdlr.close()
BaseTest.tearDown(self)
def test_flush(self):
# The memory handler flushes to its target handler based on specific
# criteria (message count and message level).
self.mem_logger.debug(self.next_message())
self.assert_log_lines([])
self.mem_logger.info(self.next_message())
self.assert_log_lines([])
# This will flush because the level is >= logging.WARNING
self.mem_logger.warn(self.next_message())
lines = [
('DEBUG', '1'),
('INFO', '2'),
('WARNING', '3'),
]
self.assert_log_lines(lines)
for n in (4, 14):
for i in range(9):
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
# This will flush because it's the 10th message since the last
# flush.
self.mem_logger.debug(self.next_message())
lines = lines + [('DEBUG', str(i)) for i in range(n, n + 10)]
self.assert_log_lines(lines)
self.mem_logger.debug(self.next_message())
self.assert_log_lines(lines)
class ExceptionFormatter(logging.Formatter):
"""A special exception formatter."""
def formatException(self, ei):
return "Got a [%s]" % ei[0].__name__
class ConfigFileTest(BaseTest):
"""Reading logging config from a .ini-style config file."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1 adds a little to the standard configuration.
config1 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config1a moves the handler to the root.
config1a = """
[loggers]
keys=root,parser
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
# config2 has a subtle configuration error that should be reported
config2 = config1.replace("sys.stdout", "sys.stbout")
# config3 has a less subtle configuration error
config3 = config1.replace("formatter=form1", "formatter=misspelled_name")
# config4 specifies a custom formatter class to be loaded
config4 = """
[loggers]
keys=root
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=NOTSET
handlers=hand1
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
class=""" + __name__ + """.ExceptionFormatter
format=%(levelname)s:%(name)s:%(message)s
datefmt=
"""
# config5 specifies a custom handler class to be loaded
config5 = config1.replace('class=StreamHandler', 'class=logging.StreamHandler')
# config6 uses ', ' delimiters in the handlers and formatters sections
config6 = """
[loggers]
keys=root,parser
[handlers]
keys=hand1, hand2
[formatters]
keys=form1, form2
[logger_root]
level=WARNING
handlers=
[logger_parser]
level=DEBUG
handlers=hand1
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[handler_hand2]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stderr,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
[formatter_form2]
format=%(message)s
datefmt=
"""
# config7 adds a compiler logger.
config7 = """
[loggers]
keys=root,parser,compiler
[handlers]
keys=hand1
[formatters]
keys=form1
[logger_root]
level=WARNING
handlers=hand1
[logger_compiler]
level=DEBUG
handlers=
propagate=1
qualname=compiler
[logger_parser]
level=DEBUG
handlers=
propagate=1
qualname=compiler.parser
[handler_hand1]
class=StreamHandler
level=NOTSET
formatter=form1
args=(sys.stdout,)
[formatter_form1]
format=%(levelname)s ++ %(message)s
datefmt=
"""
def apply_config(self, conf):
file = cStringIO.StringIO(textwrap.dedent(conf))
logging.config.fileConfig(file)
def test_config0_ok(self):
# A simple config file which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config file defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config3_failure(self):
# A simple config file which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config file specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_ok(self):
self.test_config1_ok(config=self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1a)
logger = logging.getLogger("compiler.parser")
# See issue #11424. compiler-hyphenated sorts
# between compiler and compiler.xyz and this
# was preventing compiler.xyz from being included
# in the child loggers of compiler because of an
# overzealous loop termination condition.
hyphenated = logging.getLogger('compiler-hyphenated')
# All will output a message
logger.info(self.next_message())
logger.error(self.next_message())
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
('CRITICAL', '3'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
# Will not appear
hyphenated.critical(self.next_message())
self.assert_log_lines([
('INFO', '4'),
('ERROR', '5'),
('INFO', '6'),
('ERROR', '7'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class LogRecordStreamHandler(StreamRequestHandler):
"""Handler for a streaming logging request. It saves the log message in the
TCP server's 'log_output' attribute."""
TCP_LOG_END = "!!!END!!!"
def handle(self):
"""Handle multiple requests - each expected to be of 4-byte length,
followed by the LogRecord in pickle format. Logs the record
according to whatever policy is configured locally."""
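# Wire format note: each record arriving here is framed the way
# logging.handlers.SocketHandler sends it -- a 4-byte big-endian length prefix
# (struct.pack(">L", len(payload))) followed by that many bytes of a pickled
# LogRecord attribute dict, which unpickle()/makeLogRecord consume below.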
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = self.unpickle(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
def unpickle(self, data):
return cPickle.loads(data)
def handle_log_record(self, record):
# If the end-of-messages sentinel is seen, tell the server to
# terminate.
if self.TCP_LOG_END in record.msg:
self.server.abort = 1
return
self.server.log_output += record.msg + "\n"
class LogRecordSocketReceiver(ThreadingTCPServer):
"""A simple-minded TCP socket-based logging receiver suitable for test
purposes."""
allow_reuse_address = 1
log_output = ""
def __init__(self, host='localhost',
port=logging.handlers.DEFAULT_TCP_LOGGING_PORT,
handler=LogRecordStreamHandler):
ThreadingTCPServer.__init__(self, (host, port), handler)
self.abort = False
self.timeout = 0.1
self.finished = threading.Event()
def serve_until_stopped(self):
while not self.abort:
rd, wr, ex = select.select([self.socket.fileno()], [], [],
self.timeout)
if rd:
self.handle_request()
# Notify the main thread that we're about to exit
self.finished.set()
# close the listen socket
self.server_close()
@unittest.skipUnless(threading, 'Threading required for this test.')
class SocketHandlerTest(BaseTest):
"""Test for SocketHandler objects."""
def setUp(self):
"""Set up a TCP server to receive log messages, and a SocketHandler
pointing to that server's address and port."""
BaseTest.setUp(self)
self.tcpserver = LogRecordSocketReceiver(port=0)
self.port = self.tcpserver.socket.getsockname()[1]
self.threads = [
threading.Thread(target=self.tcpserver.serve_until_stopped)]
for thread in self.threads:
thread.start()
self.sock_hdlr = logging.handlers.SocketHandler('localhost', self.port)
self.sock_hdlr.setFormatter(self.root_formatter)
self.root_logger.removeHandler(self.root_logger.handlers[0])
self.root_logger.addHandler(self.sock_hdlr)
def tearDown(self):
"""Shutdown the TCP server."""
try:
self.tcpserver.abort = True
del self.tcpserver
self.root_logger.removeHandler(self.sock_hdlr)
self.sock_hdlr.close()
for thread in self.threads:
thread.join(2.0)
finally:
BaseTest.tearDown(self)
def get_output(self):
"""Get the log output as received by the TCP server."""
# Signal the TCP receiver and wait for it to terminate.
self.root_logger.critical(LogRecordStreamHandler.TCP_LOG_END)
self.tcpserver.finished.wait(2.0)
return self.tcpserver.log_output
def test_output(self):
# The log message sent to the SocketHandler is properly received.
logger = logging.getLogger("tcp")
logger.error("spam")
logger.debug("eggs")
self.assertEqual(self.get_output(), "spam\neggs\n")
class MemoryTest(BaseTest):
"""Test memory persistence of logger objects."""
def setUp(self):
"""Create a dict to remember potentially destroyed objects."""
BaseTest.setUp(self)
self._survivors = {}
def _watch_for_survival(self, *args):
"""Watch the given objects for survival, by creating weakrefs to
them."""
for obj in args:
key = id(obj), repr(obj)
self._survivors[key] = weakref.ref(obj)
def _assertTruesurvival(self):
"""Assert that all objects watched for survival have survived."""
# Trigger cycle breaking.
gc.collect()
dead = []
for (id_, repr_), ref in self._survivors.items():
if ref() is None:
dead.append(repr_)
if dead:
self.fail("%d objects should have survived "
"but have been destroyed: %s" % (len(dead), ", ".join(dead)))
def test_persistent_loggers(self):
# Logger objects are persistent and retain their configuration, even
# if visible references are destroyed.
self.root_logger.setLevel(logging.INFO)
foo = logging.getLogger("foo")
self._watch_for_survival(foo)
foo.setLevel(logging.DEBUG)
self.root_logger.debug(self.next_message())
foo.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
])
del foo
# foo has survived.
self._assertTruesurvival()
# foo has retained its settings.
bar = logging.getLogger("foo")
bar.debug(self.next_message())
self.assert_log_lines([
('foo', 'DEBUG', '2'),
('foo', 'DEBUG', '3'),
])
class EncodingTest(BaseTest):
def test_encoding_plain_file(self):
# In Python 2.x, a plain file object is treated as having no encoding.
log = logging.getLogger("test")
fn = tempfile.mktemp(".log")
# the non-ascii data we write to the log.
data = "foo\x80"
try:
handler = logging.FileHandler(fn)
log.addHandler(handler)
try:
# write non-ascii data to the log.
log.warning(data)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
f = open(fn)
try:
self.assertEqual(f.read().rstrip(), data)
finally:
f.close()
finally:
if os.path.isfile(fn):
os.remove(fn)
def test_encoding_cyrillic_unicode(self):
log = logging.getLogger("test")
#Get a message in Unicode: Do svidanya in Cyrillic (meaning goodbye)
message = u'\u0434\u043e \u0441\u0432\u0438\u0434\u0430\u043d\u0438\u044f'
#Ensure it's written in a Cyrillic encoding
writer_class = codecs.getwriter('cp1251')
writer_class.encoding = 'cp1251'
stream = cStringIO.StringIO()
writer = writer_class(stream, 'strict')
handler = logging.StreamHandler(writer)
log.addHandler(handler)
try:
log.warning(message)
finally:
log.removeHandler(handler)
handler.close()
# check we wrote exactly those bytes, ignoring trailing \n etc
s = stream.getvalue()
#Compare against what the data should be when encoded in CP-1251
self.assertEqual(s, '\xe4\xee \xf1\xe2\xe8\xe4\xe0\xed\xe8\xff\n')
class WarningsTest(BaseTest):
def test_warnings(self):
with warnings.catch_warnings():
logging.captureWarnings(True)
try:
warnings.filterwarnings("always", category=UserWarning)
file = cStringIO.StringIO()
h = logging.StreamHandler(file)
logger = logging.getLogger("py.warnings")
logger.addHandler(h)
warnings.warn("I'm warning you...")
logger.removeHandler(h)
s = file.getvalue()
h.close()
self.assertTrue(s.find("UserWarning: I'm warning you...\n") > 0)
#See if an explicit file uses the original implementation
file = cStringIO.StringIO()
warnings.showwarning("Explicit", UserWarning, "dummy.py", 42,
file, "Dummy line")
s = file.getvalue()
file.close()
self.assertEqual(s,
"dummy.py:42: UserWarning: Explicit\n Dummy line\n")
finally:
logging.captureWarnings(False)
def formatFunc(format, datefmt=None):
return logging.Formatter(format, datefmt)
def handlerFunc():
return logging.StreamHandler()
class CustomHandler(logging.StreamHandler):
pass
class ConfigDictTest(BaseTest):
"""Reading logging config from a dictionary."""
expected_log_pat = r"^([\w]+) \+\+ ([\w]+)$"
# config0 is a standard configuration.
config0 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
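# Note on the 'ext://sys.stdout' value above: the ext:// prefix tells dictConfig
# to resolve the remainder as an importable Python object (here the real
# sys.stdout) instead of treating it as a literal string.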
# config1 adds a little to the standard configuration.
config1 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config2 has a subtle configuration error that should be reported
config2 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdbout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a handler
config2a = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NTOSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config1 but with a misspelt level on a logger
config2b = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WRANING',
},
}
# config3 has a less subtle configuration error
config3 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'misspelled_name',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config4 specifies a custom formatter class to be loaded
config4 = {
'version': 1,
'formatters': {
'form1' : {
'()' : __name__ + '.ExceptionFormatter',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# As config4 but using an actual callable rather than a string
config4a = {
'version': 1,
'formatters': {
'form1' : {
'()' : ExceptionFormatter,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form2' : {
'()' : __name__ + '.formatFunc',
'format' : '%(levelname)s:%(name)s:%(message)s',
},
'form3' : {
'()' : formatFunc,
'format' : '%(levelname)s:%(name)s:%(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
'hand2' : {
'()' : handlerFunc,
},
},
'root' : {
'level' : 'NOTSET',
'handlers' : ['hand1'],
},
}
# config5 specifies a custom handler class to be loaded
config5 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
# config6 specifies a custom handler class to be loaded
# but has bad arguments
config6 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : __name__ + '.CustomHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'9' : 'invalid parameter name',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#config 7 does not define compiler.parser but defines compiler.lexer
#so compiler.parser should be disabled after applying it
config7 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.lexer' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
config8 = {
'version': 1,
'disable_existing_loggers' : False,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
'compiler.lexer' : {
},
},
'root' : {
'level' : 'WARNING',
},
}
config9 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'WARNING',
'stream' : 'ext://sys.stdout',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'NOTSET',
},
}
config9a = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'WARNING',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
config9b = {
'version': 1,
'incremental' : True,
'handlers' : {
'hand1' : {
'level' : 'INFO',
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'INFO',
},
},
}
#As config1 but with a filter added
config10 = {
'version': 1,
'formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'filters' : {
'filt1' : {
'name' : 'compiler.parser',
},
},
'handlers' : {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
'filters' : ['filt1'],
},
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'filters' : ['filt1'],
},
},
'root' : {
'level' : 'WARNING',
'handlers' : ['hand1'],
},
}
#As config1 but using cfg:// references
config11 = {
'version': 1,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
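# The cfg:// values above are resolved by dictConfig against this same dict:
# 'cfg://true_formatters' points at config11['true_formatters'], and
# 'cfg://handler_configs[hand1]' at config11['handler_configs']['hand1'].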
#As config11 but missing the version key
config12 = {
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
#As config11 but using an unsupported version
config13 = {
'version': 2,
'true_formatters': {
'form1' : {
'format' : '%(levelname)s ++ %(message)s',
},
},
'handler_configs': {
'hand1' : {
'class' : 'logging.StreamHandler',
'formatter' : 'form1',
'level' : 'NOTSET',
'stream' : 'ext://sys.stdout',
},
},
'formatters' : 'cfg://true_formatters',
'handlers' : {
'hand1' : 'cfg://handler_configs[hand1]',
},
'loggers' : {
'compiler.parser' : {
'level' : 'DEBUG',
'handlers' : ['hand1'],
},
},
'root' : {
'level' : 'WARNING',
},
}
def apply_config(self, conf):
logging.config.dictConfig(conf)
def test_config0_ok(self):
# A simple config which overrides the default settings.
with captured_stdout() as output:
self.apply_config(self.config0)
logger = logging.getLogger()
# Won't output anything
logger.info(self.next_message())
# Outputs a message
logger.error(self.next_message())
self.assert_log_lines([
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config1_ok(self, config=config1):
# A config defining a sub-parser as well.
with captured_stdout() as output:
self.apply_config(config)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config2_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2)
def test_config2a_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2a)
def test_config2b_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config2b)
def test_config3_failure(self):
# A simple config which overrides the default settings.
self.assertRaises(StandardError, self.apply_config, self.config3)
def test_config4_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config4a_ok(self):
# A config specifying a custom formatter class.
with captured_stdout() as output:
self.apply_config(self.config4a)
#logger = logging.getLogger()
try:
raise RuntimeError()
except RuntimeError:
logging.exception("just testing")
sys.stdout.seek(0)
self.assertEqual(output.getvalue(),
"ERROR:root:just testing\nGot a [RuntimeError]\n")
# Original logger output is empty
self.assert_log_lines([])
def test_config5_ok(self):
self.test_config1_ok(config=self.config5)
def test_config6_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config6)
def test_config7_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config7)
logger = logging.getLogger("compiler.parser")
self.assertTrue(logger.disabled)
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
#Same as test_config_7_ok but don't disable old loggers.
def test_config_8_ok(self):
with captured_stdout() as output:
self.apply_config(self.config1)
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
with captured_stdout() as output:
self.apply_config(self.config8)
logger = logging.getLogger("compiler.parser")
self.assertFalse(logger.disabled)
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
logger = logging.getLogger("compiler.lexer")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '3'),
('ERROR', '4'),
('INFO', '5'),
('ERROR', '6'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
def test_config_9_ok(self):
with captured_stdout() as output:
self.apply_config(self.config9)
logger = logging.getLogger("compiler.parser")
#Nothing will be output since both handler and logger are set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9a)
#Nothing will be output since both handler is still set to WARNING
logger.info(self.next_message())
self.assert_log_lines([], stream=output)
self.apply_config(self.config9b)
#Message should now be output
logger.info(self.next_message())
self.assert_log_lines([
('INFO', '3'),
], stream=output)
def test_config_10_ok(self):
with captured_stdout() as output:
self.apply_config(self.config10)
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_config11_ok(self):
self.test_config1_ok(self.config11)
def test_config12_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config12)
def test_config13_failure(self):
self.assertRaises(StandardError, self.apply_config, self.config13)
@unittest.skipUnless(threading, 'listen() needs threading to work')
def setup_via_listener(self, text):
# Ask for a randomly assigned port (by using port 0)
t = logging.config.listen(0)
t.start()
t.ready.wait()
# Now get the port allocated
port = t.port
t.ready.clear()
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2.0)
sock.connect(('localhost', port))
slen = struct.pack('>L', len(text))
s = slen + text
sentsofar = 0
left = len(s)
while left > 0:
sent = sock.send(s[sentsofar:])
sentsofar += sent
left -= sent
sock.close()
finally:
t.ready.wait(2.0)
logging.config.stopListening()
t.join(2.0)
def test_listen_config_10_ok(self):
with captured_stdout() as output:
self.setup_via_listener(json.dumps(self.config10))
logger = logging.getLogger("compiler.parser")
logger.warning(self.next_message())
logger = logging.getLogger('compiler')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger('compiler.lexer')
#Not output, because filtered
logger.warning(self.next_message())
logger = logging.getLogger("compiler.parser.codegen")
#Output, as not filtered
logger.error(self.next_message())
self.assert_log_lines([
('WARNING', '1'),
('ERROR', '4'),
], stream=output)
def test_listen_config_1_ok(self):
with captured_stdout() as output:
self.setup_via_listener(textwrap.dedent(ConfigFileTest.config1))
logger = logging.getLogger("compiler.parser")
# Both will output a message
logger.info(self.next_message())
logger.error(self.next_message())
self.assert_log_lines([
('INFO', '1'),
('ERROR', '2'),
], stream=output)
# Original logger output is empty.
self.assert_log_lines([])
class ManagerTest(BaseTest):
def test_manager_loggerclass(self):
logged = []
class MyLogger(logging.Logger):
def _log(self, level, msg, args, exc_info=None, extra=None):
logged.append(msg)
man = logging.Manager(None)
self.assertRaises(TypeError, man.setLoggerClass, int)
man.setLoggerClass(MyLogger)
logger = man.getLogger('test')
logger.warning('should appear in logged')
logging.warning('should not appear in logged')
self.assertEqual(logged, ['should appear in logged'])
class ChildLoggerTest(BaseTest):
def test_child_loggers(self):
r = logging.getLogger()
l1 = logging.getLogger('abc')
l2 = logging.getLogger('def.ghi')
c1 = r.getChild('xyz')
c2 = r.getChild('uvw.xyz')
self.assertTrue(c1 is logging.getLogger('xyz'))
self.assertTrue(c2 is logging.getLogger('uvw.xyz'))
c1 = l1.getChild('def')
c2 = c1.getChild('ghi')
c3 = l1.getChild('def.ghi')
self.assertTrue(c1 is logging.getLogger('abc.def'))
self.assertTrue(c2 is logging.getLogger('abc.def.ghi'))
self.assertTrue(c2 is c3)
# Set the locale to the platform-dependent default. I have no idea
# why the test does this, but in any case we save the current locale
# first and restore it at the end.
@run_with_locale('LC_ALL', '')
def test_main():
run_unittest(BuiltinLevelsTest, BasicFilterTest,
CustomLevelsAndFiltersTest, MemoryHandlerTest,
ConfigFileTest, SocketHandlerTest, MemoryTest,
EncodingTest, WarningsTest, ConfigDictTest, ManagerTest,
ChildLoggerTest)
if __name__ == "__main__":
test_main()
|
api.py
|
#coding: utf8
import datetime
import hashlib
import logging
import time
from threading import Thread
import pymysql
import pymysql.cursors
import requests
import telebot
import cache_worker
import config
import secret_config
import text
import ujson
import utils
bot = telebot.TeleBot(token = secret_config.token)
class DB:
def __init__(self, host, user, db, password):
self.host = host
self.user = user
self.password = password
self.db = db
self.charset = 'utf8mb4'
self.cursorclass = pymysql.cursors.DictCursor
class DataConn:
def __init__(self, db_obj):
self.host = db_obj.host
self.user = db_obj.user
self.password = db_obj.password
self.db = db_obj.db
self.charset = db_obj.charset
self.cursorclass = db_obj.cursorclass
def __enter__(self):
self.conn = pymysql.connect(
host = self.host,
user = self.user,
password = self.password,
db = self.db,
charset = self.charset,
cursorclass = self.cursorclass
)
return self.conn
def __exit__(self, exc_type, exc_val, exc_tb):
self.conn.close()
if exc_val:
raise
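# Usage sketch for the DataConn context manager (mirroring how it is used
# throughout this module; the `db` object is defined just below):
#     with DataConn(db) as conn:
#         curs = conn.cursor()
#         curs.execute('SELECT 1')
#         row = curs.fetchone()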
db = DB(
host = secret_config.host,
user = secret_config.user,
password = secret_config.password,
db = secret_config.db
)
if __name__ == '__main__':
log_name = 'logs.txt'
f = open(log_name,'w')
f.close()
print('Log file created')
telebot_logger = logging.getLogger('telebot')
mysql_info = logging.getLogger('mysql')
main_info = logging.getLogger('main_info')
report_info = logging.getLogger('reports')
print('Logger list created')
logging.basicConfig(
format='%(filename)s [LINE:%(lineno)-3d]# %(levelname)-8s - %(name)-9s [%(asctime)s] - %(message)-50s ',
datefmt='%m/%d/%Y %I:%M:%S %p',
filename = 'logs.txt',
level = logging.INFO
)
def replacer(text):
text_list = list(text)
for i in range(len(text)):
if text_list[i] in config.restricted_characters:
text_list[i] = config.restricted_characters_replace[text_list[i]]
return ''.join(text_list)
def register_admins(chat_id):
admins = []
chat_info = bot.get_chat(chat_id)
chat = {
'chat_id': chat_id,
'title': chat_info.title
}
chat_admins = bot.get_chat_administrators(chat_id)
print('Found {} administrators. Estimated registration time: {} sec'.format(len(chat_admins), len(chat_admins)/5))
counter = 0
for i in chat_admins:
counter += 1
print('Registered {}/{} administrators. Chat: {}'.format(counter, len(chat_admins), chat_info.title))
try:
register_new_user(i.user, 'ru')
if i.user.is_bot == False:
user_settings = ujson.loads(get_user_param(i.user.id, 'settings'))
checker = False
for a in user_settings['admined_groups']:
if a['chat_id'] == chat_id:
checker = True
a['title'] = chat_info.title
if not checker:
user_settings['admined_groups'].append(chat)
change_user_param(i.user.id, 'settings', ujson.dumps(user_settings))
admin = {
'user_id': i.user.id,
'first_name': i.user.first_name,
'second_name': i.user.last_name,
'status': i.status
}
admins.append(admin)
time.sleep(0.2)
except Exception as e:
print(e)
curr_settings = get_group_params(chat_id)
curr_settings['admins'] = admins
change_group_params(chat_id, ujson.dumps(curr_settings))
print(admins)
def ban_sticker(msg, sticker_id):
"""
Bans a sticker\n
:param msg:\n
:param sticker_id:\n
"""
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `banned_stickers` WHERE `chat_id` = %s AND `sticker_id` = %s'
curs.execute(sql, (msg.chat.id, sticker_id))
res = curs.fetchone()
if res is None:
sql = 'INSERT INTO `banned_stickers`(`chat_id`, `chat_name`, `sticker_id`, `ban_time`) VALUES (%s, %s, %s, %s)'
try:
curs.execute(sql, (msg.chat.id, msg.chat.title, sticker_id, int(time.time())))
conn.commit()
except Exception as e:
print(sql)
print(e)
else:
if res['chat_name'] != msg.chat.title:
sql = 'SELECT * FROM `banned_stickers` WHERE `chat_id` = %s'
curs.execute(sql, (msg.chat.id, ))
res = curs.fetchall()
for i in res:
sql = 'UPDATE `banned_stickers` SET `chat_name` = %s WHERE `chat_id` = %s'
curs.execute(sql, (msg.chat.title, msg.chat.id))
conn.commit()
def unban_sticker(msg, sticker_id):
"""
Unbans a sticker\n
:param msg:\n
:param sticker_id:\n
"""
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `banned_stickers` WHERE `chat_id` = %s and `sticker_id` = %s'
curs.execute(sql, (msg.chat.id, sticker_id))
res = curs.fetchone()
if res is not None:
sql = 'DELETE FROM `banned_stickers` WHERE `chat_id` = %s and `sticker_id` = %s'
curs.execute(sql, (msg.chat.id, sticker_id))
conn.commit()
return True
else:
return False
def get_creator(chat_obj):
"""
Returns the chat creator object\n
:param msg:\n
"""
creator = bot.get_chat_administrators(chat_obj.id)[0].user
for i in bot.get_chat_administrators(chat_obj.id):
if i.status == 'creator':
creator = i.user
return creator
def register_new_user(user_obj, lang):
"""
Registers a new user\n
:param user_obj:\n
:param lang:\n
"""
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `users` WHERE `user_id` = %s'
curs.execute(sql, (user_obj.id, ))
res = curs.fetchone()
sec_name = 'None'
try:
sec_name = user_obj.second_name
except Exception as e:
sec_name = 'None'
logging.error(e)
if res is None:
sql = 'INSERT INTO `users` (`user_id`, `registration_time`, `first_name`, `second_name`, `settings`) VALUES (%s, %s, %s, %s, %s)'
settings = config.default_user_settings
settings['language'] = lang
curs.execute(sql, (user_obj.id, int(time.time()), user_obj.first_name, sec_name, ujson.dumps(settings)))
conn.commit()
utils.notify_new_user(user_obj, lang)
else:
curr_settings = ujson.loads(get_user_param(user_obj.id, 'settings'))
curr_settings['language'] = lang
change_user_param(user_obj.id, 'settings', ujson.dumps(curr_settings))
def change_user_param(user_id, key, value):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'UPDATE `users` SET `{key}` = %s WHERE `user_id` = %s'.format(key = key)
curs.execute(sql, (value, user_id))
conn.commit()
def get_bot_settings(token):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT `settings` FROM `bot_settings` WHERE `token` = %s'
curs.execute(sql, (token, ))
r = curs.fetchone()
return r['settings']
def change_bot_settings(token, new_settings):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'UPDATE `bot_settings` SET `settings` = %s WHERE `token` = %s'
curs.execute(sql, (new_settings, token))
conn.commit()
def register_new_chat(chat_obj):
"""
Registers a new chat\n
:param msg:\n
"""
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM chats WHERE `chat_id` = %s'
curs.execute(sql, (chat_obj.id, ))
res = curs.fetchone()
if res is None:
creator = get_creator(chat_obj)
sql = 'INSERT INTO `chats` (`chat_id`, `chat_name`, `creator_name`, `creator_id`, `chat_members_count`, `registration_time`, `settings`) VALUES (%s, %s, %s, %s, %s, %s, %s)'
try:
curs.execute(sql, (chat_obj.id, chat_obj.title, creator.first_name, creator.id, bot.get_chat_members_count(chat_obj.id), int(time.time()), ujson.dumps(config.default_group_settings)))
conn.commit()
s = ''
for i in bot.get_chat_administrators(chat_obj.id):
print(s)
s = s + '<a href="tg://user?id={user_id}">{user_name}</a> '.format(user_id = i.user.id, user_name = i.user.first_name)
bot.send_message(
chat_obj.id,
text.group_commands['ru']['registration'].format(admins = s),
parse_mode = 'HTML'
)
except Exception as e:
logging.error('error: {}'.format(e))
logging.error(sql)
utils.notify_new_chat(chat_obj)
t = Thread(target = register_admins, args = (chat_obj.id, ))
t.start()
t.join()
def get_users_count():
"""
Returns the number of users in the database\n
"""
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT COUNT(`user_id`) FROM `users`'
curs.execute(sql)
res = curs.fetchone()
return res['COUNT(`user_id`)']
def get_unblocked_chats_count():
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT COUNT(`chat_id`) FROM `chats` WHERE `is_blocked` = 1'
curs.execute(sql)
res = curs.fetchone()
return res['COUNT(`chat_id`)']
def get_unblocked_users_count():
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT COUNT(`user_id`) FROM `users` WHERE `is_blocked` = 0'
curs.execute(sql)
res = curs.fetchone()
return res['COUNT(`user_id`)']
def get_chats_count():
"""
Returns the number of chats in the database\n
"""
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT COUNT(`chat_id`) FROM `chats`'
curs.execute(sql)
res = curs.fetchone()
return res['COUNT(`chat_id`)']
def get_user_param(user_id, column):
"""
Returns a specific parameter from the user's settings
:param msg:
:param column:
"""
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `users` WHERE `user_id` = %s'
curs.execute(sql, (user_id, ))
res = curs.fetchone()
try:
return res[column]
except Exception as e:
res = {}
register_new_user(bot.get_chat_member(-1001236256304, user_id).user, 'ru')
change_user_param(user_id, 'settings', ujson.dumps(config.default_user_settings))
res['settings'] = ujson.dumps(config.default_user_settings)
return res[column]
def get_group_params(chat_id):
res = cache_worker.group_info_search_in_cache(chat_id)
if not res['result']:
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `chats` WHERE `chat_id` = %s'
curs.execute(sql, (chat_id, ))
res = curs.fetchone()
try:
ujson.loads(res['settings'])['get_notifications']
cache_worker.group_info_update_cache(chat_id, res['settings'])
return ujson.loads(res['settings'])
except Exception as e:
register_new_chat(bot.get_chat(chat_id))
change_group_params(chat_id, ujson.dumps(config.default_group_settings))
bot.send_message(
chat_id,
text.group_commands['ru']['errors']['db_error']['got_error']
)
bot.send_message(
chat_id,
text.group_commands['ru']['errors']['db_error']['finshed']
)
return config.default_group_settings
else:
return ujson.loads(res['text'])
def change_group_params(chat_id, new_params):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'UPDATE `chats` SET `settings` = %s WHERE `chat_id` = %s'
try:
curs.execute(sql, (new_params, chat_id))
conn.commit()
cache_worker.group_info_update_cache(chat_id, new_params)
except Exception as e:
print(e)
print(sql)
def is_user_new(msg):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM users WHERE `user_id` = %s'
curs.execute(sql, (msg.from_user.id, ))
r = curs.fetchone()
if r is None:
res = True
else:
res = False
return res
def check_sticker(sticker_id, chat_id):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `banned_stickers` WHERE `sticker_id` = %s AND `chat_id` = %s'
curs.execute(sql, (sticker_id, chat_id))
r = curs.fetchone()
if r is None:
return False
else:
return True
def get_warns(user_id, chat_id):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `warns` WHERE `user_id` = %s AND `chat_id` = %s'
curs.execute(sql, (user_id, chat_id))
res = curs.fetchone()
if res is None:
sql = 'INSERT INTO `warns`(`user_id`, `chat_id`, `warns`) VALUES (%s, %s, %s)'
warns = 0
curs.execute(sql, (user_id, chat_id, warns))
conn.commit()
else:
warns = int(res['warns'])
return warns
def new_warn(user_id, chat_id):
with DataConn(db) as conn:
curs = conn.cursor()
warns = get_warns(user_id, chat_id)
warns += 1
set_warns(user_id, chat_id, warns)
def zeroing_warns(user_id, chat_id):
set_warns(user_id, chat_id, 0)
def set_warns(user_id, chat_id, warns):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'UPDATE `warns` SET `warns` = %s WHERE `user_id` = %s AND `chat_id` = %s'
curs.execute(sql, (warns, user_id, chat_id))
conn.commit()
def get_chats():
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `chats` ORDER BY `registration_time` ASC'
curs.execute(sql)
res = curs.fetchall()
return res
def get_users():
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `users` ORDER BY `registration_time` ASC'
curs.execute(sql)
res = curs.fetchall()
return res
def get_all():
all_chats = []
all_chats.extend(get_chats())
all_chats.extend(get_users())
return all_chats
def replacerr(text):
text_list = list(text)
for idx, word in enumerate(text):
if word in config.restricted_characters:
text_list[idx] = config.restricted_characters_replace[word]
return ''.join(text_list)
def escape_string(value):
# value = value.replace('\\', r'\\\\')
# value = value.replace('\0', r'\\0')
# value = value.replace('\n', r'\\n')
# value = value.replace('\r', r'\\r')
# value = value.replace('\032', r'\\Z')
value = value.replace("'", r"\'")
value = value.replace('"', r'\"')
return value
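# Design note: this manual escaping is only a fallback helper; the queries in
# this module pass parameters via curs.execute(sql, params), which is the
# safer way to avoid SQL injection.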
def update_stats_bot(count):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'INSERT INTO `stats` (`amount`, `check_time`) VALUES (%s, %s)'
curs.execute(sql, (count, int(time.time())))
conn.commit()
def delete_pending():
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'DELETE FROM `stats`'
curs.execute(sql)
conn.commit()
def check_global_ban(user_id):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `global_bans` WHERE `user_id` = %s'
curs.execute(sql, (user_id, ))
res = curs.fetchone()
if res is None:
return False
else:
return True
def global_ban(user_id):
with DataConn(db) as conn:
if not check_global_ban(user_id):
curs = conn.cursor()
sql = 'INSERT INTO `global_bans` (`user_id`) VALUES (%s)'
curs.execute(sql, (user_id, ))
conn.commit()
def global_unban(user_id):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'DELETE FROM `global_bans` WHERE `user_id` = %s'
curs.execute(sql, (user_id, ))
conn.commit()
def new_update(msg, end_time):
user_id = msg.from_user.id
chat_id = msg.chat.id
try:
new_content(msg, end_time)
except Exception as e:
logging.error(e)
try:
update_chat_stats(msg)
except Exception as e:
logging.error(e)
try:
update_user_stats(msg)
except Exception as e:
logging.error(e)
def update_user_stats(msg):
user_id = msg.from_user.id
chat_id = msg.chat.id
chat_name = msg.chat.title
user_name = msg.from_user.first_name
with DataConn(db) as conn:
curs = conn.cursor()
current_updates = get_user_messages_count(user_id, chat_id)
sql = 'SELECT * FROM `most_active_users` WHERE `user_id` = %s AND `chat_id` = %s'
curs.execute(sql, (user_id, chat_id))
res = curs.fetchone()
if res is None:
sql = 'INSERT INTO `most_active_users` (`user_id`, `user_name`, `chat_id`, `chat_name`, `amount`) VALUES (%s, %s, %s, %s, %s)'
curs.execute(sql, (user_id, user_name, chat_id, chat_name, current_updates))
conn.commit()
else:
sql = 'UPDATE `most_active_users` SET `user_name` = %s, `amount` = %s WHERE `user_id` = %s AND `chat_id` = %s'
curs.execute(sql, (user_name, current_updates, user_id, chat_id))
sql = 'UPDATE `most_active_users` SET `chat_name` = %s WHERE `chat_id` = %s'
curs.execute(sql, (chat_name, chat_id))
conn.commit()
def get_user_messages_count(user_id, chat_id):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT `amount` FROM `most_active_users` WHERE `chat_id` = %s AND `user_id` = %s'
curs.execute(sql, (chat_id, user_id))
res = curs.fetchone()
return res['amount']
def update_chat_stats(msg):
with DataConn(db) as conn:
curs = conn.cursor()
current_updates = get_chat_updates_count(msg.chat.id)
sql = 'SELECT * FROM `most_popular_chats` WHERE `chat_id` = %s'
curs.execute(sql, (msg.chat.id, ))
res = curs.fetchone()
if res is None:
sql = 'INSERT INTO `most_popular_chats` (`updates_count`, `chat_id`, `chat_name`, `last_update`) VALUES (%s, %s, %s, %s)'
curs.execute(sql, (current_updates, msg.chat.id, msg.chat.title, msg.date))
try:
conn.commit()
except Exception as e:
logging.error(e)
logging.error(sql)
else:
sql = 'UPDATE `most_popular_chats` SET `updates_count` = %s, `chat_name` = %s, `last_update` = %s WHERE `chat_id` = %s'
curs.execute(sql, (current_updates, msg.chat.title, msg.date, msg.chat.id))
try:
conn.commit()
except Exception as e:
logging.error(e)
logging.error(sql)
def get_chat_updates_count(chat_id):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT `updates_count` FROM `most_popular_chats` WHERE `chat_id` = %s'
curs.execute(sql, (chat_id, ))
res = curs.fetchone()
return int(res['updates_count'])
def get_file_size(msg):
res = 0
if msg.content_type == 'audio':
res = msg.audio.file_size
elif msg.content_type == 'document':
res = msg.document.file_size
elif msg.content_type == 'photo':
res = msg.photo[-1].file_size
elif msg.content_type == 'sticker':
res = msg.sticker.file_size
elif msg.content_type == 'video':
res = msg.video.file_size
elif msg.content_type == 'video_note':
res = msg.video_note.file_size
elif msg.content_type == 'voice':
res = msg.voice.file_size
return res
def get_file_id(msg):
res = ''
if msg.content_type == 'audio':
res = msg.audio.file_id
elif msg.content_type == 'document':
res = msg.document.file_id
elif msg.content_type == 'photo':
res = msg.photo[-1].file_id
elif msg.content_type == 'sticker':
res = msg.sticker.file_id
elif msg.content_type == 'video':
res = msg.video.file_id
elif msg.content_type == 'video_note':
res = msg.video_note.file_id
elif msg.content_type == 'voice':
res = msg.voice.file_id
return res
def new_message(msg, end_time):
user_id = msg.from_user.id
chat_id = msg.chat.id
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'INSERT INTO `proceeded_messages` (`user_id`, `chat_id`, `msg_time`, `used_time`, `proceeded_at`, `content_type`) VALUES (%s, %s, %s, %s, %s, %s)'
curs.execute(sql, (user_id, chat_id, msg.date, end_time*1000, int(time.time()), msg.content_type))
conn.commit()
def new_content(msg, end_time):
new_message(msg, end_time)
if msg.content_type == 'text':
try:
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'INSERT INTO `text` (`user_id`, `chat_id`, `text`, `msg_date`, `message_id`) VALUES (%s, %s, %s, %s, %s)'
curs.execute(sql, (msg.from_user.id, msg.chat.id, msg.text, msg.date, msg.message_id))
conn.commit()
except Exception as e:
logging.error(e)
logging.error(sql)
else:
try:
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'INSERT INTO `{cont_type}` (`user_id`, `chat_id`, `file_id`, `file_size`) VALUES (%s, %s, %s, %s)'.format(
cont_type = msg.content_type
)
curs.execute(sql, (msg.from_user.id, msg.chat.id, get_file_id(msg), get_file_size(msg)))
conn.commit()
except Exception as e:
logging.error(e)
logging.error(sql)
def get_chat_users(chat_id, limit):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `most_active_users` WHERE `chat_id` = %s ORDER BY `amount` DESC LIMIT {limit}'.format(limit = limit)
curs.execute(sql, (chat_id, ))
r = curs.fetchall()
return r
def get_chat_users_count(chat_id):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT COUNT(`user_id`) FROM `most_active_users` WHERE `chat_id` = %s ORDER BY `amount` DESC'
curs.execute(sql, (chat_id, ))
r = curs.fetchone()
return r['COUNT(`user_id`)']
def new_voteban(chat_id, chat_name, victim_id, victim_name, vote_hash):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'INSERT INTO `votebans`(`vote_hash`, `victim_id`, `victim_name`, `chat_id`, `chat_name`, `votes_count`, `votes_limit`, `started_at`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)'
curs.execute(sql, (vote_hash, victim_id, victim_name, chat_id, chat_name, 0, utils.get_voteban_limit(chat_id), int(time.time())))
conn.commit()
def update_voteban(vote_hash):
with DataConn(db) as conn:
curs = conn.cursor()
curr_votes = get_voteban_votes_count(vote_hash)
utils.set_voteban_votes_count(vote_hash, curr_votes)
if utils.get_voteban_limit():
pass
def get_voteban_votes_count(vote_hash):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT COUNT(`vote_id`) FROM `voteban` WHERE `vote_id` = %s'
curs.execute(sql, (vote_hash, ))
r = curs.fetchone()
return r['COUNT(`vote_id`)']
def set_voteban_votes_count(vote_hash, votes_count):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'UPDATE `votebans` SET `votes_count` = %s WHERE `vote_hash` = %s'
curs.execute(sql, (votes_count, vote_hash))
conn.commit()
def get_voteban_info(vote_hash):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'SELECT * FROM `votebans` WHERE `vote_hash` = %s'
curs.execute(sql, (vote_hash, ))
r = curs.fetchone()
return r
def set_voteban_info(column, state, vote_hash):
with DataConn(db) as conn:
curs = conn.cursor()
sql = 'UPDATE `votebans` SET `{column}` = %s WHERE `vote_hash` = %s'.format(column = column)
curs.execute(sql, (state, vote_hash))
conn.commit()
def new_chat_invite(chat_id, inviter, invited, joined_at):
with DataConn(db) as conn:
cursor = conn.cursor()
sql = 'INSERT INTO `inviters` (`chat_id`, `inviter`, `invited`, `joined_at`) VALUES (%s, %s, %s, %s)'
cursor.execute(sql, (chat_id, inviter, invited, joined_at))
conn.commit()
def get_top_inviters(chat_id, limit):
with DataConn(db) as conn:
cursor = conn.cursor()
sql = 'SELECT COUNT(`inviter`), `inviter` FROM `inviters` WHERE `chat_id` = %s GROUP BY `inviter` ORDER BY COUNT(`inviter`) ASC LIMIT %s'
cursor.execute(sql, (chat_id, limit))
return cursor.fetchall()
def add_to_delete_queue(chat_id, message_id, deleted_at):
with DataConn(db) as conn:
cursor = conn.cursor()
sql = 'INSERT INTO `will_be_deleted` (`chat_id`, `message_id`, `ttl`) VALUES (%s, %s, %s)'
cursor.execute(sql, (chat_id, message_id, deleted_at))
conn.commit()
def get_expired_messages():
with DataConn(db) as conn:
cursor = conn.cursor()
sql = 'SELECT * FROM `will_be_deleted` WHERE `ttl` < %s ORDER BY `ttl` ASC'
cursor.execute(sql, (int(datetime.datetime.now().timestamp()), ))
msg_list = cursor.fetchall()
sql = 'DELETE FROM `will_be_deleted` WHERE `ttl` < %s ORDER BY `ttl` ASC'
cursor.execute(sql, (int(datetime.datetime.now().timestamp()), ))
conn.commit()
return msg_list
def add_log_channel(chat_id, channel_id):
settings = get_group_params(chat_id)
settings['logs_channel']['is_on'] = True
settings['logs_channel']['chat_id'] = channel_id
change_group_params(chat_id, ujson.dumps(settings))
try:
with DataConn(db) as conn:
cursor = conn.cursor()
sql = 'INSERT INTO `log_channels` (`chat_id`, `channel_id`) VALUES (%s, %s)'
cursor.execute(sql, (chat_id, channel_id))
conn.commit()
except Exception as e:
print(e)
def remove_log_channel(chat_id):
settings = get_group_params(chat_id)
settings['logs_channel']['is_on'] = False
settings['logs_channel']['chat_id'] = 0
change_group_params(chat_id, ujson.dumps(settings))
try:
with DataConn(db) as conn:
cursor = conn.cursor()
sql = 'DELETE FROM `log_channels` WHERE `chat_id` = %s'
cursor.execute(sql, (chat_id, ))
conn.commit()
except Exception as e:
print(e)
|
cp18_ascynio.py
|
# CP18 Concurrency with asyncio
import threading
import itertools
import time
import sys
class Signal:
go = True
def spin(msg, signal):
write, flush = sys.stdout.write, sys.stdout.flush
for char in itertools.cycle('|/-\\'):
status = char + ' ' + msg
write(status)
flush()
write('\x08' * len(status))
time.sleep(.1)
if not signal.go:
break
write(' ' * len(status) + '\x08' * len(status))
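# The '\x08' (backspace) writes above move the cursor back over the status text
# so each animation frame overwrites the previous one in place; the final write
# blanks the line out before the thread exits.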
def slow_function():
# pretend waiting a long time for I/O
time.sleep(3)
return 42
def supervisor():
signal = Signal()
spinner = threading.Thread(target=spin, args=('thinking!', signal))
print('spinner object:', spinner)
spinner.start()
result = slow_function()
signal.go = False
spinner.join()
return result
def main():
result = supervisor()
print('Answer: ', result)
if __name__ == '__main__':
main()
|
vectors.py
|
import math
from typing import Tuple, Optional, Union
import random
import webbrowser
import rlbot.utils.structures.game_data_struct as game_data_struct
from utilities.utils import *
VectorArgument = Union[float, game_data_struct.Vector3]
class Vector2:
def __init__(self, x: VectorArgument, y: Optional[float] = None):
self.x: float = 0
self.y: float = 0
if isinstance(x, game_data_struct.Vector3):
self.x = x.x
self.y = x.y
elif y is not None:
self.x = x
self.y = y
else:
raise TypeError("Wrong type(s) given for Vector2.x and/or Vector2.y")
def __add__(self, v: "Vector2") -> "Vector2":
return Vector2(self.x + v.x, self.y + v.y)
def __sub__(self, v: "Vector2") -> "Vector2":
return Vector2(self.x - v.x, self.y - v.y)
def __mul__(self, v: float) -> "Vector2":
return Vector2(self.x * v, self.y * v)
def __truediv__(self, v: float) -> "Vector2":
return Vector2(self.x / v, self.y / v)
def __rmul__(self, v: float) -> "Vector2":
return Vector2(self.x * v, self.y * v)
def __rtruediv__(self, v: float) -> "Vector2":
return Vector2(self.x / v, self.y / v)
def __str__(self) -> str:
return f"({self.x}, {self.y})"
def __repr__(self) -> str:
return self.__str__()
def __eq__(self, other: "Vector2") -> bool:
if isinstance(other, Vector2):
if other.x == self.x and other.y == self.y:
return True
return False
return False
def __neg__(self) -> "Vector2":
return -1 * self
def __getitem__(self, item: int) -> float:
if item == 0:
return self.x
elif item == 1:
return self.y
else:
raise IndexError("Invalid index for accessing Vector2. Must be 0 or 1.")
def __setitem__(self, key: int, value: float):
if key == 0:
self.x = value
elif key == 1:
self.y = value
else:
raise IndexError("Invalid index for accessing Vector2. Must be 0 or 1.")
def correction_to(self, ideal):
correction = math.atan2(self.y, -self.x) - math.atan2(ideal.y, -ideal.x) # The in-game axes are left handed, so use -x
return correction if abs(correction) <= math.pi else (correction - sign(correction) * 2 * math.pi) # Make sure we go the 'short way'
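# Worked example (assuming Vector2 arguments): correction_to of self=(1, 0)
# toward ideal=(0, 1) evaluates to atan2(0, -1) - atan2(1, 0) = pi - pi/2 = pi/2.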
def modified(self, x: float = None, y: float = None) -> "Vector2":
new_x = x if x is not None else self.x
new_y = y if y is not None else self.y
return Vector2(new_x, new_y)
@property # Returns the euclidean distance of this vector
def length(self) -> float:
return math.sqrt(self.x**2 + self.y**2)
@property
def size(self) -> float:
return self.length
@property
def as_tuple(self) -> Tuple[float, float]:
return self.x, self.y
def normalize(self):
if self.size == 0: return Vector2(self.x, self.y)
size = self.size  # normalize in place; `self /= size` would only rebind the local name
self.x /= size
self.y /= size
@property
def normalized(self) -> "Vector2":
# A shorthand to get a normalized (length 1) copy of this vector.
if self.size == 0: return Vector2(self.x, self.y)
return self / self.size
class Vector3:
def __init__(self, x: VectorArgument, y: Optional[float] = None, z: Optional[float] = None):
self.x: float = 0
self.y: float = 0
self.z: float = 0
if isinstance(x, game_data_struct.Vector3):
self.x = x.x
self.y = x.y
self.z = x.z
elif isinstance(x, game_data_struct.Rotator):
self.x = x.roll
self.y = x.pitch
self.z = x.yaw
elif y is not None and z is not None:
self.x = x
self.y = y
self.z = z
else:
raise TypeError("Wrong type(s) given for Vector3.y and/or Vector3.z")
def __add__(self, v) -> "Vector3":
if isinstance(v, Vector2): return Vector3(self.x + v.x, self.y + v.y, self.z)
return Vector3(self.x + v.x, self.y + v.y, self.z + v.z)
def __sub__(self, val):
if isinstance(val, Vector2): return Vector3(self.x - val.x, self.y - val.y, self.z)
return Vector3(self.x - val.x, self.y - val.y, self.z - val.z)
def __mul__(self, v: float) -> "Vector3":
return Vector3(self.x * v, self.y * v, self.z * v)
def __truediv__(self, v: float) -> "Vector3":
return Vector3(self.x / v, self.y / v, self.z / v)
def __rmul__(self, v: float) -> "Vector3":
return Vector3(self.x * v, self.y * v, self.z * v)
def __rtruediv__(self, v: float) -> "Vector3":
return Vector3(self.x / v, self.y / v, self.z / v)
def __str__(self) -> str:
return f"({self.x}, {self.y}, {self.z})"
def __repr__(self) -> str:
return self.__str__()
def __eq__(self, other: "Vector3") -> bool:
if isinstance(other, Vector3):
if other.x == self.x and other.y == self.y and other.z == self.z:
return True
return False
return False
def __neg__(self) -> "Vector3":
return -1 * self
def __getitem__(self, item: int) -> float:
return [self.x, self.y, self.z][item]
def proparty(self) -> "Vector3":
did_you_have_fun_yet = False # Toggle this if this pro party was enough fun.
if did_you_have_fun_yet:
return property(self)
from pathlib import Path
import urllib.request
from threading import Thread
from hashlib import sha1
# If you're reading this, good job. Congrats, you've found it. Move along citizen.
you_fool = []
you_activated_my_trap_card = False
rot13 = str.maketrans('ABCDEFGHIJKLMabcdefghijklmNOPQRSTUVWXYZnopqrstuvwxyz',
'NOPQRSTUVWXYZnopqrstuvwxyzABCDEFGHIJKLMabcdefghijklm')
try:
with open(Path(__file__).absolute().parent.parent / 'nhqvb'.translate(rot13) / 'obvvat.zc4'.translate(rot13), 'rb') as f:
𝚖𝚞𝚜𝚒𝚌 = f.read()
assert sha1(𝚖𝚞𝚜𝚒𝚌).hexdigest() == 'e053141add7086cf4686af99719069958385c10c'
except:
you_activated_my_trap_card = True # https://youtu.be/LJU3UXDsI2o?t=585
def fun(selfie):
nonlocal did_you_have_fun_yet
if did_you_have_fun_yet:
return self(selfie)
import 𝚒𝚗𝚜𝚙𝚎𝚌𝚝, 𝚠𝚒𝚗𝚜𝚘𝚞𝚗𝚍
from rlbot.agents.base_agent import BaseAgent
frames = inspect.getouterframes(inspect.currentframe())
for outer in frames:
agent = outer.frame.f_locals.get('self', None)
if not isinstance(agent, BaseAgent): continue
def get_state(p):
nonlocal jmp, you_fool
j = p.game_cars[agent.index].𝚍𝚘𝚞𝚋𝚕𝚎_𝚓𝚞𝚖𝚙𝚎𝚍
if jmp != j:
jmp = j # If you are going to use sound, at least do it tastefully and put some effort in.
if jmp:
if you_activated_my_trap_card:
if you_fool:
def trap():
fool = you_fool.pop(0)
you_fool.append(fool)
if fool:
𝚠𝚒𝚗𝚜𝚘𝚞𝚗𝚍.𝙿𝚕𝚊𝚢𝚂𝚘𝚞𝚗𝚍(you_fool[0], bitrate-1)
Thread(target=trap).start()
else:
you_fool.append(b'')
def scheming():
for popcorn in ['02', '1', '02', '3', '4']:
delicious = 'uggcf://tvguho.pbz/QbzAbzAbz/Nanepul/oybo/fcbbxl/nanepul/nhqvb/unyybjrra_{}.zc4?enj=gehr'.format(popcorn)
you_fool.append(urllib.request.urlopen(delicious.translate(rot13)).read())
del you_fool[you_fool.index(b'')]
Thread(target=scheming).start()
else:
𝚠𝚒𝚗𝚜𝚘𝚞𝚗𝚍.𝙿𝚕𝚊𝚢𝚂𝚘𝚞𝚗𝚍(𝚖𝚞𝚜𝚒𝚌, winsound.SND_MEMORY) #buffer + bitrate*len(𝚖𝚞𝚜𝚒𝚌))
return orig(p)
agent.get_output, orig, jmp, bitrate, buffer = get_state, agent.get_output, False, 5, 10453
did_you_have_fun_yet = True # no performance concern :)
break
return self(selfie)
return property(fun)
def flatten(self) -> Vector2:
return Vector2(self.x, self.y)
@proparty # Returns the euclidean distance of this vector
def length(self) -> float:
return math.sqrt(self.x**2 + self.y**2 + self.z**2)
@property
def size(self) -> float:
return self.length
def dot(self, v: "Vector3"):
return self.x * v.x + self.y * v.y + self.z * v.z
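# Example: Vector3(1, 2, 3).dot(Vector3(4, 5, 6)) == 32, i.e. 1*4 + 2*5 + 3*6.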
def normalize(self):
if self.size == 0: return Vector3(self.x, self.y, self.z)
size = self.size  # normalize in place; `self /= size` would only rebind the local name
self.x /= size
self.y /= size
self.z /= size
@property
def normalized(self) -> "Vector3":
# A shorthand to get a normalized (length 1) copy of this vector.
if self.size == 0: return Vector3(self.x, self.y, self.z)
return self / self.size
def modified(self, x: float = None, y: float = None, z: float = None) -> "Vector3":
new_x: float = x if x is not None else self.x
new_y: float = y if y is not None else self.y
new_z: float = z if z is not None else self.z
return Vector3(new_x, new_y, new_z)
def angle_between(self, other: "Vector3") -> float:
if self.size == 0 or other.size == 0: return 0
d: float = Vector3.dot(self, other)
magnitude_product: float = self.length * other.length
div = d / magnitude_product
div = clamp(div, -1, 1) # To prevent floating point issues where the value of div is something like 1.0000000000000002
angle: float = math.acos(div)
return angle
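# Example: Vector3(1, 0, 0).angle_between(Vector3(0, 1, 0)) == math.pi / 2,
# since dot(a, b) == 0 and acos(0) == pi/2.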
class life(int):
math = False
love = life()
assert love <3
|
resultserver.py
|
# Copyright (C) 2010-2013 Claudio Guarnieri.
# Copyright (C) 2014-2016 Cuckoo Foundation.
# Copyright (C) 2020-2021 PowerLZY.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
from __future__ import print_function
import errno
import json
import os
import socket
import select
import logging
import datetime
import SocketServer
import threading
from lib.cuckoo.common.abstracts import ProtocolHandler
from lib.cuckoo.common.config import Config
from lib.cuckoo.common.constants import CUCKOO_ROOT
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.exceptions import CuckooCriticalError
from lib.cuckoo.common.exceptions import CuckooResultError
from lib.cuckoo.common.netlog import BsonParser
from lib.cuckoo.common.utils import create_folder, Singleton
log = logging.getLogger(__name__)
BUFSIZE = 16 * 1024
class Disconnect(Exception):
pass
class ResultServer(SocketServer.ThreadingTCPServer, object):
"""Result server. Singleton!
This class handles results coming back from the analysis machines.
"""
__metaclass__ = Singleton
allow_reuse_address = True
daemon_threads = True
def __init__(self, *args, **kwargs):
self.cfg = Config()
self.analysistasks = {}
self.analysishandlers = {}
ip = self.cfg.resultserver.ip
self.port = int(self.cfg.resultserver.port)
while True:
try:
server_addr = ip, self.port
SocketServer.ThreadingTCPServer.__init__(
self, server_addr, ResultHandler, *args, **kwargs
)
except Exception as e:
if e.errno == errno.EADDRINUSE:
if self.cfg.resultserver.get("force_port"):
raise CuckooCriticalError(
"Cannot bind ResultServer on port %d, "
"bailing." % self.port
)
else:
log.warning("Cannot bind ResultServer on port %s, "
"trying another port.", self.port)
self.port += 1
elif e.errno == errno.EADDRNOTAVAIL:
raise CuckooCriticalError(
"Unable to bind ResultServer on %s:%s %s. This "
"usually happens when you start Cuckoo without "
"bringing up the virtual interface associated with "
"the ResultServer IP address. Please refer to "
"http://docs.cuckoosandbox.org/en/latest/faq/#troubles-problem"
" for more information." % (ip, self.port, e)
)
else:
raise CuckooCriticalError(
"Unable to bind ResultServer on %s:%s: %s" %
(ip, self.port, e)
)
else:
log.debug("ResultServer running on %s:%s.", ip, self.port)
self.servethread = threading.Thread(target=self.serve_forever)
self.servethread.setDaemon(True)
self.servethread.start()
break
def add_task(self, task, machine):
"""Register a task/machine with the ResultServer."""
self.analysistasks[machine.ip] = task, machine
self.analysishandlers[task.id] = []
def del_task(self, task, machine):
"""Delete ResultServer state and wait for pending RequestHandlers."""
x = self.analysistasks.pop(machine.ip, None)
if not x:
log.warning("ResultServer did not have %s in its task info.",
machine.ip)
        handlers = self.analysishandlers.pop(task.id, None) or []
for h in handlers:
h.end_request.set()
h.done_event.wait()
def register_handler(self, handler):
"""Register a RequestHandler so that we can later wait for it."""
task, machine = self.get_ctx_for_ip(handler.client_address[0])
if not task or not machine:
return False
self.analysishandlers[task.id].append(handler)
def get_ctx_for_ip(self, ip):
"""Return state for this IP's task."""
x = self.analysistasks.get(ip)
if not x:
log.critical("ResultServer unable to map ip to context: %s.", ip)
return None, None
return x
def build_storage_path(self, ip):
"""Initialize analysis storage folder."""
task, machine = self.get_ctx_for_ip(ip)
if not task or not machine:
return
return os.path.join(CUCKOO_ROOT, "storage", "analyses", str(task.id))
class ResultHandler(SocketServer.BaseRequestHandler):
"""Result handler.
This handler speaks our analysis log network protocol.
"""
def setup(self):
self.rawlogfd = None
self.protocol = None
self.startbuf = ""
self.end_request = threading.Event()
self.done_event = threading.Event()
self.pid, self.ppid, self.procname = None, None, None
self.server.register_handler(self)
if hasattr(select, "poll"):
self.poll = select.poll()
self.poll.register(self.request, select.POLLIN)
else:
self.poll = None
def finish(self):
self.done_event.set()
if self.protocol:
self.protocol.close()
if self.rawlogfd:
self.rawlogfd.close()
def wait_sock_or_end(self):
while True:
if self.end_request.isSet():
return False
if self.poll:
if self.poll.poll(1000):
return True
else:
rs, _, _ = select.select([self.request], [], [], 1)
if rs:
return True
def seek(self, pos):
pass
def read(self, length):
buf = ""
while len(buf) < length:
if not self.wait_sock_or_end():
raise Disconnect()
tmp = self.request.recv(length-len(buf))
if not tmp:
raise Disconnect()
buf += tmp
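        # Mirror the raw BSON stream: once the per-process log file is open,
        # write straight to it; until then, buffer in startbuf so
        # open_process_log() can replay the bytes received so far.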
if isinstance(self.protocol, BsonParser):
if self.rawlogfd:
self.rawlogfd.write(buf)
else:
self.startbuf += buf
return buf
def read_any(self):
if not self.wait_sock_or_end():
raise Disconnect()
tmp = self.request.recv(BUFSIZE)
if not tmp:
raise Disconnect()
return tmp
def read_newline(self, strip=False):
buf = ""
while "\n" not in buf:
buf += self.read(1)
if strip:
buf = buf.strip()
return buf
def negotiate_protocol(self):
protocol = self.read_newline(strip=True)
# Command with version number.
if " " in protocol:
command, version = protocol.split()
version = int(version)
else:
command, version = protocol, None
if command == "BSON":
self.protocol = BsonParser(self, version)
elif command == "FILE":
self.protocol = FileUpload(self, version)
elif command == "LOG":
self.protocol = LogHandler(self, version)
else:
raise CuckooOperationalError(
"Netlog failure, unknown protocol requested."
)
self.protocol.init()
def handle(self):
ip, port = self.client_address
self.connect_time = datetime.datetime.now()
self.storagepath = self.server.build_storage_path(ip)
if not self.storagepath:
return
# Create all missing folders for this analysis.
self.create_folders() # folders = "shots", "files", "logs", "buffer"
try:
# Initialize the protocol handler class for this connection.
self.negotiate_protocol()
for event in self.protocol:
if isinstance(self.protocol, BsonParser) and event["type"] == "process":
self.open_process_log(event)
except CuckooResultError as e:
log.warning(
"ResultServer connection stopping because of "
"CuckooResultError: %s.", e
)
except (Disconnect, socket.error):
pass
except:
log.exception("FIXME - exception in resultserver connection %s",
self.client_address)
def open_process_log(self, event):
pid = event["pid"]
ppid = event["ppid"]
procname = event["process_name"]
if self.pid is not None:
log.debug(
"ResultServer got a new process message but already "
"has pid %d ppid %s procname %s.", pid, ppid, procname
)
raise CuckooResultError(
"ResultServer connection state inconsistent."
)
# Only report this process when we're tracking it.
if event["track"]:
log.debug(
"New process (pid=%s, ppid=%s, name=%s)",
pid, ppid, procname
)
filepath = os.path.join(self.storagepath, "logs", "%s.bson" % pid)
self.rawlogfd = open(filepath, "wb")
self.rawlogfd.write(self.startbuf)
self.pid, self.ppid, self.procname = pid, ppid, procname
def create_folders(self):
folders = "shots", "files", "logs", "buffer"
for folder in folders:
try:
create_folder(self.storagepath, folder=folder)
except CuckooOperationalError:
log.error("Unable to create folder %s" % folder)
return False
class FileUpload(ProtocolHandler):
RESTRICTED_DIRECTORIES = "reports/",
lock = threading.Lock()
def init(self):
self.upload_max_size = \
self.handler.server.cfg.resultserver.upload_max_size
self.storagepath = self.handler.storagepath
self.fd = None
self.filelog = os.path.join(self.handler.storagepath, "files.json")
def __iter__(self):
# Read until newline for file path, e.g.,
# shots/0001.jpg or files/9498687557/libcurl-4.dll.bin
dump_path = self.handler.read_newline(strip=True).replace("\\", "/")
if self.version >= 2:
filepath = self.handler.read_newline(strip=True)
pids = map(int, self.handler.read_newline(strip=True).split())
else:
filepath, pids = None, []
log.debug("File upload request for %s", dump_path)
dir_part, filename = os.path.split(dump_path)
if "./" in dump_path or not dir_part or dump_path.startswith("/"):
raise CuckooOperationalError(
"FileUpload failure, banned path: %s" % dump_path
)
for restricted in self.RESTRICTED_DIRECTORIES:
if restricted in dir_part:
raise CuckooOperationalError(
"FileUpload failure, banned path."
)
try:
create_folder(self.storagepath, dir_part)
except CuckooOperationalError:
log.error("Unable to create folder %s", dir_part)
return
file_path = os.path.join(self.storagepath, dump_path.strip())
if not file_path.startswith(self.storagepath):
raise CuckooOperationalError(
"FileUpload failure, path sanitization failed."
)
if os.path.exists(file_path):
log.warning(
"Analyzer tried to overwrite an existing file, "
"closing connection."
)
return
self.fd = open(file_path, "wb")
chunk = self.handler.read_any()
while chunk:
self.fd.write(chunk)
if self.fd.tell() >= self.upload_max_size:
log.warning(
"Uploaded file length larger than upload_max_size, "
"stopping upload."
)
self.fd.write("... (truncated)")
break
try:
chunk = self.handler.read_any()
except:
break
self.lock.acquire()
with open(self.filelog, "a+b") as f:
f.write("%s\n" % json.dumps({
"path": dump_path,
"filepath": filepath,
"pids": pids,
}))
self.lock.release()
log.debug("Uploaded file length: %s", self.fd.tell())
return
yield
def close(self):
if self.fd:
self.fd.close()
class LogHandler(ProtocolHandler):
"""
"""
def init(self):
self.logpath = os.path.join(self.handler.storagepath, "analysis.log")
self.fd = self._open()
log.debug("LogHandler for live analysis.log initialized.")
def __iter__(self):
if not self.fd:
return
while True:
try:
buf = self.handler.read_newline(strip=False)
except Disconnect:
break
if not buf:
break
self.fd.write(buf)
self.fd.flush()
return
yield
def close(self):
if self.fd:
self.fd.close()
def _open(self):
if not os.path.exists(self.logpath):
return open(self.logpath, "wb")
log.debug("Log analysis.log already existing, appending data.")
fd = open(self.logpath, "ab")
# add a fake log entry, saying this had to be re-opened
# use the same format as the default logger, in case anyone wants to parse this
# 2015-02-23 12:05:05,092 [lib.api.process] DEBUG: Using QueueUserAPC injection.
now = datetime.datetime.now()
        print("\n%s,%03.0f [lib.core.resultserver] WARNING: This log file was re-opened, log entries will be appended." % (
            now.strftime("%Y-%m-%d %H:%M:%S"), now.microsecond / 1000.0
        ), file=fd)
return fd
|
HammerKlavier.py
|
"""
HammerKlavier.py -- Hammer a server by forking multiple processes to submit
many simultaneous requests.
(HammerKlavier means 'hammer keyboard')
"""
from multiprocessing import Process
from .pdict import PersistentDict
def multiProcessTest(n, funcs):
"""Fork N processes and run a testing function in each."""
if type(funcs) != list:
funcs = [funcs] * n
procs = []
for f, args in funcs:
procs.append(Process(target=f, args=args))
for p in procs:
p.start()
for p in procs:
p.join()
def testPDict(dictName, keyRoot):
from random import randint
    from time import perf_counter as clock  # time.clock was removed in Python 3.8
abc = keyRoot + 'abcdefghijklmnopqrstuvwxyz'
val1 = abc * 23
val2 = abc * 20
db = PersistentDict(dictName)
    print('start len db =', len(db))
t0 = clock()
for i in range(10000):
key = abc+'%4.4d' % i
# print key
db[key] = val1
    print('inserts', clock() - t0)
t0 = clock()
for i in range(10000):
key = abc+'%4.4d' % i
# print key
tmp = db[key]
db[key] = val2
tmp = db[key]
    print('newvalues', clock() - t0)
t0 = clock()
for i in range(10000):
j = randint(0, 10000)
key = abc+'%4.4d' % j
try:
tmp = db[key]
# print key
del db[key]
# db.sync()
except:
pass
    print('deletes', clock() - t0)
    print('end len db =', len(db))
db.close()
def main():
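    # Hammer the same PersistentDict ('EventStore') from four concurrent processes.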
multiProcessTest(4, [(testPDict, ['EventStore', 'p1']), (testPDict, ['EventStore', 'p2']),
(testPDict, ['EventStore', 'p3']), (testPDict, ['EventStore', 'p4'])])
if __name__ == '__main__':
main()
|
train_rl_object.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import h5py
import time
import argparse
import numpy as np
import os, sys, json
import os.path as osp
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import _init_paths
from nav.loaders.nav_reinforce_loader import NavReinforceDataset
from nav.models.navigator import Navigator
from nav.reinforce.eval_process import eval
from nav.reinforce.train_process import train
from nav.reinforce.imitation_process import imitation
def main(args):
mp.set_start_method('forkserver', force=True)
args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
args.gpus = [int(x) for x in args.gpus]
# set up shared_model
checkpoint_path = osp.join(args.checkpoint_dir, '%s.pth' % args.start_from)
checkpoint = torch.load(checkpoint_path)
shared_nav_model = Navigator(checkpoint['opt'])
shared_nav_model.load_state_dict(checkpoint['model_state'])
shared_nav_model.cpu()
shared_nav_model.share_memory()
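    # With the model in shared memory, the train / imitation / eval processes
    # spawned below all read and update the same parameters.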
print('shared_nav_model set up.')
# some arguments need to be copied from start_from
args.use_action = checkpoint['opt']['use_action']
args.nav_types = ['object']
# processes
processes = []
counter = mp.Value('i', 0)
lock = mp.Lock()
# train
for rank in range(args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_nav_model, counter, lock))
p.start()
processes.append(p)
# imitation
p = mp.Process(target=imitation, args=(args.num_processes, args, shared_nav_model, counter))
p.start()
processes.append(p)
# eval
p = mp.Process(target=eval, args=(args.num_processes+1, args, shared_nav_model, counter, 'val'))
p.start()
processes.append(p)
# join
for p in processes:
p.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Data input settings
parser.add_argument('--data_json', type=str, default='cache/prepro/reinforce/data.json')
parser.add_argument('--data_h5', type=str, default='cache/prepro/reinforce/data.h5')
parser.add_argument('--imitation_data_json', type=str, default='cache/prepro/imitation/data.json')
parser.add_argument('--imitation_data_h5', type=str, default='cache/prepro/imitation/data.h5')
parser.add_argument('--path_feats_dir', type=str, default='cache/path_feats')
parser.add_argument('--path_images_dir', type=str, default='cache/path_images')
parser.add_argument('--target_obj_conn_map_dir', type=str, default='data/target-obj-conn-maps')
parser.add_argument('--pretrained_cnn_path', type=str, default='cache/hybrid_cnn.pt')
parser.add_argument('--house_meta_dir', type=str, default='pyutils/House3D/House3D/metadata')
parser.add_argument('--house_data_dir', type=str, default='data/SUNCGdata/house')
parser.add_argument('--checkpoint_dir', type=str, default='output/nav_object')
parser.add_argument('--seed', type=int, default=24)
# multiprocess settings
parser.add_argument('--num_processes', type=int, default=12)
# log settings
parser.add_argument('--max_epochs', type=int, default=500)
parser.add_argument('--num_iters_per_epoch', type=int, default=1000)
parser.add_argument('--tb_dir', type=str, default='log_dir/nav_object')
parser.add_argument('--log_dir', type=str, default='log_dir/nav_object')
# Navigator settings
parser.add_argument('--shortest_path_ratio', type=float, default=1.0)
parser.add_argument('--max_episode_length', type=int, default=80)
parser.add_argument('--max_threads_per_gpu', type=int, default=1)
parser.add_argument('--mult_increasing_per_iters', type=int, default=2500)
parser.add_argument('--max_seq_length', type=int, default=50, help='max_seq_length')
parser.add_argument('--rnn_type', type=str, default='lstm')
parser.add_argument('--rnn_size', type=int, default=256)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--rnn_dropout', type=float, default=0.1)
parser.add_argument('--fc_dropout', type=float, default=0.0)
parser.add_argument('--seq_dropout', type=float, default=0.0)
parser.add_argument('--fc_dim', type=int, default=64)
parser.add_argument('--act_dim', type=int, default=64)
# Output settings
parser.add_argument('--start_from', type=str, default='im0')
parser.add_argument('--id', type=str, default='rl0')
# Optimizer
parser.add_argument('--batch_size', type=int, default=20, help='batch size in number of questions per batch')
parser.add_argument('--grad_clip', type=float, default=0.1, help='clip gradients at this value')
parser.add_argument('--learning_rate', type=float, default=1e-5, help='learning rate')
parser.add_argument('--lr_decay', type=int, default=1, help='if decay learning rate')
parser.add_argument('--learning_rate_decay_start', type=int, default=8000, help='at what iters to start decaying learning rate')
parser.add_argument('--learning_rate_decay_every', type=int, default=8000, help='every how many iters thereafter to drop LR by half')
parser.add_argument('--im_learning_rate_decay_start', type=int, default=8000, help='learning rate decay start on Imitation')
parser.add_argument('--im_learning_rate_decay_every', type=int, default=8000, help='learning rate decay every on Imitation')
parser.add_argument('--optim_alpha', type=float, default=0.8, help='alpha for adam')
parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
parser.add_argument('--optim_epsilon', type=float, default=1e-8, help='epsilon that goes into denominator for smoothing')
parser.add_argument('--weight_decay', type=float, default=1e-3, help='weight decay for l2 regularization')
args = parser.parse_args()
# update log_dir and tb_dir
args.log_dir = osp.join(args.log_dir, args.id)
args.tb_dir = osp.join(args.tb_dir, args.id)
if not osp.exists(args.log_dir): os.makedirs(args.log_dir)
if not osp.exists(args.tb_dir): os.makedirs(args.tb_dir)
# main
main(args)
|
train.py
|
import os
import neat
import gym, ppaquette_gym_super_mario
import pickle
import multiprocessing as mp
import visualize
gym.logger.set_level(40)
class Train:
def __init__(self, generations, parallel=2, level="1-1"):
self.actions = [
[0, 0, 0, 1, 0, 1],
[0, 0, 0, 1, 1, 1],
]
self.generations = generations
self.lock = mp.Lock()
self.par = parallel
self.level = level
def _get_actions(self, a):
return self.actions[a.index(max(a))]
def _fitness_func_no_parallel(self, genomes, config):
env = gym.make('ppaquette/SuperMarioBros-'+self.level+'-Tiles-v0')
env.action_space
idx, genomes = zip(*genomes)
for genome in genomes:
try:
state = env.reset()
net = neat.nn.FeedForwardNetwork.create(genome, config)
done = False
i = 0
old = 40
while not done:
state = state.flatten()
output = net.activate(state)
output = self._get_actions(output)
s, reward, done, info = env.step(output)
state = s
i += 1
if i % 50 == 0:
if old == info['distance']:
break
else:
old = info['distance']
# [print(str(i) + " : " + str(info[i]), end=" ") for i in info.keys()]
# print("\n******************************")
fitness = -1 if info['distance'] <= 40 else info['distance']
genome.fitness = fitness
env.close()
except KeyboardInterrupt:
env.close()
exit()
def _fitness_func(self, genome, config, o):
env = gym.make('ppaquette/SuperMarioBros-1-1-Tiles-v0')
# env.configure(lock=self.lock)
try:
state = env.reset()
net = neat.nn.FeedForwardNetwork.create(genome, config)
done = False
i = 0
old = 40
while not done:
state = state.flatten()
output = net.activate(state)
output = self._get_actions(output)
s, reward, done, info = env.step(output)
state = s
i += 1
if i % 50 == 0:
if old == info['distance']:
break
else:
old = info['distance']
# [print(str(i) + " : " + str(info[i]), end=" ") for i in info.keys()]
# print("\n******************************")
fitness = -1 if info['distance'] <= 40 else info['distance']
if fitness >= 3252:
pickle.dump(genome, open("finisher.pkl", "wb"))
env.close()
print("Done")
exit()
o.put(fitness)
env.close()
except KeyboardInterrupt:
env.close()
exit()
def _eval_genomes(self, genomes, config):
idx, genomes = zip(*genomes)
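        # Evaluate genomes in batches of self.par: one emulator process per
        # genome, with each process reporting its fitness through the Queue.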
for i in range(0, len(genomes), self.par):
output = mp.Queue()
processes = [mp.Process(target=self._fitness_func, args=(genome, config, output)) for genome in
genomes[i:i + self.par]]
[p.start() for p in processes]
[p.join() for p in processes]
results = [output.get() for p in processes]
for n, r in enumerate(results):
genomes[i + n].fitness = r
def _run(self, config_file, n):
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
neat.DefaultSpeciesSet, neat.DefaultStagnation,
config_file)
p = neat.Population(config)
p.add_reporter(neat.StdOutReporter(True))
p.add_reporter(neat.Checkpointer(5))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
print("loaded checkpoint...")
winner = p.run(self._eval_genomes, n)
win = p.best_genome
pickle.dump(winner, open('winner.pkl', 'wb'))
pickle.dump(win, open('real_winner.pkl', 'wb'))
visualize.draw_net(config, winner, True)
visualize.plot_stats(stats, ylog=False, view=True)
visualize.plot_species(stats, view=True)
def main(self, config_file='config'):
local_dir = os.path.dirname(__file__)
config_path = os.path.join(local_dir, config_file)
self._run(config_path, self.generations)
if __name__ == "__main__":
t = Train(1000)
t.main()
|
getting_current_thread.py
|
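# Minimal demo: each of the ten spawned threads prints the threading.current_thread() object that is running it.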
import threading
def thread_target():
print("Current Thread: {}".format(threading.current_thread()))
threads = []
for i in range(10):
thread = threading.Thread(target=thread_target)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://omotenashicoin.site'
libraries_URL = 'https://github.com/omotenashicoin-project/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for d in WebUsbTransport.enumerate():
if device.id_.startswith(d.getSerialNumber()):
return WebUsbTransport(d)
return WebUsbTransport(device)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "MTNS Testnet" if constants.net.TESTNET else "Omotenashicoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
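        # Wrap an Electrum xpub plus a relative path into the HDNodeType /
        # HDNodePathType protobuf structures expected by keepkeylib.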
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
billingApi.py
|
# -- coding:utf-8 --
'''
Created on 2017. 3. 30.
@author: sanghyun
'''
import json
import os
import threading
import zipfile
import time
import datetime
import xlsxwriter
from flask import Blueprint, request
from flask.globals import current_app, session
from werkzeug import secure_filename
from routes.api.systemMngApi import postBatchMng, putBatchMng
from util.common import API_SERVER_BACKOFFICE, paramEscape, putApiData, postApiData, getApiData, getApiSingleData, getData, allower_file, UPLOAD_FOLDER, \
getParameter, setStringToNumber, EXCEL_FILE_DOWNLOAD_COUNT, parseDate, \
EXCEL_FILE_MAKE_LIMT_COUNT, setUnicodeEncodeTypeToEucKr, setUnicodeFormatToEucKr
from types import NoneType
billingApi = Blueprint('billingApi', __name__)
@billingApi.route('/api/billing/fileUpload' , methods=['POST'])
def fileUpload():
print os.path.join(os.getcwd(), UPLOAD_FOLDER, "test")
print request.files
if 'qqfile' not in request.files:
print 'filenotFound'
return json.dumps({
"success" : "false",
"error" : "fileNotFound"
})
file = request.files['qqfile']
if file.filename == '':
print 'No selected file'
return 'fileNotFound'
else :
filename = secure_filename(file.filename)
file.save(os.path.join(os.getcwd(), UPLOAD_FOLDER, filename))
return json.dumps({
'success' : "true"
})
@billingApi.route('/api/billing/billingHistory' , methods=['GET'])
def billingHistory():
form_data = json.loads(request.args.get("formData"))
searchDate = getParameter(form_data , "searchDate").split(' - ')
queryData = {
'startDate': paramEscape(searchDate[0]),
'endDate': paramEscape(searchDate[1]),
'serviceType': getParameter(form_data,"serviceType"),
'dateType': getParameter(form_data,"dateType"),
'merchantId': getParameter(form_data,"merchantId"),
'serviceId': getParameter(form_data,"serviceId"),
'billingDuration': getParameter(form_data,"bilcType"),
'adjustType': getParameter(form_data,"adjustType"),
'offset': setStringToNumber(request.args.get("start")),
'limit': setStringToNumber(request.args.get("length")),
'excelAllFlag' : '0'
}
result_data = getApiData("/billings" ,queryData)
return json.dumps(result_data)
@billingApi.route('/api/billing/billingDetail', methods=['GET'])
def billingDetail():
url = "/billing?seq=" + request.args.get("seq")
result_data = getApiSingleData(url, {})
return json.dumps(result_data)
@billingApi.route('/api/billing/billings' , methods=['GET'])
def billings():
form_data = json.loads(request.args.get("formData"))
searchDate = getParameter(form_data , "searchDate").split(' - ')
queryData = {
'merchantId': getParameter(form_data, "merchantId"),
'serviceId': getParameter(form_data, "serviceId"),
'serviceType': getParameter(form_data, "serviceType"),
'dateType': getParameter(form_data, "dateType"),
'startDate': paramEscape(searchDate[0]),
'endDate': paramEscape(searchDate[1]),
'billingType': getParameter(form_data, "billingType"),
'status': getParameter(form_data, "status"),
'offset': setStringToNumber(request.args.get("start")),
'limit': setStringToNumber(request.args.get("length")),
'excelAllFlag' : '0'
}
result_data = getApiData("/regbillings", queryData)
return json.dumps(result_data)
@billingApi.route('/api/billing/billingsDetail' , methods=['GET'])
def billingsDetail():
url = "/regbilling?seq=" + request.args.get("seq")
result_data = getApiSingleData(url, {})
return json.dumps(result_data)
@billingApi.route('/api/billing/billingHistory/excelAll', methods=['GET'])
def excelAll():
    form_data = json.loads(request.args.get("formData"))
    searchDate = getParameter(form_data, "searchDate").split(' - ')
    queryData = {
        'startDate': paramEscape(searchDate[0]),
        'endDate': paramEscape(searchDate[1]),
        'serviceType': getParameter(form_data, "serviceType"),
        'dateType': getParameter(form_data, "dateType"),
        'merchantId': getParameter(form_data, "merchantId"),
        'serviceId': getParameter(form_data, "serviceId"),
        'billingDuration': getParameter(form_data, "bilcType"),
'empId' : session['empId'],
'offset': setStringToNumber(request.args.get("start")),
'limit': setStringToNumber(request.args.get("length")),
'excelAllFlag': '1'
}
rootPath = current_app.root_path
t1 = threading.Thread(target=makeBillingExcelFile,args=[queryData,rootPath])
t1.daemon = True
t1.start()
return "엑셀 작업요청"
def makeBillingExcelFile(queryData,rootPath):
excelZip = None
jobStatus = 0
batchId = None
try:
fileCnt = 1
makeTime = str(int(round(time.time()*1000)))
uploads = os.path.join(rootPath, "fileDownload" , "excel" , makeTime)
if not os.path.isdir(uploads):
os.makedirs(uploads)
zipFileName = u'정산명세서_'+ datetime.datetime.now().strftime('%Y%m%d') +'.zip'
        # add the job-start message
batchId = postBatchMng({
"reqId" : queryData['empId'],
"status" : "BAT-0001" , # 진행중
"filePath" : os.path.join(uploads ,zipFileName),
"content" : "정산명세서 엑셀 배치작업",
"errMsg" : ""
})["data"]["batchId"]
fileName = '정산명세서_' + datetime.datetime.now().strftime('%Y%m%d') + '_' + str(fileCnt) + '.xlsx'
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format({'align':'right'})
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
title_format = workbook.add_format({'align':'center', 'valign':'vcenter', 'bold':True, 'border':1,'fg_color':'#A9D0F5'})
string_format = workbook.add_format({'align':'center', 'valign':'vcenter'})
row = 1
worksheet.write(0, 0, "순번", title_format)
worksheet.write(0, 1, "거래처명", title_format)
worksheet.write(0, 2, "서비스명", title_format)
worksheet.write(0, 3, "정산기간", title_format)
worksheet.write(0, 4, "정산지급일", title_format)
worksheet.write(0, 5, "거래구분", title_format)
worksheet.write(0, 6, "거래금액", title_format)
worksheet.write(0, 7, "거래 취소 금액", title_format)
worksheet.write(0, 8, "총 거래금액", title_format)
worksheet.write(0, 9, "결제금액", title_format)
worksheet.write(0, 10, "할인금액", title_format)
worksheet.write(0, 11, "정산 타입", title_format)
worksheet.write(0, 12, "정산주기", title_format)
worksheet.write(0, 13, "거래건수", title_format)
worksheet.write(0, 14, "정산 수수료", title_format)
worksheet.write(0, 15, "수수료 타입", title_format)
worksheet.write(0, 16, "부가세 타입", title_format)
worksheet.write(0, 17, "수수료", title_format)
worksheet.write(0, 18, "부가세", title_format)
worksheet.write(0, 19, "수수료 합계", title_format)
worksheet.write(0, 20, "정산금액", title_format)
worksheet.write(0, 21, "은행", title_format)
worksheet.write(0, 22, "예금주", title_format)
worksheet.write(0, 23, "계좌번호", title_format)
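        # Page through /billings, advancing the offset by EXCEL_FILE_DOWNLOAD_COUNT
        # each pass, and roll over to a new workbook whenever
        # EXCEL_FILE_MAKE_LIMT_COUNT rows have been written.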
while True :
searchData = getData("/billings" ,queryData)
for data in searchData["resultList"]:
row += 1
if(data["merchantCommision"] is not None) :
merchantComission = data["merchantCommision"]+"%" if data["merchantCommType"] =="FEE-0001" else data["merchantCommision"]+"원" if data["merchantCommType"]=="FEE-0002" else "-"
else :
merchantComission = "-"
worksheet.write(row, 0, (row-1), string_format)
worksheet.write(row, 1, data["submerchantName"], string_format)
worksheet.write(row, 2, data["serviceName"], string_format)
worksheet.write(row, 3, data["approvalDtMin"]+" ~ "+data["approvalDtMax"], string_format)
worksheet.write(row, 4, data["billingDt"], string_format)
worksheet.write(row, 5, data["typeName"], string_format)
worksheet.write(row, 6, data["amount"],money_format)
worksheet.write(row, 7, data["cancelAmount"],money_format)
worksheet.write(row, 8, data["totalAmount"],money_format)
worksheet.write(row, 9, data["payAmount"],money_format)
worksheet.write(row, 10, data["dcAmount"],money_format)
worksheet.write(row, 11, data["billingCommType"], string_format)
worksheet.write(row, 12, data["billingDuration"], string_format)
worksheet.write(row, 13, data["cnt"], string_format)
worksheet.write(row, 14, merchantComission, string_format)
worksheet.write(row, 15, data["commTypeName"], string_format)
worksheet.write(row, 16, data["taxTypeName"], string_format)
worksheet.write(row, 17, data["commision"],money_format)
worksheet.write(row, 18, data["tax"],money_format)
worksheet.write(row, 19, data["commTotal"],money_format)
worksheet.write(row, 20, data["billingAmount"],money_format)
worksheet.write(row, 21, data["bankNm"], string_format)
worksheet.write(row, 22, data["bankHolder"], string_format)
worksheet.write(row, 23, data["bankAccNo"], string_format)
if row >= EXCEL_FILE_MAKE_LIMT_COUNT :
row = 2
fileCnt += 1
fileName = '거래내역_결제_' + datetime.datetime.now().strftime('%Y%m%d') + '_' + str(fileCnt) + '.xlsx'
                    # recreate the file once (DB fetch count * 2) rows have been written
workbook.close()
workbook = xlsxwriter.Workbook(os.path.join(uploads ,setUnicodeEncodeTypeToEucKr(fileName)))
worksheet = workbook.add_worksheet()
money_format = workbook.add_format()
money_format.set_num_format('_- #,##0_-;[red]- #,##0_-;_- "-"_-;_-@_-')
worksheet.write(0, 0, "순번", title_format)
worksheet.write(0, 1, "거래처명", title_format)
worksheet.write(0, 2, "서비스명", title_format)
worksheet.write(0, 3, "정산기간", title_format)
worksheet.write(0, 4, "정산지급일", title_format)
worksheet.write(0, 5, "거래구분", title_format)
worksheet.write(0, 6, "거래금액", title_format)
worksheet.write(0, 7, "거래 취소 금액", title_format)
worksheet.write(0, 8, "총 거래금액", title_format)
worksheet.write(0, 9, "결제금액", title_format)
worksheet.write(0, 10, "할인금액", title_format)
worksheet.write(0, 11, "정산 타입", title_format)
worksheet.write(0, 12, "정산주기", title_format)
worksheet.write(0, 13, "거래건수", title_format)
worksheet.write(0, 14, "정산 수수료", title_format)
worksheet.write(0, 15, "수수료 타입", title_format)
worksheet.write(0, 16, "부가세 타입", title_format)
worksheet.write(0, 17, "수수료", title_format)
worksheet.write(0, 18, "부가세", title_format)
worksheet.write(0, 19, "수수료 합계", title_format)
worksheet.write(0, 20, "정산금액", title_format)
worksheet.write(0, 21, "은행", title_format)
worksheet.write(0, 22, "예금주", title_format)
worksheet.write(0, 23, "계좌번호", title_format)
queryData["offset"] = queryData["offset"] + EXCEL_FILE_DOWNLOAD_COUNT
if len(searchData["resultList"]) < EXCEL_FILE_DOWNLOAD_COUNT :
break
workbook.close()
excelZip = zipfile.ZipFile(os.path.join(uploads ,zipFileName),'w')
for folder, subfolders, files in os.walk(uploads):
for file in files:
if file.endswith('.xlsx'):
excelZip.write(os.path.join(folder ,file), setUnicodeFormatToEucKr(file), compress_type=zipfile.ZIP_DEFLATED)
except Exception as err:
        # add the error message
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0003" , # 진행중
"errMsg" : str(err)
})
jobStatus = 1
print err
finally:
if excelZip != None:
excelZip.close()
if jobStatus == 0 :
putBatchMng({
"batchId" : str(batchId),
"reqId" : queryData['empId'],
"status" : "BAT-0002" , # 진행중
"errMsg" : ""
})
            # add the success message
print "성공"
@billingApi.route('/api/billing/', methods=['POST'])
def billing():
form_data = request.json
billingSeq = getParameter(form_data,"billingSeq")
requestData = {
"refTitle" : getParameter(form_data,"serviceName"), #승인 목록을 검색하기 위한 Keyword
"workType" : "AWRK-0006", #승인 요청 구분(Code)
"reqType" : "AREQ-0001",
"reqEmpId" : session['empId'], #요청자
"apprEmpId" : getParameter(form_data,"apprEmpId"), #승인자
"reqMemo" : "정산명세서 등록", #요청 사유
"keyword" : getParameter(form_data,"serviceName"), #승인 목록을 검색하기 위한 Keyword
"seq" : getParameter(form_data,"seq"), #승인요청 번호
"contentSeq" : getParameter(form_data,"contentSeq"), #승인 data 등록번호
"refId" : billingSeq, #중복승인을 막기 위한 고유 검색 Keyword
"contentData" : json.dumps({
"seq" : billingSeq,
"billingAmount" : getParameter(form_data,"billingAmount"),
"dcAmount" : getParameter(form_data,"dcAmount"),
"etcAmount" : paramEscape(getParameter(form_data,"etcAmount")),
"commTotal" : paramEscape(getParameter(form_data,"commTotal")),
"tax" : getParameter(form_data,"tax"),
"adjustmentAmount" : paramEscape(getParameter(form_data,"adjustmentAmount")),
"differenceAmount" : getParameter(form_data,"differenceAmount"),
"amount" : getParameter(form_data,"amount"),
"commision" : getParameter(form_data,"commision"),
"notMatchedAmount" : getParameter(form_data,"notMatchedAmount"),
"totTransAmount" : getParameter(form_data,"totTransAmount"),
"cancelAmount" : getParameter(form_data,"cancelAmount"),
"payAmount" : getParameter(form_data,"payAmount"),
"commRatio" : getParameter(form_data,"commRatio"),
"descMemo" : getParameter(form_data,"memo")
})
}
    responseResult = postApiData("/approval/request/approval", requestData, API_SERVER_BACKOFFICE)
    return json.dumps(responseResult)
@billingApi.route('/api/billing/', methods=['PUT'])
def putBilling():
form_data = request.json
billingSeq = getParameter(form_data,"billingSeq")
requestData = {
"refTitle" : getParameter(form_data,"serviceName"), #승인 목록을 검색하기 위한 Keyword
"workType" : "AWRK-0006", #승인 요청 구분(Code)
"reqType" : "AREQ-0001",
"reqEmpId" : session['empId'], #요청자
"apprEmpId" : getParameter(form_data,"apprEmpId"), #승인자
"reqMemo" : "정산명세서 등록", #요청 사유
"keyword" : getParameter(form_data,"serviceName"), #승인 목록을 검색하기 위한 Keyword
"seq" : getParameter(form_data,"seq"), #승인요청 번호
"contentSeq" : getParameter(form_data,"contentSeq"), #승인 data 등록번호
"refId" : billingSeq, #중복승인을 막기 위한 고유 검색 Keyword
"contentData" : json.dumps({
"seq" : billingSeq,
"billingAmount" : getParameter(form_data,"billingAmount"),
"dcAmount" : getParameter(form_data,"dcAmount"),
"etcAmount" : paramEscape(getParameter(form_data,"etcAmount")),
"commTotal" : paramEscape(getParameter(form_data,"commTotal")),
"tax" : getParameter(form_data,"tax"),
"adjustmentAmount" : paramEscape(getParameter(form_data,"adjustmentAmount")),
"differenceAmount" : getParameter(form_data,"differenceAmount"),
"amount" : getParameter(form_data,"amount"),
"commision" : getParameter(form_data,"commision"),
"notMatchedAmount" : getParameter(form_data,"notMatchedAmount"),
"totTransAmount" : getParameter(form_data,"totTransAmount"),
"cancelAmount" : getParameter(form_data,"cancelAmount"),
"payAmount" : getParameter(form_data,"payAmount"),
"descMemo" : getParameter(form_data,"memo")
})
}
    responseResult = putApiData("/approval/request/approval", requestData, {}, API_SERVER_BACKOFFICE)
    return json.dumps(responseResult)
|
funcs.py
|
__copyright__ = "Copyright 2013-2016, http://radical.rutgers.edu"
__license__ = "MIT"
import os
import stat
import time
import queue
import threading as mt
import subprocess
import radical.utils as ru
from ... import utils as rpu
from ... import states as rps
from ... import constants as rpc
from .. import LaunchMethod
from .base import AgentExecutingComponent
# ------------------------------------------------------------------------------
#
class FUNCS(AgentExecutingComponent) :
# --------------------------------------------------------------------------
#
def __init__(self, cfg, session):
AgentExecutingComponent.__init__ (self, cfg, session)
self._collector = None
self._terminate = mt.Event()
# --------------------------------------------------------------------------
#
def initialize(self):
self._pwd = os.getcwd()
self.gtod = "%s/gtod" % self._pwd
self.register_input(rps.AGENT_EXECUTING_PENDING,
rpc.AGENT_EXECUTING_QUEUE, self.work)
self.register_output(rps.AGENT_STAGING_OUTPUT_PENDING,
rpc.AGENT_STAGING_OUTPUT_QUEUE)
self.register_publisher (rpc.AGENT_UNSCHEDULE_PUBSUB)
self.register_subscriber(rpc.CONTROL_PUBSUB, self.command_cb)
req_cfg = ru.read_json('funcs_req_queue.cfg')
res_cfg = ru.read_json('funcs_res_queue.cfg')
self._req_queue = ru.zmq.Putter('funcs_req_queue', req_cfg['put'])
self._res_queue = ru.zmq.Getter('funcs_res_queue', res_cfg['get'])
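        # work() pushes FUNC tasks onto the request queue; the per-node function
        # executors return results on the response queue, which the collector
        # thread started below drains.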
self._cancel_lock = ru.RLock()
self._tasks_to_cancel = list()
self._tasks_to_watch = list()
self._watch_queue = queue.Queue ()
self._pid = self._cfg['pid']
# run watcher thread
self._collector = mt.Thread(target=self._collect)
self._collector.daemon = True
self._collector.start()
# we need to launch the executors on all nodes, and use the
# agent_launcher for that
self._launcher = LaunchMethod.create(
name = self._cfg.get('agent_launch_method'),
cfg = self._cfg,
session = self._session)
# now run the func launcher on all nodes
ve = os.environ.get('VIRTUAL_ENV', '')
exe = ru.which('radical-pilot-agent-funcs')
if not exe:
exe = '%s/rp_install/bin/radical-pilot-agent-funcs' % self._pwd
for idx, node in enumerate(self._cfg['rm_info']['node_list']):
uid = 'func_exec.%04d' % idx
pwd = '%s/%s' % (self._pwd, uid)
funcs = {'uid' : uid,
'description': {'executable' : exe,
'arguments' : [pwd, ve],
'cpu_processes': 1,
'environment' : [],
},
'slots' : {'nodes' : [{'name' : node[0],
'uid' : node[1],
'cores' : [[0]],
'gpus' : []
}]
},
'cfg' : {'req_get' : req_cfg['get'],
'res_put' : res_cfg['put']
}
}
self._spawn(self._launcher, funcs)
# --------------------------------------------------------------------------
#
def command_cb(self, topic, msg):
self._log.info('command_cb [%s]: %s', topic, msg)
cmd = msg['cmd']
arg = msg['arg']
if cmd == 'cancel_tasks':
self._log.info("cancel_tasks command (%s)" % arg)
with self._cancel_lock:
self._tasks_to_cancel.extend(arg['uids'])
return True
# --------------------------------------------------------------------------
#
def _spawn(self, launcher, funcs):
# NOTE: see documentation of funcs['sandbox'] semantics in the Task
# class definition.
sandbox = '%s/%s' % (self._pwd, funcs['uid'])
fname = '%s/%s.sh' % (sandbox, funcs['uid'])
cfgname = '%s/%s.cfg' % (sandbox, funcs['uid'])
descr = funcs['description']
rpu.rec_makedir(sandbox)
ru.write_json(funcs.get('cfg'), cfgname)
launch_cmd, hop_cmd = launcher.construct_command(funcs, fname)
if hop_cmd : cmdline = hop_cmd
else : cmdline = fname
with open(fname, "w") as fout:
fout.write('#!/bin/sh\n\n')
# Create string for environment variable setting
fout.write('export RP_SESSION_ID="%s"\n' % self._cfg['sid'])
fout.write('export RP_PILOT_ID="%s"\n' % self._cfg['pid'])
fout.write('export RP_AGENT_ID="%s"\n' % self._cfg['aid'])
fout.write('export RP_SPAWNER_ID="%s"\n' % self.uid)
fout.write('export RP_FUNCS_ID="%s"\n' % funcs['uid'])
fout.write('export RP_GTOD="%s"\n' % self.gtod)
fout.write('export RP_TMP="%s"\n' % self._task_tmp)
# also add any env vars requested in the task description
if descr.get('environment', []):
for key,val in descr['environment'].items():
fout.write('export "%s=%s"\n' % (key, val))
fout.write('\n%s\n\n' % launch_cmd)
fout.write('RETVAL=$?\n')
fout.write("exit $RETVAL\n")
# done writing to launch script, get it ready for execution.
st = os.stat(fname)
os.chmod(fname, st.st_mode | stat.S_IEXEC)
fout = open('%s/%s.out' % (sandbox, funcs['uid']), "w")
ferr = open('%s/%s.err' % (sandbox, funcs['uid']), "w")
self._prof.prof('exec_start', uid=funcs['uid'])
# we really want to use preexec_fn:
# pylint: disable=W1509
funcs['proc'] = subprocess.Popen(args = cmdline,
executable = None,
stdin = None,
stdout = fout,
stderr = ferr,
preexec_fn = os.setsid,
close_fds = True,
shell = True,
cwd = sandbox)
self._prof.prof('exec_ok', uid=funcs['uid'])
# --------------------------------------------------------------------------
#
def work(self, tasks):
if not isinstance(tasks, list):
tasks = [tasks]
self.advance(tasks, rps.AGENT_EXECUTING, publish=True, push=False)
for task in tasks:
assert(task['description']['cpu_process_type'] == 'FUNC')
self._req_queue.put(task)
# --------------------------------------------------------------------------
#
def _collect(self):
while not self._terminate.is_set():
# pull tasks from "funcs_out_queue"
tasks = self._res_queue.get_nowait(1000)
if tasks:
for task in tasks:
task['target_state'] = task['state']
task['pilot'] = self._pid
# self._log.debug('got %s [%s] [%s] [%s]',
# task['uid'], task['state'],
# task['stdout'], task['stderr'])
self.advance(tasks, rps.AGENT_STAGING_OUTPUT_PENDING,
publish=True, push=True)
else:
time.sleep(0.1)
# ------------------------------------------------------------------------------
|
looking-glass-fusion.py
|
#Author-Brian Peiris
#Description-A live viewer for the Looking Glass 3D display
import adsk.core, adsk.fusion, adsk.cam, traceback, datetime
import os
from http import server
from threading import Thread
server_port = 3000
addin_path = os.path.dirname(os.path.abspath(__file__))
log_path = os.path.join(addin_path, "log.txt")
log_file = open(log_path, "w")
def log(msg):
timestamp = datetime.datetime.now().isoformat()
print("{} {}".format(timestamp, msg))
log_file.write("{} {} \n".format(timestamp, msg))
log_file.flush()
files_dir = os.path.join(addin_path, "files")
export_file = os.path.join(files_dir, "export.stl")
log("Export path: {}".format(export_file))
if os.path.exists(export_file):
os.remove(export_file)
class DirHandler(server.SimpleHTTPRequestHandler):
def __init__(self, request, client_address, srv):
try:
super().__init__(request, client_address, srv, directory=files_dir)
except:
log(traceback.format_exc())
dir_server = None
try:
dir_server = server.HTTPServer(("", server_port), DirHandler)
except:
log(traceback.format_exc())
def serve_dir():
log("Starting server at http://localhost:{}".format(server_port))
dir_server.serve_forever()
log("Stopping server")
thread = None
try:
thread = Thread(target=serve_dir)
thread.start()
except:
log(traceback.format_exc())
def exportCurrentDesign(app):
try:
log("Exporting design")
design = app.activeDocument.design
exportManger = design.exportManager
exportOptions = exportManger.createSTLExportOptions(design.rootComponent, export_file)
exportManger.execute(exportOptions)
log("Exported design")
except:
log(traceback.format_exc())
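# Once exported, the STL is served by the directory server above, so with the
# default server_port it can be fetched at http://localhost:3000/export.stl.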
class CommandHandler(adsk.core.ApplicationCommandEventHandler):
def __init__(self, app):
super().__init__()
self.app = app
def notify(self, args):
eventArgs = adsk.core.ApplicationCommandEventArgs.cast(args)
exportCurrentDesign(self.app)
handlers = []
def run(context):
try:
log("Starting addin")
app = adsk.core.Application.get()
ui = app.userInterface
commandTerminatedHandler = CommandHandler(app)
ui.commandTerminated.add(commandTerminatedHandler)
handlers.append(commandTerminatedHandler)
ui.messageBox("looking-glass-fusion running at\nhttp://localhost:{}".format(server_port))
except:
log(traceback.format_exc())
def stop(context):
log("Stopping addin")
if dir_server: dir_server.shutdown()
if thread: thread.join()
|
wait.py
|
# based on example code from https://pymotw.com/2/multiprocessing/basics.html
import multiprocessing
def spawn(num):
print('test!', num)
if __name__ == '__main__':
for i in range(5):
p = multiprocessing.Process(target=spawn, args=(i,))
p.start()
p.join()
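# Note: because start() and join() are called inside the same loop, each child
# process finishes before the next one starts, so the workers run sequentially.
# A minimal sketch (reusing the spawn() defined above) that runs them
# concurrently would start all processes first and join them afterwards:
#
#   procs = [multiprocessing.Process(target=spawn, args=(i,)) for i in range(5)]
#   for p in procs:
#       p.start()
#   for p in procs:
#       p.join()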
|
conftest.py
|
import asyncio
import functools
import json
import os
import threading
import time
import typing
import sys
import pytest
import trio
import trustme
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.serialization import (
BestAvailableEncryption,
Encoding,
PrivateFormat,
load_pem_private_key,
)
import hypercorn.config
import hypercorn.trio
from uvicorn.config import Config
from uvicorn.main import Server
from httpx import URL, AsyncioBackend
from httpx.concurrency.trio import TrioBackend
ENVIRONMENT_VARIABLES = {
"SSL_CERT_FILE",
"SSL_CERT_DIR",
"HTTP_PROXY",
"HTTPS_PROXY",
"ALL_PROXY",
"NO_PROXY",
"SSLKEYLOGFILE",
}
@pytest.fixture(scope="function", autouse=True)
def clean_environ() -> typing.Dict[str, typing.Any]:
"""Keeps os.environ clean for every test without having to mock os.environ"""
original_environ = os.environ.copy()
os.environ.clear()
os.environ.update(
{
k: v
for k, v in original_environ.items()
if k not in ENVIRONMENT_VARIABLES and k.lower() not in ENVIRONMENT_VARIABLES
}
)
yield
os.environ.clear()
os.environ.update(original_environ)
@pytest.fixture(
params=[
pytest.param(AsyncioBackend, marks=pytest.mark.asyncio),
pytest.param(TrioBackend, marks=pytest.mark.trio),
]
)
def backend(request):
backend_cls = request.param
return backend_cls()
async def app(scope, receive, send):
assert scope["type"] == "http"
if scope["path"].startswith("/slow_response"):
await slow_response(scope, receive, send)
elif scope["path"].startswith("/status"):
await status_code(scope, receive, send)
elif scope["path"].startswith("/echo_body"):
await echo_body(scope, receive, send)
elif scope["path"].startswith("/echo_headers"):
await echo_headers(scope, receive, send)
else:
await hello_world(scope, receive, send)
async def hello_world(scope, receive, send):
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def slow_response(scope, receive, send):
delay_ms_str: str = scope["path"].replace("/slow_response/", "")
try:
delay_ms = float(delay_ms_str)
except ValueError:
delay_ms = 100
await asyncio.sleep(delay_ms / 1000.0)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def status_code(scope, receive, send):
status_code = int(scope["path"].replace("/status/", ""))
await send(
{
"type": "http.response.start",
"status": status_code,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
async def echo_body(scope, receive, send):
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": body})
async def echo_headers(scope, receive, send):
body = {}
for name, value in scope.get("headers", []):
body[name.capitalize().decode()] = value.decode()
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"application/json"]],
}
)
await send({"type": "http.response.body", "body": json.dumps(body).encode()})
SERVER_SCOPE = "session"
@pytest.fixture(scope=SERVER_SCOPE)
def cert_authority():
return trustme.CA()
@pytest.fixture(scope=SERVER_SCOPE)
def ca_cert_pem_file(cert_authority):
with cert_authority.cert_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def localhost_cert(cert_authority):
return cert_authority.issue_cert("localhost")
@pytest.fixture(scope=SERVER_SCOPE)
def cert_pem_file(localhost_cert):
with localhost_cert.cert_chain_pems[0].tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_private_key_file(localhost_cert):
with localhost_cert.private_key_pem.tempfile() as tmp:
yield tmp
@pytest.fixture(scope=SERVER_SCOPE)
def cert_encrypted_private_key_file(localhost_cert):
# Deserialize the private key and then reserialize with a password
private_key = load_pem_private_key(
localhost_cert.private_key_pem.bytes(), password=None, backend=default_backend()
)
encrypted_private_key_pem = trustme.Blob(
private_key.private_bytes(
Encoding.PEM,
PrivateFormat.TraditionalOpenSSL,
BestAvailableEncryption(password=b"password"),
)
)
with encrypted_private_key_pem.tempfile() as tmp:
yield tmp
class TestServer(Server):
@property
def url(self) -> URL:
protocol = "https" if self.config.is_ssl else "http"
return URL(f"{protocol}://{self.config.host}:{self.config.port}/")
def install_signal_handlers(self) -> None:
# Disable the default installation of handlers for signals such as SIGTERM,
# because it can only be done in the main thread.
pass
async def serve(self, sockets=None):
self.restart_requested = asyncio.Event()
loop = asyncio.get_event_loop()
tasks = {
loop.create_task(super().serve(sockets=sockets)),
loop.create_task(self.watch_restarts()),
}
await asyncio.wait(tasks)
async def restart(self) -> None:
# Ensure we are in an asyncio environment.
assert asyncio.get_event_loop() is not None
# This may be called from a different thread than the one the server is
# running on. For this reason, we use an event to coordinate with the server
# instead of calling shutdown()/startup() directly.
self.restart_requested.set()
self.started = False
while not self.started:
await asyncio.sleep(0.5)
async def watch_restarts(self):
while True:
if self.should_exit:
return
try:
await asyncio.wait_for(self.restart_requested.wait(), timeout=0.1)
except asyncio.TimeoutError:
continue
self.restart_requested.clear()
await self.shutdown()
await self.startup()
@pytest.fixture
def restart(backend):
"""Restart the running server from an async test function.
This fixture deals with possible differences between the environment of the
test function and that of the server.
"""
async def restart(server):
await backend.run_in_threadpool(AsyncioBackend().run, server.restart)
return restart
def serve_in_thread(server: Server):
thread = threading.Thread(target=server.run)
thread.start()
try:
while not server.started:
time.sleep(1e-3)
yield server
finally:
server.should_exit = True
thread.join()
@pytest.fixture(scope=SERVER_SCOPE)
def server():
config = Config(app=app, lifespan="off", loop="asyncio")
server = TestServer(config=config)
yield from serve_in_thread(server)
@pytest.fixture(scope=SERVER_SCOPE)
def https_server(cert_pem_file, cert_private_key_file):
config = Config(
app=app,
lifespan="off",
ssl_certfile=cert_pem_file,
ssl_keyfile=cert_private_key_file,
host="localhost",
port=8001,
loop="asyncio",
)
server = TestServer(config=config)
yield from serve_in_thread(server)
async def h2_app(scope, receive, send):
assert scope["type"] == "http"
assert scope["http_version"] == "2"
body = b""
more_body = True
while more_body:
message = await receive()
body += message.get("body", b"")
more_body = message.get("more_body", False)
data = {"method": scope["method"], "path": scope["path"], "body": body.decode()}
content = json.dumps(data).encode()
headers = [(b"content-length", b"%d" % len(content))]
await send({"type": "http.response.start", "status": 200, "headers": headers})
await send({"type": "http.response.body", "body": content})
class H2Server:
"""
An HTTP/2 ASGI server class.
This is a wrapper around Hypercorn that matches the parts of the
uvicorn `Server` interface we use in our tests.
"""
def __init__(
self, app: typing.Callable, host: str, port: int, certfile: str, keyfile: str
):
self.app = app
self.config = hypercorn.config.Config()
self.config.bind = [f"{host}:{port}"]
self.config.certfile = certfile
self.config.keyfile = keyfile
self.config.worker_class = "trio"
self.started = False
self.should_exit = False
@property
def url(self) -> URL:
authority = self.config.bind[0]
return URL(f"https://{authority}")
def run(self) -> None:
async def shutdown_trigger() -> None:
while not self.should_exit:
await trio.sleep(0.1)
bound_serve = functools.partial(
hypercorn.trio.serve, shutdown_trigger=shutdown_trigger
)
async def main() -> None:
async with trio.open_nursery() as nursery:
await nursery.start(bound_serve, self.app, self.config)
self.started = True
trio.run(main)
@pytest.fixture(scope=SERVER_SCOPE)
def h2_server(
cert_pem_file: str, cert_private_key_file: str
) -> typing.Iterator[H2Server]:
if sys.version_info < (3, 7):
pytest.skip(reason="Hypercorn requires Python 3.7 or higher")
server = H2Server(
app=h2_app,
host="127.0.0.1",
port=8002,
certfile=cert_pem_file,
keyfile=cert_private_key_file,
)
yield from serve_in_thread(server)
|
python_ls.py
|
# Copyright 2017 Palantir Technologies, Inc.
import logging
import socketserver
import threading
from multiprocessing import dummy as multiprocessing
from pyls_jsonrpc.dispatchers import MethodDispatcher
from pyls_jsonrpc.endpoint import Endpoint
from pyls_jsonrpc.streams import JsonRpcStreamReader, JsonRpcStreamWriter
from . import lsp, _utils, uris
from .config import config
from .workspace import Workspace
log = logging.getLogger(__name__)
LINT_DEBOUNCE_S = 0.5 # 500 ms
PARENT_PROCESS_WATCH_INTERVAL = 10 # 10 s
MAX_WORKERS = 64
PLUGGY_RACE_POOL_SIZE = 5
PYTHON_FILE_EXTENSIONS = ('.py', '.pyi')
CONFIG_FILEs = ('pycodestyle.cfg', 'setup.cfg', 'tox.ini', '.flake8')
class _StreamHandlerWrapper(socketserver.StreamRequestHandler, object):
"""A wrapper class that is used to construct a custom handler class."""
delegate = None
def setup(self):
super(_StreamHandlerWrapper, self).setup()
# pylint: disable=no-member
self.delegate = self.DELEGATE_CLASS(self.rfile, self.wfile)
def handle(self):
self.delegate.start()
def start_tcp_lang_server(bind_addr, port, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
raise ValueError('Handler class must be an instance of PythonLanguageServer')
# Construct a custom wrapper class around the user's handler_class
wrapper_class = type(
handler_class.__name__ + 'Handler',
(_StreamHandlerWrapper,),
{'DELEGATE_CLASS': handler_class}
)
server = socketserver.TCPServer((bind_addr, port), wrapper_class)
server.allow_reuse_address = True
try:
log.info('Serving %s on (%s, %s)', handler_class.__name__, bind_addr, port)
server.serve_forever()
finally:
log.info('Shutting down')
server.server_close()
def start_io_lang_server(rfile, wfile, check_parent_process, handler_class):
if not issubclass(handler_class, PythonLanguageServer):
raise ValueError('Handler class must be an instance of PythonLanguageServer')
log.info('Starting %s IO language server', handler_class.__name__)
server = handler_class(rfile, wfile, check_parent_process)
server.start()
class PythonLanguageServer(MethodDispatcher):
""" Implementation of the Microsoft VSCode Language Server Protocol
https://github.com/Microsoft/language-server-protocol/blob/master/versions/protocol-1-x.md
"""
# pylint: disable=too-many-public-methods,redefined-builtin
def __init__(self, rx, tx, check_parent_process=False):
self.workspace = None
self.config = None
self._pool = None
self._jsonrpc_stream_reader = JsonRpcStreamReader(rx)
self._jsonrpc_stream_writer = JsonRpcStreamWriter(tx)
self._check_parent_process = check_parent_process
self._endpoint = Endpoint(self, self._jsonrpc_stream_writer.write, max_workers=MAX_WORKERS)
self._dispatchers = []
self._shutdown = False
def start(self):
"""Entry point for the server."""
self._jsonrpc_stream_reader.listen(self._endpoint.consume)
def __getitem__(self, item):
"""Override getitem to fallback through multiple dispatchers."""
if self._shutdown and item != 'exit':
# exit is the only allowed method during shutdown
log.debug("Ignoring non-exit method during shutdown: %s", item)
raise KeyError
try:
return super(PythonLanguageServer, self).__getitem__(item)
except KeyError:
# Fallback through extra dispatchers
for dispatcher in self._dispatchers:
try:
return dispatcher[item]
except KeyError:
continue
raise KeyError()
def m_shutdown(self, **_kwargs):
self._shutdown = True
return None
def m_exit(self, **_kwargs):
self._endpoint.shutdown()
self._jsonrpc_stream_reader.close()
self._jsonrpc_stream_writer.close()
def _hook_caller(self, hook_name):
return self.config.plugin_manager.subset_hook_caller(hook_name, self.config.disabled_plugins)
def _hook(self, hook_name, doc_uri=None, **kwargs):
"""Calls hook_name and returns a list of results from all registered handlers"""
doc = self.workspace.get_document(doc_uri) if doc_uri else None
hook_handlers = self._hook_caller(hook_name)
return hook_handlers(config=self.config, workspace=self.workspace, document=doc, **kwargs)
def capabilities(self):
server_capabilities = {
'codeActionProvider': True,
'codeLensProvider': {
'resolveProvider': False, # We may need to make this configurable
},
'completionProvider': {
'resolveProvider': False, # We know everything ahead of time
'triggerCharacters': ['.']
},
'documentFormattingProvider': True,
'documentHighlightProvider': True,
'documentRangeFormattingProvider': True,
'documentSymbolProvider': True,
'definitionProvider': True,
'executeCommandProvider': {
'commands': flatten(self._hook('pyls_commands'))
},
'hoverProvider': True,
'referencesProvider': True,
'renameProvider': True,
'signatureHelpProvider': {
'triggerCharacters': ['(', ',']
},
'textDocumentSync': lsp.TextDocumentSyncKind.INCREMENTAL,
'experimental': merge(self._hook('pyls_experimental_capabilities'))
}
log.info('Server capabilities: %s', server_capabilities)
return server_capabilities
def m_initialize(self, processId=None, rootUri=None, rootPath=None, initializationOptions=None, **_kwargs):
log.debug('Language server initialized with %s %s %s %s', processId, rootUri, rootPath, initializationOptions)
if rootUri is None:
rootUri = uris.from_fs_path(rootPath) if rootPath is not None else ''
self.workspace = Workspace(rootUri, self._endpoint)
self.config = config.Config(rootUri, initializationOptions or {},
processId, _kwargs.get('capabilities', {}))
self._dispatchers = self._hook('pyls_dispatchers')
self._pool = multiprocessing.Pool(PLUGGY_RACE_POOL_SIZE)
self._hook('pyls_initialize')
if self._check_parent_process and processId is not None:
def watch_parent_process(pid):
# exit when the given pid is not alive
if not _utils.is_process_alive(pid):
log.info("parent process %s is not alive", pid)
self.m_exit()
log.debug("parent process %s is still alive", pid)
threading.Timer(PARENT_PROCESS_WATCH_INTERVAL, watch_parent_process, args=[pid]).start()
watching_thread = threading.Thread(target=watch_parent_process, args=(processId,))
watching_thread.daemon = True
watching_thread.start()
# Get our capabilities
return {'capabilities': self.capabilities()}
def m_initialized(self, **_kwargs):
pass
def code_actions(self, doc_uri, range, context):
return flatten(self._hook('pyls_code_actions', doc_uri, range=range, context=context))
def code_lens(self, doc_uri):
return flatten(self._hook('pyls_code_lens', doc_uri))
def completions(self, doc_uri, position):
rope_enabled = self.config.settings()['plugins']['rope_completion']['enabled']
if rope_enabled:
completions = _utils.race_hooks(
self._hook_caller('pyls_completions'),
self._pool,
document=self.workspace.get_document(doc_uri) if doc_uri else None,
position=position,
config=self.config,
workspace=self.workspace
)
else:
completions = self._hook('pyls_completions', doc_uri, position=position)
completions = flatten(completions) if completions else None
return {
'isIncomplete': False,
'items': completions
}
def definitions(self, doc_uri, position):
return flatten(self._hook('pyls_definitions', doc_uri, position=position))
def document_symbols(self, doc_uri):
return flatten(self._hook('pyls_document_symbols', doc_uri))
def execute_command(self, command, arguments):
return self._hook('pyls_execute_command', command=command, arguments=arguments)
def format_document(self, doc_uri):
return self._hook('pyls_format_document', doc_uri)
def format_range(self, doc_uri, range):
return self._hook('pyls_format_range', doc_uri, range=range)
def highlight(self, doc_uri, position):
return flatten(self._hook('pyls_document_highlight', doc_uri, position=position)) or None
def hover(self, doc_uri, position):
return self._hook('pyls_hover', doc_uri, position=position) or {'contents': ''}
@_utils.debounce(LINT_DEBOUNCE_S, keyed_by='doc_uri')
def lint(self, doc_uri, is_saved):
# Since we're debounced, the document may no longer be open
if doc_uri in self.workspace.documents:
self.workspace.publish_diagnostics(
doc_uri,
flatten(self._hook('pyls_lint', doc_uri, is_saved=is_saved))
)
def references(self, doc_uri, position, exclude_declaration):
return flatten(self._hook(
'pyls_references', doc_uri, position=position,
exclude_declaration=exclude_declaration
))
def rename(self, doc_uri, position, new_name):
return self._hook('pyls_rename', doc_uri, position=position, new_name=new_name)
def signature_help(self, doc_uri, position):
return self._hook('pyls_signature_help', doc_uri, position=position)
def m_text_document__did_close(self, textDocument=None, **_kwargs):
self.workspace.rm_document(textDocument['uri'])
def m_text_document__did_open(self, textDocument=None, **_kwargs):
self.workspace.put_document(textDocument['uri'], textDocument['text'], version=textDocument.get('version'))
self._hook('pyls_document_did_open', textDocument['uri'])
self.lint(textDocument['uri'], is_saved=False)
def m_text_document__did_change(self, contentChanges=None, textDocument=None, **_kwargs):
for change in contentChanges:
self.workspace.update_document(
textDocument['uri'],
change,
version=textDocument.get('version')
)
self.lint(textDocument['uri'], is_saved=False)
def m_text_document__did_save(self, textDocument=None, **_kwargs):
self.lint(textDocument['uri'], is_saved=True)
def m_text_document__code_action(self, textDocument=None, range=None, context=None, **_kwargs):
return self.code_actions(textDocument['uri'], range, context)
def m_text_document__code_lens(self, textDocument=None, **_kwargs):
return self.code_lens(textDocument['uri'])
def m_text_document__completion(self, textDocument=None, position=None, **_kwargs):
return self.completions(textDocument['uri'], position)
def m_text_document__definition(self, textDocument=None, position=None, **_kwargs):
return self.definitions(textDocument['uri'], position)
def m_text_document__document_highlight(self, textDocument=None, position=None, **_kwargs):
return self.highlight(textDocument['uri'], position)
def m_text_document__hover(self, textDocument=None, position=None, **_kwargs):
return self.hover(textDocument['uri'], position)
def m_text_document__document_symbol(self, textDocument=None, **_kwargs):
return self.document_symbols(textDocument['uri'])
def m_text_document__formatting(self, textDocument=None, _options=None, **_kwargs):
# For now we're ignoring formatting options.
return self.format_document(textDocument['uri'])
def m_text_document__rename(self, textDocument=None, position=None, newName=None, **_kwargs):
return self.rename(textDocument['uri'], position, newName)
def m_text_document__range_formatting(self, textDocument=None, range=None, _options=None, **_kwargs):
# Again, we'll ignore formatting options for now.
return self.format_range(textDocument['uri'], range)
def m_text_document__references(self, textDocument=None, position=None, context=None, **_kwargs):
exclude_declaration = not context['includeDeclaration']
return self.references(textDocument['uri'], position, exclude_declaration)
def m_text_document__signature_help(self, textDocument=None, position=None, **_kwargs):
return self.signature_help(textDocument['uri'], position)
def m_workspace__did_change_configuration(self, settings=None):
self.config.update((settings or {}).get('pyls', {}))
for doc_uri in self.workspace.documents:
self.lint(doc_uri, is_saved=False)
def m_workspace__did_change_watched_files(self, changes=None, **_kwargs):
changed_py_files = set()
config_changed = False
for d in (changes or []):
if d['uri'].endswith(PYTHON_FILE_EXTENSIONS):
changed_py_files.add(d['uri'])
elif d['uri'].endswith(CONFIG_FILEs):
config_changed = True
if config_changed:
self.config.settings.cache_clear()
elif not changed_py_files:
# Only externally changed python files and lint configs may result in changed diagnostics.
return
for doc_uri in self.workspace.documents:
# Changes in doc_uri are already handled by m_text_document__did_save
if doc_uri not in changed_py_files:
self.lint(doc_uri, is_saved=False)
def m_workspace__execute_command(self, command=None, arguments=None):
return self.execute_command(command, arguments)
def flatten(list_of_lists):
return [item for lst in list_of_lists for item in lst]
def merge(list_of_dicts):
return {k: v for dictionary in list_of_dicts for k, v in dictionary.items()}
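# Usage sketch for the two helpers above (values are illustrative):
#   flatten([[1, 2], [3]]) -> [1, 2, 3]
#   merge([{'a': 1}, {'b': 2}]) -> {'a': 1, 'b': 2}
# Note that merge() lets later dictionaries overwrite keys from earlier ones.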
|
helpers.py
|
"""
Helper functions file for OCS QE
"""
import base64
import datetime
import hashlib
import json
import logging
import os
import re
import statistics
import tempfile
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from subprocess import PIPE, TimeoutExpired, run
from uuid import uuid4
import yaml
from ocs_ci.framework import config
from ocs_ci.ocs import constants, defaults, node, ocp
from ocs_ci.ocs.exceptions import (
CommandFailed, ResourceWrongStatusException,
TimeoutExpiredError, UnavailableBuildException,
UnexpectedBehaviour
)
from ocs_ci.ocs.ocp import OCP
from ocs_ci.ocs.resources import pod, pvc
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.utility import templating
from ocs_ci.utility.retry import retry
from ocs_ci.utility.utils import (
TimeoutSampler,
ocsci_log_path,
run_cmd,
update_container_with_mirrored_image,
)
logger = logging.getLogger(__name__)
def create_unique_resource_name(resource_description, resource_type):
"""
Creates a unique object name by using the resource_description,
resource_type and a random uuid (in hex) as a suffix
Args:
resource_description (str): The user provided object description
resource_type (str): The type of object for which the unique name
will be created. For example: project, pvc, etc
Returns:
str: A unique name
"""
return f"{resource_type}-{resource_description[:23]}-{uuid4().hex}"
def create_resource(do_reload=True, **kwargs):
"""
Create a resource
Args:
do_reload (bool): True for reloading the resource following its creation,
False otherwise
kwargs (dict): Dictionary of the OCS resource
Returns:
OCS: An OCS instance
Raises:
AssertionError: In case of any failure
"""
ocs_obj = OCS(**kwargs)
resource_name = kwargs.get('metadata').get('name')
created_resource = ocs_obj.create(do_reload=do_reload)
assert created_resource, (
f"Failed to create resource {resource_name}"
)
return ocs_obj
def wait_for_resource_state(resource, state, timeout=60):
"""
Wait for a resource to get to a given status
Args:
resource (OCS obj): The resource object
state (str): The status to wait for
timeout (int): Time in seconds to wait
Raises:
ResourceWrongStatusException: In case the resource hasn't
reached the desired state
"""
if (
resource.name == constants.DEFAULT_STORAGECLASS_CEPHFS
or resource.name == constants.DEFAULT_STORAGECLASS_RBD
):
logger.info("Attempt to default default Secret or StorageClass")
return
try:
resource.ocp.wait_for_resource(
condition=state, resource_name=resource.name, timeout=timeout
)
except TimeoutExpiredError:
logger.error(f"{resource.kind} {resource.name} failed to reach {state}")
resource.reload()
raise ResourceWrongStatusException(resource.name, resource.describe())
logger.info(f"{resource.kind} {resource.name} reached state {state}")
def create_pod(
interface_type=None, pvc_name=None,
do_reload=True, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
node_name=None, pod_dict_path=None, sa_name=None, dc_deployment=False,
raw_block_pv=False, raw_block_device=constants.RAW_BLOCK_DEVICE, replica_count=1,
pod_name=None, node_selector=None, command=None, command_args=None,
deploy_pod_status=constants.STATUS_COMPLETED
):
"""
Create a pod
Args:
interface_type (str): The interface type (CephFS, RBD, etc.)
pvc_name (str): The PVC that should be attached to the newly created pod
do_reload (bool): True for reloading the object after creation, False otherwise
namespace (str): The namespace for the new resource creation
node_name (str): The name of specific node to schedule the pod
pod_dict_path (str): YAML path for the pod
sa_name (str): Serviceaccount name
dc_deployment (bool): True if creating pod as deploymentconfig
raw_block_pv (bool): True for creating raw block pv based pod, False otherwise
raw_block_device (str): raw block device for the pod
replica_count (int): Replica count for deployment config
pod_name (str): Name of the pod to create
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
command (list): The command to be executed on the pod
command_args (list): The arguments to be sent to the command running
on the pod
deploy_pod_status (str): Expected status of deploy pod. Applicable
only if dc_deployment is True
Returns:
Pod: A Pod instance
Raises:
AssertionError: In case of any failure
"""
if interface_type == constants.CEPHBLOCKPOOL:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_RBD_POD_YAML
interface = constants.RBD_INTERFACE
else:
pod_dict = pod_dict_path if pod_dict_path else constants.CSI_CEPHFS_POD_YAML
interface = constants.CEPHFS_INTERFACE
if dc_deployment:
pod_dict = pod_dict_path if pod_dict_path else constants.FEDORA_DC_YAML
pod_data = templating.load_yaml(pod_dict)
if not pod_name:
pod_name = create_unique_resource_name(
f'test-{interface}', 'pod'
)
pod_data['metadata']['name'] = pod_name
pod_data['metadata']['namespace'] = namespace
if dc_deployment:
pod_data['metadata']['labels']['app'] = pod_name
pod_data['spec']['template']['metadata']['labels']['name'] = pod_name
pod_data['spec']['replicas'] = replica_count
if pvc_name:
if dc_deployment:
pod_data['spec']['template']['spec']['volumes'][0][
'persistentVolumeClaim'
]['claimName'] = pvc_name
else:
pod_data['spec']['volumes'][0]['persistentVolumeClaim']['claimName'] = pvc_name
if interface_type == constants.CEPHBLOCKPOOL and raw_block_pv:
if pod_dict_path in [constants.FEDORA_DC_YAML, constants.FIO_DC_YAML]:
temp_dict = [
{'devicePath': raw_block_device, 'name': pod_data.get('spec').get(
'template').get('spec').get('volumes')[0].get('name')}
]
if pod_dict_path == constants.FEDORA_DC_YAML:
del pod_data['spec']['template']['spec']['containers'][0]['volumeMounts']
security_context = {'capabilities': {'add': ["SYS_ADMIN"]}}
pod_data['spec']['template']['spec']['containers'][0]['securityContext'] = security_context
pod_data['spec']['template']['spec']['containers'][0]['volumeDevices'] = temp_dict
elif pod_dict_path == constants.NGINX_POD_YAML:
temp_dict = [
{'devicePath': raw_block_device, 'name': pod_data.get('spec').get(
'containers')[0].get('volumeMounts')[0].get('name')}
]
del pod_data['spec']['containers'][0]['volumeMounts']
pod_data['spec']['containers'][0]['volumeDevices'] = temp_dict
else:
pod_data['spec']['containers'][0]['volumeDevices'][0]['devicePath'] = raw_block_device
pod_data['spec']['containers'][0]['volumeDevices'][0]['name'] = pod_data.get('spec').get('volumes')[
0].get('name')
if command:
if dc_deployment:
pod_data['spec']['template']['spec']['containers'][0]['command'] = command
else:
pod_data['spec']['containers'][0]['command'] = command
if command_args:
if dc_deployment:
pod_data['spec']['template']['spec']['containers'][0]['args'] = command_args
else:
pod_data['spec']['containers'][0]['args'] = command_args
if node_name:
if dc_deployment:
pod_data['spec']['template']['spec']['nodeName'] = node_name
else:
pod_data['spec']['nodeName'] = node_name
if node_selector:
if dc_deployment:
pod_data['spec']['template']['spec']['nodeSelector'] = node_selector
else:
pod_data['spec']['nodeSelector'] = node_selector
if sa_name and dc_deployment:
pod_data['spec']['template']['spec']['serviceAccountName'] = sa_name
# overwrite used image (required for disconnected installation)
update_container_with_mirrored_image(pod_data)
# configure http[s]_proxy env variable, if required
try:
if 'http_proxy' in config.ENV_DATA:
if 'containers' in pod_data['spec']:
container = pod_data['spec']['containers'][0]
else:
container = pod_data['spec']['template']['spec']['containers'][0]
if 'env' not in container:
container['env'] = []
container['env'].append({
'name': 'http_proxy',
'value': config.ENV_DATA['http_proxy'],
})
container['env'].append({
'name': 'https_proxy',
'value': config.ENV_DATA.get(
'https_proxy', config.ENV_DATA['http_proxy']
),
})
except KeyError as err:
logging.warning(
"Http(s)_proxy variable wasn't configured, "
"'%s' key not found.", err
)
if dc_deployment:
ocs_obj = create_resource(**pod_data)
logger.info(ocs_obj.name)
assert (ocp.OCP(kind='pod', namespace=namespace)).wait_for_resource(
condition=deploy_pod_status,
resource_name=pod_name + '-1-deploy',
resource_count=0, timeout=360, sleep=3
)
dpod_list = pod.get_all_pods(namespace=namespace)
for dpod in dpod_list:
if '-1-deploy' not in dpod.name:
if pod_name in dpod.name:
return dpod
else:
pod_obj = pod.Pod(**pod_data)
pod_name = pod_data.get('metadata').get('name')
logger.info(f'Creating new Pod {pod_name} for test')
created_resource = pod_obj.create(do_reload=do_reload)
assert created_resource, (
f"Failed to create Pod {pod_name}"
)
return pod_obj
def create_project(project_name=None):
"""
Create a project
Args:
project_name (str): The name for the new project
Returns:
OCP: Project object
"""
namespace = project_name or create_unique_resource_name('test', 'namespace')
project_obj = ocp.OCP(kind='Project', namespace=namespace)
assert project_obj.new_project(namespace), f"Failed to create namespace {namespace}"
return project_obj
def create_multilpe_projects(number_of_project):
"""
Create one or more projects
Args:
number_of_project (int): Number of projects to be created
Returns:
list: List of project objects
"""
project_objs = [create_project() for _ in range(number_of_project)]
return project_objs
def create_secret(interface_type):
"""
Create a secret
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: An OCS instance for the secret
"""
secret_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
secret_data = templating.load_yaml(
constants.CSI_RBD_SECRET_YAML
)
secret_data['stringData']['userID'] = constants.ADMIN_USER
secret_data['stringData']['userKey'] = get_admin_key()
interface = constants.RBD_INTERFACE
elif interface_type == constants.CEPHFILESYSTEM:
secret_data = templating.load_yaml(
constants.CSI_CEPHFS_SECRET_YAML
)
del secret_data['stringData']['userID']
del secret_data['stringData']['userKey']
secret_data['stringData']['adminID'] = constants.ADMIN_USER
secret_data['stringData']['adminKey'] = get_admin_key()
interface = constants.CEPHFS_INTERFACE
secret_data['metadata']['name'] = create_unique_resource_name(
f'test-{interface}', 'secret'
)
secret_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
return create_resource(**secret_data)
def default_ceph_block_pool():
"""
Returns default CephBlockPool
Returns:
default CephBlockPool
"""
return constants.DEFAULT_BLOCKPOOL
def create_ceph_block_pool(pool_name=None, failure_domain=None, verify=True):
"""
Create a Ceph block pool
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
failure_domain (str): Failure domain name
verify (bool): True to verify the pool exists after creation,
False otherwise
Returns:
OCS: An OCS instance for the Ceph block pool
"""
cbp_data = templating.load_yaml(constants.CEPHBLOCKPOOL_YAML)
cbp_data['metadata']['name'] = (
pool_name if pool_name else create_unique_resource_name(
'test', 'cbp'
)
)
cbp_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
cbp_data['spec']['failureDomain'] = failure_domain or get_failure_domin()
cbp_obj = create_resource(**cbp_data)
cbp_obj.reload()
if verify:
assert verify_block_pool_exists(cbp_obj.name), (
f"Block pool {cbp_obj.name} does not exist"
)
return cbp_obj
def create_ceph_file_system(pool_name=None):
"""
Create a Ceph file system
** This method should not be used anymore **
** This method is for internal testing only **
Args:
pool_name (str): The pool name to create
Returns:
OCS: An OCS instance for the Ceph file system
"""
cfs_data = templating.load_yaml(constants.CEPHFILESYSTEM_YAML)
cfs_data['metadata']['name'] = (
pool_name if pool_name else create_unique_resource_name(
'test', 'cfs'
)
)
cfs_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
cfs_data = create_resource(**cfs_data)
cfs_data.reload()
assert validate_cephfilesystem(cfs_data.name), (
f"File system {cfs_data.name} does not exist"
)
return cfs_data
def default_storage_class(
interface_type,
):
"""
Return default storage class based on interface_type
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
Returns:
OCS: Existing StorageClass Instance
"""
if interface_type == constants.CEPHBLOCKPOOL:
base_sc = OCP(
kind='storageclass',
resource_name=constants.DEFAULT_STORAGECLASS_RBD
)
elif interface_type == constants.CEPHFILESYSTEM:
base_sc = OCP(
kind='storageclass',
resource_name=constants.DEFAULT_STORAGECLASS_CEPHFS
)
sc = OCS(**base_sc.data)
return sc
def create_storage_class(
interface_type, interface_name, secret_name,
reclaim_policy=constants.RECLAIM_POLICY_DELETE, sc_name=None,
provisioner=None
):
"""
Create a storage class
** This method should not be used anymore **
** This method is for internal testing only **
Args:
interface_type (str): The type of the interface
(e.g. CephBlockPool, CephFileSystem)
interface_name (str): The name of the interface
secret_name (str): The name of the secret
sc_name (str): The name of storage class to create
reclaim_policy (str): Type of reclaim policy. Defaults to 'Delete'
(eg., 'Delete', 'Retain')
Returns:
OCS: An OCS instance for the storage class
"""
sc_data = dict()
if interface_type == constants.CEPHBLOCKPOOL:
sc_data = templating.load_yaml(
constants.CSI_RBD_STORAGECLASS_YAML
)
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.RBD_INTERFACE
sc_data['provisioner'] = (
provisioner if provisioner else defaults.RBD_PROVISIONER
)
elif interface_type == constants.CEPHFILESYSTEM:
sc_data = templating.load_yaml(
constants.CSI_CEPHFS_STORAGECLASS_YAML
)
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/node-stage-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
interface = constants.CEPHFS_INTERFACE
sc_data['parameters']['fsName'] = get_cephfs_name()
sc_data['provisioner'] = (
provisioner if provisioner else defaults.CEPHFS_PROVISIONER
)
sc_data['parameters']['pool'] = interface_name
sc_data['metadata']['name'] = (
sc_name if sc_name else create_unique_resource_name(
f'test-{interface}', 'storageclass'
)
)
sc_data['metadata']['namespace'] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters'][
'csi.storage.k8s.io/provisioner-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/provisioner-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters'][
'csi.storage.k8s.io/controller-expand-secret-name'
] = secret_name
sc_data['parameters'][
'csi.storage.k8s.io/controller-expand-secret-namespace'
] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['parameters']['clusterID'] = defaults.ROOK_CLUSTER_NAMESPACE
sc_data['reclaimPolicy'] = reclaim_policy
try:
del sc_data['parameters']['userid']
except KeyError:
pass
return create_resource(**sc_data)
def create_pvc(
sc_name, pvc_name=None, namespace=defaults.ROOK_CLUSTER_NAMESPACE,
size=None, do_reload=True, access_mode=constants.ACCESS_MODE_RWO,
volume_mode=None
):
"""
Create a PVC
Args:
sc_name (str): The name of the storage class for the PVC to be
associated with
pvc_name (str): The name of the PVC to create
namespace (str): The namespace for the PVC creation
size (str): Size of pvc to create
do_reload (bool): True for wait for reloading PVC after its creation, False otherwise
access_mode (str): The access mode to be used for the PVC
volume_mode (str): Volume mode for rbd RWX pvc i.e. 'Block'
Returns:
PVC: PVC instance
"""
pvc_data = templating.load_yaml(constants.CSI_PVC_YAML)
pvc_data['metadata']['name'] = (
pvc_name if pvc_name else create_unique_resource_name(
'test', 'pvc'
)
)
pvc_data['metadata']['namespace'] = namespace
pvc_data['spec']['accessModes'] = [access_mode]
pvc_data['spec']['storageClassName'] = sc_name
if size:
pvc_data['spec']['resources']['requests']['storage'] = size
if volume_mode:
pvc_data['spec']['volumeMode'] = volume_mode
ocs_obj = pvc.PVC(**pvc_data)
created_pvc = ocs_obj.create(do_reload=do_reload)
assert created_pvc, f"Failed to create resource {pvc_name}"
return ocs_obj
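# Minimal usage sketch (the storage class name below is illustrative, and it
# assumes constants.STATUS_BOUND is available as the desired PVC state):
#   pvc_obj = create_pvc(sc_name='my-rbd-storageclass', size='5Gi')
#   wait_for_resource_state(pvc_obj, constants.STATUS_BOUND)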
def create_multiple_pvcs(
sc_name, namespace, number_of_pvc=1, size=None, do_reload=False,
access_mode=constants.ACCESS_MODE_RWO
):
"""
Create one or more PVC
Args:
sc_name (str): The name of the storage class to provision the PVCs from
namespace (str): The namespace for the PVCs creation
number_of_pvc (int): Number of PVCs to be created
size (str): The size of the PVCs to create
do_reload (bool): True for wait for reloading PVC after its creation,
False otherwise
access_mode (str): The kind of access mode for PVC
Returns:
list: List of PVC objects
"""
if access_mode == 'ReadWriteMany' and 'rbd' in sc_name:
volume_mode = 'Block'
else:
volume_mode = None
return [
create_pvc(
sc_name=sc_name, size=size, namespace=namespace,
do_reload=do_reload, access_mode=access_mode, volume_mode=volume_mode
) for _ in range(number_of_pvc)
]
def verify_block_pool_exists(pool_name):
"""
Verify if a Ceph block pool exist
Args:
pool_name (str): The name of the Ceph block pool
Returns:
bool: True if the Ceph block pool exists, False otherwise
"""
logger.info(f"Verifying that block pool {pool_name} exists")
ct_pod = pod.get_ceph_tools_pod()
try:
for pools in TimeoutSampler(
60, 3, ct_pod.exec_ceph_cmd, 'ceph osd lspools'
):
logger.info(f'POOLS are {pools}')
for pool in pools:
if pool_name in pool.get('poolname'):
return True
except TimeoutExpiredError:
return False
def get_admin_key():
"""
Fetches admin key secret from Ceph
Returns:
str: The admin key
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd('ceph auth get-key client.admin')
return out['key']
def get_cephfs_data_pool_name():
"""
Fetches ceph fs datapool name from Ceph
Returns:
str: fs datapool name
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd('ceph fs ls')
return out[0]['data_pools'][0]
def validate_cephfilesystem(fs_name):
"""
Verify CephFileSystem exists at Ceph and OCP
Args:
fs_name (str): The name of the Ceph FileSystem
Returns:
bool: True if the CephFileSystem is created on both the Ceph and OCP
sides, False otherwise (the failure cause is logged)
"""
cfs = ocp.OCP(
kind=constants.CEPHFILESYSTEM,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
ct_pod = pod.get_ceph_tools_pod()
ceph_validate = False
ocp_validate = False
result = cfs.get(resource_name=fs_name)
if result.get('metadata').get('name'):
logger.info("Filesystem %s got created from Openshift Side", fs_name)
ocp_validate = True
else:
logger.info(
"Filesystem %s was not create at Openshift Side", fs_name
)
return False
try:
for pools in TimeoutSampler(
60, 3, ct_pod.exec_ceph_cmd, 'ceph fs ls'
):
for out in pools:
result = out.get('name')
if result == fs_name:
logger.info("FileSystem %s got created from Ceph Side", fs_name)
ceph_validate = True
break
else:
logger.error("FileSystem %s was not present at Ceph Side", fs_name)
ceph_validate = False
if ceph_validate:
break
except TimeoutExpiredError:
pass
return ceph_validate and ocp_validate
def get_all_storageclass_names():
"""
Function for getting all storageclass names
Returns:
list: list of storageclass names
"""
sc_obj = ocp.OCP(
kind=constants.STORAGECLASS,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = sc_obj.get()
sample = result['items']
storageclass = [
item.get('metadata').get('name') for item in sample if (
(item.get('metadata').get('name') not in constants.IGNORE_SC_GP2)
and (item.get('metadata').get('name') not in constants.IGNORE_SC_FLEX)
)
]
return storageclass
def delete_storageclasses(sc_objs):
""""
Function for Deleting storageclasses
Args:
sc_objs (list): List of SC objects for deletion
Returns:
bool: True if deletion is successful
"""
for sc in sc_objs:
logger.info("Deleting StorageClass with name %s", sc.name)
sc.delete()
return True
def get_cephblockpool_names():
"""
Function for getting all CephBlockPool
Returns:
list: list of cephblockpool name
"""
pool_obj = ocp.OCP(
kind=constants.CEPHBLOCKPOOL,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = pool_obj.get()
sample = result['items']
pool_list = [
item.get('metadata').get('name') for item in sample
]
return pool_list
def delete_cephblockpools(cbp_objs):
"""
Function for deleting CephBlockPool
Args:
cbp_objs (list): List of CBP objects for deletion
Returns:
bool: True if deletion of CephBlockPool is successful
"""
for cbp in cbp_objs:
logger.info("Deleting CephBlockPool with name %s", cbp.name)
cbp.delete()
return True
def get_cephfs_name():
"""
Function to retrieve CephFS name
Returns:
str: Name of CFS
"""
cfs_obj = ocp.OCP(
kind=constants.CEPHFILESYSTEM,
namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
result = cfs_obj.get()
return result['items'][0].get('metadata').get('name')
def pull_images(image_name):
"""
Function to pull images on all nodes
Args:
image_name (str): Name of the container image to be pulled
Returns: None
"""
node_objs = node.get_node_objs(get_worker_nodes())
for node_obj in node_objs:
logging.info(f'pulling image "{image_name}" on node {node_obj.name}')
assert node_obj.ocp.exec_oc_debug_cmd(
node_obj.name, cmd_list=[f'podman pull {image_name}']
)
def run_io_with_rados_bench(**kw):
""" A task for radosbench
Runs radosbench command on specified pod . If parameters are
not provided task assumes few default parameters.This task
runs command in synchronous fashion.
Args:
**kw: Needs a dictionary of various radosbench parameters.
ex: pool_name:pool
pg_num:number of pgs for pool
op: type of operation {read, write}
cleanup: True OR False
Returns:
ret: return value of radosbench command
"""
logger.info("Running radosbench task")
ceph_pods = kw.get('ceph_pods') # list of pod objects of ceph cluster
config = kw.get('config')
role = config.get('role', 'client')
clients = [cpod for cpod in ceph_pods if role in cpod.roles]
idx = config.get('idx', 0)
client = clients[idx]
op = config.get('op', 'write')
cleanup = ['--no-cleanup', '--cleanup'][config.get('cleanup', True)]
pool = config.get('pool')
block = str(config.get('size', 4 << 20))
time = config.get('time', 120)
time = str(time)
rados_bench = (
f"rados --no-log-to-stderr "
f"-b {block} "
f"-p {pool} "
f"bench "
f"{time} "
f"{op} "
f"{cleanup} "
)
try:
ret = client.exec_ceph_cmd(ceph_cmd=rados_bench)
except CommandFailed as ex:
logger.error(f"Rados bench failed\n Error is: {ex}")
return False
logger.info(ret)
logger.info("Finished radosbench")
return ret
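# A minimal call sketch (pod list and pool name are illustrative; the pods are
# expected to expose .roles and exec_ceph_cmd as used above):
#   run_io_with_rados_bench(
#       ceph_pods=ceph_pods,
#       config={'pool': 'test-pool', 'op': 'write', 'time': 60, 'cleanup': True}
#   )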
def get_all_pvs():
"""
Gets all pv in openshift-storage namespace
Returns:
dict: Dict of all pv in openshift-storage namespace
"""
ocp_pv_obj = ocp.OCP(
kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
return ocp_pv_obj.get()
# TODO: revert counts of tries and delay,BZ 1726266
@retry(AssertionError, tries=20, delay=10, backoff=1)
def validate_pv_delete(pv_name):
"""
validates if pv is deleted after pvc deletion
Args:
pv_name (str): Name of the PV to validate
Returns:
bool: True if deletion is successful
Raises:
AssertionError: If pv is not deleted
"""
ocp_pv_obj = ocp.OCP(
kind=constants.PV, namespace=defaults.ROOK_CLUSTER_NAMESPACE
)
try:
if ocp_pv_obj.get(resource_name=pv_name):
msg = f"{constants.PV} {pv_name} is not deleted after PVC deletion"
raise AssertionError(msg)
except CommandFailed:
return True
def create_pods(pvc_objs, pod_factory, interface, pods_for_rwx=1, status=""):
"""
Create pods
Args:
pvc_objs (list): List of ocs_ci.ocs.resources.pvc.PVC instances
pod_factory (function): pod_factory function
interface (int): Interface type
pods_for_rwx (int): Number of pods to be created if access mode of
PVC is RWX
status (str): If provided, wait for desired state of each pod before
creating next one
Returns:
list: list of Pod objects
"""
pod_objs = []
for pvc_obj in pvc_objs:
volume_mode = getattr(
pvc_obj, 'volume_mode', pvc_obj.get()['spec']['volumeMode']
)
access_mode = getattr(
pvc_obj, 'access_mode', pvc_obj.get_pvc_access_mode
)
if volume_mode == 'Block':
pod_dict = constants.CSI_RBD_RAW_BLOCK_POD_YAML
raw_block_pv = True
else:
raw_block_pv = False
pod_dict = ''
if access_mode == constants.ACCESS_MODE_RWX:
pod_obj_rwx = [pod_factory(
interface=interface, pvc=pvc_obj, status=status,
pod_dict_path=pod_dict, raw_block_pv=raw_block_pv
) for _ in range(1, pods_for_rwx)]
pod_objs.extend(pod_obj_rwx)
pod_obj = pod_factory(
interface=interface, pvc=pvc_obj, status=status,
pod_dict_path=pod_dict, raw_block_pv=raw_block_pv
)
pod_objs.append(pod_obj)
return pod_objs
def create_build_from_docker_image(
image_name,
install_package,
namespace,
source_image='centos',
source_image_label='latest'
):
"""
Allows to create a build config using a Dockerfile specified as an argument
For example, oc new-build -D $'FROM centos:7\nRUN yum install -y httpd',
creates a build with 'httpd' installed
Args:
image_name (str): Name of the image to be created
source_image (str): Source image to build docker image from,
Defaults to Centos as base image
namespace (str): project where build config should be created
source_image_label (str): Tag to use along with the image name,
Defaults to 'latest'
install_package (str): package to install over the base image
Returns:
OCP (obj): Returns the OCP object for the image
Raises:
UnavailableBuildException: If build creation fails
"""
base_image = source_image + ':' + source_image_label
cmd = f'yum install -y {install_package}'
if 'http_proxy' in config.ENV_DATA:
http_proxy = config.ENV_DATA['http_proxy']
https_proxy = config.ENV_DATA.get(
'https_proxy', http_proxy
)
cmd = (
f"http_proxy={http_proxy} https_proxy={https_proxy} {cmd}"
)
docker_file = (
f"FROM {base_image}\n "
f" RUN {cmd}\n"
f"CMD tail -f /dev/null"
)
command = f"new-build -D $\'{docker_file}\' --name={image_name}"
kubeconfig = os.getenv('KUBECONFIG')
oc_cmd = f"oc -n {namespace} "
if kubeconfig:
oc_cmd += f"--kubeconfig {kubeconfig} "
oc_cmd += command
logger.info(f'Running command {oc_cmd}')
result = run(
oc_cmd,
stdout=PIPE,
stderr=PIPE,
timeout=15,
shell=True
)
if result.stderr.decode():
raise UnavailableBuildException(
f'Build creation failed with error: {result.stderr.decode()}'
)
out = result.stdout.decode()
logger.info(out)
if 'Success' in out:
# Build becomes ready once build pod goes into Completed state
pod_obj = OCP(kind='Pod', resource_name=image_name)
if pod_obj.wait_for_resource(
condition='Completed',
resource_name=f'{image_name}' + '-1-build',
timeout=300,
sleep=30
):
logger.info(f'build {image_name} ready')
set_image_lookup(image_name)
logger.info(f'image {image_name} can now be consumed')
image_stream_obj = OCP(
kind='ImageStream', resource_name=image_name
)
return image_stream_obj
else:
raise UnavailableBuildException('Build creation failed')
def set_image_lookup(image_name):
"""
Function to enable lookup, which allows reference to the image stream tag
in the image field of the object. Example,
$ oc set image-lookup mysql
$ oc run mysql --image=mysql
Args:
image_name (str): Name of the image stream to pull
the image locally
Returns:
str: output of set image-lookup command
"""
ocp_obj = ocp.OCP(kind='ImageStream')
command = f'set image-lookup {image_name}'
logger.info(f'image lookup for image "{image_name}" is set')
status = ocp_obj.exec_oc_cmd(command)
return status
def get_worker_nodes():
"""
Fetches all worker nodes.
Returns:
list: List of names of worker nodes
"""
label = 'node-role.kubernetes.io/worker'
ocp_node_obj = ocp.OCP(kind=constants.NODE)
nodes = ocp_node_obj.get(selector=label).get('items')
worker_nodes_list = [node.get('metadata').get('name') for node in nodes]
return worker_nodes_list
def get_master_nodes():
"""
Fetches all master nodes.
Returns:
list: List of names of master nodes
"""
label = 'node-role.kubernetes.io/master'
ocp_node_obj = ocp.OCP(kind=constants.NODE)
nodes = ocp_node_obj.get(selector=label).get('items')
master_nodes_list = [node.get('metadata').get('name') for node in nodes]
return master_nodes_list
def get_start_creation_time(interface, pvc_name):
"""
Get the starting creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: Start time of PVC creation
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the starting time for the PVC provisioning
start = [
i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
]
start = start[0].split(' ')[1]
return datetime.datetime.strptime(start, format)
def get_end_creation_time(interface, pvc_name):
"""
Get the ending creation time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
datetime object: End time of PVC creation
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the end time for the PVC provisioning
end = [
i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
]
end = end[0].split(' ')[1]
return datetime.datetime.strptime(end, format)
def measure_pvc_creation_time(interface, pvc_name):
"""
Measure PVC creation time based on logs
Args:
interface (str): The interface backed the PVC
pvc_name (str): Name of the PVC for creation time measurement
Returns:
float: Creation time for the PVC
"""
start = get_start_creation_time(interface=interface, pvc_name=pvc_name)
end = get_end_creation_time(interface=interface, pvc_name=pvc_name)
total = end - start
return total.total_seconds()
def measure_pvc_creation_time_bulk(interface, pvc_name_list, wait_time=60):
"""
Measure PVC creation time of bulk PVC based on logs.
Args:
interface (str): The interface backed the PVC
pvc_name_list (list): List of PVC Names for measuring creation time
wait_time (int): Seconds to wait before collecting CSI log
Returns:
pvc_dict (dict): Dictionary of pvc_name with creation time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# due to some delay in CSI log generation added wait
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
pvc_dict = dict()
format = '%H:%M:%S.%f'
for pvc_name in pvc_name_list:
# Extract the starting time for the PVC provisioning
start = [
i for i in logs if re.search(f"provision.*{pvc_name}.*started", i)
]
start = start[0].split(' ')[1]
start_time = datetime.datetime.strptime(start, format)
# Extract the end time for the PVC provisioning
end = [
i for i in logs if re.search(f"provision.*{pvc_name}.*succeeded", i)
]
end = end[0].split(' ')[1]
end_time = datetime.datetime.strptime(end, format)
total = end_time - start_time
pvc_dict[pvc_name] = total.total_seconds()
return pvc_dict
def measure_pv_deletion_time_bulk(interface, pv_name_list, wait_time=60):
"""
Measure PV deletion time of bulk PV, based on logs.
Args:
interface (str): The interface backed the PV
pv_name_list (list): List of PV Names for measuring deletion time
wait_time (int): Seconds to wait before collecting CSI log
Returns:
pv_dict (dict): Dictionary of pv_name with deletion time.
"""
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# due to some delay in CSI log generation added wait
time.sleep(wait_time)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
pv_dict = dict()
format = '%H:%M:%S.%f'
for pv_name in pv_name_list:
# Extract the deletion start time for the PV
start = [
i for i in logs if re.search(f"delete \"{pv_name}\": started", i)
]
start = start[0].split(' ')[1]
start_time = datetime.datetime.strptime(start, format)
# Extract the deletion end time for the PV
end = [
i for i in logs if re.search(f"delete \"{pv_name}\": succeeded", i)
]
end = end[0].split(' ')[1]
end_time = datetime.datetime.strptime(end, format)
total = end_time - start_time
pv_dict[pv_name] = total.total_seconds()
return pv_dict
def get_start_deletion_time(interface, pv_name):
"""
Get the starting deletion time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PV for deletion time measurement
Returns:
datetime object: Start time of PVC deletion
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the start time for the PV deletion
start = [
i for i in logs if re.search(f"delete \"{pv_name}\": started", i)
]
start = start[0].split(' ')[1]
return datetime.datetime.strptime(start, format)
def get_end_deletion_time(interface, pv_name):
"""
Get the ending deletion time of a PVC based on provisioner logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PV for deletion time measurement
Returns:
datetime object: End time of PVC deletion
"""
format = '%H:%M:%S.%f'
# Get the correct provisioner pod based on the interface
pod_name = pod.get_csi_provisioner_pod(interface)
# get the logs from the csi-provisioner containers
logs = pod.get_pod_logs(pod_name[0], 'csi-provisioner')
logs += pod.get_pod_logs(pod_name[1], 'csi-provisioner')
logs = logs.split("\n")
# Extract the ending time for the PV deletion
end = [
i for i in logs if re.search(f"delete \"{pv_name}\": succeeded", i)
]
end = end[0].split(' ')[1]
return datetime.datetime.strptime(end, format)
def measure_pvc_deletion_time(interface, pv_name):
"""
Measure PVC deletion time based on logs
Args:
interface (str): The interface backed the PVC
pv_name (str): Name of the PV for deletion time measurement
Returns:
float: Deletion time for the PVC
"""
start = get_start_deletion_time(interface=interface, pv_name=pv_name)
end = get_end_deletion_time(interface=interface, pv_name=pv_name)
total = end - start
return total.total_seconds()
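# Hedged usage sketch (assumption, not part of the original module): the PV name
# is usually taken from the bound PVC before it is deleted, then passed here once
# the deletion has been issued.
# pv_name = pvc_obj.backed_pv   # hypothetical attribute exposed by the caller's PVC object
# pvc_obj.delete()
# deletion_time = measure_pvc_deletion_time(constants.CEPHBLOCKPOOL, pv_name)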
def pod_start_time(pod_obj):
"""
Function to measure time taken for container(s) to get into running state
by measuring the difference between container's start time (when container
went into running state) and started time (when container was actually
started)
Args:
pod_obj(obj): pod object to measure start time
Returns:
containers_start_time(dict):
Returns the name and start time of container(s) in a pod
"""
time_format = '%Y-%m-%dT%H:%M:%SZ'
containers_start_time = {}
start_time = pod_obj.data['status']['startTime']
start_time = datetime.datetime.strptime(start_time, time_format)
for container in range(len(pod_obj.data['status']['containerStatuses'])):
started_time = pod_obj.data[
'status']['containerStatuses'][container]['state'][
'running']['startedAt']
started_time = datetime.datetime.strptime(started_time, time_format)
container_name = pod_obj.data[
'status']['containerStatuses'][container]['name']
container_start_time = (started_time - start_time).seconds
containers_start_time[container_name] = container_start_time
return containers_start_time
def get_default_storage_class():
"""
Get the default StorageClass(es)
Returns:
list: default StorageClass(es) list
"""
default_sc_obj = ocp.OCP(kind='StorageClass')
storage_classes = default_sc_obj.get().get('items')
storage_classes = [
sc for sc in storage_classes if 'annotations' in sc.get('metadata')
]
return [
sc.get('metadata').get('name') for sc in storage_classes if sc.get(
'metadata'
).get('annotations').get(
'storageclass.kubernetes.io/is-default-class'
) == 'true'
]
def change_default_storageclass(scname):
"""
Change the default StorageClass to the given SC name
Args:
scname (str): StorageClass name
Returns:
bool: True on success
"""
default_sc = get_default_storage_class()
ocp_obj = ocp.OCP(kind='StorageClass')
if default_sc:
# Change the existing default Storageclass annotation to false
patch = " '{\"metadata\": {\"annotations\":" \
"{\"storageclass.kubernetes.io/is-default-class\"" \
":\"false\"}}}' "
patch_cmd = f"patch storageclass {default_sc} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
# Change the new storageclass to default
patch = " '{\"metadata\": {\"annotations\":" \
"{\"storageclass.kubernetes.io/is-default-class\"" \
":\"true\"}}}' "
patch_cmd = f"patch storageclass {scname} -p" + patch
ocp_obj.exec_oc_cmd(command=patch_cmd)
return True
def is_volume_present_in_backend(interface, image_uuid, pool_name=None):
"""
Check whether Image/Subvolume is present in the backend.
Args:
interface (str): The interface backed the PVC
image_uuid (str): Part of VolID which represents
corresponding image/subvolume in backend
eg: oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'
Output is the CSI generated VolID and looks like:
'0001-000c-rook-cluster-0000000000000001-
f301898c-a192-11e9-852a-1eeeb6975c91' where
image_uuid is 'f301898c-a192-11e9-852a-1eeeb6975c91'
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
Returns:
bool: True if volume is present and False if volume is not present
"""
ct_pod = pod.get_ceph_tools_pod()
if interface == constants.CEPHBLOCKPOOL:
valid_error = [f"error opening image csi-vol-{image_uuid}"]
cmd = f"rbd info -p {pool_name} csi-vol-{image_uuid}"
if interface == constants.CEPHFILESYSTEM:
valid_error = [
f"Subvolume 'csi-vol-{image_uuid}' not found",
f"subvolume 'csi-vol-{image_uuid}' does not exist"
]
cmd = (
f"ceph fs subvolume getpath {defaults.CEPHFILESYSTEM_NAME}"
f" csi-vol-{image_uuid} csi"
)
try:
ct_pod.exec_ceph_cmd(ceph_cmd=cmd, format='json')
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} exists "
f"in backend"
)
return True
except CommandFailed as ecf:
assert any([error in str(ecf) for error in valid_error]), (
f"Error occurred while verifying volume is present in backend: "
f"{str(ecf)} ImageUUID: {image_uuid}. Interface type: {interface}"
)
logger.info(
f"Volume corresponding to uuid {image_uuid} does not exist "
f"in backend"
)
return False
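# Hedged helper sketch (assumption, not in the original module): deriving the
# image_uuid argument from a CSI volumeHandle of the documented form
# '0001-000c-rook-cluster-0000000000000001-<uuid>', where the UUID is the
# trailing 36-character suffix.
def _example_image_uuid_from_volume_handle(volume_handle):
    """Return the backing image/subvolume UUID from a CSI volumeHandle."""
    return volume_handle[-36:]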
def verify_volume_deleted_in_backend(
interface, image_uuid, pool_name=None, timeout=180
):
"""
Ensure that Image/Subvolume is deleted in the backend.
Args:
interface (str): The interface backed the PVC
image_uuid (str): Part of VolID which represents
corresponding image/subvolume in backend
eg: oc get pv/<volumeName> -o jsonpath='{.spec.csi.volumeHandle}'
Output is the CSI generated VolID and looks like:
'0001-000c-rook-cluster-0000000000000001-
f301898c-a192-11e9-852a-1eeeb6975c91' where
image_uuid is 'f301898c-a192-11e9-852a-1eeeb6975c91'
pool_name (str): Name of the rbd-pool if interface is CephBlockPool
timeout (int): Wait time for the volume to be deleted.
Returns:
bool: True if volume is deleted before timeout.
False if volume is not deleted.
"""
try:
for ret in TimeoutSampler(
timeout, 2, is_volume_present_in_backend, interface=interface,
image_uuid=image_uuid, pool_name=pool_name
):
if not ret:
break
logger.info(
f"Verified: Volume corresponding to uuid {image_uuid} is deleted "
f"in backend"
)
return True
except TimeoutExpiredError:
logger.error(
f"Volume corresponding to uuid {image_uuid} is not deleted "
f"in backend"
)
# Log 'ceph progress' and 'ceph rbd task list' for debugging purpose
ct_pod = pod.get_ceph_tools_pod()
ct_pod.exec_ceph_cmd('ceph progress json', format=None)
ct_pod.exec_ceph_cmd('ceph rbd task list')
return False
def create_serviceaccount(namespace):
"""
Create a Serviceaccount
Args:
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
service_account_data = templating.load_yaml(
constants.SERVICE_ACCOUNT_YAML
)
service_account_data['metadata']['name'] = create_unique_resource_name(
'sa', 'serviceaccount'
)
service_account_data['metadata']['namespace'] = namespace
return create_resource(**service_account_data)
def get_serviceaccount_obj(sa_name, namespace):
"""
Get serviceaccount obj
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
OCS: An OCS instance for the service_account
"""
ocp_sa_obj = ocp.OCP(kind=constants.SERVICE_ACCOUNT, namespace=namespace)
try:
sa_dict = ocp_sa_obj.get(resource_name=sa_name)
return OCS(**sa_dict)
except CommandFailed:
logger.error("ServiceAccount not found in specified namespace")
def validate_scc_policy(sa_name, namespace):
"""
Validate that the serviceaccount is added to the privileged scc
Args:
sa_name (str): Service Account name
namespace (str): The namespace for the serviceaccount creation
Returns:
bool: True if sa_name is present in the privileged scc, else False
"""
sa_name = f"system:serviceaccount:{namespace}:{sa_name}"
logger.info(sa_name)
ocp_scc_obj = ocp.OCP(kind=constants.SCC, namespace=namespace)
scc_dict = ocp_scc_obj.get(resource_name=constants.PRIVILEGED)
scc_users_list = scc_dict.get('users')
for scc_user in scc_users_list:
if scc_user == sa_name:
return True
return False
def add_scc_policy(sa_name, namespace):
"""
Adding ServiceAccount to scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy creation
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy add-scc-to-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False
)
logger.info(out)
def remove_scc_policy(sa_name, namespace):
"""
Removing ServiceAccount from scc privileged
Args:
sa_name (str): ServiceAccount name
namespace (str): The namespace for the scc_policy deletion
"""
ocp = OCP()
out = ocp.exec_oc_cmd(
command=f"adm policy remove-scc-from-user privileged system:serviceaccount:{namespace}:{sa_name}",
out_yaml_format=False
)
logger.info(out)
def craft_s3_command(cmd, mcg_obj=None, api=False):
"""
Crafts the AWS CLI S3 command including the
login credentials and command to be run
Args:
mcg_obj: An MCG object containing the MCG S3 connection credentials
cmd: The AWSCLI command to run
api: True if the call is for s3api, false if s3
Returns:
str: The crafted command, ready to be executed on the pod
"""
api = 'api' if api else ''
if mcg_obj:
base_command = (
f'sh -c "AWS_CA_BUNDLE={constants.SERVICE_CA_CRT_AWSCLI_PATH} '
f'AWS_ACCESS_KEY_ID={mcg_obj.access_key_id} '
f'AWS_SECRET_ACCESS_KEY={mcg_obj.access_key} '
f'AWS_DEFAULT_REGION={mcg_obj.region} '
f'aws s3{api} '
f'--endpoint={mcg_obj.s3_internal_endpoint} '
)
string_wrapper = '"'
else:
base_command = (
f"aws s3{api} --no-sign-request "
)
string_wrapper = ''
return f"{base_command}{cmd}{string_wrapper}"
def wait_for_resource_count_change(
func_to_use, previous_num, namespace, change_type='increase',
min_difference=1, timeout=20, interval=2, **func_kwargs
):
"""
Wait for a change in total count of PVC or pod
Args:
func_to_use (function): Function to be used to fetch resource info
Supported functions: pod.get_all_pvcs(), pod.get_all_pods()
previous_num (int): Previous number of pods/PVCs for comparison
namespace (str): Name of the namespace
change_type (str): Type of change to check. Accepted values are
'increase' and 'decrease'. Default is 'increase'.
min_difference (int): Minimum required difference in PVC/pod count
timeout (int): Maximum wait time in seconds
interval (int): Time in seconds to wait between consecutive checks
Returns:
True if difference in count is greater than or equal to
'min_difference'. False in case of timeout.
"""
try:
for sample in TimeoutSampler(
timeout, interval, func_to_use, namespace, **func_kwargs
):
if func_to_use == pod.get_all_pods:
current_num = len(sample)
else:
current_num = len(sample['items'])
if change_type == 'increase':
count_diff = current_num - previous_num
else:
count_diff = previous_num - current_num
if count_diff >= min_difference:
return True
except TimeoutExpiredError:
return False
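# Hedged usage sketch (assumption): block until at least 3 new PVCs appear in a
# namespace after a bulk creation, using the helper above.
# increased = wait_for_resource_count_change(
#     func_to_use=pod.get_all_pvcs, previous_num=10, namespace='my-namespace',
#     change_type='increase', min_difference=3, timeout=120, interval=5
# )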
def verify_pv_mounted_on_node(node_pv_dict):
"""
Check if mount point of a PV exists on a node
Args:
node_pv_dict (dict): Node to PV list mapping
eg: {'node1': ['pv1', 'pv2', 'pv3'], 'node2': ['pv4', 'pv5']}
Returns:
dict: Node to existing PV list mapping
eg: {'node1': ['pv1', 'pv3'], 'node2': ['pv5']}
"""
existing_pvs = {}
for node_name, pvs in node_pv_dict.items():
cmd = f'oc debug nodes/{node_name} -- df'
df_on_node = run_cmd(cmd)
existing_pvs[node_name] = []
for pv_name in pvs:
if f"/pv/{pv_name}/" in df_on_node:
existing_pvs[node_name].append(pv_name)
return existing_pvs
def converge_lists(list_to_converge):
"""
Function to flatten a list of lists (e.g. results collected from future objects)
Args:
list_to_converge (list): arg list of lists, eg: [[1,2],[3,4]]
Returns:
list (list): return converged list eg: [1,2,3,4]
"""
return [item for sublist in list_to_converge for item in sublist]
def create_multiple_pvc_parallel(
sc_obj, namespace, number_of_pvc, size, access_modes
):
"""
Function to create multiple PVCs in parallel using threads
Function will create PVCs based on the available access modes
Args:
sc_obj (str): Storage Class object
namespace (str): The namespace for creating pvc
number_of_pvc (int): Number of PVCs to be created
size (str): size of the pvc eg: '10Gi'
access_modes (list): List of access modes for PVC creation
Returns:
pvc_objs_list (list): List of pvc objs created in function
"""
obj_status_list, result_lists = ([] for i in range(2))
with ThreadPoolExecutor() as executor:
for mode in access_modes:
result_lists.append(
executor.submit(
create_multiple_pvcs, sc_name=sc_obj.name,
namespace=namespace, number_of_pvc=number_of_pvc,
access_mode=mode, size=size)
)
result_list = [result.result() for result in result_lists]
pvc_objs_list = converge_lists(result_list)
# Check for all the pvcs in Bound state
with ThreadPoolExecutor() as executor:
for objs in pvc_objs_list:
obj_status_list.append(
executor.submit(wait_for_resource_state, objs, 'Bound', 90)
)
if False in [obj.result() for obj in obj_status_list]:
raise TimeoutExpiredError
return pvc_objs_list
def create_pods_parallel(
pvc_list, namespace, interface, pod_dict_path=None, sa_name=None, raw_block_pv=False,
dc_deployment=False, node_selector=None
):
"""
Function to create pods in parallel
Args:
pvc_list (list): List of pvcs to be attached in pods
namespace (str): The namespace for creating pod
interface (str): The interface backed the PVC
pod_dict_path (str): pod_dict_path for yaml
sa_name (str): sa_name for providing permission
raw_block_pv (bool): Either RAW block or not
dc_deployment (bool): Either DC deployment or not
node_selector (dict): dict of key-value pair to be used for nodeSelector field
eg: {'nodetype': 'app-pod'}
Returns:
pod_objs (list): Returns list of pods created
"""
future_pod_objs = []
# Use a 300 sec wait time since, in scale tests, once the setup has more
# pods the time taken for a pod to come up depends on the resources available
wait_time = 300
if raw_block_pv and not pod_dict_path:
pod_dict_path = constants.CSI_RBD_RAW_BLOCK_POD_YAML
with ThreadPoolExecutor() as executor:
for pvc_obj in pvc_list:
future_pod_objs.append(executor.submit(
create_pod, interface_type=interface,
pvc_name=pvc_obj.name, do_reload=False, namespace=namespace,
raw_block_pv=raw_block_pv, pod_dict_path=pod_dict_path,
sa_name=sa_name, dc_deployment=dc_deployment, node_selector=node_selector
))
pod_objs = [pvc_obj.result() for pvc_obj in future_pod_objs]
# Check that all the pods are in Running state
# The pod creation above does not wait for pods to be created because threads are used
with ThreadPoolExecutor() as executor:
for obj in pod_objs:
future_pod_objs.append(
executor.submit(wait_for_resource_state, obj, 'Running', timeout=wait_time)
)
# If pods not up raise exception/failure
if False in [obj.result() for obj in future_pod_objs]:
raise TimeoutExpiredError
return pod_objs
def delete_objs_parallel(obj_list):
"""
Function to delete objs specified in list
Args:
obj_list(list): List can be obj of pod, pvc, etc
Returns:
bool: True if obj deleted else False
"""
threads = list()
for obj in obj_list:
process = threading.Thread(target=obj.delete)
process.start()
threads.append(process)
for process in threads:
process.join()
return True
def memory_leak_analysis(median_dict):
"""
Function to analyze memory leak after execution of a test case
Memory leak is analyzed based on top output "RES" value of ceph-osd daemon,
i.e. list[7] in code
Args:
median_dict (dict): dict of worker nodes and respective median value
eg: median_dict = {'worker_node_1':102400, 'worker_node_2':204800, ...}
More detail on the median value:
Calculating a memory leak requires a constant reference value, which should not be
taken at the start or end of the test, so it is calculated by sampling memory for 180 sec
before TC execution and taking the median of those samples.
The memory value can differ between nodes, so a constant value is identified
for each node and stored in median_dict
Usage:
test_case(.., memory_leak_function):
.....
median_dict = helpers.get_memory_leak_median_value()
.....
TC execution part, memory_leak_fun will capture data
....
helpers.memory_leak_analysis(median_dict)
....
"""
# dict to store memory leak difference for each worker
diff = {}
for worker in get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
number_of_lines = len(memory_leak_data) - 1
# Get the start value form median_dict arg for respective worker
start_value = median_dict[f"{worker}"]
end_value = memory_leak_data[number_of_lines]
logging.info(f"Median value {start_value}")
logging.info(f"End value {end_value}")
# Convert the values to kb for calculations
if start_value.__contains__('g'):
start_value = float(1024 ** 2 * float(start_value[:-1]))
elif start_value.__contains__('m'):
start_value = float(1024 * float(start_value[:-1]))
else:
start_value = float(start_value)
if end_value.__contains__('g'):
end_value = float(1024 ** 2 * float(end_value[:-1]))
elif end_value.__contains__('m'):
end_value = float(1024 * float(end_value[:-1]))
else:
end_value = float(end_value)
# Calculate the percentage of diff between start and end value
# Based on value decide TC pass or fail
diff[worker] = ((end_value - start_value) / start_value) * 100
logging.info(f"Percentage diff in start and end value {diff[worker]}")
if diff[worker] <= 20:
logging.info(f"No memory leak in worker {worker} passing the test")
else:
logging.info(f"There is a memory leak in worker {worker}")
logging.info(f"Memory median value start of the test {start_value}")
logging.info(f"Memory value end of the test {end_value}")
raise UnexpectedBehaviour
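# Worked example (illustration only): with a median RES of 100000 kb before the
# test and 115000 kb at the end, the difference is
# ((115000 - 100000) / 100000) * 100 = 15%, which is within the 20% threshold,
# so no memory leak is reported for that worker.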
def get_memory_leak_median_value():
"""
Function to calculate the memory leak median value by collecting data for 180 sec
and finding the median value, which is used as the starting point
to evaluate memory leak using the "RES" value of the ceph-osd daemon, i.e. list[7] in code
Returns:
median_dict (dict): dict of worker nodes and respective median value
"""
median_dict = {}
timeout = 180 # wait for 180 sec to evaluate memory leak median data.
logger.info(f"waiting for {timeout} sec to evaluate the median value")
time.sleep(timeout)
for worker in get_worker_nodes():
memory_leak_data = []
if os.path.exists(f"/tmp/{worker}-top-output.txt"):
with open(f"/tmp/{worker}-top-output.txt", "r") as f:
data = f.readline()
list = data.split(" ")
list = [i for i in list if i]
memory_leak_data.append(list[7])
else:
logging.info(f"worker {worker} memory leak file not found")
raise UnexpectedBehaviour
median_dict[f"{worker}"] = statistics.median(memory_leak_data)
return median_dict
def refresh_oc_login_connection(user=None, password=None):
"""
Function to refresh oc user login
Default login using kubeadmin user and password
Args:
user (str): Username to login
password (str): Password to login
"""
user = user or config.RUN['username']
if not password:
filename = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['password_location']
)
with open(filename) as f:
password = f.read()
ocs_obj = ocp.OCP()
ocs_obj.login(user=user, password=password)
def rsync_kubeconf_to_node(node):
"""
Function to copy kubeconfig to OCP node
Args:
node (str): OCP node to copy kubeconfig if not present
"""
# ocp_obj = ocp.OCP()
filename = os.path.join(
config.ENV_DATA['cluster_path'],
config.RUN['kubeconfig_location']
)
file_path = os.path.dirname(filename)
master_list = get_master_nodes()
ocp_obj = ocp.OCP()
check_auth = 'auth'
check_conf = 'kubeconfig'
node_path = '/home/core/'
if check_auth not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}"]):
ocp.rsync(
src=file_path, dst=f"{node_path}", node=node, dst_node=True
)
elif check_conf not in ocp_obj.exec_oc_debug_cmd(node=master_list[0], cmd_list=[f"ls {node_path}auth"]):
ocp.rsync(
src=file_path, dst=f"{node_path}", node=node, dst_node=True
)
def create_dummy_osd(deployment):
"""
Replace one of OSD pods with pod that contains all data from original
OSD but doesn't run the osd daemon. This can be used e.g. for direct access
to Ceph Placement Groups.
Args:
deployment (str): Name of deployment to use
Returns:
list: first item is dummy deployment object, second item is dummy pod
object
"""
oc = OCP(
kind=constants.DEPLOYMENT,
namespace=config.ENV_DATA.get('cluster_namespace')
)
osd_data = oc.get(deployment)
dummy_deployment = create_unique_resource_name('dummy', 'osd')
osd_data['metadata']['name'] = dummy_deployment
osd_containers = osd_data.get('spec').get('template').get('spec').get(
'containers'
)
# get osd container spec
original_osd_args = osd_containers[0].get('args')
osd_data['spec']['template']['spec']['containers'][0]['args'] = []
osd_data['spec']['template']['spec']['containers'][0]['command'] = [
'/bin/bash',
'-c',
'sleep infinity'
]
osd_file = tempfile.NamedTemporaryFile(
mode='w+', prefix=dummy_deployment, delete=False
)
with open(osd_file.name, "w") as temp:
yaml.dump(osd_data, temp)
oc.create(osd_file.name)
# downscale the original deployment and start dummy deployment instead
oc.exec_oc_cmd(f"scale --replicas=0 deployment/{deployment}")
oc.exec_oc_cmd(f"scale --replicas=1 deployment/{dummy_deployment}")
osd_list = pod.get_osd_pods()
dummy_pod = [pod for pod in osd_list if dummy_deployment in pod.name][0]
wait_for_resource_state(
resource=dummy_pod,
state=constants.STATUS_RUNNING,
timeout=60
)
ceph_init_cmd = '/rook/tini' + ' ' + ' '.join(original_osd_args)
try:
logger.info('Following command should expire after 7 seconds')
dummy_pod.exec_cmd_on_pod(ceph_init_cmd, timeout=7)
except TimeoutExpired:
logger.info('Killing /rook/tini process')
try:
dummy_pod.exec_sh_cmd_on_pod(
"kill $(ps aux | grep '[/]rook/tini' | awk '{print $2}')"
)
except CommandFailed:
pass
return dummy_deployment, dummy_pod
def get_failure_domin():
"""
Function used to get the failure domain of a pool
Returns:
str: Failure domain from cephblockpool
"""
ct_pod = pod.get_ceph_tools_pod()
out = ct_pod.exec_ceph_cmd(ceph_cmd="ceph osd crush rule dump", format='json')
assert out, "Failed to get cmd output"
for crush_rule in out:
if constants.CEPHBLOCKPOOL.lower() in crush_rule.get("rule_name"):
for steps in crush_rule.get("steps"):
if "type" in steps:
return steps.get("type")
def wait_for_ct_pod_recovery():
"""
In case of node failure scenarios, in which the selected node is
running the ceph tools pod, we'll want to wait for the pod recovery
Returns:
bool: True in case the ceph tools pod was recovered, False otherwise
"""
try:
_ = get_admin_key()
except CommandFailed as ex:
logger.info(str(ex))
if "connection timed out" in str(ex):
logger.info(
"Ceph tools box was running on the node that had a failure. "
"Hence, waiting for a new Ceph tools box pod to spin up"
)
wait_for_resource_count_change(
func_to_use=pod.get_all_pods, previous_num=1,
namespace=config.ENV_DATA['cluster_namespace'], timeout=120,
selector=constants.TOOL_APP_LABEL
)
return True
else:
return False
return True
def label_worker_node(node_list, label_key, label_value):
"""
Function to label worker node for running app pods on specific worker nodes.
Args:
node_list (list): List of node name
label_key (str): Label_key to be added in worker
label_value (str): Label_value
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}={label_value}", out_yaml_format=False
)
logger.info(out)
def remove_label_from_worker_node(node_list, label_key):
"""
Function to remove label from worker node.
Args:
node_list (list): List of node name
label_key (str): Label_key to be remove from worker node
"""
ocp_obj = OCP()
out = ocp_obj.exec_oc_cmd(
command=f"label node {' '.join(node_list)} {label_key}-", out_yaml_format=False
)
logger.info(out)
def get_pods_nodes_logs():
"""
Get logs from all pods and nodes
Returns:
dict: node/pod name as key, logs content as value (string)
"""
all_logs = {}
all_pods = pod.get_all_pods()
all_nodes = node.get_node_objs()
for node_obj in all_nodes:
node_name = node_obj.name
log_content = node.get_node_logs(node_name)
all_logs.update({node_name: log_content})
for pod_obj in all_pods:
try:
pod_name = pod_obj.name
log_content = pod.get_pod_logs(pod_name)
all_logs.update({pod_name: log_content})
except CommandFailed:
pass
return all_logs
def get_logs_with_errors(errors=None):
"""
From logs of all pods and nodes, get only logs
containing any of specified errors
Args:
errors (list): List of errors to look for
Returns:
dict: node/pod name as key, logs content as value; may be empty
"""
all_logs = get_pods_nodes_logs()
output_logs = {}
errors_list = constants.CRITICAL_ERRORS
if errors:
errors_list = errors_list + errors
for name, log_content in all_logs.items():
for error_msg in errors_list:
if error_msg in log_content:
logger.debug(f"Found '{error_msg}' in log of {name}")
output_logs.update({name: log_content})
log_path = f"{ocsci_log_path()}/{name}.log"
with open(log_path, 'w') as fh:
fh.write(log_content)
return output_logs
def modify_osd_replica_count(resource_name, replica_count):
"""
Function to modify osd replica count to 0 or 1
Args:
resource_name (str): Name of osd i.e, 'rook-ceph-osd-0-c9c4bc7c-bkf4b'
replica_count (int): osd replica count to be changed to
Returns:
bool: True in case if changes are applied. False otherwise
"""
ocp_obj = ocp.OCP(kind=constants.DEPLOYMENT, namespace=defaults.ROOK_CLUSTER_NAMESPACE)
params = f'{{"spec": {{"replicas": {replica_count}}}}}'
resource_name = '-'.join(resource_name.split('-')[0:4])
return ocp_obj.patch(resource_name=resource_name, params=params)
def collect_performance_stats(dir_name):
"""
Collect performance stats and save them to a file in json format.
Args:
dir_name (str): directory name to store stats.
Performance stats include:
IOPs and throughput percentage of cluster
CPU and memory consumption of each node
"""
from ocs_ci.ocs.cluster import CephCluster
log_dir_path = os.path.join(
os.path.expanduser(config.RUN['log_dir']),
f"failed_testcase_ocs_logs_{config.RUN['run_id']}",
f"{dir_name}_performance_stats"
)
if not os.path.exists(log_dir_path):
logger.info(f'Creating directory {log_dir_path}')
os.makedirs(log_dir_path)
ceph_obj = CephCluster()
performance_stats = {}
# Get iops and throughput percentage of cluster
iops_percentage = ceph_obj.get_iops_percentage()
throughput_percentage = ceph_obj.get_throughput_percentage()
# ToDo: Get iops and throughput percentage of each nodes
# Get the cpu and memory of each nodes from adm top
master_node_utilization_from_adm_top = \
node.get_node_resource_utilization_from_adm_top(node_type='master')
worker_node_utilization_from_adm_top = \
node.get_node_resource_utilization_from_adm_top(node_type='worker')
# Get the cpu and memory from describe of nodes
master_node_utilization_from_oc_describe = \
node.get_node_resource_utilization_from_oc_describe(node_type='master')
worker_node_utilization_from_oc_describe = \
node.get_node_resource_utilization_from_oc_describe(node_type='worker')
performance_stats['iops_percentage'] = iops_percentage
performance_stats['throughput_percentage'] = throughput_percentage
performance_stats['master_node_utilization'] = master_node_utilization_from_adm_top
performance_stats['worker_node_utilization'] = worker_node_utilization_from_adm_top
performance_stats['master_node_utilization_from_oc_describe'] = master_node_utilization_from_oc_describe
performance_stats['worker_node_utilization_from_oc_describe'] = worker_node_utilization_from_oc_describe
file_name = os.path.join(log_dir_path, 'performance')
with open(file_name, 'w') as outfile:
json.dump(performance_stats, outfile)
def validate_pod_oomkilled(
pod_name, namespace=defaults.ROOK_CLUSTER_NAMESPACE, container=None
):
"""
Validate whether pod OOMKilled messages are found in the log
Args:
pod_name (str): Name of the pod
namespace (str): Namespace of the pod
container (str): Name of the container
Returns:
bool : True if oomkill messages are not found in the log.
False otherwise.
Raises:
Assertion if failed to fetch logs
"""
rc = True
try:
pod_log = pod.get_pod_logs(
pod_name=pod_name, namespace=namespace,
container=container, previous=True
)
result = pod_log.find("signal: killed")
if result != -1:
rc = False
except CommandFailed as ecf:
assert f'previous terminated container "{container}" in pod "{pod_name}" not found' in str(ecf), (
"Failed to fetch logs"
)
return rc
def validate_pods_are_running_and_not_restarted(
pod_name, pod_restart_count, namespace
):
"""
Validate that the given pod is in Running state and has not been restarted or re-spun
Args:
pod_name (str): Name of the pod
pod_restart_count (int): Restart count of pod
namespace (str): Namespace of the pod
Returns:
bool : True if pod is in running state and restart
count matches the previous one
"""
ocp_obj = ocp.OCP(kind=constants.POD, namespace=namespace)
pod_obj = ocp_obj.get(resource_name=pod_name)
restart_count = pod_obj.get('status').get('containerStatuses')[0].get('restartCount')
pod_state = pod_obj.get('status').get('phase')
if pod_state == 'Running' and restart_count == pod_restart_count:
logger.info("Pod is running state and restart count matches with previous one")
return True
logger.error(f"Pod is in {pod_state} state and restart count of pod {restart_count}")
logger.info(f"{pod_obj}")
return False
def calc_local_file_md5_sum(path):
"""
Calculate and return the MD5 checksum of a local file
Arguments:
path(str): The path to the file
Returns:
str: The MD5 checksum
"""
with open(path, 'rb') as file_to_hash:
file_as_bytes = file_to_hash.read()
return hashlib.md5(file_as_bytes).hexdigest()
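# Hedged usage sketch (not part of the original module): comparing a local file's
# checksum before upload and after download to verify integrity.
# original_md5 = calc_local_file_md5_sum('/tmp/testfile')
# ... upload the file, then download it to /tmp/testfile.downloaded ...
# assert original_md5 == calc_local_file_md5_sum('/tmp/testfile.downloaded')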
def retrieve_default_ingress_crt():
"""
Copy the default ingress certificate from the router-ca secret
to the local code runner for usage with boto3.
"""
default_ingress_crt_b64 = OCP(
kind='secret',
namespace='openshift-ingress-operator',
resource_name='router-ca'
).get().get('data').get('tls.crt')
decoded_crt = base64.b64decode(default_ingress_crt_b64).decode('utf-8')
with open(constants.DEFAULT_INGRESS_CRT_LOCAL_PATH, 'w') as crtfile:
crtfile.write(decoded_crt)
def get_pv_size(storageclass=None):
"""
Get PV sizes for the requested storageclass
Args:
storageclass (str): Name of storageclass
Returns:
list: list of PV sizes
"""
return_list = []
ocp_obj = ocp.OCP(kind=constants.PV)
pv_objs = ocp_obj.get()['items']
for pv_obj in pv_objs:
if pv_obj['spec']['storageClassName'] == storageclass:
return_list.append(pv_obj['spec']['capacity']['storage'])
return return_list
|
grab_huaban_board.py
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
__version__ = "4.0"
__author__ = "Mr.tao"
__doc__ = "http://www.saintic.com/blog/204.html"
import requests, re, os, logging, json
from multiprocessing import cpu_count, Process
from multiprocessing.dummy import Pool as ThreadPool
logging.basicConfig(level=logging.DEBUG,
format='[ %(levelname)s ] %(asctime)s %(filename)s:%(threadName)s:%(process)d:%(lineno)d %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
filename='huaban.log',
filemode='a')
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36", "Referer": 'https://www.saintic.com/'}
title_pat = re.compile(r'<title>.*\((\d+).*\).*</title>')
pin_pat = re.compile(r'("pin_id":)(\w*)')
pindata_pat = re.compile('"pin_id":(.*?),.+?"key":"(.*?)",.+?"type":"image/(.*?)"', re.S)
def print_green(msg):
print '\033[92m{}\033[0m'.format(str(msg))
def print_blue(msg):
print '\033[94m{}\033[0m'.format(str(msg))
def print_yellow(msg):
print '\033[93m{}\033[0m'.format(str(msg))
def print_header(msg):
print '\033[95m{}\033[0m'.format(str(msg))
def Mkdir(d):
d = str(d)
if not os.path.exists(d):
os.makedirs(d)
if os.path.exists(d):
return True
else:
return False
def BoardGetTitleImageNum(board):
""" 查询画板的pin数量 """
logging.debug("{}, start to get the title number".format(board))
url = "http://huaban.com/boards/%s/" %(board)
data = requests.get(url, timeout=10, verify=False, headers=headers).text.encode('utf-8')
title= re.findall(title_pat, data)[0]
logging.info(title)
return title
def BoardGetPins(board):
""" 获取画板下所有pin """
logging.debug("{}, start to get the pins data".format(board))
#get first pin data
url = "http://huaban.com/boards/%s/?limit=100" % board
data = requests.get(url, timeout=10, verify=False, headers=headers).text.encode('utf-8')
pins = [ _[-1] for _ in re.findall(pin_pat, data) if _[-1] ]
while 1:
#get ajax pin data
url = "http://huaban.com/boards/%s/?max=%s&limit=100&wfl=1" %(board, pins[-1])
try:
data = requests.get(url, timeout=10, verify=False, headers=headers).text.encode('utf-8')
except requests.exceptions.ReadTimeout,e:
logging.exception(e, exc_info=True)
continue
else:
_pins = [ _[-1] for _ in re.findall(pin_pat, data) if _[-1] ]
pins += _pins
print_blue("ajax get {} pins, last pin is {}, merged".format(len(_pins), pins[-1]))
if len(_pins) == 0:
break
return pins
def DownloadPinImg(pin):
""" 下载单个pin图片 """
logging.debug("{}, start to download itself".format(pin))
url = "http://huaban.com/pins/%s/" %pin
try:
r = requests.get(url, timeout=15, verify=False, headers=headers)
data = re.findall(pindata_pat, r.text.encode('utf-8').split('\n')[-9].split('},')[0])[0]
HtmlPin, QianNiuKey, ImgType = data
# Some response headers come back in a non-standard format, e.g. "jpeg,image/gif" ( -b 30628524 ), so the file cannot be created from the header as-is and the value needs to be filtered
# by mingcheng 2017-02-27
if len(ImgType.split(",")) > 1:
ImgType = ImgType.split(",")[0]
logging.info((HtmlPin,QianNiuKey, len(QianNiuKey), ImgType))
except Exception,e:
logging.error(e, exc_info=True)
else:
if HtmlPin == pin:
ImgUrl = "http://img.hb.aicdn.com/%s_fw658" %QianNiuKey
try:
headers.update(Referer=url)
req = requests.get(ImgUrl, timeout=10, verify=False, headers=headers)
except Exception,e:
logging.warn(e, exc_info=True)
else:
imageName = "{}.{}".format(pin, ImgType)
with open(imageName, 'wb') as fp:
fp.write(req.content)
print "Successful, pin: {}, save as {}".format(pin, imageName)
return True
else:
print "Failed download, pin: {}".format(pin)
return False
def ExecuteDownloadPins(pins, processes):
""" 并发processes个线程下载所有pins """
pool = ThreadPool(processes=processes)
data = pool.map(DownloadPinImg, pins)
pool.close()
pool.join()
return data
def GetUserBoards(user, limit=10):
""" 查询user的画板, 默认limit=10, 表示最多下载10个画板, 虽然可能会下载不全, 但是此值不宜过大, 每个画板下载会开启一个进程, 过大会使系统崩溃 """
try:
r = requests.get("http://huaban.com/{}/?limit={}".format(user, limit))
except Exception,e:
logging.error(e, exc_info=True)
else:
if r.status_code == 200:
try:
data =json.loads(r.text.split('app.page["user"] = ')[-1].split("app.route();")[0].split("app._csr")[0].strip(";\n"))
except Exception,e:
logging.error(e, exc_info=True)
else:
boards = [ _.get("board_id") for _ in data.get("boards") ]
logging.info("query user boards is {}".format(boards))
return boards
else:
return print_yellow("No such user {}".format(user))
def ExecuteDownloadBoard(board, processes):
""" 执行下载:抓取花瓣网某画板图片 """
logging.debug("{}, start to download the board with processes={}".format(board, processes))
_base_dir = os.path.dirname(os.path.abspath(__file__))
if isinstance(board, int) and isinstance(processes, int):
if Mkdir(board):
os.chdir(str(board))
print_header("Current board <{}> pins number that title is {}".format(board, BoardGetTitleImageNum(board)))
pins = BoardGetPins(board)
print_blue("Current board {} pins number that requests is {}, will ExecuteDownloadPins".format(board, len(pins)))
resp = ExecuteDownloadPins(pins, processes)
print_green("Current board {} download number is {}".format(board, len([ _ for _ in resp if _ == True ])))
else:
print_yellow("mkdir failed for {}".format(board))
else:
print "Params Error"
os.chdir(_base_dir)
def ExecuteDownloadUser(user, processes):
""" 执行下载:抓取花瓣网某用户所有画板 """
logging.debug("{}, start to download the user board with processes={}".format(user, processes))
boards = GetUserBoards(user)
_base_dir = os.path.dirname(os.path.abspath(__file__))
logging.info("query user boards, the number is {}, data is {}".format(len(boards), boards))
if boards:
if Mkdir(user):
os.chdir(user)
worker = []
for board in boards:
p = Process(target=ExecuteDownloadBoard, args=(int(board), int(processes)), name="grab.user.board.{}.huaban".format(board))
#p.daemon=True
worker.append(p)
for p in worker: p.start()
for p in worker: p.join()
else:
return "mkdir failed for user {}".format(user)
else:
return "No boards data"
os.chdir(_base_dir)
def main(users=None, boards=None, processes=6):
""" 引导函数 """
if users:
Mkdir("users")
os.chdir("users")
worker = []
for user in users:
p = Process(target=ExecuteDownloadUser, args=(user, int(processes)), name="grab.user.{}.huaban".format(user))
#p.daemon=True
worker.append(p)
for p in worker: p.start()
for p in worker: p.join()
elif boards:
Mkdir("boards")
os.chdir("boards")
worker = []
for board in boards:
p = Process(target=ExecuteDownloadBoard, args=(int(board), int(processes)), name="grab.board.{}.huaban".format(board))
#p.daemon=True
worker.append(p)
for p in worker: p.start()
for p in worker: p.join()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-b", "--board", help="The board id for Huanban.com")
parser.add_argument("-v", "--version", help="The version for grab_huaban_board project", action='store_true')
parser.add_argument("-p", "--processes", help="Concurrent number", type=int)
parser.add_argument("-u", "--user", help="The user for Huanban.com")
args = parser.parse_args()
user = args.user
board = args.board
version = args.version
processes = args.processes or cpu_count()
if version:
print "From https://github.com/staugur/grab_huaban_board,", __version__
elif user:
users = user.split(",")
main(users=users, processes=processes)
elif board:
boards = board.split(",")
main(boards=boards, processes=processes)
else:
parser.print_help()
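# Hedged usage sketch (assumption, not part of the original script); the script
# targets Python 2, e.g.:
#   python grab_huaban_board.py -b 30628524 -p 8      # grab one board with 8 threads
#   python grab_huaban_board.py -u user1,user2        # grab all boards of the listed users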
|
main.py
|
from service import mi2app_utils
import os
import sys
import threading
import time
import traceback
import logging
import datetime as dt
import signal
from kivy.config import ConfigParser
from service import GpsListener
def receive_signal(signum, stack):
print 'Received:', signum
def alive_worker(secs):
"""
Keep service alive
"""
while True:
time.sleep(secs)
class MyFormatter(logging.Formatter):
converter = dt.datetime.fromtimestamp
def formatTime(self, record, datefmt=None):
ct = self.converter(record.created)
if datefmt:
s = ct.strftime(datefmt)
else:
# t = ct.strftime("%Y-%m-%d %H:%M:%S")
# s = "%s,%03d" % (t, record.msecs)
s = ct.strftime(" ")
return s
def setup_logger(app_name):
'''Setup the analyzer logger.
NOTE: All analyzers share the same logger.
:param app_name: the plugin app name, used to name the log file. The logging level is read from /sdcard/.mobileinsight.ini (default is logging.INFO).
'''
level = logging.INFO
config = ConfigParser()
config.read('/sdcard/.mobileinsight.ini')
if config.has_option('mi_general', 'log_level'):
level_config = config.get('mi_general', 'log_level')
if level_config == "info":
level = logging.INFO
elif level_config == "debug":
level = logging.DEBUG
elif level_config == "warning":
level = logging.WARNING
elif level_config == "error":
level = logging.ERROR
elif level_config == "critical":
level = logging.CRITICAL
l = logging.getLogger("mobileinsight_logger")
if len(l.handlers) < 1:
# formatter = MyFormatter(
# '%(asctime)s %(message)s',
# datefmt='%Y-%m-%d,%H:%M:%S.%f')
formatter = MyFormatter('%(message)s')
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(streamHandler)
l.propagate = False
log_file = os.path.join(
mi2app_utils.get_mobileinsight_analysis_path(),
app_name + "_log.txt")
fileHandler = logging.FileHandler(log_file, mode='w')
fileHandler.setFormatter(formatter)
l.addHandler(fileHandler)
l.disabled = False
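# Illustrative config fragment (assumption, not from the original file): the
# /sdcard/.mobileinsight.ini section read by setup_logger looks roughly like
#   [mi_general]
#   log_level = debug
# where log_level is one of info/debug/warning/error/critical.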
def on_gps(provider, eventname, *args):
if eventname == 'provider-disabled':
pass
elif eventname == 'location':
location = args[0]
# print 'on_gps()', location.getLatitude(), location.getLongitude()
if __name__ == "__main__":
try:
signal.signal(signal.SIGINT, receive_signal)
arg = os.getenv("PYTHON_SERVICE_ARGUMENT") # get the argument passed
tmp = arg.split(":")
if len(tmp) < 2:
raise AssertionError("Error: incorrect service path:" + arg)
app_name = tmp[0]
app_path = tmp[1]
# print "Service: app_name=",app_name," app_path=",app_path
setup_logger(app_name)
t = threading.Thread(target=alive_worker, args=(30.0,))
t.start()
app_dir = os.path.join(mi2app_utils.get_files_dir(), "app")
# add this dir to module search path
sys.path.append(os.path.join(app_dir, app_path))
app_file = os.path.join(app_dir, app_path, "main.mi2app")
print "Phone model: " + mi2app_utils.get_phone_model()
print "Running app: " + app_file
# print arg,app_dir,os.path.join(app_dir, arg)
namespace = {"service_context": mi2app_utils.get_service_context()}
# Load configurations as global variables
config = ConfigParser()
config.read('/sdcard/.mobileinsight.ini')
ii = arg.rfind('/')
section_name = arg[ii + 1:]
plugin_config = {}
if section_name in config.sections():
config_options = config.options(section_name)
for item in config_options:
plugin_config[item] = config.get(section_name, item)
namespace["plugin_config"] = plugin_config
#
gps_provider = GpsListener(on_gps)
gps_provider.start()
execfile(app_file, namespace)
# print app_name, "stops normally"
except Exception as e:
# print "Exceptions!!!"
# Print traceback logs to analysis
import traceback
l = logging.getLogger("mobileinsight_logger")
l.error(str(traceback.format_exc()))
sys.exit(str(traceback.format_exc()))
|
_app.py
|
"""
"""
"""
_app.py
websocket - WebSocket client library for Python
Copyright 2022 engn33r
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import selectors
import sys
import threading
import time
import traceback
from ._abnf import ABNF
from ._core import WebSocket, getdefaulttimeout
from ._exceptions import *
from . import _logging
__all__ = ["WebSocketApp"]
class Dispatcher:
"""
Dispatcher
"""
def __init__(self, app, ping_timeout):
self.app = app
self.ping_timeout = ping_timeout
def read(self, sock, read_callback, check_callback):
while self.app.keep_running:
sel = selectors.DefaultSelector()
sel.register(self.app.sock.sock, selectors.EVENT_READ)
r = sel.select(self.ping_timeout)
if r:
if not read_callback():
break
check_callback()
sel.close()
class SSLDispatcher:
"""
SSLDispatcher
"""
def __init__(self, app, ping_timeout):
self.app = app
self.ping_timeout = ping_timeout
def read(self, sock, read_callback, check_callback):
while self.app.keep_running:
r = self.select()
if r:
if not read_callback():
break
check_callback()
def select(self):
sock = self.app.sock.sock
if sock.pending():
return [sock,]
sel = selectors.DefaultSelector()
sel.register(sock, selectors.EVENT_READ)
r = sel.select(self.ping_timeout)
sel.close()
if len(r) > 0:
return r[0][0]
class WrappedDispatcher:
"""
WrappedDispatcher
"""
def __init__(self, app, ping_timeout, dispatcher):
self.app = app
self.ping_timeout = ping_timeout
self.dispatcher = dispatcher
def read(self, sock, read_callback, check_callback):
self.dispatcher.read(sock, read_callback)
self.ping_timeout and self.dispatcher.timeout(self.ping_timeout, check_callback)
class WebSocketApp:
"""
Higher level of APIs are provided. The interface is like JavaScript WebSocket object.
"""
def __init__(self, url, header=None,
on_open=None, on_message=None, on_error=None,
on_close=None, on_ping=None, on_pong=None,
on_cont_message=None,
keep_running=True, get_mask_key=None, cookie=None,
subprotocols=None,
on_data=None):
"""
WebSocketApp initialization
Parameters
----------
url: str
Websocket url.
header: list or dict
Custom header for websocket handshake.
on_open: function
Callback object which is called at opening websocket.
on_open has one argument.
The 1st argument is this class object.
on_message: function
Callback object which is called when received data.
on_message has 2 arguments.
The 1st argument is this class object.
The 2nd argument is utf-8 data received from the server.
on_error: function
Callback object which is called when we get error.
on_error has 2 arguments.
The 1st argument is this class object.
The 2nd argument is exception object.
on_close: function
Callback object which is called when connection is closed.
on_close has 3 arguments.
The 1st argument is this class object.
The 2nd argument is close_status_code.
The 3rd argument is close_msg.
on_cont_message: function
Callback object which is called when a continuation
frame is received.
on_cont_message has 3 arguments.
The 1st argument is this class object.
The 2nd argument is utf-8 string which we get from the server.
The 3rd argument is the continue flag. If 0, the data continues
to the next frame.
on_data: function
Callback object which is called when a message received.
This is called before on_message or on_cont_message,
and then on_message or on_cont_message is called.
on_data has 4 argument.
The 1st argument is this class object.
The 2nd argument is utf-8 string which we get from the server.
The 3rd argument is the data type: ABNF.OPCODE_TEXT or ABNF.OPCODE_BINARY will be passed.
The 4th argument is the continue flag. If 0, the data continues to the next frame.
keep_running: bool
This parameter is obsolete and ignored.
get_mask_key: function
A callable function to get new mask keys, see the
WebSocket.set_mask_key's docstring for more information.
cookie: str
Cookie value.
subprotocols: list
List of available sub protocols. Default is None.
"""
self.url = url
self.header = header if header is not None else []
self.cookie = cookie
self.on_open = on_open
self.on_message = on_message
self.on_data = on_data
self.on_error = on_error
self.on_close = on_close
self.on_ping = on_ping
self.on_pong = on_pong
self.on_cont_message = on_cont_message
self.keep_running = False
self.get_mask_key = get_mask_key
self.sock = None
self.last_ping_tm = 0
self.last_pong_tm = 0
self.subprotocols = subprotocols
def send(self, data, opcode=ABNF.OPCODE_TEXT):
"""
send message
Parameters
----------
data: str
Message to send. If you set opcode to OPCODE_TEXT,
data must be utf-8 string or unicode.
opcode: int
Operation code of data. Default is OPCODE_TEXT.
"""
if not self.sock or self.sock.send(data, opcode) == 0:
raise WebSocketConnectionClosedException(
"Connection is already closed.")
def close(self, **kwargs):
"""
Close websocket connection.
"""
self.keep_running = False
if self.sock:
self.sock.close(**kwargs)
self.sock = None
def _send_ping(self, interval, event, payload):
while not event.wait(interval):
self.last_ping_tm = time.time()
if self.sock:
try:
self.sock.ping(payload)
except Exception as ex:
_logging.warning("send_ping routine terminated: {}".format(ex))
break
def run_forever(self, sockopt=None, sslopt=None,
ping_interval=0, ping_timeout=None,
ping_payload="",
http_proxy_host=None, http_proxy_port=None,
http_no_proxy=None, http_proxy_auth=None,
skip_utf8_validation=False,
host=None, origin=None, dispatcher=None,
suppress_origin=False, proxy_type=None):
"""
Run event loop for WebSocket framework.
This loop is an infinite loop and is alive while websocket is available.
Parameters
----------
sockopt: tuple
Values for socket.setsockopt.
sockopt must be tuple
and each element is argument of sock.setsockopt.
sslopt: dict
Optional dict object for ssl socket option.
ping_interval: int or float
Automatically send "ping" command
every specified period (in seconds).
If set to 0, no ping is sent periodically.
ping_timeout: int or float
Timeout (in seconds) if the pong message is not received.
ping_payload: str
Payload message to send with each ping.
http_proxy_host: str
HTTP proxy host name.
http_proxy_port: int or str
HTTP proxy port. If not set, set to 80.
http_no_proxy: list
Whitelisted host names that don't use the proxy.
skip_utf8_validation: bool
skip utf8 validation.
host: str
update host header.
origin: str
update origin header.
dispatcher: Dispatcher object
customize reading data from socket.
suppress_origin: bool
suppress outputting origin header.
Returns
-------
teardown: bool
False if the `WebSocketApp` is closed or caught KeyboardInterrupt,
True if any other exception was raised during a loop.
"""
if ping_timeout is not None and ping_timeout <= 0:
raise WebSocketException("Ensure ping_timeout > 0")
if ping_interval is not None and ping_interval < 0:
raise WebSocketException("Ensure ping_interval >= 0")
if ping_timeout and ping_interval and ping_interval <= ping_timeout:
raise WebSocketException("Ensure ping_interval > ping_timeout")
if not sockopt:
sockopt = []
if not sslopt:
sslopt = {}
if self.sock:
raise WebSocketException("socket is already opened")
thread = None
self.keep_running = True
self.last_ping_tm = 0
self.last_pong_tm = 0
def teardown(close_frame=None):
"""
Tears down the connection.
Parameters
----------
close_frame: ABNF frame
If close_frame is set, the on_close handler is invoked
with the statusCode and reason from the provided frame.
"""
if thread and thread.is_alive():
event.set()
thread.join()
self.keep_running = False
if self.sock:
self.sock.close()
close_status_code, close_reason = self._get_close_args(
close_frame if close_frame else None)
self.sock = None
# Finally call the callback AFTER all teardown is complete
self._callback(self.on_close, close_status_code, close_reason)
try:
self.sock = WebSocket(
self.get_mask_key, sockopt=sockopt, sslopt=sslopt,
fire_cont_frame=self.on_cont_message is not None,
skip_utf8_validation=skip_utf8_validation,
enable_multithread=True)
self.sock.settimeout(getdefaulttimeout())
self.sock.connect(
self.url, header=self.header, cookie=self.cookie,
http_proxy_host=http_proxy_host,
http_proxy_port=http_proxy_port, http_no_proxy=http_no_proxy,
http_proxy_auth=http_proxy_auth, subprotocols=self.subprotocols,
host=host, origin=origin, suppress_origin=suppress_origin,
proxy_type=proxy_type)
dispatcher = self.create_dispatcher(ping_timeout, dispatcher)
self._callback(self.on_open)
if ping_interval:
event = threading.Event()
thread = threading.Thread(
target=self._send_ping, args=(ping_interval, event, ping_payload))
thread.daemon = True
thread.start()
def read():
if not self.keep_running:
return teardown()
op_code, frame = self.sock.recv_data_frame(True)
if op_code == ABNF.OPCODE_CLOSE:
return teardown(frame)
elif op_code == ABNF.OPCODE_PING:
self._callback(self.on_ping, frame.data)
elif op_code == ABNF.OPCODE_PONG:
self.last_pong_tm = time.time()
self._callback(self.on_pong, frame.data)
elif op_code == ABNF.OPCODE_CONT and self.on_cont_message:
self._callback(self.on_data, frame.data,
frame.opcode, frame.fin)
self._callback(self.on_cont_message,
frame.data, frame.fin)
else:
data = frame.data
if op_code == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
self._callback(self.on_data, data, frame.opcode, True)
self._callback(self.on_message, data)
return True
def check():
if (ping_timeout):
has_timeout_expired = time.time() - self.last_ping_tm > ping_timeout
has_pong_not_arrived_after_last_ping = self.last_pong_tm - self.last_ping_tm < 0
has_pong_arrived_too_late = self.last_pong_tm - self.last_ping_tm > ping_timeout
if (self.last_ping_tm and
has_timeout_expired and
(has_pong_not_arrived_after_last_ping or has_pong_arrived_too_late)):
raise WebSocketTimeoutException("ping/pong timed out")
return True
dispatcher.read(self.sock.sock, read, check)
return False
except (Exception, KeyboardInterrupt, SystemExit) as e:
self._callback(self.on_error, e)
if isinstance(e, SystemExit):
# propagate SystemExit further
raise
teardown()
return not isinstance(e, KeyboardInterrupt)
def create_dispatcher(self, ping_timeout, dispatcher=None):
if dispatcher: # If custom dispatcher is set, use WrappedDispatcher
return WrappedDispatcher(self, ping_timeout, dispatcher)
timeout = ping_timeout or 10
if self.sock.is_ssl():
return SSLDispatcher(self, timeout)
return Dispatcher(self, timeout)
def _get_close_args(self, close_frame):
"""
_get_close_args extracts the close code and reason from the close body
if it exists (RFC6455 says WebSocket Connection Close Code is optional)
"""
# Need to catch the case where close_frame is None
# Otherwise the following if statement causes an error
if not self.on_close or not close_frame:
return [None, None]
# Extract close frame status code
if close_frame.data and len(close_frame.data) >= 2:
close_status_code = 256 * close_frame.data[0] + close_frame.data[1]
reason = close_frame.data[2:].decode('utf-8')
return [close_status_code, reason]
else:
# Most likely reached this because len(close_frame.data) < 2
return [None, None]
def _callback(self, callback, *args):
if callback:
try:
callback(self, *args)
except Exception as e:
_logging.error("error from callback {}: {}".format(callback, e))
if self.on_error:
self.on_error(self, e)
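# Hedged usage sketch (not part of the library): the typical callback pattern,
# assuming a reachable echo endpoint URL.
#
#   def on_message(ws, message):
#       print(message)
#
#   app = WebSocketApp("wss://example.invalid/echo", on_message=on_message)
#   app.run_forever(ping_interval=30, ping_timeout=10)
#
# run_forever() returns False on a clean close or KeyboardInterrupt and True if
# any other exception tore the loop down.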
|
broadcast.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import multiprocessing as mp
from maro.communication import Proxy, SessionType
def worker(group_name):
"""
The main worker logic includes initializing the proxy and handling jobs from the master.
Args:
group_name (str): Identifier for the group of all communication components.
"""
proxy = Proxy(group_name=group_name,
component_type="worker",
expected_peers={"master": 1})
counter = 0
print(f"{proxy.component_name}'s counter is {counter}.")
# Non-recurring receive of messages from the proxy.
for msg in proxy.receive(is_continuous=False):
print(f"{proxy.component_name} receive message from {msg.source}.")
if msg.tag == "INC":
counter += 1
print(f"{proxy.component_name} receive INC request, {proxy.component_name}'s count is {counter}.")
proxy.reply(received_message=msg, tag="done")
def master(group_name: str, worker_num: int, is_immediate: bool = False):
"""
The main master logic includes initializing the proxy and allocating jobs to workers.
Args:
group_name (str): Identifier for the group of all communication components.
worker_num (int): The number of workers.
is_immediate (bool): If True, it will be an async mode; otherwise, it will be a sync mode.
Async Mode: The proxy only returns the session id for sending messages. Based on the local task priority,
you can do something with high priority before receiving replied messages from peers.
Sync Mode: It will block until the proxy returns all the replied messages.
"""
proxy = Proxy(group_name=group_name,
component_type="master",
expected_peers={"worker": worker_num})
if is_immediate:
session_ids = proxy.ibroadcast(tag="INC",
session_type=SessionType.NOTIFICATION)
# do some tasks with higher priority here.
replied_msgs = proxy.receive_by_id(session_ids)
else:
replied_msgs = proxy.broadcast(tag="INC",
session_type=SessionType.NOTIFICATION)
for msg in replied_msgs:
print(f"{proxy.component_name} get receive notification from {msg.source} with message session stage " +
f"{msg.session_stage}.")
if __name__ == "__main__":
"""
This is a single-host multiprocess program used to simulate the communication in the distributed system.
For the completed usage experience of the distributed cluster, please use the MARO CLI.
"""
mp.set_start_method("spawn")
group_name = "proxy_broadcast_INC_example"
worker_number = 5
is_immediate = True
workers = mp.Pool(worker_number)
master_process = mp.Process(target=master, args=(group_name, worker_number, is_immediate,))
master_process.start()
workers.map(worker, [group_name] * worker_number)
workers.close()
master_process.join()
workers.join()
|
executor.py
|
#!/usr/bin/env python3
from gi.repository import GLib
import subprocess
import threading
from nwg_panel.tools import check_key, update_image
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk, Gdk, GdkPixbuf
class Executor(Gtk.EventBox):
def __init__(self, settings, icons_path):
self.settings = settings
self.icons_path = icons_path
Gtk.EventBox.__init__(self)
self.box = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=0)
self.add(self.box)
self.image = Gtk.Image()
self.label = Gtk.Label("")
self.icon_path = None
check_key(settings, "script", "")
check_key(settings, "interval", 0)
check_key(settings, "root-css-name", "root-executor")
check_key(settings, "css-name", "")
check_key(settings, "icon-placement", "left")
check_key(settings, "icon-size", 16)
check_key(settings, "tooltip-text", "")
check_key(settings, "on-left-click", "")
check_key(settings, "on-right-click", "")
check_key(settings, "on-middle-click", "")
check_key(settings, "on-scroll-up", "")
check_key(settings, "on-scroll-down", "")
update_image(self.image, "view-refresh-symbolic", self.settings["icon-size"], self.icons_path)
self.set_property("name", settings["root-css-name"])
# reverting #57, as check_key only adds keys if MISSING, not if empty
if settings["css-name"]:
self.label.set_property("name", settings["css-name"])
else:
self.label.set_property("name", "executor-label")
if settings["tooltip-text"]:
self.set_tooltip_text(settings["tooltip-text"])
if settings["on-left-click"] or settings["on-right-click"] or settings["on-middle-click"] or settings[
"on-scroll-up"] or settings["on-scroll-down"]:
self.connect('button-press-event', self.on_button_press)
self.add_events(Gdk.EventMask.SCROLL_MASK)
self.connect('scroll-event', self.on_scroll)
self.connect('enter-notify-event', self.on_enter_notify_event)
self.connect('leave-notify-event', self.on_leave_notify_event)
self.build_box()
self.refresh()
if settings["interval"] > 0:
Gdk.threads_add_timeout_seconds(GLib.PRIORITY_LOW, settings["interval"], self.refresh)
def update_widget(self, output):
if output:
if len(output) == 1:
if output[0].endswith(".svg") or output[0].endswith(".png"):
new_path = output[0].strip()
if new_path != self.icon_path:
if "/" not in new_path and "." not in new_path: # name given instead of path
update_image(self.image, new_path, self.settings["icon-size"], self.icons_path)
self.icon_path = new_path
else:
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(
new_path, self.settings["icon-size"], self.settings["icon-size"])
self.image.set_from_pixbuf(pixbuf)
self.icon_path = new_path
except:
print("Failed setting image from {}".format(output[0].strip()))
if not self.image.get_visible():
self.image.show()
if self.label.get_visible():
self.label.hide()
else:
if self.image.get_visible():
self.image.hide()
self.label.set_text(output[0].strip())
if not self.label.get_visible():
self.label.show()
elif len(output) == 2:
new_path = output[0].strip()
if "/" not in new_path and "." not in new_path: # name given instead of path
update_image(self.image, new_path, self.settings["icon-size"], self.icons_path)
self.icon_path = new_path
else:
if new_path != self.icon_path:
try:
pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(
new_path, self.settings["icon-size"], self.settings["icon-size"])
self.image.set_from_pixbuf(pixbuf)
self.icon_path = new_path
except:
print("Failed setting image from {}".format(output[0].strip()))
if not self.image.get_visible():
self.image.show()
self.label.set_text(output[1].strip())
else:
if self.image.get_visible():
self.image.hide()
if self.label.get_visible():
self.label.hide()
return False
def get_output(self):
if "script" in self.settings and self.settings["script"]:
try:
output = subprocess.check_output(self.settings["script"].split()).decode("utf-8").splitlines()
GLib.idle_add(self.update_widget, output)
except Exception as e:
print(e)
def refresh(self):
thread = threading.Thread(target=self.get_output)
thread.daemon = True
thread.start()
return True
def build_box(self):
if self.settings["icon-placement"] == "left":
self.box.pack_start(self.image, False, False, 2)
self.box.pack_start(self.label, False, False, 2)
if self.settings["icon-placement"] != "left":
self.box.pack_start(self.image, False, False, 2)
def on_enter_notify_event(self, widget, event):
self.get_style_context().set_state(Gtk.StateFlags.SELECTED)
def on_leave_notify_event(self, widget, event):
self.get_style_context().set_state(Gtk.StateFlags.NORMAL)
def on_button_press(self, widget, event):
if event.button == 1 and self.settings["on-left-click"]:
self.launch(self.settings["on-left-click"])
elif event.button == 2 and self.settings["on-middle-click"]:
self.launch(self.settings["on-middle-click"])
elif event.button == 3 and self.settings["on-right-click"]:
self.launch(self.settings["on-right-click"])
def on_scroll(self, widget, event):
if event.direction == Gdk.ScrollDirection.UP and self.settings["on-scroll-up"]:
self.launch(self.settings["on-scroll-up"])
elif event.direction == Gdk.ScrollDirection.DOWN and self.settings["on-scroll-down"]:
self.launch(self.settings["on-scroll-down"])
else:
print("No command assigned")
def launch(self, cmd):
print("Executing '{}'".format(cmd))
subprocess.Popen('exec {}'.format(cmd), shell=True)
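# --- Illustrative sketch (not part of the module) ---
# update_widget() above reads the script's stdout as either one line (an icon path/name
# or a plain label) or two lines (icon first, label second). The guarded block below,
# which never runs when this module is imported by the panel, prints output of that shape;
# the icon name "view-refresh-symbolic" is simply the fallback icon already used above.
if __name__ == "__main__":
    print("view-refresh-symbolic")  # line 1: icon name (no "/" or "." => looked up by name)
    print("executor demo")          # line 2: text shown in the label next to the icon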
|
test_sys.py
|
import unittest, test.support
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import platform
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
try:
import threading
except ImportError:
threading = None
class SysModuleTest(unittest.TestCase):
def setUp(self):
self.orig_stdout = sys.stdout
self.orig_stderr = sys.stderr
self.orig_displayhook = sys.displayhook
def tearDown(self):
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
sys.displayhook = self.orig_displayhook
test.support.reap_children()
def test_original_displayhook(self):
import builtins
out = io.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(builtins, "_"):
del builtins._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(builtins, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(builtins._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
def test_lost_displayhook(self):
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
def test_custom_displayhook(self):
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
err = io.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError as exc:
eh(*sys.exc_info())
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
with test.support.captured_output("stderr") as stderr:
sys.excepthook(1, '1', 1)
self.assertTrue("TypeError: print_exception(): Exception expected for " \
"value, str found" in stderr.getvalue())
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit as exc:
self.assertEqual(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit(42)
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit((42,))
except SystemExit as exc:
self.assertEqual(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit as exc:
self.assertEqual(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit as exc:
self.assertEqual(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (ascii(stderr), ascii(expected)))
# test that stderr buffer if flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the exit message is written with backslashreplace error
# handler to stderr
check_exit_message(
r'import sys; sys.exit("surrogates:\uDCFF")',
b"surrogates:\\udcff")
# test that the unicode message is encoded to the stderr encoding
# instead of the default encoding (utf8)
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit("h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_switchinterval(self):
self.assertRaises(TypeError, sys.setswitchinterval)
self.assertRaises(TypeError, sys.setswitchinterval, "a")
self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
orig = sys.getswitchinterval()
# sanity check
self.assertTrue(orig < 0.5, orig)
try:
for n in 0.00001, 0.05, 3.0, orig:
sys.setswitchinterval(n)
self.assertAlmostEqual(sys.getswitchinterval(), n)
finally:
sys.setswitchinterval(orig)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
@unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
'fatal error if run with a trace function')
def test_recursionlimit_recovery(self):
# NOTE: this test is slightly fragile in that it depends on the current
# recursion count when executing the test being low enough so as to
# trigger the recursion recovery detection in the _Py_MakeEndRecCheck
# macro (see ceval.h).
oldlimit = sys.getrecursionlimit()
def f():
f()
try:
for i in (50, 1000):
# Issue #5392: stack overflow after hitting recursion limit twice
sys.setrecursionlimit(i)
self.assertRaises(RuntimeError, f)
self.assertRaises(RuntimeError, f)
finally:
sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
# A fatal error occurs if a second recursion limit is hit when recovering
# from a first one.
code = textwrap.dedent("""
import sys
def f():
try:
f()
except RuntimeError:
f()
sys.setrecursionlimit(%d)
f()""")
with test.support.SuppressCrashReport():
for i in (50, 1000):
sub = subprocess.Popen([sys.executable, '-c', code % i],
stderr=subprocess.PIPE)
err = sub.communicate()[1]
self.assertTrue(sub.returncode, sub.returncode)
self.assertIn(
b"Fatal Python error: Cannot recover from stack overflow",
err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.support.refcount_test
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.__code__ \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import _thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.support.reap_threads
def current_frames_with_threads(self):
import threading
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(threading.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = threading.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
# from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, str)
self.assertIsInstance(sys.exec_prefix, str)
self.assertIsInstance(sys.base_exec_prefix, str)
self.assertIsInstance(sys.executable, str)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.int_info), 2)
self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
self.assertTrue(sys.int_info.sizeof_digit >= 1)
self.assertEqual(type(sys.int_info.bits_per_digit), int)
self.assertEqual(type(sys.int_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertEqual(len(sys.hash_info), 9)
self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
# sys.hash_info.modulus should be a prime; we do a quick
# probable primality test (doesn't exclude the possibility of
# a Carmichael number)
for x in range(1, 100):
self.assertEqual(
pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
1,
"sys.hash_info.modulus {} is a non-prime".format(
sys.hash_info.modulus)
)
self.assertIsInstance(sys.hash_info.inf, int)
self.assertIsInstance(sys.hash_info.nan, int)
self.assertIsInstance(sys.hash_info.imag, int)
algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
if sys.hash_info.algorithm in {"fnv", "siphash24"}:
self.assertIn(sys.hash_info.hash_bits, {32, 64})
self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
if algo == 1:
self.assertEqual(sys.hash_info.algorithm, "siphash24")
elif algo == 2:
self.assertEqual(sys.hash_info.algorithm, "fnv")
else:
self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
else:
# PY_HASH_EXTERNAL
self.assertEqual(algo, 0)
self.assertGreaterEqual(sys.hash_info.cutoff, 0)
self.assertLess(sys.hash_info.cutoff, 8)
self.assertIsInstance(sys.maxsize, int)
self.assertIsInstance(sys.maxunicode, int)
self.assertEqual(sys.maxunicode, 0x10FFFF)
self.assertIsInstance(sys.platform, str)
self.assertIsInstance(sys.prefix, str)
self.assertIsInstance(sys.base_prefix, str)
self.assertIsInstance(sys.version, str)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
if not sys.platform.startswith('win'):
self.assertIsInstance(sys.abiflags, str)
@unittest.skipUnless(hasattr(sys, 'thread_info'),
'Threading required for this test.')
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to an non existent program name and Python is unable to
# retrieve the real program name
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c",
'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
stdout = p.communicate()[0]
executable = stdout.strip().decode("ASCII")
p.wait()
self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
def check_fsencoding(self, fs_encoding, expected=None):
self.assertIsNotNone(fs_encoding)
codecs.lookup(fs_encoding)
if expected:
self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
elif sys.platform == 'win32':
expected = 'mbcs'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
def test_debugmallocstats(self):
# Test sys._debugmallocstats()
from test.script_helper import assert_python_ok
args = ['-c', 'import sys; sys._debugmallocstats()']
ret, out, err = assert_python_ok(*args)
self.assertIn(b"free PyDictObjects", err)
# The function has no parameter
self.assertRaises(TypeError, sys._debugmallocstats, True)
@unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
"sys.getallocatedblocks unavailable on this build")
def test_getallocatedblocks(self):
# Some sanity checks
with_pymalloc = sysconfig.get_config_var('WITH_PYMALLOC')
a = sys.getallocatedblocks()
self.assertIs(type(a), int)
if with_pymalloc:
self.assertGreater(a, 0)
else:
# When WITH_PYMALLOC isn't available, we don't know anything
# about the underlying implementation: the function might
# return 0 or something greater.
self.assertGreaterEqual(a, 0)
try:
# While we could imagine a Python session where the number of
# multiple buffer objects would exceed the sharing of references,
# it is unlikely to happen in a normal test run.
self.assertLess(a, sys.gettotalrefcount())
except AttributeError:
# gettotalrefcount() not available
pass
gc.collect()
b = sys.getallocatedblocks()
self.assertLessEqual(b, a)
gc.collect()
c = sys.getallocatedblocks()
self.assertIn(c, range(b - 50, b + 50))
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.int_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.support.unlink(test.support.TESTFN)
check_sizeof = test.support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
vsize = test.support.calcvobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
# but lists are
self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_default(self):
size = test.support.calcvobjsize
self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
def test_objecttypes(self):
# check all types defined in Objects/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# bool
check(True, vsize('') + self.longdigit)
# buffer
# XXX
# builtin_function_or_method
check(len, size('3P')) # XXX check layout
# bytearray
samples = [b'', b'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('n2Pi') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('nP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().__closure__[0], size('P'))
# code
check(get_cell().__code__, size('5i9Pi3P'))
check(get_cell.__code__, size('5i9Pi3P'))
def get_cell2(x):
def inner():
return x
return inner
check(get_cell2.__code__, size('5i9Pi3P') + 1)
# complex
check(complex(0,1), size('2d'))
# method_descriptor (descriptor object)
check(str.lower, size('3PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('3PP'))
# getset_descriptor (descriptor object)
import collections
check(collections.defaultdict.default_factory, size('3PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('3P2P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('n2P' + '2nPn' + 8*'n2P'))
longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(longdict, size('n2P' + '2nPn') + 16*struct.calcsize('n2P'))
# dictionary-keyiterator
check({}.keys(), size('P'))
# dictionary-valueiterator
check({}.values(), size('P'))
# dictionary-itemiterator
check({}.items(), size('P'))
# dictionary iterator
check(iter({}), size('P2nPn'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# BaseException
check(BaseException(), size('5Pb'))
# UnicodeEncodeError
check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
# UnicodeTranslateError
check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('n3P'))
# reverse
check(reversed(''), size('nP'))
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals + ncells + nfrees - 1
check(x, vsize('12P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('12P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('PP'))
# classmethod
check(bar, size('PP'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pb2P'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('Pn') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('nP'))
# int
check(0, vsize(''))
check(1, vsize('') + self.longdigit)
check(-1, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.int_info.bits_per_digit
check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# memoryview
check(memoryview(b''), size('Pnin 2P2n2i5P 3cPn'))
# module
check(unittest, size('PnPPP'))
# None
check(None, size(''))
# NotImplementedType
check(NotImplemented, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCapsule
# XXX
# rangeiterator
check(iter(range(1)), size('4l'))
# reverse
check(reversed(''), size('nP'))
# range
check(range(1), size('4P'))
check(range(66000), size('4P'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3n2P' + PySet_MINSIZE*'nP' + 'nP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('nP'))
check(frozenset(sample), s + newsize*struct.calcsize('nP'))
# setiterator
check(iter(set()), size('P3n'))
# slice
check(slice(0), size('3P'))
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# type
# static type: PyTypeObject
s = vsize('P2n15Pl4Pn9Pn11PIP')
check(int, s)
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs + 4P)
s = vsize('P2n15Pl4Pn9Pn11PIP') + struct.calcsize('34P 3P 10P 2P 4P')
# Separate block for PyDictKeysObject with 4 entries
s += struct.calcsize("2nPn") + 4*struct.calcsize("n2P")
# class
class newstyleclass(object): pass
check(newstyleclass, s)
# dict with shared keys
check(newstyleclass().__dict__, size('n2P' + '2nPn'))
# unicode
# each tuple contains a string and its expected character size
# don't put any static strings here, as they may contain
# wchar_t or UTF-8 representations
samples = ['1'*100, '\xff'*50,
'\u0100'*40, '\uffff'*100,
'\U00010000'*30, '\U0010ffff'*100]
asciifields = "nnbP"
compactfields = asciifields + "nPn"
unicodefields = compactfields + "P"
for s in samples:
maxchar = ord(max(s))
if maxchar < 128:
L = size(asciifields) + len(s) + 1
elif maxchar < 256:
L = size(compactfields) + len(s) + 1
elif maxchar < 65536:
L = size(compactfields) + 2*(len(s) + 1)
else:
L = size(compactfields) + 4*(len(s) + 1)
check(s, L)
# verify that the UTF-8 size is accounted for
s = chr(0x4000) # 4 bytes canonical representation
check(s, size(compactfields) + 4)
# compile() will trigger the generation of the UTF-8
# representation as a side effect
compile(s, "<stdin>", "eval")
check(s, size(compactfields) + 4 + 4)
# TODO: add check that forces the presence of wchar_t representation
# TODO: add check that forces layout of unicodefields
# weakref
import weakref
check(weakref.ref(int), size('2Pn2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pn2P'))
def test_pythontypes(self):
# check all types defined in Python/
size = test.support.calcobjsize
vsize = test.support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size('P'))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb is not None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test.support.run_unittest(SysModuleTest, SizeofTest)
if __name__ == "__main__":
test_main()
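# --- Illustrative sketch (not part of the CPython test suite) ---
# test_attributes() checks sys.hash_info.modulus with a quick Fermat test:
# pow(x, m-1, m) == 1 for many bases x. The standalone helper below restates that check;
# as the comment in the test notes, it is only a probable-prime test (Carmichael numbers
# can fool it), which is enough for a sanity check.
def fermat_probable_prime(m, bases=range(2, 100)):
    return all(pow(x, m - 1, m) == 1 for x in bases if x % m != 0)
# Example: on typical 64-bit builds sys.hash_info.modulus is 2**61 - 1 (a Mersenne prime),
# so fermat_probable_prime(2**61 - 1) returns True.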
|
integration_tests.py
|
from __future__ import print_function
import filecmp
from time import sleep
import unittest
import os
import sys
import urllib
import urllib2
import subprocess
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from openport.apps.keyhandling import get_or_create_public_key, create_new_key_pair
from openport.apps.openport_api import PortForwardResponse, request_port
from openport.services.logger_service import set_log_level, get_logger
from openport.services.crypt_service import get_token
from openport.services import osinteraction
from openport.apps.openport_api import request_open_port
import logging
import xmlrunner
from openport.common.share import Share
from openport.common.session import Session
from .test_utils import SimpleHTTPClient, TestHTTPServer, click_open_for_ip_link, check_tcp_port_forward
from .test_utils import start_openportit_session, start_openport_session, wait_for_response
from openport.services.utils import run_method_with_timeout
TOKEN = 'tokentest'
logger = get_logger(__name__)
class IntegrationTest(unittest.TestCase):
def setUp(self):
print(self._testMethodName)
set_log_level(logging.DEBUG)
#self.test_server = 'http://test.openport.be'
self.test_server = 'https://test2.openport.io'
self.osinteraction = osinteraction.getInstance()
def tearDown(self):
if hasattr(self, 'app'):
self.app.stop()
def test_start_share(self):
path = os.path.join(os.path.dirname(__file__), '../resources/logo-base.ico')
self.assertTrue(os.path.exists(path), 'file does not exist %s' % path)
share = self.get_share(path)
self.app = start_openportit_session(self, share)
click_open_for_ip_link(share.open_port_for_ip_link)
temp_file = os.path.join(os.path.dirname(__file__), 'testfiles', 'tmp',
os.path.basename(share.filePath) + get_token(3))
print('temp file: ' + temp_file)
self.downloadAndCheckFile(share, temp_file)
def test_start_session(self):
port_out = self.osinteraction.get_open_port()
out_session = Session()
out_session.local_port = port_out
out_session.server_session_token = None
out_app = None
try:
out_app = start_openport_session(self, out_session)
remote_host, remote_port, link = out_session.server, out_session.server_port, out_session.open_port_for_ip_link
click_open_for_ip_link(link)
print(remote_port)
sleep(10)
#sleep(1000)
check_tcp_port_forward(self, remote_host=remote_host, local_port=port_out, remote_port=remote_port)
finally:
if out_app:
out_app.stop()
def get_share(self, path):
share = Share()
share.filePath = path
share.token = TOKEN
return share
def downloadAndCheckFile(self, share, temp_file):
print("removing file %s" % temp_file)
if os.path.exists(temp_file):
os.remove(temp_file)
self.assertFalse(os.path.exists(temp_file))
print("file %s removed" % temp_file)
url = share.get_link()
print('downloading %s' % url)
try:
urllib.urlretrieve(url, temp_file)
except Exception as e:
print(e)
print("url %s downloaded to %s" % (url, temp_file))
self.assertTrue(os.path.exists(temp_file), 'the downloaded file does not exist')
self.assertTrue(filecmp.cmp(share.filePath, temp_file), 'the file compare did not succeed')
os.remove(temp_file)
# @unittest.skip("openport it not released")
# def test_multi_thread(self):
# path = os.path.join(os.path.dirname(__file__), 'testfiles/WALL_DANGER_SOFTWARE.jpg')
# share = self.get_share(path)
# self.app = start_openportit_session(self, share)
# sleep(3)
#
# temp_file_path = os.path.join(os.path.dirname(__file__), 'testfiles', 'tmp', os.path.basename(share.filePath))
# number_of_threads = 8 # Less than 10 for the brute force protection
# errors = []
#
# def download(file_path):
# try:
# self.downloadAndCheckFile(share, file_path)
# print "download successful: %s" % file_path
# except Exception as e:
# errors.append(e)
#
# threads = []
# for i in range(number_of_threads):
# threads.append(threading.Thread(target=download, args=['%s%s' % (temp_file_path, i)]))
# threads[-1].setDaemon(True)
# threads[-1].start()
#
# seen_multiple_files_at_the_same_time = False
#
# for j in range(6000):
# seen_one_file = False
# for i in range(number_of_threads):
# if os.path.exists('%s%s' % (temp_file_path, i)) and seen_one_file:
# seen_multiple_files_at_the_same_time = True
# break
# elif os.path.exists('%s%s' % (temp_file_path, i)):
# seen_one_file = True
# print 'seen one file from thread %s' % i
# if seen_multiple_files_at_the_same_time:
# break
#
# some_threads_are_still_running = False
# for thread in threads:
# if thread.isAlive():
# some_threads_are_still_running = True
# break
# if not some_threads_are_still_running:
# print "all threads stopped"
# break
# sleep(0.01)
#
# self.assertTrue(seen_multiple_files_at_the_same_time)
#
# if errors:
# self.fail('number of errors: %s First error: %s %s' % (len(errors), errors[0], errors))
def test_same_port(self):
path = os.path.join(os.path.dirname(__file__), '../logo-base.ico')
share = self.get_share(path)
self.app = start_openportit_session(self, share)
port = share.server_port
dict = request_port(
url='%s/api/v1/request-port' % self.test_server,
public_key=get_or_create_public_key(),
restart_session_token=share.server_session_token,
request_server_port=port
)
response = PortForwardResponse(dict)
self.assertEqual(port, response.remote_port)
dict = request_port(
url='%s/api/v1/request-port' % self.test_server,
public_key=get_or_create_public_key(),
restart_session_token='not the same token',
request_server_port=port
)
response = PortForwardResponse(dict)
self.assertNotEqual(port, response.remote_port)
def test_same_port_new_key(self):
logger.debug('getting key pair')
private_key, public_key = create_new_key_pair()
logger.debug('requesting port')
dictionary = request_port(
url='%s/api/v1/request-port' % self.test_server,
public_key=public_key
)
response = PortForwardResponse(dictionary)
self.assertNotEqual(None, response.open_port_for_ip_link)
logger.debug('requesting port')
dictionary2 = request_port(
url='%s/api/v1/request-port' % self.test_server,
public_key=public_key,
restart_session_token=response.session_token,
request_server_port=response.remote_port
)
response2 = PortForwardResponse(dictionary2)
self.assertEqual(response2.remote_port, response.remote_port)
logger.debug('requesting port')
dictionary3 = request_port(
url='%s/api/v1/request-port' % self.test_server,
public_key=public_key,
restart_session_token='not the same token',
request_server_port=response.remote_port
)
response3 = PortForwardResponse(dictionary3)
self.assertNotEqual(response3.remote_port, response.remote_port)
logger.debug('test done')
def test_long_key(self):
private_key_file = os.path.join(os.path.dirname(__file__), 'testfiles', 'tmp', 'id_rsa_tmp')
public_key_file = os.path.join(os.path.dirname(__file__), 'testfiles', 'tmp', 'id_rsa_tmp.pub')
logger.debug('getting key pair')
private_key, public_key = create_new_key_pair(4096)
with open(private_key_file, 'w') as f:
f.write(private_key)
with open(public_key_file, 'w') as f:
f.write(public_key)
port_out = self.osinteraction.get_open_port()
out_session = Session()
out_session.local_port = port_out
out_session.server_session_token = None
out_session.public_key_file = public_key_file
out_session.private_key_file = private_key_file
out_app = None
try:
out_app = start_openport_session(self, out_session)
remote_host, remote_port, link = out_session.server, out_session.server_port, out_session.open_port_for_ip_link
click_open_for_ip_link(link)
print(remote_port)
sleep(10)
#sleep(1000)
check_tcp_port_forward(self, remote_host=remote_host, local_port=port_out, remote_port=remote_port)
finally:
if out_app:
out_app.stop()
def test_new_key__not_clicking_open_for_ip_link(self):
private_key_file = os.path.join(os.path.dirname(__file__), 'testfiles', 'tmp', 'id_rsa_tmp')
public_key_file = os.path.join(os.path.dirname(__file__), 'testfiles', 'tmp', 'id_rsa_tmp.pub')
logger.debug('getting key pair')
private_key, public_key = create_new_key_pair()
with open(private_key_file, 'w') as f:
f.write(private_key)
with open(public_key_file, 'w') as f:
f.write(public_key)
path = os.path.join(os.path.dirname(__file__), '../resources/logo-base.ico')
self.assertTrue(os.path.exists(path), 'file does not exist %s' % path)
share = self.get_share(path)
share.private_key_file = private_key_file
share.public_key_file = public_key_file
self.app = start_openportit_session(self, share)
self.assertTrue(share.open_port_for_ip_link)
temp_file = os.path.join(os.path.dirname(__file__), 'testfiles', 'tmp',
os.path.basename(share.filePath) + get_token(3))
try:
urllib.urlretrieve(share.get_link(), temp_file)
self.fail('the download should have failed.')
except self.failureException as e:
raise e
except Exception as e:
print(e)
click_open_for_ip_link(share.open_port_for_ip_link)
sleep(5)
print ('temp file: ' + temp_file)
self.downloadAndCheckFile(share, temp_file)
def exceptionTest(self):
try:
raise ValueError
except (ValueError, TypeError):
print ("huray!")
def test_http_forward(self):
response = 'cha cha cha'
port = self.osinteraction.get_open_port()
s = self.start_http_server(port, response)
session = Session()
session.local_port = port
session.server_port = 80
session.server_session_token = None
session.http_forward = True
self.app = start_openport_session(self, session)
i=0
while i < 20 and not session.http_forward_address:
i += 1
sleep(1)
# remote_port = session.server_port
# self.assertEqual(80, remote_port)
remote_host = session.http_forward_address
print ('remote host:' + remote_host)
self.assertTrue('.u.' in remote_host, 'expect .u. in remote_host: %s' % remote_host)
c = SimpleHTTPClient()
actual_response = c.get('http://localhost:%s' % port)
self.assertEqual(actual_response, response.strip())
actual_response = c.get('http://%s' % remote_host)
self.assertEqual(actual_response, response.strip())
def test_http_forward__same_address(self):
response = 'cha cha cha'
port = self.osinteraction.get_open_port()
s = self.start_http_server(port, response)
session = Session()
session.local_port = port
session.server_session_token = None
session.http_forward = True
self.app = start_openport_session(self, session)
remote_host = session.http_forward_address
print ('remote host:' + remote_host)
self.assertTrue('.u.' in remote_host, 'expect .u. in remote_host: %s' % remote_host)
c = SimpleHTTPClient()
actual_response = c.get('http://localhost:%s' % port)
self.assertEqual(actual_response, response.strip())
actual_response = c.get('http://%s' % remote_host)
self.assertEqual(actual_response, response.strip())
session2 = Session()
session2.local_port = port
session2.server_session_token = None
session2.http_forward = True
session2.server_port = session.server_port
session2.server_session_token = session.server_session_token
self.app = start_openport_session(self, session2)
# self.assertEqual(session.server_port, session2.server_port)
self.assertEqual(session.http_forward_address, session2.http_forward_address)
def start_http_server(self, port, response):
s = TestHTTPServer(port)
s.reply(response)
s.runThreaded()
return s
def test_brute_force_blocked(self):
port = self.osinteraction.get_open_port()
expected_response = 'cha cha cha'
server1 = self.start_http_server(port, expected_response)
session = Session()
session.local_port = port
session.server_session_token = None
#session.http_forward = True
self.app = start_openport_session(self, session)
click_open_for_ip_link(session.open_port_for_ip_link)
link = session.get_link()
print ('link: %s' % link)
self.assertTrue(session.server_port > 1000)
c = SimpleHTTPClient()
actual_response = c.get('http://localhost:%s' % port)
self.assertEqual(actual_response, expected_response.strip())
i = -1
try:
for i in range(20):
print ("connection %s" % i)
actual_response = c.get('http://%s' % link)
self.assertEqual(actual_response, expected_response.strip())
except (urllib2.HTTPError, urllib2.URLError) as e:
print (e)
self.assertTrue(5 < i < 20, 'i should be around 10 but was %s' % i)
# check download on different port is still ok
port2 = self.osinteraction.get_open_port()
session2 = Session()
session2.local_port = port2
session2.server_session_token = None
server2 = self.start_http_server(port2, expected_response)
openport2 = start_openport_session(self, session2)
sleep(3)
print ('http://%s' % session2.get_link())
click_open_for_ip_link(session2.open_port_for_ip_link)
actual_response = c.get('http://%s' % session2.get_link())
self.assertEqual(actual_response, expected_response.strip())
server1.stop()
server2.stop()
openport2.stop_port_forward()
def test_brute_force_blocked__not_for_http_forward(self):
port = self.osinteraction.get_open_port()
response = 'cha cha cha'
s = self.start_http_server(port, response)
session = Session()
session.local_port = port
session.server_port = 80
session.server_session_token = None
session.http_forward = True
self.app = start_openport_session(self, session)
click_open_for_ip_link(session.open_port_for_ip_link)
link = session.http_forward_address
print ('link: %s' % link)
c = SimpleHTTPClient()
actual_response = c.get('http://localhost:%s' % port)
self.assertEqual(actual_response, response.strip())
i = -1
try:
for i in range(20):
print ("connection %s" % i)
actual_response = c.get('http://%s' % link)
self.assertEqual(actual_response, response.strip())
except (urllib2.HTTPError, urllib2.URLError) as e:
self.fail('url error on connection nr %s' % i)
def test_forward_tunnel(self):
port_out = self.osinteraction.get_open_port()
out_session = Session()
out_session.local_port = port_out
out_session.server_session_token = None
out_app, in_app = None, None
try:
out_app = start_openport_session(self, out_session)
remote_host, remote_port, link = out_session.server, out_session.server_port, out_session.open_port_for_ip_link
click_open_for_ip_link(link)
check_tcp_port_forward(self, remote_host=remote_host, local_port=port_out, remote_port=remote_port)
port_in = self.osinteraction.get_open_port()
logger.info('port_in: %s' % port_in)
in_session = Session()
in_session.forward_tunnel = True
in_session.server_port = out_session.server_port
in_session.local_port = port_in
in_app = start_openport_session(self, in_session)
sleep(10)
check_tcp_port_forward(self, remote_host='127.0.0.1', local_port=port_out, remote_port=port_in)
port_bad_in = self.osinteraction.get_open_port()
bad_session = Session()
bad_session.forward_tunnel = True
bad_session.server_port = out_session.server_port
bad_session.local_port = port_bad_in
keys = create_new_key_pair()
private_key_file = 'testfiles/tmp/tmp_key'
with open(private_key_file, 'w') as f:
f.write(keys[0])
public_key_file = 'testfiles/tmp/tmp_key.pub'
with open(public_key_file, 'w') as f:
f.write(keys[1])
bad_session.public_key_file = public_key_file
bad_session.private_key_file = private_key_file
fail = False
try:
in_app = start_openport_session(self, bad_session)
fail = True
except AssertionError:
pass
self.assertFalse(fail)
self.assertFalse(check_tcp_port_forward(self, remote_host='127.0.0.1', local_port=port_out, remote_port=port_bad_in, fail_on_error=False))
finally:
if out_app:
out_app.stop()
if in_app:
in_app.stop()
def test_rogue_ssh_sessions(self):
port = self.osinteraction.get_open_port()
port2 = self.osinteraction.get_open_port()
self.assertNotEqual(port, port2)
request_open_port(port, server=self.test_server)
command = ['/usr/bin/ssh', 'open@%s' % self.test_server.split('//')[1], '-R',
'%s:localhost:%s' % (port2, port2), 'wrong_session_token']
print (command)
p = subprocess.Popen(command,
bufsize=2048, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False)
failed = wait_for_response(lambda: p.poll() is not None, timeout=10, throw=False)
sleep(3)
output = self.osinteraction.non_block_read(p)
print (output)
self.assertTrue('remote port forwarding failed for listen port' in output[1])
self.assertFalse(failed)
def test_rogue_ssh_session__correct(self):
port = self.osinteraction.get_open_port()
response = request_open_port(port, server=self.test_server)
command = ['/usr/bin/ssh', 'open@%s' % self.test_server.split('//')[1], '-R',
'%s:localhost:%s' % (response.remote_port, port), response.session_token]
print (command)
p = subprocess.Popen(command,
bufsize=2048, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False)
run_method_with_timeout(lambda: wait_for_response(lambda: p.poll() is not None, timeout=10, throw=False), 10, raise_exception=False)
if p.returncode:
print (p.communicate())
self.assertEqual(p.returncode, None)
def test_rogue_ssh_session__correct__old_version(self):
port = self.osinteraction.get_open_port()
response = request_open_port(port, server=self.test_server, client_version='0.9.3')
command = ['/usr/bin/ssh', 'open@%s' % self.test_server.split('//')[1], '-R',
'%s:localhost:%s' % (response.remote_port, port)] # No response.session_token!
print (command)
p = subprocess.Popen(command,
bufsize=2048, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
shell=False)
run_method_with_timeout(lambda: wait_for_response(lambda: p.poll() is not None, timeout=10, throw=False), 10, raise_exception=False)
if p.returncode is not None:
print (p.communicate())
self.assertEqual(p.returncode, None)
def test_request_restart_while_still_running(self):
port_out = self.osinteraction.get_open_port()
session = Session()
session.local_port = port_out
session.server_session_token = None
openport = start_openport_session(self, session)
print('session started')
sleep(3)
session2 = Session()
session2.local_port = port_out
session2.server_session_token = session.server_session_token
openport2 = start_openport_session(self, session2)
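# --- Illustrative sketch (not used by the tests above) ---
# Several tests poll for a condition with ad-hoc loops, e.g. test_http_forward waits up to
# 20 seconds for session.http_forward_address to be set. The helper below captures that
# pattern in one place; it is a hypothetical refactoring, not something the openport test
# utilities provide under this name.
def poll_until(condition, timeout=20, interval=1):
    waited = 0
    while waited < timeout:
        if condition():
            return True
        sleep(interval)
        waited += interval
    return bool(condition())
# Usage in the spirit of test_http_forward:
#   poll_until(lambda: session.http_forward_address)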
if __name__ == '__main__':
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='test-reports'))
|
example18_iot_chime_n.py
|
#!/usr/bin/env python3
# coding: utf-8
# Example 18: IoT chime, WSGI version (with mutual exclusion for the chime sound)
port = 4                                     # GPIO pin number
ping_f = 554                                 # chime tone frequency 1
pong_f = 440                                 # chime tone frequency 2
from wsgiref.simple_server import make_server
from RPi import GPIO                         # Raspberry Pi GPIO module
from time import sleep                       # sleep function
from sys import argv                         # command-line arguments of this program
import threading                             # threading library
def chime():                                 # chime (run in a thread)
    mutex.acquire()                          # acquire the mutex (start of the exclusive section)
    pwm.ChangeFrequency(ping_f)              # change the PWM frequency
    pwm.start(50)                            # start PWM output at 50% duty cycle
    sleep(0.5)                               # wait 0.5 seconds
    pwm.ChangeFrequency(pong_f)              # change the PWM frequency
    sleep(0.5)                               # wait 0.5 seconds
    pwm.stop()                               # stop PWM output
    mutex.release()                          # release the mutex (end of the exclusive section)
def wsgi_app(environ, start_response):       # handler for incoming HTTP requests
    if environ['PATH_INFO'] == '/':          # when the request targets the root path
        thread = threading.Thread(target=chime)  # wrap the chime function in a thread
        thread.start()                       # start the chime thread
    ok = 'OK\r\n'                            # build the response message
    ok = ok.encode()                         # convert it to a byte string
    start_response('200 OK', [('Content-type', 'text/plain; charset=utf-8')])
    return [ok]                              # return the response message
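# --- Illustrative sketch (not called by this script) ---
# The point of this "exclusive control" version is that overlapping HTTP requests must not
# interleave their chime tones: chime() serializes itself with a threading.Lock. The function
# below shows the same locking pattern without any GPIO hardware, so it can be tried on a PC;
# it is an illustration only and is never invoked by the server code.
demo_lock = threading.Lock()                 # separate lock, so the sketch is self-contained
def chime_demo_without_gpio():
    with demo_lock:                          # only one simulated chime plays at a time
        print("ping (554 Hz)")
        sleep(0.5)
        print("pong (440 Hz)")
        sleep(0.5)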
print(argv[0])                               # print this program's name
if len(argv) >= 2:                           # when an argument is given
    port = int(argv[1])                      # use it as the GPIO pin number
GPIO.setmode(GPIO.BCM)                       # select BCM pin numbering
GPIO.setup(port, GPIO.OUT)                   # configure the GPIO pin as an output
pwm = GPIO.PWM(port, ping_f)                 # create a PWM instance for output
mutex = threading.Lock()                     # create the lock used for mutual exclusion
try:
    httpd = make_server('', 80, wsgi_app)    # create the HTTP server on TCP port 80
    print("HTTP port 80")                    # report the port that was acquired
except PermissionError:                      # if binding to port 80 is denied
    httpd = make_server('', 8080, wsgi_app)  # fall back to port 8080
    print("HTTP port 8080")                  # report the fallback port
try:
    httpd.serve_forever()                    # run the HTTP server
except KeyboardInterrupt:                    # on Ctrl-C
    print('\nKeyboardInterrupt')             # report the keyboard interrupt
    GPIO.cleanup(port)                       # return the GPIO pin to its unused state
    exit()                                   # end the program
|
utils.py
|
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, glob, os, numpy as np, math
from .sparseConvNetTensor import SparseConvNetTensor
from .metadata import Metadata
def toLongTensor(dimension, x):
if hasattr(x, 'type') and x.type() == 'torch.LongTensor':
return x
elif isinstance(x, (list, tuple)):
assert len(x) == dimension
return torch.LongTensor(x)
else:
return torch.LongTensor(dimension).fill_(x)
def optionalTensor(a, b):
return getattr(a, b) if hasattr(a, b) else torch.Tensor()
def optionalTensorReturn(a):
return a if a.numel() else None
def threadDatasetIterator(d):
try:
import queue
except BaseException:
import Queue as queue
import threading
def iterator():
def worker(i):
for k in range(i, len(d), 8):
q.put(d[k])
q = queue.Queue(16)
for i in range(8):
t = threading.Thread(target=worker, args=(i,))
t.start()
for _ in range(len(d)):
item = q.get()
yield item
q.task_done()
q.join()
return iterator
def concatenate_feature_planes(input):
output = SparseConvNetTensor()
output.metadata = input[0].metadata
output.spatial_size = input[0].spatial_size
output.features = torch.cat([i.features for i in input], 1)
return output
def add_feature_planes(input):
output = SparseConvNetTensor()
output.metadata = input[0].metadata
output.spatial_size = input[0].spatial_size
output.features = sum([i.features for i in input])
return output
def append_tensors(tensors):
spatial_size=tensors[0].spatial_size
dimension=len(spatial_size)
x=SparseConvNetTensor(
features=torch.cat([t.features for t in tensors],0),
metadata=Metadata(dimension),
spatial_size=spatial_size)
for t in tensors:
x.metadata.appendMetadata(t.metadata,spatial_size)
return x
class AddCoords(torch.nn.Module):
def forward(self, input):
output = SparseConvNetTensor()
if input.features.numel():
with torch.no_grad():
coords = input.get_spatial_locations()
d = (input.spatial_size.type_as(input.features)-1)/2
coords=coords[:,:-1].type_as(input.features)/ d[None,:] - 1
output.features = torch.cat([input.features,coords],1)
else:
output.features = input.features
output.metadata = input.metadata
output.spatial_size = input.spatial_size
return output
def compare_sparse(x, y):
cL,cR,L,R = x.metadata.compareSparseHelper(y.metadata, x.spatial_size)
if x.features.is_cuda:
cL=cL.cuda()
cR=cR.cuda()
L=L.cuda()
R=R.cuda()
e = 0
if cL.numel():
e += (x.features[cL]-y.features[cR]).pow(2).sum()
if L.numel():
e += x.features[L].pow(2).sum()
if R.numel():
e += y.features[R].pow(2).sum()
return e / (cL.numel() + L.numel() + R.numel())
def spectral_norm_svd(module):
w=module.weight
if w.ndimension()==3:
w=w.view(-1,w.size(2))
_,s,_=torch.svd(w)
return s[0]
def pad_with_batch_idx(x,idx): #add a batch index to the list of coordinates
return torch.cat([x,torch.LongTensor(x.size(0),1).fill_(idx)],1)
def batch_location_tensors(location_tensors):
a=[]
for batch_idx, lt in enumerate(location_tensors):
if lt.numel():
a.append(pad_with_batch_idx(lt,batch_idx))
return torch.cat(a,0)
def prepare_BLInput(l,f):
with torch.no_grad():
n=max([x.size(0) for x in l])
L=torch.empty(len(l),n,l[0].size(1)).fill_(-1)
F=torch.zeros(len(l),n,f[0].size(1))
for i, (ll, ff) in enumerate(zip(l,f)):
L[i,:ll.size(0),:].copy_(ll)
F[i,:ff.size(0),:].copy_(ff)
return [L,F]
def checkpoint_restore(model, exp_name, use_cuda=True):
if use_cuda:
model.cpu()
epoch = -1
f = sorted(glob.glob(exp_name+'/model*epoch*'+'.pth'))
if len(f) > 0:
checkpoint = f[-1]
print('Restore from ' + checkpoint)
model.load_state_dict(torch.load(checkpoint))
epoch = int(checkpoint[checkpoint.find('epoch')+5: checkpoint.find('.pth')])
else:
print('No existing model, starting training from scratch...')
if use_cuda:
model.cuda()
return epoch + 1
def is_power2(num):
return num != 0 and ((num & (num - 1)) == 0)
def has_only_one_nonzero_digit(num): #https://oeis.org/A037124
return num != 0 and (num/10**math.floor(math.log(num,10))).is_integer()
def random_rotation(dimension=3,allow_mirror=False):
r=torch.qr(torch.randn(dimension,dimension))[0]
f=torch.randint(2,(3,))
if f.sum()%2==0 and not allow_mirror:
f=1-f
return r*(2*f-1).float()
def squareroot_rotation(a):
import scipy.spatial
b=scipy.spatial.transform.Slerp(
[0,1],
scipy.spatial.transform.Rotation.from_dcm(torch.stack([torch.eye(3),a])))([0.5]).as_dcm()
return torch.from_numpy(b).float()[0]
def voxelize_pointcloud(xyz,rgb,average=True,accumulate=False):
if xyz.numel()==0:
return xyz, rgb
if average or accumulate:
xyz,inv,counts=np.unique(xyz.numpy(),axis=0,return_inverse=True,return_counts=True)
xyz=torch.from_numpy(xyz)
inv=torch.from_numpy(inv)
rgb_out=torch.zeros(xyz.size(0),rgb.size(1),dtype=torch.float32)
rgb_out.index_add_(0,inv,rgb)
if average:
rgb=rgb_out/torch.from_numpy(counts[:,None]).float()
return xyz, rgb
else:
xyz,idxs=np.unique(xyz,axis=0,return_index=True)
xyz=torch.from_numpy(xyz)
rgb=rgb[idxs]
return xyz, rgb
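# Usage sketch (hypothetical tensors): collapse duplicate voxel coordinates and
# average their colours,
#   xyz = torch.LongTensor([[0, 0, 0], [0, 0, 0], [1, 0, 0]])
#   rgb = torch.FloatTensor([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
#   xyz, rgb = voxelize_pointcloud(xyz, rgb, average=True)
#   # xyz -> [[0, 0, 0], [1, 0, 0]], rgb -> [[0.5, 0.5, 0.], [0., 0., 1.]]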
class checkpointFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, run_function, x_features, x_metadata, x_spatial_size):
ctx.run_function = run_function
ctx.save_for_backward(x_features, x_spatial_size)
ctx.x_metadata=x_metadata
with torch.no_grad():
y = run_function(
SparseConvNetTensor
(x_features, x_metadata, x_spatial_size))
return y.features
@staticmethod
def backward(ctx, grad_y_features):
x_features, x_spatial_size = ctx.saved_tensors
x_features = x_features.detach()
x_features.requires_grad = True
with torch.enable_grad():
y = ctx.run_function(
SparseConvNetTensor
(x_features, ctx.x_metadata, x_spatial_size))
torch.autograd.backward(y.features, grad_y_features,retain_graph=False)
return None, x_features.grad, None, None
def checkpoint101(run_function, x, down=1):
f=checkpointFunction.apply(run_function, x.features, x.metadata, x.spatial_size)
s=x.spatial_size//down
return SparseConvNetTensor(f, x.metadata, s)
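# Usage sketch (assumption: `block` is a module whose output spatial size equals the
# input size divided by `down`): like torch.utils.checkpoint, recompute the forward
# pass during backward to save memory, but for SparseConvNetTensor inputs/outputs,
#   y = checkpoint101(block, x, down=2)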
def matplotlib_cubes(ax, positions):
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15,15))
ax = fig.gca(projection='3d')
...
plt.show()
"""
try:
positions=positions.numpy()
# colors=colors.numpy()
X = np.array([[[0, 1, 0], [0, 0, 0], [1, 0, 0], [1, 1, 0]],
[[0, 0, 0], [0, 0, 1], [1, 0, 1], [1, 0, 0]],
[[1, 0, 1], [1, 0, 0], [1, 1, 0], [1, 1, 1]],
[[0, 0, 1], [0, 0, 0], [0, 1, 0], [0, 1, 1]],
[[0, 1, 0], [0, 1, 1], [1, 1, 1], [1, 1, 0]],
[[0, 1, 1], [0, 0, 1], [1, 0, 1], [1, 1, 1]]]).astype(np.float32)[None]-0.5
X=X+positions[:,None,None,:]
X.resize(X.shape[0]*6,4,3)
m=positions.min(0)
M=positions.max(0)+1
ax.set_xlim([m[0],M[0]])
ax.set_ylim([m[1],M[1]])
ax.set_zlim([m[2],M[2]])
ax.add_collection3d(Poly3DCollection(X))
    except Exception:
        print('matplotlib_cubes failed')
pass
ax.set_axis_off()
def matplotlib_planes(ax, positions,colors):
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
"""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(15,15))
ax = fig.gca(projection='3d')
...
plt.show()
"""
try:
positions=positions.numpy()
colors=colors.numpy()
X = np.array([[[0, -0.5, 0.5], [0, -0.5, -0.5], [0, 0.5, -0.5], [0, 0.5, 0.5]]]).astype(np.float32)[None]
X=X+positions[:,None,None,:]
X.resize(X.shape[0]*1,4,3)
m=positions.min(0)
M=positions.max(0)+1
ax.set_xlim([m[0],M[0]])
ax.set_ylim([m[1],M[1]])
ax.set_zlim([m[2],M[2]])
ax.add_collection3d(Poly3DCollection(X,
facecolors=np.repeat(colors,1, axis=0)))
    except Exception:
pass
ax.set_axis_off()
def visdom_scatter(vis, xyz, rgb, win='3d', markersize=3):
vis.scatter(
xyz,
opts={'markersize': markersize,'markercolor': rgb},
win=win)
|
manager.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import os
import sys
import json
import time
import operator
import itertools
import threading
import multiprocessing
from functools import partial
from functools import wraps
from .instance import LfInstance
from .cli import LfCli
from .utils import *
from .fuzzyMatch import FuzzyMatch
from .asyncExecutor import AsyncExecutor
from .devicons import (
webDevIconsGetFileTypeSymbol,
removeDevIcons
)
is_fuzzyEngine_C = False
try:
import fuzzyEngine
is_fuzzyEngine_C = True
cpu_count = multiprocessing.cpu_count()
lfCmd("let g:Lf_fuzzyEngine_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyEngine_C = 0")
is_fuzzyMatch_C = False
try:
import fuzzyMatchC
is_fuzzyMatch_C = True
lfCmd("let g:Lf_fuzzyMatch_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyMatch_C = 0")
if sys.version_info >= (3, 0):
def isAscii(str):
try:
str.encode("ascii")
return True
except UnicodeEncodeError:
return False
else:
def isAscii(str):
try:
str.decode("ascii")
return True
except UnicodeDecodeError:
return False
def modifiableController(func):
@wraps(func)
def deco(self, *args, **kwargs):
self._getInstance().buffer.options['modifiable'] = True
func(self, *args, **kwargs)
self._getInstance().buffer.options['modifiable'] = False
return deco
def catchException(func):
@wraps(func)
def deco(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except vim.error as e: # for neovim
if str(e) != "b'Keyboard interrupt'" and str(e) != 'Keyboard interrupt':
raise e
elif self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
except KeyboardInterrupt: # <C-C>, this does not work in vim
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
return deco
def ignoreEvent(events):
def wrapper(func):
@wraps(func)
def deco(self, *args, **kwargs):
try:
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = events
func(self, *args, **kwargs)
finally:
vim.options['eventignore'] = saved_eventignore
return deco
return wrapper
#*****************************************************
# Manager
#*****************************************************
class Manager(object):
def __init__(self):
self._autochdir = 0
self._instance = None
self._cli = LfCli()
self._explorer = None
self._content = []
self._index = 0
self._help_length = 0
self._show_help = False
self._selections = {}
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
self._highlight_ids = []
self._orig_line = ''
self._ctrlp_pressed = False
self._fuzzy_engine = None
self._result_content = []
self._reader_thread = None
self._timer_id = None
self._highlight_method = lambda : None
self._orig_cwd = None
self._cursorline_dict = {}
self._empty_query = lfEval("get(g:, 'Lf_EmptyQuery', 1)") == '1'
self._preview_winid = 0
self._is_previewed = False
self._match_ids = []
self._vim_file_autoloaded = False
self._arguments = {}
self._getExplClass()
#**************************************************************
# abstract methods, in fact all the functions can be overridden
#**************************************************************
def _getExplClass(self):
"""
this function MUST be overridden
        return the Explorer class
"""
raise NotImplementedError("Can't instantiate abstract class Manager "
"with abstract methods _getExplClass")
def _defineMaps(self):
pass
def _defineCommonMaps(self):
normal_map = lfEval("get(g:, 'Lf_NormalMap', {})")
if "_" not in normal_map:
return
for [lhs, rhs] in normal_map["_"]:
# If a buffer-local mapping does not exist, map it
maparg = lfEval("maparg('{}', 'n', 0, 1)".format(lhs))
if maparg == {} or maparg.get("buffer", "0") == "0" :
lfCmd("nnoremap <buffer> <silent> {} {}".format(lhs, rhs))
def _cmdExtension(self, cmd):
"""
        this function can be overridden to add a new cmd
        if it returns True, exit the input loop
"""
pass
@removeDevIcons
def _argaddFiles(self, files):
# It will raise E480 without 'silent!'
lfCmd("silent! argdelete *")
for file in files:
lfCmd("argadd %s" % escSpecial(file))
def _issue_422_set_option(self):
if lfEval("has('nvim')") == '1' and self._is_previewed:
lfCmd("silent! setlocal number<")
lfCmd("silent! setlocal relativenumber<")
lfCmd("silent! setlocal cursorline<")
lfCmd("silent! setlocal colorcolumn<")
lfCmd("silent! setlocal winhighlight<")
def _acceptSelection(self, *args, **kwargs):
pass
def _getDigest(self, line, mode):
"""
this function can be overridden
        specify which part of the line is to be processed and highlighted
Args:
mode: 0, return the full path
1, return the name only
2, return the directory name
"""
if mode == 0:
return line
elif mode == 1:
return getBasename(line)
else:
return getDirname(line)
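    # Hypothetical override sketch (not from the original plugin): an explorer whose
    # lines look like "path/to/file.py<TAB>description" could return only the file
    # name for mode 1 and only the directory for mode 2, so that fuzzy matching and
    # highlighting operate on just that part of each line.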
def _getDigestStartPos(self, line, mode):
"""
this function can be overridden
return the start position of the digest returned by _getDigest()
Args:
            mode: 0, return the start position of the full path
                  1, return the start position of the name only
                  2, return the start position of the directory name
"""
if mode == 0 or mode == 2:
return 0
else:
return lfBytesLen(getDirname(line))
def _createHelp(self):
return []
def _setStlMode(self, **kwargs):
if self._cli.isFuzzy:
if self._getExplorer().supportsNameOnly():
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
else:
mode = 'Fuzzy'
else:
mode = 'Regex'
modes = {"--nameOnly", "--fullPath", "--fuzzy", "--regexMode"}
for opt in kwargs.get("arguments", {}):
if opt in modes:
if opt == "--regexMode":
mode = 'Regex'
elif self._getExplorer().supportsNameOnly():
if opt == "--nameOnly":
mode = 'NameOnly'
elif opt == "--fullPath":
mode = 'FullPath'
else: # "--fuzzy"
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
elif opt in ("--nameOnly", "--fullPath", "--fuzzy"):
mode = 'Fuzzy'
break
self._getInstance().setStlMode(mode)
self._cli.setCurrentMode(mode)
def _beforeEnter(self):
self._resetAutochdir()
self._cur_buffer = vim.current.buffer
self._laststatus = lfEval("&laststatus")
if self._laststatus == '0':
lfCmd("set laststatus=2")
def _afterEnter(self):
if self._vim_file_autoloaded == False:
category = self._getExplorer().getStlCategory()
if category == 'Colorscheme':
category = 'Colors'
lfCmd("silent! call leaderf#%s#a_nonexistent_function()" % category)
self._vim_file_autoloaded = True
if "--nowrap" in self._arguments:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal nowrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:false)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = False
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal wrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:true)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = True
if self._getInstance().getWinPos() != 'popup':
self._defineMaps()
self._defineCommonMaps()
id = int(lfEval("matchadd('Lf_hl_cursorline', '.*\%#.*', 9)"))
self._match_ids.append(id)
else:
lfCmd("""call win_execute({}, 'let matchid = matchadd(''Lf_hl_cursorline'', ''.*\%#.*'', 9)')"""
.format(self._getInstance().getPopupWinId()))
id = int(lfEval("matchid"))
self._match_ids.append(id)
if is_fuzzyEngine_C:
self._fuzzy_engine = fuzzyEngine.createFuzzyEngine(cpu_count, False)
def _beforeExit(self):
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
self.clearSelections()
self._getExplorer().cleanup()
if self._fuzzy_engine:
fuzzyEngine.closeFuzzyEngine(self._fuzzy_engine)
self._fuzzy_engine = None
if self._reader_thread and self._reader_thread.is_alive():
self._stop_reader_thread = True
self._closePreviewPopup()
if self._getInstance().getWinPos() == 'popup':
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._match_ids = []
def _afterExit(self):
if self._laststatus == '0':
lfCmd("set laststatus=%s" % self._laststatus)
def _bangEnter(self):
self._preview_open = False
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
self._cli.hideCursor()
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._resetHighlights()
if self._cli.pattern and self._index == 0:
self._search(self._content)
if len(self._getInstance().buffer) < len(self._result_content):
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
def _bangReadFinished(self):
if self._preview_open == False and self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._previewResult(False)
self._preview_open = True
def _getList(self, pairs):
"""
this function can be overridden
return a list constructed from pairs
Args:
pairs: a list of tuple(weight, line, ...)
"""
return [p[1] for p in pairs]
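    # e.g. _getList([(90, 'foo.py'), (75, 'bar.py')]) -> ['foo.py', 'bar.py']
    # (the weights have already been used for sorting by the caller)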
def _getUnit(self):
"""
indicates how many lines are considered as a unit
"""
return 1
def _supportsRefine(self):
return False
def _previewInPopup(self, *args, **kwargs):
pass
def _closePreviewPopup(self):
if lfEval("has('nvim')") == '1':
if self._preview_winid:
if int(lfEval("nvim_win_is_valid(%d) == v:true" % self._preview_winid)):
lfCmd("noautocmd call nvim_win_close(%d, 1)" % self._preview_winid)
self._preview_winid = 0
else:
if self._preview_winid:
lfCmd("noautocmd call popup_close(%d)" % self._preview_winid)
self._preview_winid = 0
def _previewResult(self, preview):
if self._getInstance().getWinPos() == 'floatwin':
self._cli.buildPopupPrompt()
if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1':
if self._orig_line != self._getInstance().currentLine:
self._closePreviewPopup()
else:
return
if not self._needPreview(preview):
return
line = self._getInstance().currentLine
if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1':
line_nr = self._getInstance().window.cursor[0]
self._previewInPopup(line, self._getInstance().buffer, line_nr)
return
orig_pos = self._getInstance().getOriginalPos()
cur_pos = (vim.current.tabpage, vim.current.window, vim.current.buffer)
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = 'BufLeave,WinEnter,BufEnter'
try:
vim.current.tabpage, vim.current.window = orig_pos[:2]
line_nr = self._getInstance().window.cursor[0]
self._acceptSelection(line, self._getInstance().buffer, line_nr, preview=True)
lfCmd("augroup Lf_Cursorline")
lfCmd("autocmd! BufwinEnter <buffer> setlocal cursorline<")
lfCmd("augroup END")
finally:
if self._getInstance().getWinPos() != 'popup':
vim.current.tabpage, vim.current.window, vim.current.buffer = cur_pos
vim.options['eventignore'] = saved_eventignore
def _restoreOrigCwd(self):
if self._orig_cwd is None:
return
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
try:
if int(lfEval("&autochdir")) == 0 and lfGetCwd() != self._orig_cwd:
chdir(self._orig_cwd)
except:
if lfGetCwd() != self._orig_cwd:
chdir(self._orig_cwd)
def _needExit(self, line, arguments):
return True
def setArguments(self, arguments):
self._arguments = arguments
def getArguments(self):
return self._arguments
#**************************************************************
@ignoreEvent('BufWinEnter,BufEnter')
def _createPopupModePreview(self, title, source, line_nr, jump_cmd):
"""
Args:
source:
if the type is int, it is a buffer number
if the type is str, it is a file name
"""
self._is_previewed = True
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width <= 0:
maxwidth = int(lfEval("&columns"))//2
else:
maxwidth = min(width, int(lfEval("&columns")))
relative = 'editor'
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
lfCmd("let scratch_buffer = nvim_create_buf(0, 1)")
lfCmd("call setbufline(scratch_buffer, 1, content)")
lfCmd("call nvim_buf_set_option(scratch_buffer, 'bufhidden', 'wipe')")
float_window = self._getInstance().window
# row and col start from 0
float_win_row = int(float(lfEval("nvim_win_get_config(%d).row" % float_window.id)))
float_win_col = int(float(lfEval("nvim_win_get_config(%d).col" % float_window.id)))
float_win_height = int(float(lfEval("nvim_win_get_config(%d).height" % float_window.id)))
float_win_width= int(float(lfEval("nvim_win_get_config(%d).width" % float_window.id)))
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
popup_borders = lfEval("g:Lf_PopupBorders")
borderchars = [popup_borders[4], popup_borders[0], popup_borders[5], popup_borders[1],
popup_borders[6], popup_borders[2], popup_borders[7], popup_borders[3]]
if preview_pos.lower() == 'bottom':
anchor = "NW"
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
row = float_win_row + float_window.height + statusline_height
col = float_win_col
height = int(lfEval("&lines")) - row - 2
if height < 1:
return
width = float_window.width
borderchars[0] = ''
borderchars[1] = ''
borderchars[2] = ''
elif preview_pos.lower() == 'top':
anchor = "SW"
row = float_win_row - 1
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
row -= 1
col = float_win_col
height = row
if height < 1:
return
width = float_window.width
borderchars[4] = ''
borderchars[5] = ''
borderchars[6] = ''
elif preview_pos.lower() == 'right':
anchor = "NW"
row = float_win_row - 1
col = float_win_col + float_win_width
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
row -= 1
col += 2
height = self._getInstance().getPopupHeight() + 1
if width <= 0:
width = float_win_width
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
width = min(width, int(lfEval("&columns")) - col - 2)
else:
width = min(width, int(lfEval("&columns")) - col)
elif preview_pos.lower() == 'left':
anchor = "NE"
row = float_win_row - 1
col = float_win_col
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
row -= 1
col -= 2
height = self._getInstance().getPopupHeight() + 1
if width <= 0:
width = float_win_width
width = min(width, col)
else:
anchor = "SW"
start = int(lfEval("line('w0')")) - 1
end = int(lfEval("line('.')")) - 1
col_width = float_window.width - int(lfEval("&numberwidth")) - 1
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
row = float_win_row + delta_height
col = float_win_col + int(lfEval("&numberwidth")) + 1 + float_window.cursor[1]
height = row
width = maxwidth
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col,
"noautocmd": 1
}
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
config["border"] = borderchars
if isinstance(source, int):
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (source, str(config))))
else:
self._preview_winid = int(lfEval("nvim_open_win(scratch_buffer, 0, %s)" % str(config)))
lfCmd("let g:Lf_PreviewWindowID[%d] = %d" % (id(self), self._preview_winid))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'foldmethod', 'manual')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'foldcolumn', '0')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'signcolumn', 'no')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call nvim_win_set_option(%d, 'cursorlineopt', 'both')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'winhighlight', 'Normal:Lf_hl_popup_window')" % self._preview_winid)
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
if not isinstance(source, int):
lfCmd("doautocmd filetypedetect BufNewFile %s" % source)
lfCmd("silent! %foldopen!")
lfCmd("norm! zz")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
# lfCmd("redraw!") # maybe we don't need it, it makes the preview slow
else:
popup_window = self._getInstance().window
popup_pos = lfEval("popup_getpos(%d)" % popup_window.id)
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width <= 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
if preview_pos.lower() == 'bottom':
maxwidth = int(popup_pos["width"])
col = int(popup_pos["col"])
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
line = int(popup_pos["line"]) + int(popup_pos["height"]) + statusline_height
pos = "topleft"
maxheight = int(lfEval("&lines")) - line
if maxheight < 1:
return
elif preview_pos.lower() == 'top':
maxwidth = int(popup_pos["width"])
col = int(popup_pos["col"])
# int(popup_pos["line"]) - 1(exclude the first line) - 1(input window) - 1(title)
maxheight = int(popup_pos["line"]) - 3
if maxheight < 1:
return
pos = "botleft"
line = maxheight + 1
elif preview_pos.lower() == 'right':
col = int(popup_pos["col"]) + int(popup_pos["width"])
line = int(popup_pos["line"]) - 1
maxheight = self._getInstance().getPopupHeight()
pos = "topleft"
if width == 0:
maxwidth = int(popup_pos["width"])
maxwidth = min(maxwidth, int(lfEval("&columns")) - col + 1)
elif preview_pos.lower() == 'left':
col = int(popup_pos["col"]) - 1
line = int(popup_pos["line"]) - 1
maxheight = self._getInstance().getPopupHeight()
pos = "topright"
if width == 0:
maxwidth = int(popup_pos["width"])
maxwidth = min(maxwidth, col)
else: # cursor
lfCmd("""call win_execute(%d, "let numberwidth = &numberwidth")""" % popup_window.id)
col = int(popup_pos["core_col"]) + int(lfEval("numberwidth")) + popup_window.cursor[1]
lfCmd("""call win_execute(%d, "let delta_height = line('.') - line('w0')")""" % popup_window.id)
                # buffer line numbers start from 0, while line() starts from 1
start = int(lfEval("line('w0', %d)" % popup_window.id)) - 1
end = int(lfEval("line('.', %d)" % popup_window.id)) - 1
col_width = int(popup_pos["core_width"]) - int(lfEval("numberwidth"))
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
# int(popup_pos["core_line"]) - 1(exclude the first line) - 1(input window)
maxheight = int(popup_pos["core_line"]) + delta_height - 2
pos = "botleft"
line = maxheight + 1
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": pos,
"line": line,
"col": col,
"scrollbar": 0,
"padding": [0, 0, 0, 0],
"border": [1, 0, 0, 0],
"borderchars": [' '],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": "leaderf#popupModePreviewFilter",
}
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
options["borderchars"] = lfEval("g:Lf_PopupBorders")
options["maxwidth"] -= 2
options["minwidth"] -= 2
options["borderhighlight"] = ["Lf_hl_popupBorder"]
if preview_pos.lower() == 'bottom':
del options["title"]
options["border"] = [0, 0, 1, 0]
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
options["border"] = [0, 1, 1, 1]
elif preview_pos.lower() == 'top':
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
options["border"] = [1, 1, 0, 1]
elif preview_pos.lower() == 'right':
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
options["border"] = [1, 1, 1, 1]
options["line"] -= 1
# options["col"] += 1
options["maxheight"] += 1
options["minheight"] += 1
elif preview_pos.lower() == 'left':
if lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
options["border"] = [1, 1, 1, 1]
options["line"] -= 1
# options["col"] -= 1
options["maxheight"] += 1
options["minheight"] += 1
elif preview_pos.lower() == 'cursor' and maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
if isinstance(source, int):
lfCmd("noautocmd silent! let winid = popup_create(%d, %s)" % (source, json.dumps(options)))
else:
lfCmd("silent! let winid = popup_create(content, %s)" % json.dumps(options))
lfCmd("call win_execute(winid, 'doautocmd filetypedetect BufNewFile %s')" % escQuote(source))
self._preview_winid = int(lfEval("winid"))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "call cursor(%d, 1)")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber colorcolumn= ')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal foldmethod=manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call win_execute(%d, 'setlocal cursorlineopt=both')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal wincolor=Lf_hl_popup_window')" % self._preview_winid)
if lfEval("get(g:, 'Lf_PopupShowFoldcolumn', 1)") == '0' or lfEval("get(g:, 'Lf_PopupShowBorder', 0)") == '1':
lfCmd("call win_execute(%d, 'setlocal foldcolumn=0')" % self._preview_winid)
else:
lfCmd("call win_execute(%d, 'setlocal foldcolumn=1')" % self._preview_winid)
lfCmd("call win_execute(%d, 'norm! zz')" % self._preview_winid)
@ignoreEvent('BufRead,BufReadPre,BufReadPost')
def _createPopupPreview(self, title, source, line_nr, jump_cmd=''):
"""
Args:
source:
if the type is int, it is a buffer number
if the type is str, it is a file name
"""
self._is_previewed = True
line_nr = int(line_nr)
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._createPopupModePreview(title, source, line_nr, jump_cmd)
return
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
width = int(lfEval("&columns"))//2
else:
width = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 3"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
relative = 'editor'
anchor = "SW"
row = maxheight
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
lfCmd("let scratch_buffer = nvim_create_buf(0, 1)")
lfCmd("call setbufline(scratch_buffer, 1, content)")
lfCmd("call nvim_buf_set_option(scratch_buffer, 'bufhidden', 'wipe')")
height = min(maxheight, buffer_len)
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'right')")
if preview_pos.lower() == 'center':
col = (int(lfEval("&columns")) - width) // 2
elif preview_pos.lower() == 'left':
col = 0
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns")) - width
else:
relative = 'cursor'
row = 0
col = 0
if maxheight < int(lfEval("&lines"))//2 - 2:
anchor = "NW"
if relative == 'cursor':
row = 1
else:
row = maxheight + 1
height = min(int(lfEval("&lines")) - maxheight - 3, buffer_len)
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col
}
if isinstance(source, int):
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (source, str(config))))
else:
self._preview_winid = int(lfEval("nvim_open_win(scratch_buffer, 0, %s)" % str(config)))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'foldmethod', 'manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call nvim_win_set_option(%d, 'cursorlineopt', 'both')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
if not isinstance(source, int):
lfCmd("doautocmd filetypedetect BufNewFile %s" % source)
lfCmd("silent! %foldopen!")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'right')")
if preview_pos.lower() == 'center':
col = 0
elif preview_pos.lower() == 'left':
col = 1
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns"))//2 + 2
else:
col = "cursor"
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 4"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": "botleft",
"line": "cursor-1",
"col": col,
"padding": [0, 0, 0, 1],
"border": [1, 0, 0, 0],
"borderchars": [' '],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": "leaderf#popupModePreviewFilter",
}
if maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
if isinstance(source, int):
lfCmd("noautocmd silent! let winid = popup_create(%d, %s)" % (source, json.dumps(options)))
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
lfCmd("silent! let winid = popup_create(content, %s)" % json.dumps(options))
lfCmd("call win_execute(winid, 'doautocmd filetypedetect BufNewFile %s')" % escQuote(source))
self._preview_winid = int(lfEval("winid"))
if self._current_mode == 'NORMAL':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#normalModePreviewFilter', [%d]))"
% (self._preview_winid, id(self)))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "exec 'norm! %dG'")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal foldmethod=manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call win_execute(%d, 'setlocal cursorlineopt=both')" % self._preview_winid)
def _needPreview(self, preview):
"""
Args:
preview:
if True, always preview the result no matter what `g:Lf_PreviewResult` is.
"""
if "--auto-preview" in self._arguments:
return True
preview_dict = {k.lower(): v for k, v in lfEval("g:Lf_PreviewResult").items()}
category = self._getExplorer().getStlCategory()
if not preview and int(preview_dict.get(category.lower(), 0)) == 0:
return False
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
self._orig_line = self._getInstance().currentLine
return False
elif self._getInstance().window.cursor[0] <= self._help_length:
self._orig_line = self._getInstance().currentLine
return False
if self._getInstance().empty() or (self._getInstance().getWinPos() != 'popup' and
vim.current.buffer != self._getInstance().buffer):
return False
if self._ctrlp_pressed == True:
return True
line = self._getInstance().currentLine
if self._orig_line == line and (self._getInstance().buffer.options['modifiable']
or self._getInstance().getWinPos() in ('popup', 'floatwin')):
return False
self._orig_line = self._getInstance().currentLine
return True
def _getInstance(self):
if self._instance is None:
self._instance = LfInstance(self, self._getExplorer().getStlCategory(),
self._cli,
self._beforeEnter,
self._afterEnter,
self._beforeExit,
self._afterExit)
return self._instance
def _createHelpHint(self):
help = []
if not self._show_help:
if lfEval("get(g:, 'Lf_HideHelp', 0)") == '0':
help.append('" Press <F1> for help')
help.append('" ---------------------------------------------------------')
else:
help += self._createHelp()
self._help_length = len(help)
orig_row = self._getInstance().window.cursor[0]
if self._getInstance().isReverseOrder():
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help[::-1])
self._getInstance().buffer.options['modifiable'] = False
buffer_len = len(self._getInstance().buffer)
if buffer_len < self._initial_count:
if "--nowrap" not in self._arguments:
self._getInstance().window.height = min(self._initial_count,
self._getInstance()._actualLength(self._getInstance().buffer))
else:
self._getInstance().window.height = buffer_len
elif self._getInstance().window.height < self._initial_count:
self._getInstance().window.height = self._initial_count
lfCmd("normal! Gzb")
self._getInstance().window.cursor = (orig_row, 0)
else:
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help, 0)
self._getInstance().buffer.options['modifiable'] = False
self._getInstance().window.cursor = (orig_row + self._help_length, 0)
self._getInstance().mimicCursor()
self._getInstance().refreshPopupStatusline()
def _hideHelp(self):
self._getInstance().buffer.options['modifiable'] = True
if self._getInstance().isReverseOrder():
orig_row = self._getInstance().window.cursor[0]
countdown = len(self._getInstance().buffer) - orig_row - self._help_length
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
self._getInstance().buffer[:] = self._getInstance().buffer[-self._initial_count:]
lfCmd("normal! Gzb")
if 0 < countdown < self._initial_count:
self._getInstance().window.cursor = (len(self._getInstance().buffer) - countdown, 0)
else:
self._getInstance().window.cursor = (len(self._getInstance().buffer), 0)
self._getInstance().setLineNumber()
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._help_length = 0
self._getInstance().refreshPopupStatusline()
def _inHelpLines(self):
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
return True
elif self._getInstance().window.cursor[0] <= self._help_length:
return True
return False
def _getExplorer(self):
if self._explorer is None:
self._explorer = self._getExplClass()()
return self._explorer
def _resetAutochdir(self):
if int(lfEval("&autochdir")) == 1:
self._autochdir = 1
lfCmd("set noautochdir")
else:
self._autochdir = 0
def _setAutochdir(self):
if self._autochdir == 1:
# When autochdir is set, Vim will change the current working directory
# to the directory containing the file which was opened or selected.
lfCmd("set autochdir")
def _toUpInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! k")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! k')" % (self._preview_winid))
def _toDownInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! j")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! j')" % (self._preview_winid))
def _toUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! k')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
adjust = False
if self._getInstance().isReverseOrder() and self._getInstance().getCurrentPos()[0] == 1:
adjust = True
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd("norm! k")
if adjust:
lfCmd("norm! zt")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _toDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! j')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder() \
and self._getInstance().getCurrentPos()[0] == self._getInstance().window.height:
self._setResultContent()
lfCmd("norm! j")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _pageUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageUp>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if self._getInstance().isReverseOrder():
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd('exec "norm! \<PageUp>"')
self._getInstance().setLineNumber()
def _pageDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageDown>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder():
self._setResultContent()
lfCmd('exec "norm! \<PageDown>"')
self._getInstance().setLineNumber()
def _leftClick(self):
if self._getInstance().getWinPos() == 'popup':
if int(lfEval("has('patch-8.1.2266')")) == 1:
if self._getInstance().getPopupWinId() == int(lfEval("v:mouse_winid")):
lfCmd("""call win_execute(%d, "exec v:mouse_lnum")"""
% (self._getInstance().getPopupWinId()))
lfCmd("""call win_execute(%d, "exec 'norm!'.v:mouse_col.'|'")"""
% (self._getInstance().getPopupWinId()))
exit_loop = False
elif self._getInstance().window.number == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._getInstance().setLineNumber()
self.clearSelections()
exit_loop = False
elif self._preview_winid == int(lfEval("v:mouse_winid")):
if lfEval("has('nvim')") == '1':
lfCmd("call win_gotoid(%d)" % self._preview_winid)
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._current_mode = 'NORMAL'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
exit_loop = True
else:
self.quit()
exit_loop = True
return exit_loop
def _search(self, content, is_continue=False, step=0):
if not is_continue:
self.clearSelections()
self._clearHighlights()
self._clearHighlightsPos()
self._cli.highlightMatches()
if not self._cli.pattern: # e.g., when <BS> or <Del> is typed
if self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
else:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
self._previewResult(False)
return
if self._cli.isFuzzy:
self._fuzzySearch(content, is_continue, step)
else:
self._regexSearch(content, is_continue, step)
self._previewResult(False)
def _filter(self, step, filter_method, content, is_continue,
use_fuzzy_engine=False, return_index=False):
""" Construct a list from result of filter_method(content).
Args:
step: An integer to indicate the number of lines to filter one time.
filter_method: A function to apply `content` as parameter and
return an iterable.
content: The list to be filtered.
"""
unit = self._getUnit()
step = step // unit * unit
length = len(content)
if self._index == 0:
self._cb_content = []
self._result_content = []
self._index = min(step, length)
cur_content = content[:self._index]
else:
if not is_continue and self._result_content:
if self._cb_content:
self._cb_content += self._result_content
else:
self._cb_content = self._result_content
if len(self._cb_content) >= step:
cur_content = self._cb_content[:step]
self._cb_content = self._cb_content[step:]
else:
cur_content = self._cb_content
left = step - len(self._cb_content)
self._cb_content = []
if self._index < length:
end = min(self._index + left, length)
cur_content += content[self._index:end]
self._index = end
if self._cli.isAndMode:
result, highlight_methods = filter_method(cur_content)
if is_continue:
self._previous_result = (self._previous_result[0] + result[0],
self._previous_result[1] + result[1])
result = self._previous_result
else:
self._previous_result = result
return (result, highlight_methods)
elif use_fuzzy_engine:
if return_index:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
result = (result[0], [cur_content[i] for i in result[1]])
else:
result = filter_method(source=cur_content)
if is_continue:
result = fuzzyEngine.merge(self._previous_result, result)
self._previous_result = result
else:
result = list(filter_method(cur_content))
if is_continue:
self._previous_result += result
result = self._previous_result
else:
self._previous_result = result
return result
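    # Note (added comment): _filter is called repeatedly while the user types or while
    # content is still being read; self._index remembers how far into `content` earlier
    # calls got, self._cb_content buffers previously matched lines so they are
    # re-filtered first when the pattern changes, and self._previous_result accumulates
    # results so each call only processes about `step` new lines.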
def _fuzzyFilter(self, is_full_path, get_weight, iterable):
"""
        return a generator, each item is a pair (weight, line)
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
pairs = ((get_weight(getDigest(line)), line) for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return (p for p in pairs if p[0] > MIN_WEIGHT)
def _fuzzyFilterEx(self, is_full_path, get_weight, iterable):
"""
return a tuple, (weights, indices)
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
iterable = itertools.islice(iterable, 0, None, self._getUnit())
pairs = ((get_weight(getDigest(line)), i) for i, line in enumerate(iterable))
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
result = [p for p in pairs if p[0] > MIN_WEIGHT]
if len(result) == 0:
weights, indices = [], []
else:
weights, indices = zip(*result)
return (list(weights), list(indices))
def _refineFilter(self, first_get_weight, get_weight, iterable):
getDigest = self._getDigest
triples = ((first_get_weight(getDigest(line, 1)),
get_weight(getDigest(line, 2)), line)
for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return ((i[0] + i[1], i[2]) for i in triples if i[0] > MIN_WEIGHT and i[1] > MIN_WEIGHT)
def _andModeFilter(self, iterable):
encoding = lfEval("&encoding")
cur_content = iterable
weight_lists = []
highlight_methods = []
for p in self._cli.pattern:
use_fuzzy_engine = False
if self._fuzzy_engine and isAscii(p) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=False, is_and_mode=True)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=False, is_and_mode=True)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=False, is_and_mode=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True, clear=False)
elif is_fuzzyMatch_C and isAscii(p):
pattern = fuzzyMatchC.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilterEx, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, clear=False)
else:
fuzzy_match = FuzzyMatch(p, encoding)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights,
clear=False)
if use_fuzzy_engine:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
else:
result = filter_method(cur_content)
for i, wl in enumerate(weight_lists):
weight_lists[i] = [wl[j] for j in result[1]]
weight_lists.append(result[0])
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
unit = self._getUnit()
result_content = [cur_content[i*unit:i*unit + unit] for i in result[1]]
cur_content = list(itertools.chain.from_iterable(result_content))
else:
cur_content = [cur_content[i] for i in result[1]]
result_content = cur_content
highlight_methods.append(highlight_method)
weights = [sum(i) for i in zip(*weight_lists)]
return ((weights, result_content), highlight_methods)
def _fuzzySearch(self, content, is_continue, step):
encoding = lfEval("&encoding")
use_fuzzy_engine = False
use_fuzzy_match_c = False
do_sort = "--no-sort" not in self._arguments
if self._cli.isAndMode:
filter_method = self._andModeFilter
elif self._cli.isRefinement:
if self._cli.pattern[1] == '': # e.g. abc;
if self._fuzzy_engine and isAscii(self._cli.pattern[0]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[0])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True, sort_results=do_sort)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[0], encoding)
if "--no-sort" in self._arguments:
getWeight = fuzzy_match.getWeightNoSort
else:
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
elif self._cli.pattern[0] == '': # e.g. ;abc
if self._fuzzy_engine and isAscii(self._cli.pattern[1]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[1])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False, sort_results=do_sort)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[1], encoding)
if "--no-sort" in self._arguments:
getWeight = fuzzy_match.getWeightNoSort
else:
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else: # e.g. abc;def
if is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
is_ascii_0 = True
pattern_0 = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight_0 = partial(fuzzyMatchC.getWeight, pattern=pattern_0, is_name_only=True)
getHighlights_0 = partial(fuzzyMatchC.getHighlights, pattern=pattern_0, is_name_only=True)
else:
is_ascii_0 = False
fuzzy_match_0 = FuzzyMatch(self._cli.pattern[0], encoding)
if "--no-sort" in self._arguments:
getWeight_0 = fuzzy_match_0.getWeightNoSort
else:
getWeight_0 = fuzzy_match_0.getWeight
getHighlights_0 = fuzzy_match_0.getHighlights
if is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
is_ascii_1 = True
pattern_1 = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight_1 = partial(fuzzyMatchC.getWeight, pattern=pattern_1, is_name_only=False)
getHighlights_1 = partial(fuzzyMatchC.getHighlights, pattern=pattern_1, is_name_only=False)
else:
is_ascii_1 = False
fuzzy_match_1 = FuzzyMatch(self._cli.pattern[1], encoding)
if "--no-sort" in self._arguments:
getWeight_1 = fuzzy_match_1.getWeightNoSort
else:
getWeight_1 = fuzzy_match_1.getWeight
getHighlights_1 = fuzzy_match_1.getHighlights
use_fuzzy_match_c = is_ascii_0 and is_ascii_1
filter_method = partial(self._refineFilter, getWeight_0, getWeight_1)
highlight_method = partial(self._highlightRefine, getHighlights_0, getHighlights_1)
else:
if self._fuzzy_engine and isAscii(self._cli.pattern) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File":
return_index = False
if self._cli.isFullPath:
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=do_sort)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_File,
param=fuzzyEngine.createParameter(1),
is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Rg":
return_index = False
if "--match-path" in self._arguments:
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=do_sort)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Rg,
param=fuzzyEngine.createRgParameter(self._getExplorer().displayMulti(),
self._getExplorer().getContextSeparator(), self._has_column),
is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Tag":
return_index = False
mode = 0 if self._cli.isFullPath else 1
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Tag,
param=fuzzyEngine.createParameter(mode), is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Gtags":
return_index = False
result_format = 1
if self._getExplorer().getResultFormat() in [None, "ctags-mod"]:
result_format = 0
elif self._getExplorer().getResultFormat() == "ctags-x":
result_format = 2
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Gtags,
param=fuzzyEngine.createGtagsParameter(0, result_format, self._match_path),
is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Line":
return_index = False
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Line,
param=fuzzyEngine.createParameter(1), is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=do_sort)
else:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=do_sort)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern, encoding)
if "--no-sort" in self._arguments:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeightNoSort)
elif self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights)
if self._cli.isAndMode:
if self._fuzzy_engine and isAscii(''.join(self._cli.pattern)):
step = 20000 * cpu_count
else:
step = 10000
pair, highlight_methods = self._filter(step, filter_method, content, is_continue)
if do_sort:
pairs = sorted(zip(*pair), key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
else:
self._result_content = pair[1]
elif use_fuzzy_engine:
if step == 0:
if return_index == True:
step = 30000 * cpu_count
else:
step = 60000 * cpu_count
_, self._result_content = self._filter(step, filter_method, content, is_continue, True, return_index)
else:
if step == 0:
if use_fuzzy_match_c:
step = 60000
elif self._getExplorer().supportsNameOnly() and self._cli.isFullPath:
step = 6000
else:
step = 12000
pairs = self._filter(step, filter_method, content, is_continue)
if "--no-sort" not in self._arguments:
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
if self._cli.isAndMode:
self._highlight_method = partial(self._highlight_and_mode, highlight_methods)
self._highlight_method()
else:
self._highlight_method = highlight_method
self._highlight_method()
if len(self._cli.pattern) > 1 and not is_continue:
lfCmd("redraw")
def _guessFilter(self, filename, suffix, dirname, icon, iterable):
"""
return a list, each item is a pair (weight, line)
"""
icon_len = len(icon)
return ((FuzzyMatch.getPathWeight(filename, suffix, dirname, line[icon_len:]), line) for line in iterable)
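# Fallback ranking used when the query is empty: score each candidate by how closely
# its path resembles the current buffer's path (shared basename, suffix and directory
# components weigh higher), so likely "sibling" files surface first.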
def _guessSearch(self, content, is_continue=False, step=0):
if self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
return
buffer_name = os.path.normpath(lfDecode(self._cur_buffer.name))
if lfEval("g:Lf_ShowRelativePath") == '1':
try:
buffer_name = os.path.relpath(buffer_name)
except ValueError:
pass
buffer_name = lfEncode(buffer_name)
dirname, basename = os.path.split(buffer_name)
filename, suffix = os.path.splitext(basename)
if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == "1":
icon = webDevIconsGetFileTypeSymbol(basename)
else:
icon = ''
if self._fuzzy_engine:
filter_method = partial(fuzzyEngine.guessMatch, engine=self._fuzzy_engine, filename=filename,
suffix=suffix, dirname=dirname, icon=icon, sort_results=True)
step = len(content)
_, self._result_content = self._filter(step, filter_method, content, is_continue, True)
else:
step = len(content)
filter_method = partial(self._guessFilter, filename, suffix, dirname, icon)
pairs = self._filter(step, filter_method, content, is_continue)
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
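# In and-mode each space-separated sub-pattern has its own set of highlight positions;
# cycle through the Lf_hl_match0 .. Lf_hl_match4 groups so every sub-pattern gets a
# distinct color.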
def _highlight_and_mode(self, highlight_methods):
self._clearHighlights()
for i, highlight_method in enumerate(highlight_methods):
highlight_method(hl_group='Lf_hl_match' + str(i % 5))
def _clearHighlights(self):
if self._getInstance().getWinPos() == 'popup':
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._highlight_ids = []
def _clearHighlightsPos(self):
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
def _resetHighlights(self):
self._clearHighlights()
unit = self._getUnit()
bottom = len(self._getInstance().buffer) - self._help_length
if self._cli.isAndMode:
highlight_pos_list = self._highlight_pos_list
else:
highlight_pos_list = [self._highlight_pos]
for n, highlight_pos in enumerate(highlight_pos_list):
hl_group = 'Lf_hl_match' + str(n % 5)
for i, pos in enumerate(highlight_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")"""
% (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
for i, pos in enumerate(self._highlight_refine_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
def _highlight(self, is_full_path, get_highlights, use_fuzzy_engine=False, clear=True, hl_group='Lf_hl_match'):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
if clear:
self._clearHighlights()
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
if use_fuzzy_engine:
self._highlight_pos = get_highlights(source=[getDigest(line)
for line in content[:highlight_number:unit]])
else:
# e.g., self._highlight_pos = [ [ [2,3], [6,2] ], [ [1,4], [7,6], ... ], ... ]
# where [2, 3] indicates the highlight starts at the 2nd column with the
# length of 3 in bytes
self._highlight_pos = [get_highlights(getDigest(line))
for line in content[:highlight_number:unit]]
if self._cli.isAndMode:
self._highlight_pos_list.append(self._highlight_pos)
bottom = len(content)
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 0 if is_full_path else 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")"""
% (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
def _highlightRefine(self, first_get_highlights, get_highlights):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
self._clearHighlights()
getDigest = self._getDigest
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
bottom = len(content)
self._highlight_pos = [first_get_highlights(getDigest(line, 1))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_match', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_match', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
self._highlight_refine_pos = [get_highlights(getDigest(line, 2))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_refine_pos):
start_pos = self._getDigestStartPos(content[unit*i], 2)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
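# Filter with Vim's own regex engine via g:LfNoErrMsgMatch(): the wrapper returns
# '-1' when a line does not match and '-2' when the pattern itself is invalid, in
# which case an empty iterator is returned.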
def _regexFilter(self, iterable):
def noErrMatch(text, pattern):
try:
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text, pattern))
except TypeError: # python 2
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
except ValueError: # python 3
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
except:
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
try:
if ('-2' == lfEval("g:LfNoErrMsgMatch('', '%s')" % escQuote(self._cli.pattern))):
return iter([])
else:
return (line for line in iterable
if noErrMatch(escQuote(self._getDigest(line, 0)), escQuote(self._cli.pattern)))
except vim.error:
return iter([])
def _regexSearch(self, content, is_continue, step):
if not is_continue and not self._cli.isPrefix:
self._index = 0
self._result_content = self._filter(8000, self._regexFilter, content, is_continue)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
def clearSelections(self):
for i in self._selections.values():
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % i)
self._selections.clear()
def _cleanup(self):
if not ("--recall" in self._arguments or lfEval("g:Lf_RememberLastSearch") == '1'):
self._pattern_bak = self._cli.pattern
self._cli.clear()
self._clearHighlights()
self._clearHighlightsPos()
self._help_length = 0
self._show_help = False
@modifiableController
def toggleHelp(self):
self._show_help = not self._show_help
if self._getInstance().isReverseOrder():
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._createHelpHint()
self.clearSelections()
self._resetHighlights()
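# Open a single result. 'mode' selects how: '' edits in the current window, 'h' opens
# a horizontal split, 'v' a vertical split; 't' is passed on to _acceptSelection and,
# if a new tab page was created, the tab is repositioned per g:Lf_TabpagePosition.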
def _accept(self, file, mode, *args, **kwargs):
if file:
if self._getExplorer().getStlCategory() != "Jumps":
lfCmd("norm! m'")
if self._getExplorer().getStlCategory() != "Help":
if mode == '':
pass
elif mode == 'h':
lfCmd("split")
elif mode == 'v':
lfCmd("bel vsplit")
kwargs["mode"] = mode
tabpage_count = len(vim.tabpages)
self._acceptSelection(file, *args, **kwargs)
for k, v in self._cursorline_dict.items():
if k.valid:
k.options["cursorline"] = v
self._cursorline_dict.clear()
self._issue_422_set_option()
if mode == 't' and len(vim.tabpages) > tabpage_count:
tab_pos = int(lfEval("g:Lf_TabpagePosition"))
if tab_pos == 0:
lfCmd("tabm 0")
elif tab_pos == 1:
lfCmd("tabm -1")
elif tab_pos == 3:
lfCmd("tabm")
def accept(self, mode=''):
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
lfCmd("norm! k")
return
else:
if self._getInstance().window.cursor[0] <= self._help_length:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! j')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("norm! j")
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
return
if self._getExplorer().getStlCategory() == "Rg":
if self._getInstance().currentLine == self._getExplorer().getContextSeparator():
return
if "--heading" in self._arguments and not re.match(r'^\d+[:-]', self._getInstance().currentLine):
return
self._cli.writeHistory(self._getExplorer().getStlCategory())
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
cwd = lfGetCwd()
if len(self._selections) > 0:
files = []
for i in sorted(self._selections.keys()):
files.append(self._getInstance().buffer[i-1])
if "--stayOpen" in self._arguments:
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if original buffer is a No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd()")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = lfGetCwd()
if mode == '' and self._getExplorer().getStlCategory() == "File":
self._accept(files[0], mode)
self._argaddFiles(files)
self._accept(files[0], mode)
lfCmd("doautocmd BufwinEnter")
else:
for file in files:
self._accept(file, mode)
if lfGetCwd() != orig_cwd:
dir_changed_by_autocmd = True
else:
dir_changed_by_autocmd = False
need_exit = True
else:
file = self._getInstance().currentLine
line_nr = self._getInstance().window.cursor[0]
need_exit = self._needExit(file, self._arguments)
if need_exit:
if "--stayOpen" in self._arguments:
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if original buffer is a No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd()")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = lfGetCwd()
self._accept(file, mode, self._getInstance().buffer, line_nr) # for bufTag
if lfGetCwd() != orig_cwd:
dir_changed_by_autocmd = True
else:
dir_changed_by_autocmd = False
if need_exit:
self._setAutochdir()
if dir_changed_by_autocmd == False:
self._restoreOrigCwd()
return None
else:
self._beforeExit()
self._content = vim.current.buffer[:]
return False
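# Move to the next result and open it without entering the LeaderF window; used by
# the '--next' argument (see startExplorer). Wraps around to the first result after
# the last one.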
def _jumpNext(self):
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] > len(instance.buffer) - self._help_length:
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
elif instance.window.cursor[0] == 1: # at the first line
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow > len(instance.buffer) - instance.helpLength:
instance.cursorRow = len(instance.buffer) - instance.helpLength
line_nr = instance.cursorRow
elif instance.cursorRow == 1: # at the last line
line_nr = instance.cursorRow
instance.cursorRow = len(instance.buffer) - instance.helpLength
else:
line_nr = instance.cursorRow
instance.cursorRow -= 1
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE"
% (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1,
len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length:
instance.window.cursor = (self._help_length + 1, 0)
elif instance.window.cursor[0] == len(instance.buffer): # at the last line
instance.window.cursor = (self._help_length + 1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow <= instance.helpLength:
instance.cursorRow = instance.helpLength + 1
line_nr = instance.cursorRow
elif instance.cursorRow == len(instance.buffer): # at the last line
line_nr = instance.cursorRow
instance.cursorRow = instance.helpLength + 1
else:
line_nr = instance.cursorRow
instance.cursorRow += 1
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
return True
def _jumpPrevious(self):
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] >= len(instance.buffer) - self._help_length:
instance.window.cursor = (1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow >= len(instance.buffer) - instance.helpLength:
instance.cursorRow = 1
line_nr = instance.cursorRow
else:
line_nr = instance.cursorRow
instance.cursorRow += 1
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE"
% (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1,
len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length + 1:
instance.window.cursor = (len(instance.buffer), 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow <= instance.helpLength + 1:
instance.cursorRow = len(instance.buffer)
line_nr = instance.cursorRow
else:
line_nr = instance.cursorRow
instance.cursorRow -= 1
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
def quit(self):
self._getInstance().exitBuffer()
self._setAutochdir()
self._restoreOrigCwd()
def refresh(self, normal_mode=True):
self._getExplorer().cleanup()
content = self._getExplorer().getFreshContent()
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
if normal_mode: # when called in Normal mode
self._getInstance().buffer.options['modifiable'] = True
self._clearHighlights()
self._clearHighlightsPos()
self.clearSelections()
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._iteration_end = True
if self._cli.pattern:
self._index = 0
self._search(self._content)
if normal_mode: # when called in Normal mode
self._createHelpHint()
self._resetHighlights()
self._getInstance().buffer.options['modifiable'] = False
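# Toggle selection of the line under the cursor (mouse click or <C-S>); selected lines
# are highlighted with the Lf_hl_selection group and collected in self._selections,
# keyed by line number.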
def addSelections(self):
nr = self._getInstance().window.number
if self._getInstance().getWinPos() != 'popup':
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
line_nr = self._getInstance().window.cursor[0]
if self._getInstance().isReverseOrder():
if line_nr > len(self._getInstance().buffer) - self._help_length:
lfCmd("norm! k")
return
else:
if line_nr <= self._help_length:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! j')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("norm! j")
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
return
if line_nr in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (self._selections[line_nr], self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % self._selections[line_nr])
del self._selections[line_nr]
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), line_nr))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % line_nr))
self._selections[line_nr] = id
def selectMulti(self):
orig_line = self._getInstance().window.cursor[0]
nr = self._getInstance().window.number
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
cur_line = int(lfEval("v:mouse_lnum"))
self.clearSelections()
for i in range(min(orig_line, cur_line), max(orig_line, cur_line)+1):
if i > self._help_length and i not in self._selections:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i)))
self._selections[i] = id
def selectAll(self):
line_num = len(self._getInstance().buffer)
if line_num > 300:
lfCmd("echohl Error | redraw | echo ' Too many files selected!' | echohl NONE")
lfCmd("sleep 1")
return
for i in range(line_num):
if i >= self._help_length and i+1 not in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), i+1))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i+1)))
self._selections[i+1] = id
def _gotoFirstLine(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("normal! gg")
def _readFinished(self):
pass
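# Main entry point: merge the arguments, fetch the content from the explorer (a plain
# list, an AsyncExecutor.Result read in a background thread, or another iterable
# drained in the idle callback) and start either the interactive prompt or bang mode.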
def startExplorer(self, win_pos, *args, **kwargs):
arguments_dict = kwargs.get("arguments", {})
if "--recall" in arguments_dict:
self._arguments.update(arguments_dict)
elif "--previous" in arguments_dict:
self._arguments["--previous"] = arguments_dict["--previous"]
elif "--next" in arguments_dict:
self._arguments["--next"] = arguments_dict["--next"]
else:
self.setArguments(arguments_dict)
self._cli.setArguments(arguments_dict)
self._cli.setNameOnlyFeature(self._getExplorer().supportsNameOnly())
self._cli.setRefineFeature(self._supportsRefine())
self._orig_line = ''
if self._getExplorer().getStlCategory() in ["Gtags"]:
if "--update" in self._arguments or "--remove" in self._arguments:
self._getExplorer().getContent(*args, **kwargs)
return
if "--next" in arguments_dict:
if self._jumpNext() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
elif "--previous" in arguments_dict:
if self._jumpPrevious() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
self._cleanup()
# lfCmd("echohl WarningMsg | redraw | echo ' searching ...' | echohl NONE")
self._getInstance().setArguments(self._arguments)
empty_query = self._empty_query and self._getExplorer().getStlCategory() in ["File"]
remember_last_status = "--recall" in self._arguments \
or lfEval("g:Lf_RememberLastSearch") == '1' and self._cli.pattern
if remember_last_status:
content = self._content
self._getInstance().useLastReverseOrder()
win_pos = self._getInstance().getWinPos()
else:
content = self._getExplorer().getContent(*args, **kwargs)
self._getInstance().setCwd(lfGetCwd())
if self._getExplorer().getStlCategory() in ["Gtags"] and "--auto-jump" in self._arguments \
and isinstance(content, list) and len(content) == 1:
mode = self._arguments["--auto-jump"][0] if len(self._arguments["--auto-jump"]) else ""
self._accept(content[0], mode)
return
self._index = 0
pattern = kwargs.get("pattern", "") or arguments_dict.get("--input", [""])[0]
if len(pattern) > 1 and (pattern[0] == '"' and pattern[-1] == '"'
or pattern[0] == "'" and pattern[-1] == "'"):
pattern = pattern[1:-1]
self._cli.setPattern(pattern)
self._result_content = []
self._cb_content = []
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
# clear the buffer only when the content is not a list
self._getInstance().enterBuffer(win_pos, not isinstance(content, list))
self._initial_count = self._getInstance().getInitialWinHeight()
self._getInstance().setStlCategory(self._getExplorer().getStlCategory())
self._setStlMode(**kwargs)
self._getInstance().setStlCwd(self._getExplorer().getStlCurDir())
if kwargs.get('bang', 0):
self._current_mode = 'NORMAL'
else:
self._current_mode = 'INPUT'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
if not remember_last_status:
self._gotoFirstLine()
self._start_time = time.time()
self._bang_start_time = self._start_time
self._bang_count = 0
self._getInstance().buffer.vars['Lf_category'] = self._getExplorer().getStlCategory()
self._read_content_exception = None
if isinstance(content, list):
self._is_content_list = True
self._read_finished = 2
if not remember_last_status:
if len(content[0]) == len(content[0].rstrip("\r\n")):
self._content = content
else:
self._content = [line.rstrip("\r\n") for line in content]
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlResultsCount(len(self._content))
if not empty_query:
self._getInstance().setBuffer(self._content[:self._initial_count])
if lfEval("has('nvim')") == '1':
lfCmd("redrawstatus")
self._callback = self._workInIdle
if not kwargs.get('bang', 0):
self._readFinished()
self.input()
else:
if not remember_last_status and not empty_query:
self._getInstance().appendBuffer(self._content[self._initial_count:])
elif remember_last_status and len(self._getInstance().buffer) < len(self._result_content):
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
lfCmd("echo")
if self._cli.pattern:
self._cli._buildPrompt()
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
if not remember_last_status and not self._cli.pattern and empty_query:
self._gotoFirstLine()
self._guessSearch(self._content)
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
elif isinstance(content, AsyncExecutor.Result):
self._is_content_list = False
self._callback = self._workInIdle
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
if self._getExplorer().getStlCategory() in ["Rg", "Gtags"]:
if "--append" in self.getArguments():
self._offset_in_content = len(self._content)
if self._pattern_bak:
self._getInstance().setBuffer(self._content, need_copy=False)
self._createHelpHint()
else:
self._getInstance().clearBuffer()
self._content = []
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
self._stop_reader_thread = False
self._reader_thread = threading.Thread(target=self._readContent, args=(content,))
self._reader_thread.daemon = True
self._reader_thread.start()
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
else:
self._is_content_list = False
self._callback = partial(self._workInIdle, content)
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
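# Runs in a daemon thread started by startExplorer(): keeps appending lines from the
# asynchronous result to self._content until the producer finishes or
# self._stop_reader_thread is set; any exception is stored for the main thread.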
def _readContent(self, content):
try:
for line in content:
self._content.append(line)
if self._stop_reader_thread:
break
else:
self._read_finished = 1
except Exception:
self._read_finished = 1
self._read_content_exception = sys.exc_info()
def _setResultContent(self):
if len(self._result_content) > len(self._getInstance().buffer):
self._getInstance().setBuffer(self._result_content)
elif self._index == 0:
self._getInstance().setBuffer(self._content, need_copy=True)
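# Periodic callback (driven by a timer or by the input loop) that drains newly read
# content, updates the statusline counters and re-runs the incremental search for the
# current pattern.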
@catchException
def _workInIdle(self, content=None, bang=False):
if self._read_content_exception is not None:
if bang == True:
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
lfPrintError(self._read_content_exception[1])
return
else:
raise self._read_content_exception[1]
if bang == False and self._preview_open == False and lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1' \
and not self._getInstance().empty():
self._previewResult(False)
self._preview_open = True
if self._is_content_list:
if self._cli.pattern and (self._index < len(self._content) or len(self._cb_content) > 0):
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
return
if content:
i = -1
for i, line in enumerate(itertools.islice(content, 20)):
self._content.append(line)
if i == -1 and self._read_finished == 0:
self._read_finished = 1
if self._read_finished > 0:
if self._read_finished == 1:
self._read_finished += 1
self._getExplorer().setContent(self._content)
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlRunning(False)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
if bang:
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] != b'':
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
self._getInstance().setBuffer(self._content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._content))
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < len(self._content) or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
if bang:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
cur_len = len(self._content)
if time.time() - self._start_time > 0.1:
self._start_time = time.time()
self._getInstance().setStlTotal(cur_len//self._getUnit())
self._getInstance().setStlRunning(True)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
else:
self._getInstance().setStlResultsCount(cur_len)
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < cur_len or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content[:cur_len], True, step)
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._getInstance().getWinPos() not in ('popup', 'floatwin') \
and time.time() - self._bang_start_time > 0.5:
self._bang_start_time = time.time()
lfCmd("echohl WarningMsg | redraw | echo ' searching %s' | echohl NONE" % ('.' * self._bang_count))
self._bang_count = (self._bang_count + 1) % 9
elif len(self._getInstance().buffer) < min(cur_len, self._initial_count):
self._getInstance().setBuffer(self._content[:self._initial_count])
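# Interactive input loop: keys typed on the LeaderF prompt are turned by self._cli
# into pseudo-commands (<Update>, <CR>, <Tab>, ...) which are dispatched here until a
# result is accepted, the prompt is quit, or the mode switches to NORMAL.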
@modifiableController
def input(self):
self._preview_open = False
self._current_mode = 'INPUT'
self._getInstance().hideMimicCursor()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().getWinPos() == 'popup':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')"
% (self._getInstance().getPopupWinId(), 'leaderf#PopupFilter'))
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self.clearSelections()
self._hideHelp()
self._resetHighlights()
if self._cli.pattern: # --input xxx or from normal mode to input mode
if self._index == 0: # --input xxx
self._search(self._content)
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"] \
and "--recall" not in self._arguments:
self._guessSearch(self._content)
for cmd in self._cli.input(self._callback):
cur_len = len(self._content)
cur_content = self._content[:cur_len]
if equal(cmd, '<Update>'):
if self._getInstance().getWinPos() == 'popup':
if self._getInstance()._window_object.cursor[0] > 1:
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
self._search(cur_content)
elif equal(cmd, '<Shorten>'):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<Mode>'):
self._setStlMode()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
if self._cli.pattern:
self._search(cur_content)
elif equal(cmd, '<C-K>'):
self._toUp()
self._previewResult(False)
elif equal(cmd, '<C-J>'):
self._toDown()
self._previewResult(False)
elif equal(cmd, '<Up>'):
if self._cli.previousHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<Down>'):
if self._cli.nextHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<LeftMouse>'):
if self._leftClick():
break
self._previewResult(False)
elif equal(cmd, '<2-LeftMouse>'):
self._leftClick()
if self.accept() is None:
break
elif equal(cmd, '<CR>'):
if self.accept() is None:
break
elif equal(cmd, '<C-X>'):
if self.accept('h') is None:
break
elif equal(cmd, '<C-]>'):
if self.accept('v') is None:
break
elif equal(cmd, '<C-T>'):
if self.accept('t') is None:
break
elif equal(cmd, '<C-\>'):
actions = ['', 'h', 'v', 't', 'dr']
action_count = len(actions)
selection = int( vim.eval(
'confirm("Action?", "&Edit\n&Split\n&Vsplit\n&Tabnew\n&Drop")' ) ) - 1
if selection < 0 or selection >= action_count:
selection = 0
action = actions[selection]
if self.accept(action) is None:
break
elif equal(cmd, '<Quit>'):
self._cli.writeHistory(self._getExplorer().getStlCategory())
self.quit()
break
elif equal(cmd, '<Tab>'): # switch to Normal mode
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._setResultContent()
self.clearSelections()
self._cli.hideCursor()
self._createHelpHint()
self._resetHighlights()
if self._getInstance().isReverseOrder() and self._cli.pattern \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
break
elif equal(cmd, '<F5>'):
self.refresh(False)
elif equal(cmd, '<C-LeftMouse>') or equal(cmd, '<C-S>'):
if self._getExplorer().supportsMulti():
self.addSelections()
elif equal(cmd, '<S-LeftMouse>'):
if self._getExplorer().supportsMulti():
self.selectMulti()
elif equal(cmd, '<C-A>'):
if self._getExplorer().supportsMulti():
self.selectAll()
elif equal(cmd, '<C-L>'):
self.clearSelections()
elif equal(cmd, '<C-P>'):
self._ctrlp_pressed = True
self._previewResult(True)
self._ctrlp_pressed = False
elif equal(cmd, '<PageUp>'):
self._pageUp()
self._previewResult(False)
elif equal(cmd, '<PageDown>'):
self._pageDown()
self._previewResult(False)
elif equal(cmd, '<C-Up>'):
self._toUpInPopup()
elif equal(cmd, '<C-Down>'):
self._toDownInPopup()
else:
if self._cmdExtension(cmd):
break
# vim: set ts=4 sw=4 tw=0 et :
|
pyshell.py
|
#! /usr/bin/env python3
import sys
if __name__ == "__main__":
sys.modules['idlelib.pyshell'] = sys.modules['__main__']
try:
from tkinter import *
except ImportError:
print("** IDLE can't import Tkinter.\n"
"Your Python may not be configured for Tk. **", file=sys.__stderr__)
raise SystemExit(1)
# Valid arguments for the ...Awareness call below are defined in the following.
# https://msdn.microsoft.com/en-us/library/windows/desktop/dn280512(v=vs.85).aspx
if sys.platform == 'win32':
try:
import ctypes
PROCESS_SYSTEM_DPI_AWARE = 1
ctypes.OleDLL('shcore').SetProcessDpiAwareness(PROCESS_SYSTEM_DPI_AWARE)
except (ImportError, AttributeError, OSError):
pass
import tkinter.messagebox as tkMessageBox
if TkVersion < 8.5:
root = Tk() # otherwise create root in main
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
tkMessageBox.showerror("Idle Cannot Start",
"Idle requires tcl/tk 8.5+, not %s." % TkVersion,
parent=root)
raise SystemExit(1)
from code import InteractiveInterpreter
import linecache
import os
import os.path
from platform import python_version
import re
import socket
import subprocess
from textwrap import TextWrapper
import threading
import time
import tokenize
import warnings
from idlelib.colorizer import ColorDelegator
from idlelib.config import idleConf
from idlelib import debugger
from idlelib import debugger_r
from idlelib.editor import EditorWindow, fixwordbreaks
from idlelib.filelist import FileList
from idlelib.outwin import OutputWindow
from idlelib import rpc
from idlelib.run import idle_formatwarning, PseudoInputFile, PseudoOutputFile
from idlelib.undo import UndoDelegator
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking user's code.
warning_stream = sys.__stderr__ # None, at least on Windows, if no console.
def idle_showwarning(
message, category, filename, lineno, file=None, line=None):
"""Show Idle-format warning (after replacing warnings.showwarning).
The differences are the formatter called, the file=None replacement,
which can be None, the capture of the consequence AttributeError,
and the output of a hard-coded prompt.
"""
if file is None:
file = warning_stream
try:
file.write(idle_formatwarning(
message, category, filename, lineno, line=line))
file.write(">>> ")
except (AttributeError, OSError):
pass # if file (probably __stderr__) is invalid, skip warning.
_warnings_showwarning = None
def capture_warnings(capture):
"Replace warning.showwarning with idle_showwarning, or reverse."
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = idle_showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
capture_warnings(True)
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(
idleConf.userdir, 'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
self.color_breakpoint_text()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
(None, None, None),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def color_breakpoint_text(self, color=True):
"Turn colorizing of breakpoint text on or off"
if self.io is None:
# possible due to update in restore_file_breaks
return
if color:
theme = idleConf.CurrentTheme()
cfg = idleConf.GetHighlight(theme, "break")
else:
cfg = {'foreground': '', 'background': ''}
self.text.tag_config('BREAK', cfg)
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now....
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text.
# Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
except OSError:
lines = []
try:
with open(self.breakpointPath, "w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except OSError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # make sure the "BREAK" tags set below will be visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
with open(self.breakpointPath, "r") as fp:
lines = fp.readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.CurrentTheme()
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
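# ModifiedInterpreter runs user code in a separate subprocess: rpcsubproc is the
# spawned interpreter process and rpcclt the RPC client used to talk to it over a
# loopback socket.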
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
_afterid = None
rpcclt = None
rpcsubproc = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
self.rpcsubproc = subprocess.Popen(self.subprocess_arglist)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
return [sys.executable] + w + ["-c", command, str(self.port)]
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except OSError:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
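# Tear down the current execution subprocess and spawn a fresh one, keeping the
# debugger GUI attached and reloading breakpoints; also annotates the restart in the
# shell window.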
def restart_subprocess(self, with_cwd=False, filename=''):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
debugger_r.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.terminate_subprocess()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
tag = 'RESTART: ' + (filename if filename else 'Shell')
halfbar = ((int(console.width) -len(tag) - 4) // 2) * '='
console.write("\n{0} {1} {0}".format(halfbar, tag))
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
if not filename:
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
debugger_r.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
if self._afterid is not None:
self.tkconsole.text.after_cancel(self._afterid)
try:
self.rpcclt.listening_sock.close()
except AttributeError: # no socket
pass
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.terminate_subprocess()
self.tkconsole.executing = False
self.rpcclt = None
def terminate_subprocess(self):
"Make sure subprocess is terminated"
try:
self.rpcsubproc.kill()
except OSError:
# process already terminated
return
else:
try:
self.rpcsubproc.wait()
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
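# Poll the RPC connection for the response to the currently active request; if the
# connection is lost the subprocess is restarted, and the poll reschedules itself via
# Tk's after() as long as the console is open.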
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, OSError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print(repr(what), file=console)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "pyshell.ModifiedInterpreter: Subprocess ERROR:\n"
print(errmsg, what, file=sys.__stderr__)
print(errmsg, what, file=console)
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self._afterid = self.tkconsole.text.after(
self.tkconsole.pollinterval, self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import debugobj_r
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = debugobj_r.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.tree import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.CurrentTheme()
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
with tokenize.open(filename) as fp:
source = fp.read()
if use_subprocess:
source = (f"__file__ = r'''{os.path.abspath(filename)}'''\n"
+ source + "\ndel __file__")
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
print('*** Error in script or command!\n'
'Traceback (most recent call last):',
file=self.tkconsole.stderr)
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
# at the moment, InteractiveInterpreter expects str
assert isinstance(source, str)
#if isinstance(source, str):
# from idlelib import iomenu
# try:
# source = source.encode(iomenu.encoding)
# except UnicodeError:
# self.tkconsole.resetoutput()
# self.write("Unsupported characters in input\n")
# return
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Override Interactive Interpreter method: Use Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
tkconsole = self.tkconsole
text = tkconsole.text
text.tag_remove("ERROR", "1.0", "end")
type, value, tb = sys.exc_info()
msg = getattr(value, 'msg', '') or value or "<no detail available>"
lineno = getattr(value, 'lineno', '') or 1
offset = getattr(value, 'offset', '') or 0
if offset == 0:
lineno += 1 #mark end of offending line
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
tkconsole.colorize_syntax_error(text, pos)
tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % msg)
tkconsole.showprompt()
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in list(c.keys()):
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec(code, self.locals)
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec(code, self.locals)
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
parent=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print("IDLE internal error in runcode()",
file=self.tkconsole.stderr)
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print("KeyboardInterrupt", file=self.tkconsole.stderr)
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
return self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
parent=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Connection Error",
"IDLE's subprocess didn't make connection.\n"
"See the 'Startup failure' section of the IDLE doc, online at\n"
"https://docs.python.org/3/library/idle.html#startup-failure",
parent=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
parent=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("window", "_Window"),
("help", "_Help"),
]
# Extend right-click context menu
rmenu_specs = OutputWindow.rmenu_specs + [
("Squeeze", "<<squeeze-current-text>>"),
]
allow_line_numbers = False
# New classes
from idlelib.history import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
OutputWindow.__init__(self, flist, None, None)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.sys_ps1 = sys.ps1 if hasattr(sys, 'ps1') else '>>> '
self.prompt_last_line = self.sys_ps1.split('\n')[-1]
self.prompt = self.sys_ps1 # Changes when debug active
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
squeezer = self.Squeezer(self)
text.bind("<<squeeze-current-text>>",
squeezer.squeeze_current_text_event)
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import iomenu
self.stdin = PseudoInputFile(self, "stdin", iomenu.encoding)
self.stdout = PseudoOutputFile(self, "stdout", iomenu.encoding)
self.stderr = PseudoOutputFile(self, "stderr", iomenu.encoding)
self.console = PseudoOutputFile(self, "console", iomenu.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
try:
# page help() text to shell.
import pydoc # import must be done here to capture i/o rebinding.
# XXX KBK 27Dec07 use text viewer someday, but must work w/o subproc
pydoc.pager = pydoc.plainpager
except:
sys.stderr = sys.__stderr__
raise
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
parent=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
debugger_r.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
self.prompt = self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = debugger_r.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
self.prompt = "[DEBUG ON]\n" + self.sys_ps1
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"Your program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "help", "copyright", "credits" or "license()" for more information.'
def begin(self):
self.text.mark_set("iomark", "insert")
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = ("==== No Subprocess ====\n\n" +
"WARNING: Running IDLE without a Subprocess is deprecated\n" +
"and will be removed in a later version. See Help/IDLE Help\n" +
"for details.\n\n")
sys.displayhook = rpc.displayhook
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.text.focus_force()
self.showprompt()
import tkinter
tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if this is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '' , s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
parent=self.text)
return
from idlelib.stackviewer import StackBrowser
StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
self.console.write(self.prompt)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def show_warning(self, msg):
width = self.interp.tkconsole.width
wrapper = TextWrapper(width=width, tabsize=8, expand_tabs=True)
wrapped_msg = '\n'.join(wrapper.wrap(msg))
if not wrapped_msg.endswith('\n'):
wrapped_msg += '\n'
self.per.bottom.insert("iomark linestart", wrapped_msg, "stderr")
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
def write(self, s, tags=()):
if isinstance(s, str) and len(s) and max(s) > '\uffff':
# Tk doesn't support outputting non-BMP characters
# Let's assume the printed string is not very long,
# find first non-BMP character and construct informative
# UnicodeEncodeError exception.
for start, char in enumerate(s):
if char > '\uffff':
break
raise UnicodeEncodeError("UCS-2", char, start, start+1,
'Non-BMP character not supported in Tk')
try:
self.text.mark_gravity("iomark", "right")
count = OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
raise ###pass # ### 11Aug07 KBK if we are expecting exceptions
# let's find out what they are and be specific.
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
return count
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super().rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert','<','iomark'):
return 'disabled'
return super().rmenu_check_paste()
def fix_x11_paste(root):
"Make paste replace selection on x11. See issue #5124."
if root._windowingsystem == 'x11':
for cls in 'Text', 'Entry', 'Spinbox':
root.bind_class(
cls,
'<<Paste>>',
'catch {%W delete sel.first sel.last}\n' +
root.bind_class(cls, '<<Paste>>'))
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (DEPRECATED,
see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print(sys.argv)" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print(sys.argv)" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
import getopt
from platform import system
from idlelib import testing # bool value
from idlelib import macosx
global flist, root, use_subprocess
capture_warnings(True)
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error as msg:
print("Error: %s\n%s" % (msg, usage_msg), file=sys.stderr)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
print(" Warning: running IDLE without a subprocess is deprecated.",
file=sys.stderr)
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print("No script file: ", script)
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if not dir in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if dir not in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# Setup root. Don't break user code run in IDLE process.
# Don't change environment when testing.
if use_subprocess and not testing:
NoDefaultRoot()
root = Tk(className="Idle")
root.withdraw()
from idlelib.run import fix_scaling
fix_scaling(root)
# set application icon
icondir = os.path.join(os.path.dirname(__file__), 'Icons')
if system() == 'Windows':
iconfile = os.path.join(icondir, 'idle.ico')
root.wm_iconbitmap(default=iconfile)
elif not macosx.isAquaTk():
ext = '.png' if TkVersion >= 8.6 else '.gif'
iconfiles = [os.path.join(icondir, 'idle_%d%s' % (size, ext))
for size in (16, 32, 48)]
icons = [PhotoImage(master=root, file=iconfile)
for iconfile in iconfiles]
root.wm_iconphoto(True, *icons)
# start editor and/or shell windows:
fixwordbreaks(root)
fix_x11_paste(root)
flist = PyShellFileList(root)
macosx.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
# filename is actually a directory, so skip it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosx.isAquaTk() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
else:
shell = flist.pyshell
# Handle remaining options. If any of these are set, enable_shell
# was set also, so shell must be true to reach here.
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
elif shell:
# If there is a shell window and no cmd or script in progress,
# check for problematic issues and print warning message(s) in
# the IDLE shell window; this is less intrusive than always
# opening a separate window.
# Warn if using a problematic OS X Tk version.
tkversionwarning = macosx.tkVersionWarning(root)
if tkversionwarning:
shell.show_warning(tkversionwarning)
# Warn if the "Prefer tabs when opening documents" system
# preference is set to "Always".
prefer_tabs_preference_warning = macosx.preferTabsPreferenceWarning()
if prefer_tabs_preference_warning:
shell.show_warning(prefer_tabs_preference_warning)
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
capture_warnings(False)
if __name__ == "__main__":
main()
capture_warnings(False) # Make sure turned off; see issue 18081
|
vk2tg.py
|
import html
from threading import Thread
import time
from os import path
import telebot
import yaml
import vk_api
CONFIG_FILE = 'config.yaml'
link_tmp = 'https://vk.com/club{group_id}?w=wall-{group_id}_{post_id}'
reply_tmp = '_reply ->_\n*{owner}*\n{text}\n{reply}\n'
post_tmp = '{text}\n{reply}\n_{owner}_\n{link}'
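# Illustrative note (not part of the original script): the templates above are plain
# str.format patterns; with hypothetical ids the link template expands like this:
#   link_tmp.format(group_id=1, post_id=2) -> 'https://vk.com/club1?w=wall-1_2'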
class ConfigLoader:
def __init__(self, filename):
self.FILENAME = filename
self.load()
def load(self):
with open(self.FILENAME) as f:
self.values = yaml.safe_load(f)  # safe_load: yaml.load() without a Loader is rejected by newer PyYAML
def save(self):
with open(self.FILENAME, 'w') as f:
yaml.dump(self.values, f)
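# Illustrative usage sketch (assumes a config.yaml containing the keys this script
# reads, e.g. 'vk_last_post_id'; the script itself goes through load_config() below):
#   cfg = ConfigLoader('config.yaml')
#   cfg.values['vk_last_post_id'] = 0
#   cfg.save()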
def load_config():
global CONFIG
CONFIG = ConfigLoader(CONFIG_FILE)
load_config()
vk_session = vk_api.VkApi(CONFIG.values['vk_login'], CONFIG.values['vk_password'])
vk_session.auth()
VK = vk_session.get_api()
BOT = telebot.TeleBot(CONFIG.values['bot_token'])
def get_post(post, reply=False):
post_owner = ''
if post['from_id'] < 0: # author -> group
post_owner = VK.groups.getById(group_id=-post['from_id'])[0]['name']
else: # author -> user
post_owner = '{} {}'.format(VK.users.get(user_ids=str(post['from_id']))[0]['first_name'], VK.users.get(user_ids=str(post['from_id']))[0]['last_name'])
post_reply_text = ''
if 'copy_history' in post:
post_reply_text = get_post(post['copy_history'][0], True)
if reply:
return reply_tmp.format(owner=post_owner, text=post['text'], reply=post_reply_text)
post_time = time.strftime('%d-%m-%Y %H:%M', time.localtime(post['date']))
post_link = link_tmp.format(group_id=-int(post['owner_id']), post_id=post['id'])
return post_tmp.format(owner=post_owner, text=post['text'], reply=post_reply_text, link='[{}]({})'.format(post_time, post_link))
def check_posts():
while True:
posts = []
last_id = CONFIG.values['vk_last_post_id']
for post in VK.wall.get(owner_id=CONFIG.values['vk_group_id'])['items']:
if post['id'] <= CONFIG.values['vk_last_post_id']:
if 'is_pinned' in post and post['is_pinned'] == 1:
continue
else:
break
if post['id'] > last_id:
last_id = post['id']
posts.append(get_post(post))
posts.reverse()
for post in posts:
BOT.send_message(CONFIG.values['tg_chat_id'], post, parse_mode='Markdown')
CONFIG.values['vk_last_post_id'] = last_id
CONFIG.save()
time.sleep(10*60) # 10 min
BOT.send_message(CONFIG.values['tg_chat_id'], 'Bot started')
thread = Thread(target=check_posts)
thread.start()
|
mimic_tts.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Mimic TTS, a local TTS backend.
This Backend uses the mimic executable to render text into speech.
"""
import os
import os.path
from os.path import exists, join, expanduser
import stat
import subprocess
from threading import Thread
from time import sleep
import xdg.BaseDirectory
from mycroft import MYCROFT_ROOT_PATH
from mycroft.api import DeviceApi
from mycroft.configuration import Configuration, BASE_FOLDER
from mycroft.util.download import download
from mycroft.util.log import LOG
from mycroft.tts.tts import TTS, TTSValidator
def get_mimic_binary():
"""Find the mimic binary, either from config or from PATH.
Returns:
(str) path of mimic executable
"""
config = Configuration.get().get("tts", {}).get("mimic")
bin_ = config.get("path",
os.path.join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic'))
if not os.path.isfile(bin_):
# Search for mimic on the path
import distutils.spawn
bin_ = distutils.spawn.find_executable("mimic")
return bin_
def get_subscriber_voices():
"""Get dict of mimic voices exclusive to subscribers.
Returns:
(dict) map of voices to custom Mimic executables.
"""
data_dir = expanduser(Configuration.get()['data_dir'])
old_path = join(data_dir, 'voices/mimic_tn')
if exists(old_path):
return {'trinity': old_path}
path = join(xdg.BaseDirectory.xdg_config_home, BASE_FOLDER, 'voices', 'mimic_tn')
return {'trinity': path}
def download_subscriber_voices(selected_voice):
"""Function to download all premium voices.
The function starts with the currently selected voice, if applicable.
"""
subscriber_voices = get_subscriber_voices()
def make_executable(dest):
"""Call back function to make the downloaded file executable."""
LOG.info('Making the downloaded voice binary executable')
# make executable
file_stat = os.stat(dest)
os.chmod(dest, file_stat.st_mode | stat.S_IEXEC)
# First download the selected voice if needed
voice_file = subscriber_voices.get(selected_voice)
if voice_file is not None and not exists(voice_file):
LOG.info('Voice doesn\'t exist, downloading')
url = DeviceApi().get_subscriber_voice_url(selected_voice)
# Check we got an url
if url:
dl_status = download(url, voice_file, make_executable)
# Wait for completion
while not dl_status.done:
sleep(1)
else:
LOG.debug('{} is not available for this architecture'
.format(selected_voice))
# Download the rest of the subscriber voices as needed
for voice in subscriber_voices:
voice_file = subscriber_voices[voice]
if not exists(voice_file):
url = DeviceApi().get_subscriber_voice_url(voice)
# Check we got an url
if url:
dl_status = download(url, voice_file, make_executable)
# Wait for completion
while not dl_status.done:
sleep(1)
else:
LOG.debug('{} is not available for this architecture'
.format(voice))
def parse_phonemes(phonemes):
"""Parse mimic phoneme string into a list of phone, duration pairs.
Args:
phonemes (bytes): phoneme output from mimic
Returns:
(list) list of phoneme duration pairs
"""
phon_str = phonemes.decode()
pairs = phon_str.split(' ')
return [pair.split(':') for pair in pairs if ':' in pair]
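# Illustrative example (assuming mimic emits space-separated phone:duration pairs):
#   parse_phonemes(b'pau:0.2 hh:0.1 ah:0.15')
#   -> [['pau', '0.2'], ['hh', '0.1'], ['ah', '0.15']]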
class Mimic(TTS):
"""TTS interface for local mimic v1."""
def __init__(self, lang, config):
super(Mimic, self).__init__(
lang, config, MimicValidator(self), 'wav',
ssml_tags=["speak", "ssml", "phoneme", "voice", "audio", "prosody"]
)
self.default_binary = get_mimic_binary()
self.clear_cache()
# Download subscriber voices if needed
self.subscriber_voices = get_subscriber_voices()
self.is_subscriber = DeviceApi().is_subscriber
if self.is_subscriber:
trd = Thread(target=download_subscriber_voices, args=[self.voice])
trd.daemon = True
trd.start()
def modify_tag(self, tag):
"""Modify the SSML to suite Mimic."""
ssml_conversions = {
'x-slow': '0.4',
'slow': '0.7',
'medium': '1.0',
'high': '1.3',
'x-high': '1.6',
'speed': 'rate'
}
for key, value in ssml_conversions.items():
tag = tag.replace(key, value)
return tag
@property
def args(self):
"""Build mimic arguments."""
subscriber_voices = self.subscriber_voices
if (self.voice in subscriber_voices and
exists(subscriber_voices[self.voice]) and self.is_subscriber):
# Use subscriber voice
mimic_bin = subscriber_voices[self.voice]
voice = self.voice
elif self.voice in subscriber_voices:
# Premium voice but bin doesn't exist, use ap while downloading
mimic_bin = self.default_binary
voice = 'ap'
else:
# Normal case use normal binary and selected voice
mimic_bin = self.default_binary
voice = self.voice
args = [mimic_bin, '-voice', voice, '-psdur', '-ssml']
stretch = self.config.get('duration_stretch', None)
if stretch:
args += ['--setf', 'duration_stretch={}'.format(stretch)]
return args
def get_tts(self, sentence, wav_file):
"""Generate WAV and phonemes.
Args:
sentence (str): sentence to generate audio for
wav_file (str): output file
Returns:
tuple ((str) file location, (str) generated phonemes)
"""
phonemes = subprocess.check_output(self.args + ['-o', wav_file,
'-t', sentence])
return wav_file, parse_phonemes(phonemes)
def viseme(self, phoneme_pairs):
"""Convert phoneme string to visemes.
Args:
phoneme_pairs (list): Phoneme output from mimic
Returns:
(list) list of tuples of viseme and duration
"""
visemes = []
for phon, dur in phoneme_pairs:
visemes.append((VISIMES.get(phon, '4'), float(dur)))
return visemes
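# Illustrative example for viseme() (values taken from the VISIMES table below):
#   viseme([('hh', '0.1'), ('m', '0.25')]) -> [('0', 0.1), ('4', 0.25)]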
class MimicValidator(TTSValidator):
"""Validator class checking that Mimic can be used."""
def validate_lang(self):
"""Verify that the language is supported."""
# TODO: Verify version of mimic can handle the requested language
def validate_connection(self):
"""Check that Mimic executable is found and works."""
mimic_bin = get_mimic_binary()
try:
subprocess.call([mimic_bin, '--version'])
except Exception as err:
if mimic_bin:
LOG.error('Failed to find mimic at: {}'.format(mimic_bin))
else:
LOG.error('Mimic executable not found')
raise Exception(
'Mimic was not found. Run install-mimic.sh to install it.') \
from err
def get_tts_class(self):
"""Return the TTS class associated with the validator."""
return Mimic
# Mapping based on Jeffers phoneme to viseme map, seen in table 1 from:
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.221.6377&rep=rep1&type=pdf
#
# Mycroft unit visemes based on images found at:
# http://www.web3.lu/wp-content/uploads/2014/09/visemes.jpg
#
# Mapping was created partially based on the "12 mouth shapes" visuals seen at:
# https://wolfpaulus.com/journal/software/lipsynchronization/
VISIMES = {
# /A group
'v': '5',
'f': '5',
# /B group
'uh': '2',
'w': '2',
'uw': '2',
'er': '2',
'r': '2',
'ow': '2',
# /C group
'b': '4',
'p': '4',
'm': '4',
# /D group
'aw': '1',
# /E group
'th': '3',
'dh': '3',
# /F group
'zh': '3',
'ch': '3',
'sh': '3',
'jh': '3',
# /G group
'oy': '6',
'ao': '6',
# /H group
'z': '3',
's': '3',
# /I group
'ae': '0',
'eh': '0',
'ey': '0',
'ah': '0',
'ih': '0',
'y': '0',
'iy': '0',
'aa': '0',
'ay': '0',
'ax': '0',
'hh': '0',
# /J group
'n': '3',
't': '3',
'd': '3',
'l': '3',
# /K group
'g': '3',
'ng': '3',
'k': '3',
# blank mouth
'pau': '4',
}
|
main.py
|
from termcolor import colored
from threading import Thread
from pytube import YouTube
from queue import Queue
from tqdm import tqdm
import soundfile
import librosa
import shutil
import pysrt
import os
def time2millis(time):
return ((time.hour * 60 + time.minute) * 60 + time.second) * 1000 + time.microsecond / 1000
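# Illustrative example (datetime.time shown only for the example; this script gets
# its time values from pysrt subtitle entries):
#   time2millis(datetime.time(0, 1, 30, 500000)) -> 90500.0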
def process():
while True:
ID = queue.get()
try:
link = f"https://www.youtube.com/watch?v={ID}"
path = f"data/data/{ID}"
os.mkdir(path)
youtube = YouTube(link)
exists = False
for captions in youtube.caption_tracks:
if captions.name.startswith("English"):
captions = captions.generate_srt_captions()
srt_path = os.path.join(path, 'subtitles.srt')
with open(srt_path, 'w') as f:
f.write(captions)
exists = True
break
if not exists:
print (colored(u"\U0001F534 " + ID + ": No English captions found", "red", attrs = ["bold"]))
shutil.rmtree(path)
continue
audio = youtube.streams.filter(only_audio = True, file_extension = 'mp4').first()
audio.download(output_path = path, filename = 'audio')
mp4_path = os.path.join(path, 'audio.mp4')
y, sr = librosa.load(mp4_path, sr = 22000)
subtitles = pysrt.open(srt_path)
start = subtitles[0].start
start_time = int(time2millis(start.to_time()) * sr / 1000)
text = subtitles[0].text
for line in subtitles[1:]:
end = line.start
end_time = int(time2millis(end.to_time()) * sr / 1000)
clip_path = os.path.join(path, str(start) + '-' + str(end))
os.mkdir(clip_path)
clip_text_path = os.path.join(clip_path, 'subtitles.txt')
with open(clip_text_path, 'w') as f:
f.write(text)
clip = y[start_time:end_time + 1]
start = end
start_time = end_time
text = line.text
soundfile.write(os.path.join(clip_path, 'audio.wav'), clip, sr)
os.remove(mp4_path)
os.remove(srt_path)
print (colored(u"\U0001F7E2 " + ID, "green", attrs = ["bold"]))
queue.task_done()
except Exception as e:
print (colored(u"\U0001F534 " + ID + ": " + str(e), "red", attrs = ["bold"]))
shutil.rmtree(path, ignore_errors = True)  # the clip directory may be missing or only partially written
queue.task_done()  # mark the item done even on failure so the queue bookkeeping stays consistent
workers = 8
queue = Queue(maxsize = workers)
for i in range(workers):
Thread(target = process).start()
video_ids = open('videos.txt', 'r')
while True:
try:
ID = video_ids.read(11)
if len(ID) < 11:  # end of file: stop instead of queueing empty ids forever
break
queue.put(ID)
except Exception as e:
print (str(e))
break
video_ids.close()
|
TwitchLinkPopcornEngine.py
|
import threading
import subprocess
import time
from shutil import rmtree
from Services.Twitch.TwitchPlaybackAccessTokens import *
class TwitchDownloaderNetworkError(Exception):
def __str__(self):
return "\nNetwork Error"
class TwitchDownloaderFileSystemError(Exception):
def __str__(self):
return "\nFile System Error"
class TwitchDownloader:
FILE_READER = re.compile("#EXTINF:(\d*\.?\d*),")
TIME_PROGRESS = re.compile(".*size=.*time=(.*)bitrate=.*speed=.*")
THREAD_LIMIT = 20
def __init__(self, ffmpeg, url, file_path, data_path, fast_download, update_tracking):
self.ffmpeg = ffmpeg
self.url = url
self.file_path = file_path
self.data_path = data_path
self.fast_download = fast_download
self.update_tracking = update_tracking
self.process = None
self.error = False
self.done = False
self.cancel = False
self.canceled = False
self.waiting = False
self.fileProgress = 0
self.timeProgress = "00:00:00"
self.checkPlaylist()
def checkPlaylist(self):
try:
data = requests.get(self.url)
if data.status_code != 200:
raise
except:
raise TwitchDownloaderNetworkError
playlist = re.sub("(.*)-(?:un|)muted\.ts", "\\1.ts", data.text)
try:
with open(self.data_path + "/" + "index-dvr.m3u8", "w") as file:
file.write(playlist)
except:
raise TwitchDownloaderFileSystemError
fileLength = list(map(float, re.findall(self.FILE_READER, data.text)))
self.totalFiles = len(fileLength)
self.totalSeconds = int(sum(fileLength))
self.reloadTotalTime()
self.downloadList = re.findall(".*\.ts", playlist)
def reloadTotalTime(self):
h = str(self.totalSeconds // 3600)
h = (2 - len(h)) * "0" + h
m = str(self.totalSeconds % 3600 // 60)
m = (2 - len(m)) * "0" + m
s = str(self.totalSeconds % 3600 % 60)
s = (2 - len(s)) * "0" + s
self.totalTime = h + ":" + m + ":" + s
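# Illustrative example of the zero-padded formatting above (assumed value):
#   totalSeconds = 3725  ->  totalTime = "01:02:05"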
def download(self):
downloader = threading.Thread(target=self.downloader)
downloader.start()
def downloader(self):
file_url = "/".join(self.url.split("/")[:-1])
downloadedList = []
while True:
if self.fast_download:
downloadThreads = []
runningThreads = []
for file in self.downloadList:
if file not in downloadedList:
downloadedList.append(file)
downloadThreads.append(threading.Thread(target=self.downloadFile, args=(file_url, file)))
for thread in downloadThreads:
if self.cancel:
break
while True:
if self.cancel:
break
time.sleep(0.1)
index = 0
while index < len(runningThreads):
if runningThreads[index].is_alive():
index += 1
else:
del runningThreads[index]
self.fileProgress += 1
if len(runningThreads) < self.THREAD_LIMIT:
break
runningThreads.append(thread)
thread.start()
while len(runningThreads) > 0:
time.sleep(0.1)
index = 0
while index < len(runningThreads):
if runningThreads[index].is_alive():
index += 1
else:
del runningThreads[index]
self.fileProgress += 1
else:
currentDownloadList = []
for file in self.downloadList:
if file not in downloadedList:
downloadedList.append(file)
currentDownloadList.append(file)
for file in currentDownloadList:
if self.cancel:
self.startCancel()
return
self.downloadFile(file_url, file)
self.fileProgress += 1
if self.cancel:
self.startCancel()
return
if not self.update_tracking:
break
self.waiting = True
for i in range(300):
if self.cancel:
self.startCancel()
return
time.sleep(1)
self.waiting = False
if self.cancel:
self.startCancel()
return
totalSeconds = self.totalSeconds
try:
self.checkPlaylist()
except:
break
if totalSeconds == self.totalSeconds:
break
if self.cancel:
self.startCancel()
else:
self.encoder()
def startCancel(self):
self.removeTemporaryFiles()
self.canceled = True
self.done = True
def downloadFile(self, file_url, file_name):
try:
self.fileDownloader(file_url, file_name)
except TwitchDownloaderNetworkError:
pass
except TwitchDownloaderFileSystemError:
self.error = True
self.cancel = True
def fileDownloader(self, file_url, file_name):
unmuted = file_name.replace(".ts", "-unmuted.ts")
muted = file_name.replace(".ts", "-muted.ts")
for check_file in [file_name, unmuted, muted]:
try:
response = requests.get(file_url + "/" + check_file)
if response.status_code != 200:
raise
except:
continue
try:
with open(self.data_path + "/" + file_name, "wb") as file:
file.write(response.content)
return
except:
raise TwitchDownloaderFileSystemError
raise TwitchDownloaderNetworkError
def encoder(self):
try:
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
self.process = subprocess.Popen([self.ffmpeg, "-y", "-i", self.data_path + "/" + "index-dvr.m3u8", "-c", "copy", self.file_path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, encoding="utf-8", startupinfo=startupinfo)
for line in self.process.stdout:
timeCheck = re.search(self.TIME_PROGRESS, line)
if timeCheck != None:
self.timeProgress = timeCheck.group(1).strip().split(".")[0]
except:
self.error = True
self.cancel = True
self.canceled = True
self.removeTemporaryFiles()
self.done = True
def removeTemporaryFiles(self):
try:
rmtree(self.data_path)
except:
pass
def cancelDownload(self):
self.cancel = True
if self.process != None:
if self.done == False:
self.process.kill()
self.canceled = True
while not self.canceled:
time.sleep(0.1)  # avoid a busy-wait while the downloader/encoder thread finishes cleanup
|
ar_259_测试_上线域名确认脚本.py
|
import os
import subprocess
import time
import easygui as g
import re
import threading
import sys
import requests
import pprint
"""
1. Get the path of the apk file
2. Use the path to get the package name
3. Uninstall that package
4. Install the app
   Create the log-enabling flag files
5. Start capturing the log first
6. Launch the app
7. Parse the log file to extract each domain
   Get the cf config via the package name, then derive the main domain
   Get the mo config via the package name
   Via the package name
"""
def openLog():
add_debug_list = [
"adb shell touch sdcard/abcxxxtestmodefilexxx",
"adb shell touch sdcard/logger.ChargerSdk.debug",
"adb shell touch sdcard/logger.CleanerSdk.debug",
"adb shell touch sdcard/logger.DefenderSdk.debug",
"adb shell touch sdcard/logger.CommonSdk.debug",
"adb shell touch sdcard/logger.CoverSdk.debug",
"adb shell touch sdcard/logger.innerSdk.debug",
"adb shell touch sdcard/logger.AnalyticsSdk.debug",
"adb shell touch /sdcard/stoooooorm",
"adb shell touch sdcard/moooooon",
"adb shell touch sdcard/appsurfacetestmode",
"adb shell touch sdcard/xxtestmodexx",
'adb shell touch sdcard/appflashtm'
]
for i in add_debug_list:
os.popen(i)
print("测试日志文件创建成功")
def getPackagInfo():
msg = '请选择你要检查的apk安装包'
title = '文件选择'
default = "*.apk"
filePath = g.fileopenbox(msg=msg, title=title, default=default)
if ' ' in filePath:
os.rename(filePath, filePath.replace(' ', '_'))
filePath = filePath.replace(' ', '_')
if "&" in filePath:
os.rename(filePath, filePath.replace('&', '_'))
filePath = filePath.replace('&', '_')
if "&&" in filePath:
os.rename(filePath, filePath.replace('&&', '_'))
filePath = filePath.replace('&&', '_')
print('选择的apk路径为:', filePath)
command = 'aapt dumpsys badging %s > packageInfo.txt' % filePath
packInfoFile = './packageInfo.txt'
handle = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
time.sleep(2)
reg_packageName = re.compile(r"package: name='(.+?)'")
reg_launchableActivity = re.compile(r"launchable-activity: name='(.+?)'")
log = ''
with open(packInfoFile, encoding='utf-8',errors='ignore')as f:
for i in f:
log += i
packageName = reg_packageName.search(log).group(1)
lanuchableActivity = reg_launchableActivity.search(log).group(1).strip()
print('选择的apk包名为:', packageName)
# print(lanuchableActivity)
return filePath, packageName, lanuchableActivity
def uninstallApp(packageName):
command = 'adb uninstall %s' % packageName
handle = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
return handle
def installapp(packagePath):
command = 'adb install %s' % packagePath
handle = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
return handle
def starApp(packageName, lanuchableActivity):
handle = subprocess.Popen(r'adb shell am start %s/%s' % (packageName, lanuchableActivity))
return handle
def judgeRunning(function):
while True:
if function.poll() != None:
print('进程已终止')
break
else:
continue
def get_log():
handle = subprocess.Popen("adb shell logcat >log.txt ", shell=True)
print('\n正在执行log截取,请等待20秒左右')
time.sleep(20)
result = subprocess.Popen("taskkill /F /T /PID %s" % str(handle.pid), stdout=subprocess.PIPE, shell=True)
# print('日志获取1执行完成')
def get_cf_conf(packageName):
# Regular expressions for the various configuration request URLs
reg_cashSDK_cf = re.compile(r'(http://cf.(.+)\..+moduleid=3000&.+%s.+|http://(\d.+)/m/.+moduleid=3000&.+%s.+)' % (
packageName,packageName), re.I)
reg_radicalSDK_cf = re.compile(
r'(http://cf.(.+)\..+moduleid=3300&.+%s.+|http://(\d.+)/m/.+moduleid=3300&.+%s.+)' % (
packageName,packageName), re.I)
reg_guidSDK_cf = re.compile(r'(http://cf.(.+)\..+moduleid=3100&.+%s.+|http://(\d.+)/m/.+moduleid=3100&.+%s.+)' % (
packageName, packageName), re.I)
reg_adSDK_cf = re.compile(
r'(http://mo.(.+)\..+/cr/.+pkg_name=%s&.+has_sim=false.+|http://(\d+.+)/cr/.+pkg_name=%s&.+has_sim=false.+)' % (packageName,packageName) ,re.I)
# Match the 'm' path segment in the cf link
reg_cf_new = re.compile(r'/(m)/')
reg_mo_new = re.compile(r'/(cr)/')
mainIndex = ''
result_cash_mainIndex = ''
result_guidSDK_mainIndex = ''
result_adSDK_mainIndex = ''
result_radicalSDK_mainIndex = ''
with open('log.txt', 'r', encoding='utf-8',errors='ignore')as f:
try:
log = f.read()
except Exception as e:
print(e)
# Print the monetization (cash) SDK's feature-config request info
try:
result_cash = reg_cashSDK_cf.search(log)
result_cash_link_str = result_cash.group(1) # original link
if result_cash.group(2)!=None:
result_cash_mainIndex = result_cash.group(2) # main domain
result3000_key = reg_cf_new.search(result_cash_link_str)
result3000_key_str = result3000_key.group()
cf_link_final = result_cash_link_str.replace(result3000_key_str, '/p/')
except:
print('\n获取变现sdk的功能配置请求失败')
else:
print('\n变现sdk的功能配置请求连接为:\n%s\n配置内容:' % result_cash_link_str)
try:
cf_json = requests.get(cf_link_final).json()
pprint.pprint(cf_json)
print()
except Exception as e:
print('\n解析变现sdk的功能配置请求失败,错误信息为:%s' % e)
# Print the aggressive (radical) SDK's feature-config request info
try:
result_radicalSDK = reg_radicalSDK_cf.search(log)
result_radicalSDK_link_str = result_radicalSDK.group(1) # original link
if result_radicalSDK.group(2) != None:
result_radicalSDK_mainIndex = result_radicalSDK.group(2) # main domain
result3300_key = reg_cf_new.search(result_radicalSDK_link_str)
result_key3300_str = result3300_key.group()
cf_link_final = result_radicalSDK_link_str.replace(result_key3300_str, '/p/')
except:
print('\n获取激进sdk的功能配置请求失败')
else:
print('\n激进sdk的功能配置请求连接为:\n%s\n配置内容:' % result_radicalSDK_link_str)
try:
cf_json = requests.get(cf_link_final).json()
pprint.pprint(cf_json)
print()
except Exception as e:
print('解析激进sdk的功能配置请求失败,错误信息为:%s' % e)
print()
# Print the guid SDK's feature-config request info
try:
result_guidSDK = reg_guidSDK_cf.search(log)
result_guidSDK_link_str = result_guidSDK.group(1) # original link
if result_guidSDK.group(2) != None:
result_guidSDK_mainIndex = result_guidSDK.group(2) # main domain
result3100_key = reg_cf_new.search(result_guidSDK_link_str)
result_key3100_str = result3100_key.group()
cf_link_final = result_cash_link_str.replace(result_key3100_str, '/p/')
except:
print('\n获取guidsdk功能配置请求失败')
else:
print('\nguidsdk功能配置请求连接为:\n%s\n配置内容:' % result_guidSDK_link_str)
try:
cf_json = requests.get(cf_link_final).json()
pprint.pprint(cf_json)
print()
except Exception as e:
print('\n解析guidsdk功能配置请求失败,错误信息为:%s' % e)
print()
# Print the ad SDK's feature-config request info
try:
result_adSDK = reg_adSDK_cf.search(log)
result_adSDK_link_str = result_adSDK.group(1) # original link
if result_adSDK.group(2) != None:
result_adSDK_mainIndex = result_adSDK.group(2) # main domain
resultAD_key = reg_mo_new.search(result_adSDK_link_str)
resultAD_key_str = resultAD_key.group()
cf_link_final = result_adSDK_link_str.replace(resultAD_key_str, '/v3/')
except:
print('\n获取adsdk功能配置请求失败')
else:
print('\nadsdk功能配置请求连接为:\n%s\n配置内容:' % result_adSDK_link_str)
try:
cf_json = requests.get(cf_link_final).json()
pprint.pprint(cf_json)
print()
except Exception as e:
print('\n解析adsdk功能配置请求失败,错误信息为:%s' % e)
print()
for i in result_cash_mainIndex, result_radicalSDK_mainIndex, result_guidSDK_mainIndex, result_adSDK_mainIndex:
if i != None:
mainIndex = i
break
else:
print('\n没有匹配到请求配置的相关连接')
print('主域名为:',mainIndex)
return mainIndex
def get_stt_link(product):
# Match the raw stt reporting links
reg_ne = re.compile(r'(http://stt.%s.+/nw/ne)|(http://.+)/nw/ne' % product, re.I)
reg_nx = re.compile(r'(http://stt.%s.+/nw/nx)|(http://.+)/nw/nx' % product, re.I)
reg_real = re.compile(r'({("g_act":"real_active").+?"g_cnt":1})', re.I)
reg_daily = re.compile(r'({("g_act":"daily_active").+?"g_cnt":1})', re.I)
reg_code = re.compile(r' {"code":.+{}}')
with open('log.txt', 'r', encoding='utf-8',errors='ignore')as f:
try:
log = f.read()
except Exception as e:
print(e)
try:
reg_real_str = reg_real.search(log).group(2)
print('真实日活验证成功:', reg_real_str)
except:
print('\n真实日活验证失败')
try:
reg_daily_str = reg_daily.search(log).group(2)
print('\n进程日活验证成功:', reg_daily_str)
except:
print('\n进程日活验证失败')
try:
reg_ne_str = reg_ne.search(log).group()
print('\n事件打点上报域名验证成功:', reg_ne_str)
except:
print('\n事件打点上报域名验证失败')
try:
reg_nx_str = reg_nx.search(log).group()
print('\n日活打点上报验证成功:', reg_nx_str)
except:
print('\n日活打点上报验失败')
try:
reg_code_str = reg_code.search(log).group()
print('\n成功上传打点日志::', reg_code_str)
except:
print('\n没有找到上传成功的日志')
print()
def get_longLive_versionName(packageName):
with open('log.txt', 'r', encoding='utf-8',errors='ignore')as f:
try:
log = f.read()
except Exception as e:
print(e)
try:
reg_longLive = re.compile(r'{.+g_pkgname":"(%s)".+"libVerName":"(.+?)",' % packageName)
except:
print('由于配置连接获取失败,无法正常匹配')
else:
try:
reg_longLive_str = reg_longLive.search(log).group(2)
print('保活SDK匹配日志为:', reg_longLive.search(log).group())
print('匹配的包名为:', reg_longLive.search(log).group(1))
print('保活SDK版本为::', reg_longLive_str)
except:
print('\n没有找到上传成功的日志')
# # test code
# handle=subprocess.Popen("ping www.baidu.com",shell=True,stdout=subprocess.PIPE)
# a=handle.stdout.read()
# a=a.decode()
# print(a)
#
if __name__ == '__main__':
openLog()
filePath, packageName, lanuchableActivity = getPackagInfo()
handle = uninstallApp(packageName)
judgeRunning(handle)
print('%s 卸载成功' % packageName)
print('%s 开始安装,请稍后' % packageName)
handle_install = installapp(filePath)
print('安装日志为:', handle_install.stdout.read().decode().strip('\r\n'))
while True:
handle = os.popen('adb shell pm list package')
if packageName in handle.read():
print('%s 安装成功' % packageName)
break
else:
continue
# if str(handle_install.stdout.read()).find('Success')!=-1:
# print('%s 安装成功' % packageName)
# else:
# input('程序安装失败!请检查是否没有授权安装程序,按回车退出脚本')
# sys.exit()
judgeRunning(handle_install)
# set up the worker threads
threads = []
# t1=threading.Thread(target=get_log1,args=(product,))
t1 = threading.Thread(target=get_log)
t2 = threading.Thread(target=starApp, args=[packageName, lanuchableActivity])
threads.append(t1)
threads.append(t2)
for t in threads:
t.start()
for t in threads:
t.join()
i = 2
for t in threads:
while 1:
if t.is_alive():
continue
else:
i -= 1
print('线程运行数为:', i)
break
print('线程关闭完毕')
while 1:
# if os.path.exists('.%slog.txt'%os.sep)and os.path.exists('.%slog2.txt'%os.sep):
if os.path.exists('.%slog.txt' % os.sep):
print('\n日志文件生成完毕')
print('\n开始检查日志文件')
break
else:
print('\n日志生成中,继续检查')
continue
time.sleep(3)
product = get_cf_conf(packageName)
get_stt_link(product)
get_longLive_versionName(packageName)
toast = '\n是否删除tmp文件(log.txt文件)y/n:'
while True:
choice = input(toast)
if choice == 'y':
print('执行删除缓存的log')
os.remove('./log.txt')
os.remove('./packageInfo.txt')
break
elif choice == 'n':
print('不执行删除')
break
else:
toast = '你输入的选项有误!请重新输入:'
continue
input('输入回车关闭窗口')
|
Misc.py
|
## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2019, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
# Import Modules
#
from __future__ import absolute_import
import sys
import string
import threading
import time
import re
import pickle
import array
import shutil
import filecmp
from random import sample
from struct import pack
import uuid
import subprocess
import tempfile
from collections import OrderedDict
import Common.LongFilePathOs as os
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from Common.DataType import *
from Common.BuildToolError import *
from CommonDataClass.DataClass import *
from Common.Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.LongFilePathSupport import CopyLongFilePath as CopyLong
from Common.LongFilePathSupport import LongFilePath as LongFilePath
from Common.MultipleWorkspace import MultipleWorkspace as mws
from CommonDataClass.Exceptions import BadExpression
from Common.caching import cached_property
import struct
ArrayIndex = re.compile("\[\s*[0-9a-fA-FxX]*\s*\]")
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
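# Illustrative example (not part of the original module): the pattern extracts the
# names inside ${...} placeholders, e.g.
#   gPlaceholderPattern.findall("${ARCH}/${MODULE_NAME}") -> ['ARCH', 'MODULE_NAME']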
## regular expressions for map file processing
startPatternGeneral = re.compile("^Start[' ']+Length[' ']+Name[' ']+Class")
addressPatternGeneral = re.compile("^Address[' ']+Publics by Value[' ']+Rva\+Base")
valuePatternGcc = re.compile('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$')
pcdPatternGcc = re.compile('^([\da-fA-Fx]+) +([\da-fA-Fx]+)')
secReGeneral = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
StructPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*$')
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
#
# If a module is built more than once with different PCDs or library classes
# a temporary INF file with same content is created, the temporary file is removed
# when build exits.
#
_TempInfs = []
def GetVariableOffset(mapfilepath, efifilepath, varnames):
""" Parse map file to get variable offset in current EFI file
@param mapfilepath Map file absolute path
@param efifilepath: EFI binary file full path
@param varnames iterable container whose elements are variable names to be searched
@return List whose elements are tuples of variable name and raw offset
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
firstline = lines[0].strip()
if re.match('^\s*Address\s*Size\s*Align\s*Out\s*In\s*Symbol\s*$', firstline):
return _parseForXcodeAndClang9(lines, efifilepath, varnames)
if (firstline.startswith("Archive member included ") and
firstline.endswith(" file (symbol)")):
return _parseForGCC(lines, efifilepath, varnames)
if firstline.startswith("# Path:"):
return _parseForXcodeAndClang9(lines, efifilepath, varnames)
return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcodeAndClang9(lines, efifilepath, varnames):
status = 0
ret = []
for line in lines:
line = line.strip()
if status == 0 and (re.match('^\s*Address\s*Size\s*Align\s*Out\s*In\s*Symbol\s*$', line) \
or line == "# Symbols:"):
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
# cannot pregenerate this RegEx since it uses varname from varnames.
m = re.match('^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m is not None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
""" Parse map file generated by GCC linker """
status = 0
sections = []
varoffset = []
for index, line in enumerate(lines):
line = line.strip()
        # state machine transition
if status == 0 and line == "Memory Configuration":
status = 1
continue
elif status == 1 and line == 'Linker script and memory map':
status = 2
continue
elif status ==2 and line == 'START GROUP':
status = 3
continue
# status handler
if status == 3:
m = valuePatternGcc.match(line)
if m is not None:
sections.append(m.groups(0))
for varname in varnames:
Str = ''
m = re.match("^.data.(%s)" % varname, line)
if m is not None:
m = re.match(".data.(%s)$" % varname, line)
if m is not None:
Str = lines[index + 1]
else:
Str = line[len(".data.%s" % varname):]
if Str:
m = pcdPatternGcc.match(Str.strip())
if m is not None:
varoffset.append((varname, int(m.groups(0)[0], 16), int(sections[-1][1], 16), sections[-1][0]))
if not varoffset:
return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
#redirection
redirection = 0
for efisec in efisecs:
for section in sections:
if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
redirection = int(section[1], 16) - efisec[1]
ret = []
for var in varoffset:
for efisec in efisecs:
if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
return ret
def _parseGeneral(lines, efifilepath, varnames):
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
varoffset = []
symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$-]+) +([\da-fA-F]+)', re.UNICODE)
for line in lines:
line = line.strip()
if startPatternGeneral.match(line):
status = 1
continue
if addressPatternGeneral.match(line):
status = 2
continue
if line.startswith("entry point at"):
status = 3
continue
if status == 1 and len(line) != 0:
m = secReGeneral.match(line)
assert m is not None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
for varname in varnames:
m = symRe.match(line)
assert m is not None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
# cannot pregenerate this RegEx since it uses varname from varnames.
m2 = re.match('^[_]*(%s)' % varname, sym_name)
if m2 is not None:
                    # found a binary PCD entry in the map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
if not varoffset: return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs is None or len(efisecs) == 0:
return []
ret = []
for var in varoffset:
index = 0
for efisec in efisecs:
index = index + 1
if var[1].strip() == efisec[0].strip():
ret.append((var[0], hex(efisec[2] + var[2])))
elif var[4] == index:
ret.append((var[0], hex(efisec[2] + var[2])))
return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
Filename = os.path.split(Path.File)[1]
if '.' in Filename:
Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
else:
Filename = BaseName + Path.BaseName
DbDir = os.path.split(GlobalData.gDatabasePath)[0]
#
# A temporary INF is copied to database path which must have write permission
# The temporary will be removed at the end of build
# In case of name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os.path.join(DbDir,
Filename)
RtPath = PathClass(Path.File, Workspace)
#
# Modify the full path to temporary path, keep other unchanged
#
# To build same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath.Path = TempFullPath
RtPath.BaseName = BaseName
RtPath.OriginalPath = Path
#
# If file exists, compare contents
#
if os.path.exists(TempFullPath):
with open(str(Path), 'rb') as f1, open(TempFullPath, 'rb') as f2:
if f1.read() == f2.read():
return RtPath
_TempInfs.append(TempFullPath)
shutil.copy2(str(Path), TempFullPath)
return RtPath
## Remove temporary created INFs whose paths were saved in _TempInfs
#
def ClearDuplicatedInf():
while _TempInfs:
File = _TempInfs.pop()
if os.path.exists(File):
os.remove(File)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0, 3, 1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0, 12, 2):
Result = Result + ', 0x' + GuidList[4][Index:Index + 2]
Result += '}}'
return Result
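## Illustrative usage sketch (editor addition, not part of the original module);
#  the helper name below is hypothetical and exists only for documentation.
def _GuidStringToGuidStructureStringExample():
    # Registry-format GUID (the FILE_GUID used in the ProcessDuplicatedInf notes above)
    # converted to C structure style:
    return GuidStringToGuidStructureString("0D1B936F-68F3-4589-AFCC-FB8B7AEBC836")
    # -> '{0x0D1B936F, 0x68F3, 0x4589, {0xAF, 0xCC, 0xFB, 0x8B, 0x7A, 0xEB, 0xC8, 0x36}}'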
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 16:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[3], 16),
int(guidValueList[2], 16),
int(guidValueList[1], 16),
int(guidValueList[0], 16),
int(guidValueList[5], 16),
int(guidValueList[4], 16),
int(guidValueList[7], 16),
int(guidValueList[6], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16),
int(guidValueList[11], 16),
int(guidValueList[12], 16),
int(guidValueList[13], 16),
int(guidValueList[14], 16),
int(guidValueList[15], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
if not GlobalData.gGuidCFormatPattern.match(GuidValue):
return ''
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
if Directory is None or Directory.strip() == "":
return True
try:
if not os.access(Directory, os.F_OK):
os.makedirs(Directory)
except:
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = os.getcwd()
os.chdir(Directory)
for File in os.listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
os.remove(File)
os.chdir(CurrentDirectory)
os.rmdir(Directory)
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True, FileLock=None):
# Convert to long file path format
File = LongFilePath(File)
if os.path.exists(File):
if IsBinaryFile:
try:
with open(File, "rb") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
else:
try:
with open(File, "r") as f:
if Content == f.read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
DirName = os.path.dirname(File)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
OpenMode = "w"
if IsBinaryFile:
OpenMode = "wb"
# use default file_lock if no input new lock
if not FileLock:
FileLock = GlobalData.file_lock
if FileLock:
FileLock.acquire()
if GlobalData.gIsWindows and not os.path.exists(File):
try:
with open(File, OpenMode) as tf:
tf.write(Content)
except IOError as X:
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache error]:fails to save file with error: %s" % (X))
else:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
finally:
if FileLock:
FileLock.release()
else:
try:
with open(File, OpenMode) as Fd:
Fd.write(Content)
except IOError as X:
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache error]:fails to save file with error: %s" % (X))
else:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)
finally:
if FileLock:
FileLock.release()
return True
## Copy source file only if it is different from the destination file
#
# This method is used to copy file only if the source file and destination
# file content are different. This is quite useful to avoid duplicated
# file writing.
#
# @param SrcFile The path of source file
# @param Dst The path of destination file or folder
#
# @retval True The two files content are different and the file is copied
# @retval False No copy really happen
#
def CopyFileOnChange(SrcFile, Dst, FileLock=None):
# Convert to long file path format
SrcFile = LongFilePath(SrcFile)
Dst = LongFilePath(Dst)
if os.path.isdir(SrcFile):
EdkLogger.error(None, FILE_COPY_FAILURE, ExtraData='CopyFileOnChange SrcFile is a dir, not a file: %s' % SrcFile)
return False
if os.path.isdir(Dst):
DstFile = os.path.join(Dst, os.path.basename(SrcFile))
else:
DstFile = Dst
if os.path.exists(DstFile) and filecmp.cmp(SrcFile, DstFile, shallow=False):
return False
DirName = os.path.dirname(DstFile)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
# use default file_lock if no input new lock
if not FileLock:
FileLock = GlobalData.file_lock
if FileLock:
FileLock.acquire()
try:
CopyLong(SrcFile, DstFile)
except IOError as X:
if GlobalData.gBinCacheSource:
EdkLogger.quiet("[cache error]:fails to copy file with error: %s" % (X))
else:
EdkLogger.error(None, FILE_COPY_FAILURE, ExtraData='IOError %s' % X)
finally:
if FileLock:
FileLock.release()
return True
## Retrieve and cache the real path name in file system
#
# @param Root The root directory of path relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
_CACHE_ = set()
_UPPER_CACHE_ = {}
def __init__(self, Root):
self._Root = Root
for F in os.listdir(Root):
self._CACHE_.add(F)
self._UPPER_CACHE_[F.upper()] = F
# =[] operator
def __getitem__(self, Path):
Path = Path[len(os.path.commonprefix([Path, self._Root])):]
if not Path:
return self._Root
if Path and Path[0] == os.path.sep:
Path = Path[1:]
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
UpperPath = Path.upper()
if UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
IndexList = []
LastSepIndex = -1
SepIndex = Path.find(os.path.sep)
while SepIndex > -1:
Parent = UpperPath[:SepIndex]
if Parent not in self._UPPER_CACHE_:
break
LastSepIndex = SepIndex
SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
if LastSepIndex == -1:
return None
Cwd = os.getcwd()
os.chdir(self._Root)
SepIndex = LastSepIndex
while SepIndex > -1:
Parent = Path[:SepIndex]
ParentKey = UpperPath[:SepIndex]
if ParentKey not in self._UPPER_CACHE_:
os.chdir(Cwd)
return None
if Parent in self._CACHE_:
ParentDir = Parent
else:
ParentDir = self._UPPER_CACHE_[ParentKey]
for F in os.listdir(ParentDir):
Dir = os.path.join(ParentDir, F)
self._CACHE_.add(Dir)
self._UPPER_CACHE_[Dir.upper()] = Dir
SepIndex = Path.find(os.path.sep, SepIndex + 1)
os.chdir(Cwd)
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
elif UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
return None
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gAllFiles[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gAllFiles[NewFile]
return NewFile
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
for P in PackageList:
GuidKeys = list(P.Guids.keys())
if Inffile and P._PrivateGuids:
if not Inffile.startswith(P.MetaFile.Dir):
GuidKeys = [x for x in P.Guids if x not in P._PrivateGuids]
if CName in GuidKeys:
return P.Guids[CName]
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
_REPEAT_START_FLAG = "BEGIN"
_REPEAT_END_FLAG = "END"
class Section(object):
_LIST_TYPES = [type([]), type(set()), type((0,))]
def __init__(self, TemplateSection, PlaceHolderList):
self._Template = TemplateSection
self._PlaceHolderList = []
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList:
self._SubSectionList = []
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
for PlaceHolder, Start, End in PlaceHolderList:
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
self._SubSectionList.append(TemplateSection[Start:End])
self._PlaceHolderList.append(PlaceHolder)
SubSectionStart = End
if SubSectionStart < len(TemplateSection):
self._SubSectionList.append(TemplateSection[SubSectionStart:])
else:
self._SubSectionList = [TemplateSection]
def __str__(self):
return self._Template + " : " + str(self._PlaceHolderList)
def Instantiate(self, PlaceHolderValues):
RepeatTime = -1
RepeatPlaceHolders = {}
NonRepeatPlaceHolders = {}
for PlaceHolder in self._PlaceHolderList:
if PlaceHolder not in PlaceHolderValues:
continue
Value = PlaceHolderValues[PlaceHolder]
if type(Value) in self._LIST_TYPES:
if RepeatTime < 0:
RepeatTime = len(Value)
elif RepeatTime != len(Value):
EdkLogger.error(
"TemplateString",
PARAMETER_INVALID,
"${%s} has different repeat time from others!" % PlaceHolder,
ExtraData=str(self._Template)
)
RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
else:
NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
if NonRepeatPlaceHolders:
StringList = []
for S in self._SubSectionList:
if S not in NonRepeatPlaceHolders:
StringList.append(S)
else:
StringList.append(str(NonRepeatPlaceHolders[S]))
else:
StringList = self._SubSectionList
if RepeatPlaceHolders:
TempStringList = []
for Index in range(RepeatTime):
for S in StringList:
if S not in RepeatPlaceHolders:
TempStringList.append(S)
else:
TempStringList.append(str(RepeatPlaceHolders[S][Index]))
StringList = TempStringList
return "".join(StringList)
## Constructor
def __init__(self, Template=None):
self.String = []
self.IsBinary = False
self._Template = Template
self._TemplateSectionList = self._Parse(Template)
## str() operator
#
# @retval string The string replaced
#
def __str__(self):
return "".join(self.String)
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse(self, Template):
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = []
TemplateSectionList = []
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
MatchString = MatchObj.group(1)
MatchStart = MatchObj.start()
MatchEnd = MatchObj.end()
if MatchString == self._REPEAT_START_FLAG:
if MatchStart > SectionStart:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
elif MatchString == self._REPEAT_END_FLAG:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
else:
PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
SearchFrom = MatchEnd
return TemplateSectionList
## Replace the string template with dictionary of placeholders and append it to previous one
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append(self, AppendString, Dictionary=None):
if Dictionary:
SectionList = self._Parse(AppendString)
self.String.append( "".join(S.Instantiate(Dictionary) for S in SectionList))
else:
if isinstance(AppendString,list):
self.String.extend(AppendString)
else:
self.String.append(AppendString)
## Replace the string template with dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace(self, Dictionary=None):
return "".join(S.Instantiate(Dictionary) for S in self._TemplateSectionList)
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor:
    # for avoiding an infinite loop
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
# @param OpenMessage The string printed before progress characters
# @param CloseMessage The string printed after progress characters
# @param ProgressChar The character used to indicate the progress
# @param Interval The interval in seconds between two progress characters
#
def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
self.PromptMessage = OpenMessage
self.CodaMessage = CloseMessage
self.ProgressChar = ProgressChar
self.Interval = Interval
if Progressor._StopFlag is None:
Progressor._StopFlag = threading.Event()
## Start to print progress character
#
# @param OpenMessage The string printed before progress characters
#
def Start(self, OpenMessage=None):
if OpenMessage is not None:
self.PromptMessage = OpenMessage
Progressor._StopFlag.clear()
if Progressor._ProgressThread is None:
Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
Progressor._ProgressThread.setDaemon(False)
Progressor._ProgressThread.start()
## Stop printing progress character
#
# @param CloseMessage The string printed after progress characters
#
def Stop(self, CloseMessage=None):
OriginalCodaMessage = self.CodaMessage
if CloseMessage is not None:
self.CodaMessage = CloseMessage
self.Abort()
self.CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry(self):
sys.stdout.write(self.PromptMessage + " ")
sys.stdout.flush()
TimeUp = 0.0
while not Progressor._StopFlag.isSet():
if TimeUp <= 0.0:
sys.stdout.write(self.ProgressChar)
sys.stdout.flush()
TimeUp = self.Interval
time.sleep(self._CheckInterval)
TimeUp -= self._CheckInterval
sys.stdout.write(" " + self.CodaMessage + "\n")
sys.stdout.flush()
## Abort the progress display
@staticmethod
def Abort():
if Progressor._StopFlag is not None:
Progressor._StopFlag.set()
if Progressor._ProgressThread is not None:
Progressor._ProgressThread.join()
Progressor._ProgressThread = None
## Dictionary using prioritized list as key
#
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', TAB_STAR, 'PLATFORM']
def __init__(self, _Single_=False, _Level_=2):
self._Level_ = _Level_
self.data = {}
self._Single_ = _Single_
# =[] operator
def __getitem__(self, key):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
elif self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey is None or str(FirstKey).upper() in self._ValidWildcardList:
FirstKey = self._Wildcard
if self._Single_:
return self._GetSingleValue(FirstKey, RestKeys)
else:
return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value is None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value is None:
for Key in self.data:
Value = self.data[Key]
if Value is not None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
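## Illustrative usage sketch (editor addition, not part of the original class);
#  the helper name and keys are hypothetical. 'COMMON' (like 'DEFAULT', 'ALL', etc.)
#  acts as a wildcard that is consulted when no exact key matches.
def _tdictUsageExample():
    Table = tdict(True, 2)          # single-value mode, two key levels
    Table['COMMON', 'COMMON'] = 1   # wildcard entry
    Table['IA32', 'COMMON'] = 2     # entry specific to one first-level key
    return Table['IA32', 'COMMON'], Table['X64', 'COMMON']
    # -> (2, 1): the exact match wins, otherwise the wildcard entry is used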
def AnalyzePcdExpression(Setting):
RanStr = ''.join(sample(string.ascii_letters + string.digits, 8))
Setting = Setting.replace('\\\\', RanStr).strip()
# There might be escaped quote in a string: \", \\\" , \', \\\'
Data = Setting
# There might be '|' in string and in ( ... | ... ), replace it with '-'
NewStr = ''
InSingleQuoteStr = False
InDoubleQuoteStr = False
Pair = 0
for Index, ch in enumerate(Data):
if ch == '"' and not InSingleQuoteStr:
if Data[Index - 1] != '\\':
InDoubleQuoteStr = not InDoubleQuoteStr
elif ch == "'" and not InDoubleQuoteStr:
if Data[Index - 1] != '\\':
InSingleQuoteStr = not InSingleQuoteStr
elif ch == '(' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair += 1
elif ch == ')' and not (InSingleQuoteStr or InDoubleQuoteStr):
Pair -= 1
if (Pair > 0 or InSingleQuoteStr or InDoubleQuoteStr) and ch == TAB_VALUE_SPLIT:
NewStr += '-'
else:
NewStr += ch
FieldList = []
StartPos = 0
while True:
Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
if Pos < 0:
FieldList.append(Setting[StartPos:].strip())
break
FieldList.append(Setting[StartPos:Pos].strip())
StartPos = Pos + 1
for i, ch in enumerate(FieldList):
if RanStr in ch:
FieldList[i] = ch.replace(RanStr,'\\\\')
return FieldList
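## Illustrative usage sketch (editor addition, hypothetical helper name).
#  The '|' field separator is honoured only outside quoted strings and parentheses.
def _AnalyzePcdExpressionExample():
    return AnalyzePcdExpression('"Str|WithBar"|VOID*|16')
    # -> ['"Str|WithBar"', 'VOID*', '16']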
def ParseFieldValue (Value):
def ParseDevPathValue (Value):
if '\\' in Value:
            Value = Value.replace('\\', '/').replace(' ', '')
Cmd = 'DevicePath ' + '"' + Value + '"'
try:
p = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = p.communicate()
except Exception as X:
raise BadExpression("DevicePath: %s" % (str(X)) )
finally:
subprocess._cleanup()
p.stdout.close()
p.stderr.close()
if err:
raise BadExpression("DevicePath: %s" % str(err))
out = out.decode()
Size = len(out.split())
out = ','.join(out.split())
return '{' + out + '}', Size
if "{CODE(" in Value:
return Value, len(Value.split(","))
if isinstance(Value, type(0)):
return Value, (Value.bit_length() + 7) // 8
if not isinstance(Value, type('')):
raise BadExpression('Type %s is %s' %(Value, type(Value)))
Value = Value.strip()
if Value.startswith(TAB_UINT8) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 1:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 1
if Value.startswith(TAB_UINT16) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 2:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 2
if Value.startswith(TAB_UINT32) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 4:
raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
return Value, 4
if Value.startswith(TAB_UINT64) and Value.endswith(')'):
Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
if Size > 8:
raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
return Value, 8
if Value.startswith(TAB_GUID) and Value.endswith(')'):
Value = Value.split('(', 1)[1][:-1].strip()
if Value[0] == '{' and Value[-1] == '}':
TmpValue = GuidStructureStringToGuidString(Value)
if not TmpValue:
raise BadExpression("Invalid GUID value string %s" % Value)
Value = TmpValue
if Value[0] == '"' and Value[-1] == '"':
Value = Value[1:-1]
try:
Value = uuid.UUID(Value).bytes_le
ValueL, ValueH = struct.unpack('2Q', Value)
Value = (ValueH << 64 ) | ValueL
except ValueError as Message:
raise BadExpression(Message)
return Value, 16
if Value.startswith('L"') and Value.endswith('"'):
# Unicode String
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, (len(List) + 1) * 2
if Value.startswith('"') and Value.endswith('"'):
# ASCII String
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List) + 1
if Value.startswith("L'") and Value.endswith("'"):
# Unicode Character Constant
# translate escape character
Value = Value[1:]
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 16) | ord(Char)
return Value, len(List) * 2
if Value.startswith("'") and Value.endswith("'"):
# Character constant
# translate escape character
try:
Value = eval(Value)
except:
Value = Value[1:-1]
List = list(Value)
if len(List) == 0:
raise BadExpression('Length %s is %s' % (Value, len(List)))
List.reverse()
Value = 0
for Char in List:
Value = (Value << 8) | ord(Char)
return Value, len(List)
if Value.startswith('{') and Value.endswith('}'):
# Byte array
Value = Value[1:-1]
List = [Item.strip() for Item in Value.split(',')]
List.reverse()
Value = 0
RetSize = 0
for Item in List:
ItemValue, Size = ParseFieldValue(Item)
RetSize += Size
for I in range(Size):
Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
return Value, RetSize
if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
Value = Value.replace("DEVICE_PATH(", '').rstrip(')')
Value = Value.strip().strip('"')
return ParseDevPathValue(Value)
if Value.lower().startswith('0x'):
try:
Value = int(Value, 16)
except:
raise BadExpression("invalid hex value: %s" % Value)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value[0].isdigit():
Value = int(Value, 10)
if Value == 0:
return 0, 1
return Value, (Value.bit_length() + 7) // 8
if Value.lower() == 'true':
return 1, 1
if Value.lower() == 'false':
return 0, 1
return Value, 1
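## Illustrative usage sketch (editor addition, hypothetical helper name).
#  ParseFieldValue returns an (integer value, size in bytes) pair; for a quoted
#  ASCII string the size includes the NUL terminator.
def _ParseFieldValueExample():
    return ParseFieldValue('UINT16(0x1234)'), ParseFieldValue('"AB"')
    # -> ((0x1234, 2), (0x4241, 3))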
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This function is used to match functions (AnalyzePcdData) used for retrieving PCD value from database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
# TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# The PCD value needs to be located in such a string, and the PCD value might be an expression in which
# a "|" operator may appear, including inside string values.
#
# @param Setting: String containing the information described above, with "TokenSpace.PcdCName|" stripped
# @param PcdType: PCD type: feature, fixed, dynamic default, VPD, HII
# @param DataType: The datum type of the PCD: VOID*, UINT, BOOL
# @retval:
# ValueList: A List contain fields described above
# IsValid: True if conforming EBNF, otherwise False
# Index: The index where PcdValue is in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
FieldList = AnalyzePcdExpression(Setting)
IsValid = True
if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
Value = FieldList[0]
Size = ''
if len(FieldList) > 1 and FieldList[1]:
DataType = FieldList[1]
if FieldList[1] != TAB_VOID and StructPattern.match(FieldList[1]) is None:
IsValid = False
if len(FieldList) > 2:
Size = FieldList[2]
if IsValid:
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [str(Value), DataType, str(Size)], IsValid, 0
elif PcdType == MODEL_PCD_FEATURE_FLAG:
Value = FieldList[0]
Size = ''
IsValid = (len(FieldList) <= 1)
return [Value, DataType, str(Size)], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
VpdOffset = FieldList[0]
Value = Size = ''
if not DataType == TAB_VOID:
if len(FieldList) > 1:
Value = FieldList[1]
else:
if len(FieldList) > 1:
Size = FieldList[1]
if len(FieldList) > 2:
Value = FieldList[2]
if DataType == "":
IsValid = (len(FieldList) <= 1)
else:
IsValid = (len(FieldList) <= 3)
if Size:
try:
int(Size, 16) if Size.upper().startswith("0X") else int(Size)
except:
IsValid = False
Size = -1
return [VpdOffset, str(Size), Value], IsValid, 2
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
IsValid = (3 <= len(FieldList) <= 5)
HiiString = FieldList[0]
Guid = Offset = Value = Attribute = ''
if len(FieldList) > 1:
Guid = FieldList[1]
if len(FieldList) > 2:
Offset = FieldList[2]
if len(FieldList) > 3:
Value = FieldList[3]
if len(FieldList) > 4:
Attribute = FieldList[4]
return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
return [], False, 0
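## Illustrative usage sketch (editor addition, hypothetical helper name).
#  A FixedAtBuild VOID* PCD written in a DSC as
#      TokenSpaceGuid.PcdExample|"hello"|VOID*|6
#  is analyzed from the part after "TokenSpaceGuid.PcdExample|":
def _AnalyzeDscPcdExample():
    return AnalyzeDscPcd('"hello"|VOID*|6', MODEL_PCD_FIXED_AT_BUILD)
    # -> (['"hello"', 'VOID*', '6'], True, 0): value list, IsValid, value index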
## AnalyzePcdData
#
# Analyze the PCD Value, Datum type and TokenNumber.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting: A String contain value/datum type/token number information;
#
# @retval ValueList: A list containing value, datum type and token number.
#
def AnalyzePcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[0] = PtrValue[0]
return ValueList
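## Illustrative usage sketch (editor addition, hypothetical helper name).
#  The quoted default value may itself contain '|', so it is extracted first.
def _AnalyzePcdDataExample():
    return AnalyzePcdData('L"A|B"|VOID*|10')
    # -> ['L"A|B"', 'VOID*', '10']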
## Check the format of a PCD value against its datum type
#
# For PCD value setting
#
def CheckPcdDatum(Type, Value):
if Type == TAB_VOID:
ValueRe = re.compile(r'\s*L?\".*\"\s*$')
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
or (Value.startswith('{') and Value.endswith('}')) or (Value.startswith("L'") or Value.startswith("'") and Value.endswith("'"))
):
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
", \"...\" or \'...\' for string, L\"...\" or L\'...\' for unicode string" % (Value, Type)
elif ValueRe.match(Value):
            # Check that the chars in the UnicodeString or CString are printable
if Value.startswith("L"):
Value = Value[2:-1]
else:
Value = Value[1:-1]
Printset = set(string.printable)
Printset.remove(TAB_PRINTCHAR_VT)
Printset.add(TAB_PRINTCHAR_BS)
Printset.add(TAB_PRINTCHAR_NUL)
if not set(Value).issubset(Printset):
PrintList = sorted(Printset)
return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
elif Type == 'BOOLEAN':
if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
if Value.startswith('0') and not Value.lower().startswith('0x') and len(Value) > 1 and Value.lstrip('0'):
Value = Value.lstrip('0')
try:
if Value and int(Value, 0) < 0:
return False, "PCD can't be set to negative value[%s] for datum type [%s]" % (Value, Type)
Value = int(Value, 0)
if Value > MAX_VAL_TYPE[Type]:
return False, "Too large PCD value[%s] for datum type [%s]" % (Value, Type)
except:
return False, "Invalid value [%s] of type [%s];"\
" must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
else:
return True, "StructurePcd"
return True, ""
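## Illustrative usage sketch (editor addition, hypothetical helper name).
#  A UINT8 PCD only accepts values that fit in a single byte.
def _CheckPcdDatumExample():
    return CheckPcdDatum(TAB_UINT8, '0x20'), CheckPcdDatum(TAB_UINT8, '0x200')
    # -> ((True, ''), (False, 'Too large PCD value[512] for datum type [UINT8]'))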
def CommonPath(PathList):
P1 = min(PathList).split(os.path.sep)
P2 = max(PathList).split(os.path.sep)
for Index in range(min(len(P1), len(P2))):
if P1[Index] != P2[Index]:
return os.path.sep.join(P1[:Index])
return os.path.sep.join(P1)
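## Illustrative usage sketch (editor addition, hypothetical helper name).
#  CommonPath returns the longest common directory prefix of the given paths.
def _CommonPathExample():
    return CommonPath([os.path.join('Pkg', 'Module', 'A.inf'),
                       os.path.join('Pkg', 'Module', 'Inc', 'B.h')])
    # -> os.path.join('Pkg', 'Module')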
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Root = mws.getWs(self.Root, self.File)
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root) + 1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
self.OriginalPath = self
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
# @retval False The two PathClass are different
# @retval True The two PathClass are the same
#
def __eq__(self, Other):
return self.Path == str(Other)
## Override __cmp__ function
#
# Customize the comparison operation of two PathClass
#
    # @retval 0  The two PathClass are the same
    # @retval -1 The first PathClass is less than the second PathClass
    # @retval 1  The first PathClass is greater than the second PathClass
def __cmp__(self, Other):
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
@cached_property
def Key(self):
return self.Path.upper()
@property
def TimeStamp(self):
return os.stat(self.Path)[8]
def Validate(self, Type='', CaseSensitive=True):
def RealPath2(File, Dir='', OverrideDir=''):
NewFile = None
if OverrideDir:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir) + 1:], NewFile[0:len(OverrideDir)]
if GlobalData.gAllFiles:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
if not NewFile:
NewFile = os.path.normpath(os.path.join(Dir, File))
if not os.path.exists(NewFile):
return None, None
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir) + 1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
if GlobalData.gCaseInsensitive:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
if len (mws.getPkgPath()) == 0:
return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
else:
return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
ErrorCode = FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
## Parse PE image to get the required PE information.
#
class PeImageClass():
## Constructor
#
# @param File FilePath of PeImage
#
def __init__(self, PeFile):
self.FileName = PeFile
self.IsValid = False
self.Size = 0
self.EntryPoint = 0
self.SectionAlignment = 0
self.SectionHeaderList = []
self.ErrorInfo = ''
try:
PeObject = open(PeFile, 'rb')
except:
self.ErrorInfo = self.FileName + ' can not be found\n'
return
# Read DOS header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x3E)
ByteList = ByteArray.tolist()
# DOS signature should be 'MZ'
if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
return
# Read 4 byte PE Signature
PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
PeObject.seek(PeOffset)
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 4)
# PE signature should be 'PE\0\0'
if ByteArray.tostring() != b'PE\0\0':
self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
return
# Read PE file header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x14)
ByteList = ByteArray.tolist()
SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
if SecNumber == 0:
self.ErrorInfo = self.FileName + ' has no section header'
return
# Read PE optional header
OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, OptionalHeaderSize)
ByteList = ByteArray.tolist()
self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
# Read each Section Header
for Index in range(SecNumber):
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x28)
ByteList = ByteArray.tolist()
SecName = self._ByteListToStr(ByteList[0:8])
SecVirtualSize = self._ByteListToInt(ByteList[8:12])
SecRawAddress = self._ByteListToInt(ByteList[20:24])
SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
self.IsValid = True
PeObject.close()
def _ByteListToStr(self, ByteList):
String = ''
for index in range(len(ByteList)):
if ByteList[index] == 0:
break
String += chr(ByteList[index])
return String
def _ByteListToInt(self, ByteList):
Value = 0
for index in range(len(ByteList) - 1, -1, -1):
Value = (Value << 8) | int(ByteList[index])
return Value
class DefaultStore():
def __init__(self, DefaultStores ):
self.DefaultStores = DefaultStores
def DefaultStoreID(self, DefaultStoreName):
for key, value in self.DefaultStores.items():
if value == DefaultStoreName:
return key
return None
def GetDefaultDefault(self):
if not self.DefaultStores or "0" in self.DefaultStores:
return "0", TAB_DEFAULT_STORES_DEFAULT
else:
minvalue = min(int(value_str) for value_str in self.DefaultStores)
return (str(minvalue), self.DefaultStores[str(minvalue)])
def GetMin(self, DefaultSIdList):
if not DefaultSIdList:
return TAB_DEFAULT_STORES_DEFAULT
storeidset = {storeid for storeid, storename in self.DefaultStores.values() if storename in DefaultSIdList}
if not storeidset:
return ""
minid = min(storeidset )
for sid, name in self.DefaultStores.values():
if sid == minid:
return name
class SkuClass():
DEFAULT = 0
SINGLE = 1
MULTIPLE =2
def __init__(self,SkuIdentifier='', SkuIds=None):
if SkuIds is None:
SkuIds = {}
for SkuName in SkuIds:
SkuId = SkuIds[SkuName][0]
skuid_num = int(SkuId, 16) if SkuId.upper().startswith("0X") else int(SkuId)
if skuid_num > 0xFFFFFFFFFFFFFFFF:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
% (SkuName, SkuId))
self.AvailableSkuIds = OrderedDict()
self.SkuIdSet = []
self.SkuIdNumberSet = []
self.SkuData = SkuIds
self._SkuInherit = {}
self._SkuIdentifier = SkuIdentifier
if SkuIdentifier == '' or SkuIdentifier is None:
self.SkuIdSet = ['DEFAULT']
self.SkuIdNumberSet = ['0U']
elif SkuIdentifier == 'ALL':
self.SkuIdSet = list(SkuIds.keys())
self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
else:
r = SkuIdentifier.split('|')
self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
k = None
try:
self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
except Exception:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (k, " | ".join(SkuIds.keys())))
for each in self.SkuIdSet:
if each in SkuIds:
self.AvailableSkuIds[each] = SkuIds[each][0]
else:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (each, " | ".join(SkuIds.keys())))
if self.SkuUsageType != SkuClass.SINGLE:
self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
if self.SkuIdSet:
GlobalData.gSkuids = (self.SkuIdSet)
if 'COMMON' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('COMMON')
if self.SkuUsageType == self.SINGLE:
if len(GlobalData.gSkuids) != 1:
if 'DEFAULT' in GlobalData.gSkuids:
GlobalData.gSkuids.remove('DEFAULT')
if GlobalData.gSkuids:
GlobalData.gSkuids.sort()
def GetNextSkuId(self, skuname):
if not self._SkuInherit:
self._SkuInherit = {}
for item in self.SkuData.values():
self._SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
return self._SkuInherit.get(skuname, "DEFAULT")
def GetSkuChain(self, sku):
if sku == "DEFAULT":
return ["DEFAULT"]
skulist = [sku]
nextsku = sku
while True:
nextsku = self.GetNextSkuId(nextsku)
skulist.append(nextsku)
if nextsku == "DEFAULT":
break
skulist.reverse()
return skulist
def SkuOverrideOrder(self):
skuorderset = []
for skuname in self.SkuIdSet:
skuorderset.append(self.GetSkuChain(skuname))
skuorder = []
for index in range(max(len(item) for item in skuorderset)):
for subset in skuorderset:
if index > len(subset)-1:
continue
if subset[index] in skuorder:
continue
skuorder.append(subset[index])
return skuorder
@property
def SkuUsageType(self):
if self._SkuIdentifier.upper() == "ALL":
return SkuClass.MULTIPLE
if len(self.SkuIdSet) == 1:
if self.SkuIdSet[0] == 'DEFAULT':
return SkuClass.DEFAULT
return SkuClass.SINGLE
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet:
return SkuClass.SINGLE
return SkuClass.MULTIPLE
def DumpSkuIdArrary(self):
if self.SkuUsageType == SkuClass.SINGLE:
return "{0x0}"
ArrayStrList = []
for skuname in self.AvailableSkuIds:
if skuname == "COMMON":
continue
while skuname != "DEFAULT":
ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
skuname = self.GetNextSkuId(skuname)
ArrayStrList.append("0x0")
return "{{{myList}}}".format(myList=",".join(ArrayStrList))
@property
def AvailableSkuIdSet(self):
return self.AvailableSkuIds
@property
def SystemSkuId(self):
if self.SkuUsageType == SkuClass.SINGLE:
if len(self.SkuIdSet) == 1:
return self.SkuIdSet[0]
else:
return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
else:
return 'DEFAULT'
## Get the integer value from string like "14U" or integer like 2
#
# @param Input The object that may be either a integer value or a string
#
# @retval Value The integer value that the input represents
#
def GetIntegerValue(Input):
if not isinstance(Input, str):
return Input
String = Input
if String.endswith("U"):
String = String[:-1]
if String.endswith("ULL"):
String = String[:-3]
if String.endswith("LL"):
String = String[:-2]
if String.startswith("0x") or String.startswith("0X"):
return int(String, 16)
elif String == '':
return 0
else:
return int(String)
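## Illustrative usage sketch (editor addition, hypothetical helper name).
def _GetIntegerValueExample():
    return GetIntegerValue("14U"), GetIntegerValue("0x10"), GetIntegerValue(2)
    # -> (14, 16, 2)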
#
# Pack a GUID (registry format) list into a buffer and return it
#
def PackGUID(Guid):
return pack(PACK_PATTERN_GUID,
int(Guid[0], 16),
int(Guid[1], 16),
int(Guid[2], 16),
int(Guid[3][-4:-2], 16),
int(Guid[3][-2:], 16),
int(Guid[4][-12:-10], 16),
int(Guid[4][-10:-8], 16),
int(Guid[4][-8:-6], 16),
int(Guid[4][-6:-4], 16),
int(Guid[4][-4:-2], 16),
int(Guid[4][-2:], 16)
)
#
# Pack a GUID (byte) list into a buffer and return it
#
def PackByteFormatGUID(Guid):
return pack(PACK_PATTERN_GUID,
Guid[0],
Guid[1],
Guid[2],
Guid[3],
Guid[4],
Guid[5],
Guid[6],
Guid[7],
Guid[8],
Guid[9],
Guid[10],
)
## Deep copy a dict/OrderedDict recursively
#
# @param ori_dict a nested dict or OrderedDict
#
# @retval a new dict or OrderedDict
#
def CopyDict(ori_dict):
dict_type = ori_dict.__class__
if dict_type not in (dict,OrderedDict):
return ori_dict
new_dict = dict_type()
for key in ori_dict:
if isinstance(ori_dict[key],(dict,OrderedDict)):
new_dict[key] = CopyDict(ori_dict[key])
else:
new_dict[key] = ori_dict[key]
return new_dict
#
# Remove the c/c++ comments: // and /* */
#
def RemoveCComments(ctext):
return re.sub('//.*?\n|/\*.*?\*/', '\n', ctext, flags=re.S)
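## Illustrative usage sketch (editor addition, hypothetical helper name).
def _RemoveCCommentsExample():
    return RemoveCComments("UINT8 A; // line comment\n/* block */ UINT8 B;\n")
    # -> "UINT8 A; \n\n UINT8 B;\n"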
|
configure-aspen.py
|
from __future__ import division
from decimal import Decimal as D
import threading
import time
import traceback
import gratipay
import gratipay.wireup
from gratipay import canonize, utils
from gratipay.security import authentication, csrf, x_frame_options
from gratipay.utils import cache_static, i18n, set_cookie, timer
from gratipay.version import get_version
import aspen
from aspen import log_dammit
# Monkey patch aspen.Response
# ===========================
if hasattr(aspen.Response, 'redirect'):
raise Warning('aspen.Response.redirect() already exists')
def _redirect(response, url):
response.code = 302
response.headers['Location'] = url
raise response
aspen.Response.redirect = _redirect
if hasattr(aspen.Response, 'set_cookie'):
raise Warning('aspen.Response.set_cookie() already exists')
def _set_cookie(response, *args, **kw):
set_cookie(response.headers.cookie, *args, **kw)
aspen.Response.set_cookie = _set_cookie
# Wireup Algorithm
# ================
exc = None
try:
website.version = get_version()
except Exception, e:
exc = e
website.version = 'x'
website.renderer_default = "jinja2"
website.renderer_factories['jinja2'].Renderer.global_context = {
'range': range,
'unicode': unicode,
'enumerate': enumerate,
'len': len,
'float': float,
'type': type,
'str': str
}
env = website.env = gratipay.wireup.env()
tell_sentry = website.tell_sentry = gratipay.wireup.make_sentry_teller(env)
gratipay.wireup.canonical(env)
website.db = gratipay.wireup.db(env)
website.mail = gratipay.wireup.mail(env)
gratipay.wireup.billing(env)
gratipay.wireup.username_restrictions(website)
gratipay.wireup.nanswers(env)
gratipay.wireup.load_i18n(website)
gratipay.wireup.other_stuff(website, env)
gratipay.wireup.accounts_elsewhere(website, env)
if exc:
tell_sentry(exc)
# Periodic jobs
# =============
conn = website.db.get_connection().__enter__()
def cron(period, func, exclusive=False):
def f():
if period <= 0:
return
sleep = time.sleep
if exclusive:
cursor = conn.cursor()
try_lock = lambda: cursor.one("SELECT pg_try_advisory_lock(0)")
has_lock = False
while 1:
try:
if exclusive and not has_lock:
has_lock = try_lock()
if not exclusive or has_lock:
func()
except Exception, e:
tell_sentry(e)
log_dammit(traceback.format_exc().strip())
sleep(period)
t = threading.Thread(target=f)
t.daemon = True
t.start()
cron(env.update_global_stats_every, lambda: utils.update_global_stats(website))
cron(env.check_db_every, website.db.self_check, True)
# Website Algorithm
# =================
def add_stuff_to_context(request):
request.context['username'] = None
# Helpers for global call to action to support Gratipay itself.
user = request.context.get('user')
p = user.participant if user else None
if p and p.is_free_rider is None:
usage = p.usage
# Above $500/wk we suggest 2%.
if usage >= 5000:
low = D('100.00')
high = D('1000.00')
elif usage >= 500:
low = D('10.00')
high = D('100.00')
# From $20 to $499 we suggest 5%.
elif usage >= 100:
low = D('5.00')
high = D('25.00')
elif usage >= 20:
low = D('1.00')
high = D('5.00')
# Below $20 we suggest 10%.
elif usage >= 5:
low = D('0.50')
high = D('2.00')
else:
low = D('0.10')
high = D('1.00')
request.context['cta_low'] = low
request.context['cta_high'] = high
noop = lambda: None
algorithm = website.algorithm
algorithm.functions = [ timer.start
, algorithm['parse_environ_into_request']
, algorithm['parse_body_into_request']
, algorithm['raise_200_for_OPTIONS']
, canonize
, authentication.get_auth_from_request
, csrf.get_csrf_token_from_request
, add_stuff_to_context
, i18n.add_helpers_to_context
, algorithm['dispatch_request_to_filesystem']
, cache_static.get_etag_for_file if website.cache_static else noop
, cache_static.try_to_serve_304 if website.cache_static else noop
, algorithm['apply_typecasters_to_path']
, algorithm['get_resource_for_request']
, algorithm['get_response_for_resource']
, tell_sentry
, algorithm['get_response_for_exception']
, gratipay.set_misc_headers
, authentication.add_auth_to_response
, csrf.add_csrf_token_to_response
, cache_static.add_caching_to_response if website.cache_static else noop
, x_frame_options
, algorithm['log_traceback_for_5xx']
, algorithm['delegate_error_to_simplate']
, tell_sentry
, algorithm['log_traceback_for_exception']
, algorithm['log_result_of_request']
, timer.end
, tell_sentry
]
|
test_external_step.py
|
import os
import tempfile
import time
import uuid
from threading import Thread
import pytest
from dagster import (
Field,
ModeDefinition,
RetryRequested,
String,
execute_pipeline,
execute_pipeline_iterator,
fs_io_manager,
pipeline,
reconstructable,
resource,
solid,
)
from dagster.core.definitions.no_step_launcher import no_step_launcher
from dagster.core.events import DagsterEventType
from dagster.core.execution.api import create_execution_plan
from dagster.core.execution.context_creation_pipeline import PlanExecutionContextManager
from dagster.core.execution.plan.external_step import (
LocalExternalStepLauncher,
local_external_step_launcher,
step_context_to_step_run_ref,
step_run_ref_to_step_context,
)
from dagster.core.execution.retries import RetryMode
from dagster.core.instance import DagsterInstance
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.utils import safe_tempfile_path, send_interrupt
from dagster.utils.merger import deep_merge_dicts, merge_dicts
RUN_CONFIG_BASE = {"solids": {"return_two": {"config": {"a": "b"}}}}
def make_run_config(scratch_dir, mode):
if mode in ["external", "request_retry"]:
step_launcher_resource_keys = ["first_step_launcher", "second_step_launcher"]
else:
step_launcher_resource_keys = ["second_step_launcher"]
return deep_merge_dicts(
RUN_CONFIG_BASE,
{
"resources": merge_dicts(
{"io_manager": {"config": {"base_dir": scratch_dir}}},
{
step_launcher_resource_key: {"config": {"scratch_dir": scratch_dir}}
for step_launcher_resource_key in step_launcher_resource_keys
},
),
},
)
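# For reference (illustrative sketch derived from the code above),
# make_run_config(tmpdir, "external") yields roughly:
#   {"solids": {"return_two": {"config": {"a": "b"}}},
#    "resources": {"io_manager": {"config": {"base_dir": tmpdir}},
#                  "first_step_launcher": {"config": {"scratch_dir": tmpdir}},
#                  "second_step_launcher": {"config": {"scratch_dir": tmpdir}}}}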
class RequestRetryLocalExternalStepLauncher(LocalExternalStepLauncher):
def launch_step(self, step_context, prior_attempts_count):
if prior_attempts_count == 0:
raise RetryRequested()
else:
return super(RequestRetryLocalExternalStepLauncher, self).launch_step(
step_context, prior_attempts_count
)
@resource(config_schema=local_external_step_launcher.config_schema)
def request_retry_local_external_step_launcher(context):
return RequestRetryLocalExternalStepLauncher(**context.resource_config)
def define_basic_pipeline():
@solid(required_resource_keys=set(["first_step_launcher"]), config_schema={"a": Field(str)})
def return_two(_):
return 2
@solid(required_resource_keys=set(["second_step_launcher"]))
def add_one(_, num):
return num + 1
@pipeline(
mode_defs=[
ModeDefinition(
"external",
resource_defs={
"first_step_launcher": local_external_step_launcher,
"second_step_launcher": local_external_step_launcher,
"io_manager": fs_io_manager,
},
),
ModeDefinition(
"internal_and_external",
resource_defs={
"first_step_launcher": no_step_launcher,
"second_step_launcher": local_external_step_launcher,
"io_manager": fs_io_manager,
},
),
ModeDefinition(
"request_retry",
resource_defs={
"first_step_launcher": request_retry_local_external_step_launcher,
"second_step_launcher": request_retry_local_external_step_launcher,
"io_manager": fs_io_manager,
},
),
]
)
def basic_pipeline():
add_one(return_two())
return basic_pipeline
def define_sleepy_pipeline():
@solid(
config_schema={"tempfile": Field(String)},
required_resource_keys=set(["first_step_launcher"]),
)
def sleepy_solid(context):
with open(context.solid_config["tempfile"], "w") as ff:
ff.write("yup")
start_time = time.time()
while True:
time.sleep(0.1)
if time.time() - start_time > 120:
raise Exception("Timed out")
@pipeline(
mode_defs=[
ModeDefinition(
"external",
resource_defs={
"first_step_launcher": local_external_step_launcher,
"io_manager": fs_io_manager,
},
),
]
)
def sleepy_pipeline():
sleepy_solid()
return sleepy_pipeline
def initialize_step_context(scratch_dir, instance):
pipeline_run = PipelineRun(
pipeline_name="foo_pipeline",
run_id=str(uuid.uuid4()),
run_config=make_run_config(scratch_dir, "external"),
mode="external",
)
recon_pipeline = reconstructable(define_basic_pipeline)
plan = create_execution_plan(recon_pipeline, pipeline_run.run_config, mode="external")
initialization_manager = PlanExecutionContextManager(
pipeline=recon_pipeline,
execution_plan=plan,
run_config=pipeline_run.run_config,
pipeline_run=pipeline_run,
instance=instance,
retry_mode=RetryMode.DISABLED,
)
for _ in initialization_manager.prepare_context():
pass
pipeline_context = initialization_manager.get_context()
step_context = pipeline_context.for_step(plan.get_step_by_key("return_two"))
return step_context
def test_step_context_to_step_run_ref():
with DagsterInstance.ephemeral() as instance:
step_context = initialize_step_context("", instance)
step = step_context.step
step_run_ref = step_context_to_step_run_ref(step_context, 0)
assert step_run_ref.run_config == step_context.pipeline_run.run_config
assert step_run_ref.run_id == step_context.pipeline_run.run_id
rehydrated_step_context = step_run_ref_to_step_context(step_run_ref, instance)
rehydrated_step = rehydrated_step_context.step
assert rehydrated_step.pipeline_name == step.pipeline_name
assert rehydrated_step.step_inputs == step.step_inputs
assert rehydrated_step.step_outputs == step.step_outputs
assert rehydrated_step.kind == step.kind
assert rehydrated_step.solid_handle.name == step.solid_handle.name
assert rehydrated_step.logging_tags == step.logging_tags
assert rehydrated_step.tags == step.tags
def test_local_external_step_launcher():
with tempfile.TemporaryDirectory() as tmpdir:
with DagsterInstance.ephemeral() as instance:
step_context = initialize_step_context(tmpdir, instance)
step_launcher = LocalExternalStepLauncher(tmpdir)
events = list(step_launcher.launch_step(step_context, 0))
event_types = [event.event_type for event in events]
assert DagsterEventType.STEP_START in event_types
assert DagsterEventType.STEP_SUCCESS in event_types
assert DagsterEventType.STEP_FAILURE not in event_types
@pytest.mark.parametrize("mode", ["external", "internal_and_external"])
def test_pipeline(mode):
with tempfile.TemporaryDirectory() as tmpdir:
result = execute_pipeline(
pipeline=reconstructable(define_basic_pipeline),
mode=mode,
run_config=make_run_config(tmpdir, mode),
)
assert result.result_for_solid("return_two").output_value() == 2
assert result.result_for_solid("add_one").output_value() == 3
def test_launcher_requests_retry():
mode = "request_retry"
with tempfile.TemporaryDirectory() as tmpdir:
result = execute_pipeline(
pipeline=reconstructable(define_basic_pipeline),
mode=mode,
run_config=make_run_config(tmpdir, mode),
)
assert result.success
assert result.result_for_solid("return_two").output_value() == 2
assert result.result_for_solid("add_one").output_value() == 3
for step_key, events in result.events_by_step_key.items():
if step_key:
event_types = [event.event_type for event in events]
assert DagsterEventType.STEP_UP_FOR_RETRY in event_types
assert DagsterEventType.STEP_RESTARTED in event_types
def _send_interrupt_thread(temp_file):
while not os.path.exists(temp_file):
time.sleep(0.1)
send_interrupt()
@pytest.mark.parametrize("mode", ["external"])
def test_interrupt_step_launcher(mode):
with tempfile.TemporaryDirectory() as tmpdir:
with safe_tempfile_path() as success_tempfile:
sleepy_run_config = {
"resources": {
"first_step_launcher": {
"config": {"scratch_dir": tmpdir},
},
"io_manager": {"config": {"base_dir": tmpdir}},
},
"solids": {"sleepy_solid": {"config": {"tempfile": success_tempfile}}},
}
interrupt_thread = Thread(target=_send_interrupt_thread, args=(success_tempfile,))
interrupt_thread.start()
results = []
for result in execute_pipeline_iterator(
pipeline=reconstructable(define_sleepy_pipeline),
mode=mode,
run_config=sleepy_run_config,
):
results.append(result.event_type)
assert DagsterEventType.STEP_FAILURE in results
assert DagsterEventType.PIPELINE_FAILURE in results
interrupt_thread.join()
def test_multiproc_launcher_requests_retry():
mode = "request_retry"
with tempfile.TemporaryDirectory() as tmpdir:
run_config = make_run_config(tmpdir, mode)
run_config["execution"] = {"multiprocess": {}}
result = execute_pipeline(
instance=DagsterInstance.local_temp(tmpdir),
pipeline=reconstructable(define_basic_pipeline),
mode=mode,
run_config=run_config,
)
assert result.success
assert result.result_for_solid("return_two").output_value() == 2
assert result.result_for_solid("add_one").output_value() == 3
for step_key, events in result.events_by_step_key.items():
if step_key:
event_types = [event.event_type for event in events]
assert DagsterEventType.STEP_UP_FOR_RETRY in event_types
assert DagsterEventType.STEP_RESTARTED in event_types
|
routes.py
|
from threading import Thread
from flask import Blueprint, request
from .methods import (information_graphic, clear_string, information_filter, information_numbers, information_total,
list_menu, select_menu_option, get_random_movie_or_series)
from ..data.scrapping import get_data, get_day_image, get_movies_series
from ..data.task import scrap_every_day
bot_bp = Blueprint('bot', __package__, url_prefix='/bot')
options = {
'0': information_numbers,
'1': information_graphic,
'2': information_total,
'3': get_random_movie_or_series
}
@bot_bp.route('', methods=['POST'])
def bot_post():
incoming_message = request.values.get('Body', '').lower()
menu = clear_string(incoming_message)
if incoming_message in options:
return select_menu_option(incoming_message, options)
if 'menu' in menu:
return list_menu()
return information_filter(incoming_message)
@bot_bp.route('/fetch/data', methods=['GET'])
def bot_fetch_image_manually():
get_data()
get_day_image()
get_movies_series()
return "Done."
@bot_bp.route('/fetch/random', methods=['GET'])
def bot_fetch_random_movie():
return get_random_movie_or_series()
@bot_bp.route('/run/task', methods=['GET'])
def bot_run_task():
thread = Thread(target=scrap_every_day, name="get_data")
thread.start()
return "Done!"
|
ftclient.py
|
#!/usr/bin/python
###########################################################################
# Program Filename: ftclient.py
# Author: Jonathan Grocott
# Date: 11/18/18
# CS_372_400_F2018 Project 2
# Description: Simple file transfer client implementation
# requires ftserver.c on another host.
# References -
# https://www.bogotobogo.com/python/python_network_programming_server_client_file_transfer.php
# https://docs.python.org/2/library/socket.html
# http://docs.python.org/release/2.6.5/library/internet.html
###########################################################################
#import required libraries
import socket
import sys
import fileinput
import threading
import os.path
#Checks number of command line args
if len(sys.argv) < 4:
print("Invalid arguments: ")
print("ftclient [host] [port] [command] [filename]")
sys.exit(1)
#gets username and password to authenticate with the server
username = input("Username: ")
password = input("Password: ")
authentication = username + "&" + password
#sets command line arguments
server_host = sys.argv[1]
server_port = int(sys.argv[2])
command = sys.argv[3]
message = command
data_port = server_port - 1
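# the data connection listens one port below the command port; the server connects back to it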
#checks number of command line arguments
if len(sys.argv) == 5:
filename = sys.argv[4]
message = message + " " + filename
if os.path.exists(filename):
overwrite = input("File already exists. Would you like to overwrite it? yes/no\n")
if overwrite[:1] == "n" or overwrite[:1] == "N":
print("Exiting program...")
sys.exit(0)
#receives the directory listing
def rcvDir(dataSocket):
dataConnection, address = dataSocket.accept()
fileData = dataConnection.recv(10000)
dirListing = fileData.split()
for currFileName in dirListing:
print(currFileName.decode("utf-8"))
#receives the file
def rcvFile(dataSocket, filename):
dataConnection, address = dataSocket.accept()
outFile = open(filename,"w+")
while True:
fileData = dataConnection.recv(10000)
if not fileData: break
if len(fileData) == 0: break
outFile.write(fileData.decode("utf-8"))
outFile.close()
#handles the file transfer communication
def ftComm(serverSocket):
while 1:
server_response = serverSocket.recv(1000)
if not server_response: break
if len(server_response) == 0: break
if server_response == b'/quit\x00': break
print(server_response.decode("utf-8"))
serverSocket.close()
#create socket to send commands to server
serverSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
serverSocket.connect((server_host,server_port))
#create socket to receive data
dataSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
dataSocket.bind(('',data_port))
dataSocket.listen(5)
#send authentication data
serverSocket.send(authentication.encode("utf-8"))
authReply = serverSocket.recv(100)
decodedReply = authReply.decode("utf-8")
print(decodedReply)
if decodedReply[:1] == "I":
sys.exit(0)
#send command and listen for response
serverSocket.send(message.encode("utf-8"))
if len(sys.argv) == 5:
threading.Thread(target=rcvFile,args=(dataSocket, filename),).start()
else:
threading.Thread(target=rcvDir,args=(dataSocket,),).start()
threading.Thread(target=ftComm,args=(serverSocket,),).start()
|
launcher.py
|
from tkinter import Button, Tk, Toplevel, Label
from tkinter.messagebox import askyesno
from tkinter.ttk import Progressbar
from win32api import ShellExecute
from PIL import Image, ImageTk
from os import path, makedirs
from threading import Thread
from requests import get
__version__ = '1.4'
__author__ = "TerraBoii"
# Victor Santiago is the original creator; I just modified the entire file because I didn't like the design
# Link to original project: https://github.com/vsantiago113/Tkinter-MyTestApp
__credits__ = ["Victor Santiago", 'TerraBoii']
_AppName_ = 'Guess the square root game launcher'
# url for installer
url = "https://github.com/TerraBoii/guess_the_square_root_game/raw/main/updates/squarerootgame_setup.exe"
file_name = url.split('/')[-1].replace(" ", "_")
file_path = path.join("setup", file_name)
class UpdateManager(Toplevel):
def __init__(self, parent):
Toplevel.__init__(self, parent)
self.transient(parent)
self.attributes("-disabled", True)
self.result = None
self.grab_set()
w = 350
h = 200
sw = self.winfo_screenwidth()
sh = self.winfo_screenheight()
x = (sw - w) / 2
y = (sh - h) / 2
self.geometry('{0}x{1}+{2}+{3}'.format(w, h, int(x), int(y)))
self.resizable(width=False, height=False)
self.title('Update Manager')
self.wm_iconbitmap('images/Contact.ico')
image = Image.open('images/update_manager.jpg')
photo = ImageTk.PhotoImage(image)
manager_holder = Label(self, image=photo)
manager_holder.image = photo
manager_holder.pack()
def install_update():
tmp.after(0, tmp.destroy)
ShellExecute(0, 'open', "setup\\squarerootgame_setup.exe", None, None, 10)
def start_update_manager():
destination_folder = "setup"
if not path.exists(destination_folder):
makedirs(destination_folder) # create folder if it does not exist
with get(url=url, stream=True) as r:
self.progressbar['maximum'] = int(r.headers.get('Content-Length'))
r.raise_for_status()
with open(file_path, 'wb') as f:
for chunk in r.iter_content(chunk_size=4096):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
self.progressbar['value'] += len(chunk)  # advance by the bytes actually written
self.attributes("-disabled", False)
install_update()
self.progressbar = Progressbar(self,
orient='horizontal',
length=200,
mode='determinate',
value=0,
maximum=0)
self.progressbar.place(relx=0.5, rely=0.5, anchor="center")
self.wait_text = Button(self, text='Wait!', state="disabled")
self.wait_text.place(x=-83, relx=1.0, y=-33, rely=1.0)
self.start_manager = Thread(target=start_update_manager)
self.start_manager.start()
tmp = Tk()
tmp.geometry(f"{650}x{400}+{int((tmp.winfo_screenwidth() - 650) / 2)}+{int((tmp.winfo_screenheight() - 400) / 2)}")
tmp.withdraw()
try:
response = get(
'https://raw.githubusercontent.com/TerraBoii/guess_the_square_root_game/main/version.txt')
data = response.text
if float(data) > float(__version__):
get_update = askyesno('Software update!',
f'Update Available!\n{_AppName_} {__version__} needs to update to version {data}')
if get_update is True:
UpdateManager(tmp)
elif get_update is False:
tmp.after(0, tmp.destroy)
ShellExecute(0, 'open', 'binaries\\Guess the square root.exe', None, None, 10)
else:
tmp.after(0, tmp.destroy)
ShellExecute(0, 'open', 'binaries\\Guess the square root.exe', None, None, 10)
except Exception:
tmp.after(0, tmp.destroy)
ShellExecute(0, 'open', 'binaries\\Guess the square root.exe', None, None, 10)
tmp.mainloop()
|
qcpump.py
|
import json
import logging
import sys
import threading
import wx
import wx.adv
import wx.grid
import wx.lib.masked
import wx.lib.scrolledpanel
from qcpump import logs, utils
from qcpump.pumps.base import (
EVT_PUMP_COMPLETE,
EVT_PUMP_LOG,
EVT_PUMP_PROGRESS,
)
from qcpump.pumps.registry import get_pump_types, register_pump_types
from qcpump.settings import Settings, get_config_dir, get_settings_file_path
import qcpump.ui as ui
settings = Settings()
PUMP_THREAD_JOIN_TIMEOUT = 5 # timeout after 5s while waiting for pump threads to finish
TOOL_TIP_PUMPING_START = "Click to begin pumping"
TOOL_TIP_PUMPING_PAUSE = "Click to pause pumping"
TOOL_TIP_PUMPING_DIRTY = "Please save or reset all Pump Configurations before beginning"
logger = logs.get_logger("qcpump")
class QCPumpUI(ui.VQCPumpUI):
"""Main user interface"""
def __init__(self, parent, *args, **kwargs):
"""do any initial setup required for Orbis"""
super().__init__(parent, *args, **kwargs)
ico = wx.IconBundle()
icon_path = settings.get_img_path("qcpump.ico")
logger.debug(f"Loading icons from {icon_path}")
ico.AddIcon(icon_path)
self.SetTitle(f"QCPump - {settings.VERSION}")
self.SetIcons(ico)
self.SetMinSize((1024, 768))
self.Fit()
self.Center()
self.do_pump_on_startup.SetValue(settings.PUMP_ON_STARTUP)
self.pump_windows = {}
# do expensive initialization after show event
self._init_finished = False
self._show_completed = True
self._startup_pump_run = False
# used to track when user has asked to stop pumping
self.kill_event = threading.Event()
def log(self, pump_name, level, message):
"""write a log message for a pump. Level is e.g. logging.DEBUG"""
logger.log(level, f"{pump_name}: {message}")
def load_existing_pumps(self):
"""Read configs from disk and set up their pumps"""
# root directory where configuration directories/files are stored
config_path = self.get_pump_config_dir()
pumps_to_load = []
for path in config_path.glob("*/config.json"):
try:
logger.debug(f"Trying to load pump config from {path}")
save_data = json.load(path.open("r"))
logger.debug(f"Loaded pump config from {path}")
pumps_to_load.append((save_data['name'], save_data))
except Exception as e:
self.non_fatal_error(f"Failed to load pump config from {path}", e)
pump_types = get_pump_types()
for name, save_data in sorted(pumps_to_load):
if save_data['type'] not in pump_types:
self.non_fatal_error(f"Failed to initialize pump config from {path}")
continue
try:
self.add_pump_page(save_data['type'], name, save_data['state'])
except Exception as e:
self.non_fatal_error(f"Failed to initialize pump config from {path}", e)
self.remove_pump_page(name)
at_least_one_pump_loaded = self.pump_notebook.GetPageCount() > 0
if at_least_one_pump_loaded:
self.pump_notebook.SetSelection(0)
def load_pump_state(self, name):
"""Load specific pump data from disk. (Used for resetting pump state)"""
config_file = self.get_pump_config_path(name)
state = None
try:
logger.debug(f"Attempting to load '{name}' pump state from {config_file}")
state = json.load(open(config_file, "r"))
logger.debug(f"Loaded '{name}' pump state from {config_file}")
except Exception as e:
self.non_fatal_error(f"Unable to load data from {config_file}", e)
return state
def OnAddPump(self, evt):
"""User requested a new pump to be added"""
# ask user which type of pump to add
dlg = AddPumpDialog(get_pump_types(), list(self.pump_windows.keys()), self)
if dlg.ShowModal() != wx.ID_OK:
return
pump_type_requested = ""
try:
pump_type_requested = dlg.get_pump_type()
self.add_pump_page(pump_type_requested, dlg.get_pump_name())
except Exception as e:
self.fatal_error(f"Unable to add a '{pump_type_requested}' pump.", e)
def add_pump_page(self, pump_type, pump_name, state=None):
"""Create a new PumpWindow and add it to our notebook"""
parent = self.pump_notebook
p = PumpWindow(pump_type, pump_name, parent, style=wx.SP_3D | wx.SP_LIVE_UPDATE)
self.pump_notebook.AddPage(p, pump_name, select=True)
self.pump_windows[pump_name] = p
p.set_state(state)
# I don't know why but you need a size event and to reset the focus
# order to get the property controls to respond
if 'win' not in sys.platform:
p.Layout()
p.Fit()
self.SetFocus()
p.SendSizeEvent()
def save_pump(self, name, state):
"""persist pump state to disk"""
pump_window = self.pump_windows[name]
config_path = self.get_pump_config_path(name)
save_data = {
'type': pump_window.pump_type,
'name': name,
'version': settings.VERSION,
'state': state,
}
try:
f = open(config_path, "w")
except Exception as e:
self.non_fatal_error("Unable to open file to save: {config_path}", e)
return False
try:
json.dump(save_data, f, indent=2)
logger.debug(f"Wrote '{name}' state to {config_path}")
except Exception as e:
self.non_fatal_error("Unable to serialize configuration: {config_path}", e)
return False
return True
def delete_pump(self, name):
"""Remove a pump by deleting its config file and removing it from the notebook"""
config_file = self.get_pump_config_path(name)
try:
# TODO: Should we delete the whole directory?
config_file.unlink()
except Exception as e:
self.non_fatal_error(f"Unable to delete {config_file} from disk", e)
return False
self.remove_pump_page(name)
self.config_changed()
return True
def remove_pump_page(self, name):
page = self.pump_windows.pop(name, None)
if not page:
return
page_idx = self.pump_notebook.GetChildren().index(page)
self.pump_notebook.DeletePage(page_idx)
self.pump_notebook.SendSizeEvent()
def set_pump_name(self, page_name, page_label):
"""Update a notebook page label"""
idx = self.pump_notebook.GetChildren().index(self.pump_windows[page_name])
try:
self.pump_notebook.SetPageText(idx, page_label)
except RuntimeError:
# page already deleted
pass
def pump_stopped(self, name):
"""If there are no pumps actually running, but we are still in a pump
running state put us back into a stopped state. This can occur if say
there is only one pump and it has an error and terminates itself"""
pumps_running = self.run_pumps.GetValue()
if not pumps_running:
# we're already stopped so no need to do anything
return False
for pump_window in self.pump_windows.values():
if pump_window.is_running():
# at least one pump is running so quit looking
return
# no pumps running but we're still in a running state
self.stop_pumps()
def get_pump_config_dir(self):
"""Return the user directory used for saving pump configuration data"""
return get_config_dir() / "pumps"
def get_pump_config_path(self, name):
"""Return the path of a specific pumps config file. If the file doesn't
exist, it will be created."""
name = utils.clean_filename(name)
dir_path = self.get_pump_config_dir() / name
path = dir_path / ("config.json")
if not dir_path.is_dir():
try:
dir_path.mkdir(parents=True, exist_ok=True)
except Exception as e:
self.fatal_error(f"Unable to create configuration directory {dir_path}.", e)
if not path.exists():
try:
path.touch()
logger.debug(f"Created config file for {name} at {path}")
except Exception as e:
self.non_fatal_error(f"Unable to create configuration file at {path}.", e)
return path
def fatal_error(self, msg, exception=None):
"""The application can't recover from an error. Log the error, show user a message and quit"""
if exception:
logger.exception(msg)
wx.CallAfter(self._fatal_error, msg)
def _fatal_error(self, msg):
wx.MessageBox(f"Fatal Error: {msg}\n\nPlease check the log file for details.")
self.destroy()
def non_fatal_error(self, msg, exception=None):
"""The application can recover from an error. Log the error, show user a message and continue"""
if exception:
logger.exception(msg)
wx.CallAfter(wx.MessageBox, f"Warning: {msg}\n\nPlease check the log file for details.")
def start_pumps(self):
"""Configs are all valid. Start the pumps!"""
logger.debug("Starting pumps")
self.run_pumps.SetValue(True)
# we don't want the user editing configuration data while pumping is occurring
self.disable_pump_windows()
# make sure kill event is cleared since it might be set from previous runs
self.kill_event.clear()
valid_count = 0
for name, pump_window in self.pump_windows.items():
if not pump_window.pump.active:
pump_window.update_status(0, "Not running. Inactive")
continue
if pump_window.pump.valid:
valid_count += 1
logger.debug(f"Starting pump '{name}'")
pump_window.start_pumping()
else:
pump_window.update_status(0, "Not running. Invalid configuration")
if valid_count:
self.status_bar.SetStatusText(f"Running {valid_count} pumps")
self.run_pumps.SetLabelText("Stop Pumps")
self.run_pumps.SetToolTip(TOOL_TIP_PUMPING_PAUSE)
else:
self.status_bar.SetStatusText("Pumps will not run since there are no valid pumps")
self.stop_pumps()
def pump_on_startup(self):
"""runs after initializing"""
validation_running = False
for pump_window in self.pump_windows.values():
if pump_window.pump.active and not pump_window.pump.initial_validation_complete:
validation_running = True
break
if not validation_running and not self._startup_pump_run:
self._startup_pump_run = True
self.start_pumps()
def stop_pumps(self):
"""Set the kill event and stop all pumps"""
self.kill_event.set()
logger.debug("Stopping pumps. Kill Event Set")
npumps = len(self.pump_windows)
for idx, (name, pump_window) in enumerate(self.pump_windows.items()):
self.status_bar.SetStatusText(f"Waiting for {npumps - idx}/{npumps} Pumps to finish")
if pump_window.is_running():
logger.debug(f"Stopping pump {name}")
pump_window.stop_pumping()
else:
logger.debug(f"Pump {name} already stopped")
self.status_bar.SetStatusText("Pumps all stopped")
self.enable_pump_windows()
self.run_pumps.SetValue(False)
self.run_pumps.SetLabelText("Run Pumps")
self.run_pumps.SetToolTip(TOOL_TIP_PUMPING_START)
def enable_pump_windows(self):
"""Prevent user from editing any pump configs"""
for pump_window in self.pump_windows.values():
pump_window.pump.Enable(True)
pump_window.pump.SetToolTip("")
self.add_pump.Enable(True)
self.add_pump.SetToolTip("")
def disable_pump_windows(self):
"""Enable user editing of pump configs"""
for pump_window in self.pump_windows.values():
pump_window.pump.Enable(False)
pump_window.pump.SetToolTip("Please stop all Pumps before editing configurations")
self.add_pump.Enable(False)
self.add_pump.SetToolTip("Stop all pumps before adding new ones")
def config_changed(self):
"""PumpWindows can use this to inform us configuration value changed"""
# we don't want to let user run pumps if any of the configurations
# have been modified and not saved or reset
dirty = self.get_dirty_pumps()
enable = len(dirty) == 0
self.run_pumps.Enable(enable)
self.run_pumps.SetToolTip(TOOL_TIP_PUMPING_START if enable else TOOL_TIP_PUMPING_DIRTY)
def get_dirty_pumps(self):
"""Return any pump window which has a non-saved (dirty) state"""
return [pw.name for pw in self.pump_windows.values() if pw.pump.dirty]
def OnPumpOnStartup(self, evt):
settings_path = get_settings_file_path()
with settings_path.open('r') as f:
data = json.load(f)
data['PUMP_ON_STARTUP'] = self.do_pump_on_startup.IsChecked()
with settings_path.open("w") as f:
json.dump(data, f, indent=4)
def OnRunPumpsToggle(self, evt):
"""User manually toggled the run pumps button"""
start_pumps = self.run_pumps.GetValue()
if start_pumps:
self.start_pumps()
else:
self.stop_pumps()
def OnAbout(self, evt):
"""User asked to see About dialog"""
items = [
("About QCPump Version", settings.VERSION),
("", ""),
("Config File Location", self.get_pump_config_dir()),
("Log File Location", str(logs.get_log_dir())),
("Settings File Location", str(settings.fname)),
("Author", "Randy Taylor (QATrack+ Project)"),
("Contact", "randy@multileaf.ca"),
("Web", "https://www.multileaf.ca"),
]
lines = []
for label, val in items:
sep = ':\n ' if val not in (None, '') else ''
lines.append("%s%s %s" % (label, sep, str(val)))
wx.MessageBox('\n'.join(lines), "About QCPump", style=wx.OK, parent=self)
def OnClose(self, evt):
"""OS level close event"""
# if the event can't be vetoed, the window must get destroyed.
# https://www.wxpython.org/Phoenix/docs/html/wx.CloseEvent.html
if evt.CanVeto() and not self.confirm_quit():
evt.Veto()
return
self.destroy()
def OnQuit(self, evt):
"""Menu/user level close event"""
if self.confirm_quit():
self.destroy()
def confirm_quit(self):
"""Ensure that the user wants to quit if there are dirty pumps"""
dirty = self.get_dirty_pumps()
if dirty:
if len(dirty) == 1:
msg = f"The config file for {dirty[0]} has not been saved. Quit without saving?"
else:
msg = f"The config files for {', '.join(dirty)} have not been saved. Quit without saving?"
if wx.MessageBox(msg, "Quit without saving?", wx.ICON_QUESTION | wx.YES_NO) != wx.YES:
return False
return True
def destroy(self):
"""make sure pumps are stopped before killing"""
self.stop_pumps()
self.Destroy()
def OnShow(self, event):
self._show_completed = True
def OnIdle(self, event):
if not self._init_finished and self._show_completed:
self._init_finished = True
self.log("qcpump", logging.DEBUG, "Starting to load existing pumps")
self.load_existing_pumps()
self.log("qcpump", logging.DEBUG, "Completed load of existing pumps")
if settings.PUMP_ON_STARTUP and not self._startup_pump_run:
self.pump_on_startup()
class StatusPanel(ui.VStatusPanel):
"""UI element for displaying progress and log messages from pumps"""
def __init__(self, *args, **kwargs):
"""Set up this status panel by adding our logging grid"""
super().__init__(*args, **kwargs)
self.log_grid = logs.LogGrid(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, 0)
self.GetSizer().Add(self.log_grid, 1, wx.ALL | wx.EXPAND, 5)
def update_progress(self, progress, status):
"""Update the progress bar for this pumps status panel"""
self.progress.SetValue(min(100, max(progress, 0)))
self.status.SetLabelText(status)
def log(self, level, message):
"""Write a log message to the logging grid"""
self.log_grid.log(level, message)
def clear_log(self):
"""Clean out log grid"""
self.log_grid.clear()
class PumpWindow(wx.SplitterWindow):
"""A split window consisting of a pumps config on one side and status panel on the other"""
def __init__(self, pump_type, pump_name, *args, **kwargs):
"""Initial configuration our pump window with the pump config & status windows"""
self.pump_type = pump_type
self.name = pump_name
self.logger = logs.get_logger(self.name)
super().__init__(*args, **kwargs)
self.app = self.GetTopLevelParent()
# initialize the Pump Configuration UI window
PumpTypeClass = get_pump_types()[self.pump_type]
self.pump = PumpTypeClass(
self,
wx.ID_ANY,
wx.DefaultPosition,
wx.DefaultSize,
wx.VSCROLL,
)
self.status_split = StatusPanel(self, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL | wx.VSCROLL)
self.SplitVertically(self.pump, self.status_split, int(0.5 * self.Parent.GetSize()[0]))
self.SetSashPosition(int(0.5 * self.Parent.GetSize()[0]))
self.timer = wx.Timer(self)
self.pump_thread = None
self.Bind(wx.EVT_TIMER, self.OnPumpTimer, source=self.timer)
self.Bind(EVT_PUMP_PROGRESS, self.OnPumpProgress)
self.Bind(EVT_PUMP_COMPLETE, self.OnPumpComplete)
self.Bind(EVT_PUMP_LOG, self.OnPumpLog)
def is_running(self):
"""Is our pump running?"""
return self.timer.IsRunning() or self.pump_thread is not None
def OnPumpTimer(self, evt):
"""Interval timer triggered. Run the pump!"""
self._run_pump()
def OnPumpProgress(self, evt):
"""Pump thread sent a progress event. Update the status accordingly"""
result = evt.GetValue()
self.update_status(result['progress'], result['message'])
def OnPumpComplete(self, evt):
"""Pump thread sent a pumping complete message. We can clear the thread now"""
self.pump_thread = None
def OnPumpLog(self, evt):
"""Pump thread sent a logging message."""
result = evt.GetValue()
self.logger.log(result['level'], result['message'])
self.status_split.log(result['level'], result['message'])
def set_state(self, state):
"""Set the state for this pump"""
self.pump.configure(self.pump_type, self.name, state=state)
def start_pumping(self):
"""Run the pump immediately and then every interval seconds after (via OnPumpTimer)"""
self.status_split.clear_log()
self._run_pump()
interval = self.pump.get_config_value("Pump", "interval (s)")
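# wx.Timer expects milliseconds, so the configured interval (seconds) is scaled by 1000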
self.timer.Start(interval * 1000)
def _run_pump(self):
"""If our pump is not already running, start it up."""
if self.pump_thread is None:
self.pump_thread = threading.Thread(target=self.pump.run, args=(self.app.kill_event,), daemon=True)
self.pump_thread.start()
def stop_pumping(self):
"""App requested we stop pumping. Note the apps kill_event is set by this point"""
self.timer.Stop()
if self.pump_thread:
# Give the thread some time to finish if it needs it
self.pump_thread.join(PUMP_THREAD_JOIN_TIMEOUT)
self.pump_thread = None
self.app.pump_stopped(self.name)
def set_dirty(self, dirty):
"""Tell main window whether this pump has changed fields"""
name = self.name if not dirty else f"*{self.name}*"
self.app.set_pump_name(self.name, name)
self.app.config_changed()
def update_status(self, progress, message):
"""Update the status indicators"""
self.status_split.update_progress(progress, message)
def delete(self):
"""Delete this pump"""
self.app.delete_pump(self.name)
def save(self):
"""Save pumps state to disk"""
return self.app.save_pump(self.name, self.pump.state)
def reset(self):
"""Reset the pumps state to last save"""
return self.app.load_pump_state(self.name)['state']
def log(self, level, message):
self.status_split.log(level, message)
class AddPumpDialog(ui.VAddPumpDialog):
"""Dialog for user to select the pump type and name that they want to add.
Checks for uniqueness of pump names."""
def __init__(self, pump_types, existing_pump_names, *args, **kwargs):
"""Initialize AddPumpDialog by setting the available pump types and and
pump names that are already in use so uniqueness of pump name can be
enforced"""
super().__init__(*args, **kwargs)
self.existing_pump_names = [x.lower() for x in existing_pump_names]
self.pump_types = {}
for name, pt in pump_types.items():
key = pt.DISPLAY_NAME or name
self.pump_types[key] = name
self.pump_type.SetItems(list(sorted(self.pump_types.keys())))
def OnPumpTypeChange(self, evt):
"""User changed pump type, set default pump name if required"""
self.pump_name.SetValue(evt.GetString())
def OnOk(self, evt):
"""Do validation after user clicks OK"""
pump_name = self.pump_name.GetValue().lower()
selected_pump_type = self.pump_type.GetStringSelection()
if not selected_pump_type:
wx.MessageBox("You must choose a Pump Type", "Missing Pump Type", wx.OK | wx.OK_DEFAULT | wx.ICON_ERROR)
return
if pump_name in self.existing_pump_names:
wx.MessageBox(
"That Pump name is already in use. Please choose a new name", "Duplicate Pump Name",
wx.OK | wx.OK_DEFAULT | wx.ICON_ERROR
)
return
elif not pump_name and selected_pump_type in self.existing_pump_names:
wx.MessageBox(
"Please enter a unique name for this Pump", "Missing Pump Name", wx.OK | wx.OK_DEFAULT | wx.ICON_ERROR
)
return
elif not pump_name and selected_pump_type not in self.existing_pump_names:
# name was blank but no pump exists with name of selected pump type, so we'll use that
self.pump_name.SetValue(selected_pump_type)
evt.Skip()
def get_pump_type(self):
"""Return the pump type name that is currently selected"""
return self.pump_types[self.pump_type.GetStringSelection()]
def get_pump_name(self):
"""Return the string currently entered for the pump name"""
return self.pump_name.GetValue()
def main():
"""Launch the QCPump program"""
app = wx.App(
useBestVisual=True,
redirect=False, # leave as False. stderr/stdout will be redirected below
clearSigInt=True, # set to True to allow a Ctrl+C to kill app
)
if logger is None:
loc = logs.get_log_location()
msg = (
f"Failed to create log file at {loc}.\n\n"
f"Please ensure you have write permissions on the directory {loc.parent}"
)
wx.MessageBox(f"Fatal Error: {msg}", style=wx.ICON_ERROR, caption="Unable to launch")
return
try:
# Before launching GUI we need to first see what kind of pump types are
# available
register_pump_types()
frame = QCPumpUI(None)
frame.Show()
except Exception:
logger.exception("Unhandled exception during initialization")
msg = (
"Unhandled exception during initialization. Check " +
str(logs.get_log_location('qcpump')) + " for details"
)
wx.MessageBox(f"Fatal Error: {msg}", style=wx.ICON_ERROR, caption="Unable to launch.")
return
if not settings.DEBUG:
stdio = logs.get_log_location("stdio")
app.RedirectStdio(stdio)
try:
app.MainLoop()
except Exception:
logger.exception("Unhandled exception in app.MainLoop")
if __name__ == "__main__":
main()
|
JogadorMinmax.py
|
# Jogador
# Created on 11 June 2021
from __future__ import annotations
from typing import List
from Jogador import Jogador
from Jogada import Jogada
from Tabuleiro import Tabuleiro
import time
import threading
from TabuleiroGoMoku import TabuleiroGoMoku
import numpy as np
class JogadorMinMax(Jogador):
MAXNIVEL = 10
TEMPOMAXIMO = 1.0
def __init__(self, nome):
Jogador.__init__(self, nome)
self.jogada = Jogada(-1, -1, -1, -1)
self.jogadas_possiveis = []
# Computes a new move for the current board and player.
def calculaJogada(self, tab, jogadorCor):
tempo1 = time.time()
usado = 0.0
self.jogada = Jogada(-1, -1, -1, -1)
self.jogadas_possiveis = tab.obtemJogadasPossiveis(jogadorCor)
for prof in range(1, self.MAXNIVEL):
t1 = threading.Thread(
target=self.setup, args=(tab, jogadorCor, prof))
t1.start()
t1.join(self.TEMPOMAXIMO - usado)
tempo2 = time.time()
usado = tempo2 - tempo1
print(f"Profundidade: {prof} Tempo usado: {round(usado, 3)} Jogada: {self.jogada.getLinha()}-{self.jogada.getColuna()}")
if usado >= self.TEMPOMAXIMO:
break
return self.jogada
@staticmethod
def play(tab, jogada, jogador):
"""
Performs a move on a copy of the given board
:rtype: TabuleiroGoMoku
"""
child_tab = TabuleiroGoMoku()
child_tab.copiaTab(tab.getTab())
child_tab.move(jogador, jogada)
return child_tab
def funcaoUtilidade(self, jogador, tab):
value_jogador = self.heuristicaBasica(jogador, tab.getTab(), tab)
return value_jogador
def setup(self, tab: TabuleiroGoMoku, jogador: int, prof: int):
"""
Starts the search tree for the minimax algorithm
:param tab:
:param jogador:
:param prof: Maximum depth for the iterative deepening search
"""
depth = 1
best_score = -np.inf
beta = np.inf
for jogada in self.jogadas_possiveis:
genchild = self.play(tab, jogada, jogador)
value = self.minDec(genchild, jogador, depth, prof, best_score, beta)
if value > best_score:
self.jogada = jogada
best_score = value
def maxDec(self, tab: TabuleiroGoMoku, jogador: int, depth: int, max_depth: int, alpha: float, beta: float) -> int:
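# Maximizing ply of the depth-limited minimax search with alpha-beta pruning:
# return the heuristic value at the depth limit, otherwise the best child
# value, cutting off as soon as it reaches beta.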
if depth == max_depth:
return self.funcaoUtilidade(jogador, tab)
max_value = -np.inf
for i in tab.obtemJogadasPossiveis(jogador):
child_tab = self.play(tab, i, jogador)
max_value = max(max_value, self.minDec(child_tab, jogador, depth + 1, max_depth, alpha, beta))
if max_value >= beta:
return max_value
alpha = max(alpha, max_value)
return max_value
def minDec(self, tab: TabuleiroGoMoku, jogador: int, depth: int, max_depth: int, alpha: float, beta: float) -> int:
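# Minimizing ply (opponent to move) with alpha-beta pruning: cuts off as soon
# as the value drops to alpha or below.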
if depth == max_depth:
return self.funcaoUtilidade(jogador, tab)
min_value = np.inf
oponente = (jogador + 1) % 2
for i in tab.obtemJogadasPossiveis(oponente):
child_tab = self.play(tab, i, oponente)
min_value = min(min_value, self.maxDec(child_tab, jogador, depth + 1, max_depth, alpha, beta))
if min_value <= alpha:
return min_value
beta = min(beta, min_value)
return min_value
def heuristicaBasica(self, jogador, tab, objTab):
"""
Copied from TabuleiroGoMoku.py so the opponent's pieces weigh more heavily against us
Returns a heuristic value of the board for a given player
:param jogador player number
:param tab board
:return value of the board
"""
valor = 0
for linha in range(0, objTab.DIM):
for coluna in range(0, objTab.DIM):
if tab[linha][coluna] == jogador:
temp = self.contaHeuristica(
jogador, linha, coluna, 1, 0, tab, objTab)
if temp == 100:
return 10000
valor += temp
temp = self.contaHeuristica(
jogador, linha, coluna, 0, 1, tab, objTab)
if temp == 100:
return 10000
valor += temp
temp = self.contaHeuristica(
jogador, linha, coluna, 1, -1, tab, objTab)
if temp == 100:
return 10000
valor += temp
temp = self.contaHeuristica(
jogador, linha, coluna, 1, 1, tab, objTab)
if temp == 100:
return 10000
valor += temp
elif tab[linha][coluna] != objTab.LIVRE:
valor -= 5 * self.contaHeuristica(objTab.oponente(
jogador), linha, coluna, 1, 0, tab, objTab)
valor -= 5 * self.contaHeuristica(objTab.oponente(
jogador), linha, coluna, 0, 1, tab, objTab)
valor -= 5 * self.contaHeuristica(objTab.oponente(
jogador), linha, coluna, 1, -1, tab, objTab)
valor -= 5 * self.contaHeuristica(objTab.oponente(
jogador), linha, coluna, 1, 1, tab, objTab)
return valor
@staticmethod
def contaHeuristica(jogador, linha, coluna, dir_x, dir_y, tab, obj_tab):
"""
Copied from TabuleiroGoMoku.py so we could broaden the possibilities
Counts the number of a player's pieces from the given position in the
specified direction, taking the advantage into account. The direction values
dir_x and dir_y must be 0, 1, or -1, and at least one of them must be
nonzero.
"""
boqueado_ponta1 = boqueado_ponta2 = False
lin = linha + dir_x # set the direction.
col = coluna + dir_y
while not obj_tab.saiuTabuleiro(lin, col) and tab[lin][col] == jogador:
# The square is on the board and contains one of the player's pieces.
lin += dir_x # Move to the next square.
col += dir_y
# check whether this end of the line is blocked
if obj_tab.saiuTabuleiro(lin, col) or tab[lin][col] != obj_tab.LIVRE:
boqueado_ponta1 = True
lin = lin - dir_x # Look in the opposite direction.
col = col - dir_y
ct = 0 # Number of the player's pieces in a row.
while not obj_tab.saiuTabuleiro(lin, col) and tab[lin][col] == jogador:
# The square is on the board and contains one of the player's pieces.
ct += 1
lin -= dir_x # Move to the next square.
col -= dir_y
# check whether this end of the line is blocked
if obj_tab.saiuTabuleiro(lin, col) or tab[lin][col] != obj_tab.LIVRE:
boqueado_ponta2 = True
# Check whether the line is blocked and cannot be completed
if ct < 5 and boqueado_ponta1 and boqueado_ponta2:
ct = 0
elif ct == 5 or (ct == 4 and not boqueado_ponta1 and not boqueado_ponta2):
ct = 100
elif (ct == 4 and boqueado_ponta1 and not boqueado_ponta2) or (ct == 4 and not boqueado_ponta1 and boqueado_ponta2):
ct = 90
elif ct == 3 and not boqueado_ponta1 and not boqueado_ponta2:
ct = 75
elif (ct == 3 and boqueado_ponta1 and not boqueado_ponta2) or (ct == 3 and not boqueado_ponta1 and boqueado_ponta2):
ct = 50
elif ct == 2 and not boqueado_ponta1 and not boqueado_ponta2:
ct = 25
elif (ct == 2 and boqueado_ponta1 and not boqueado_ponta2) or (ct == 2 and not boqueado_ponta1 and boqueado_ponta2):
ct = 15
elif ct == 1 and not boqueado_ponta1 and not boqueado_ponta2:
ct = 10
elif (ct == 1 and boqueado_ponta1 and not boqueado_ponta2) or (ct == 1 and not boqueado_ponta1 and boqueado_ponta2):
ct = 5
return ct
if __name__ == "__main__":
import sys
JogadorMinMax(sys.argv[1]).joga()
print("Fim")
|
main.py
|
import myserial
import threading
import time
t1 = threading.Thread(target = myserial.serial_port0.receivemsg)
t1.start()
while True:
if myserial.serial_port0.recvmsg != "":
print(myserial.serial_port0.recvmsg)
myserial.serial_port0.recvmsg = ""
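# NOTE: this loop busy-polls the shared receive buffer; a short time.sleep()
# per iteration (time is already imported above) would avoid pegging a CPU core.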
|
test_setup.py
|
"""Test component/platform setup."""
# pylint: disable=protected-access
import asyncio
import os
from unittest import mock
import threading
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import (
EVENT_HOMEASSISTANT_START, EVENT_COMPONENT_LOADED)
import homeassistant.config as config_util
from homeassistant import setup, loader
import homeassistant.util.dt as dt_util
from homeassistant.helpers.config_validation import (
PLATFORM_SCHEMA_2 as PLATFORM_SCHEMA, PLATFORM_SCHEMA_BASE)
from homeassistant.helpers import discovery
from tests.common import \
get_test_home_assistant, MockModule, MockPlatform, \
assert_setup_component, get_test_config_dir
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
_LOGGER = logging.getLogger(__name__)
class TestSetup:
"""Test the bootstrap utils."""
hass = None
backup_cache = None
# pylint: disable=invalid-name, no-self-use
def setup_method(self, method):
"""Set up the test."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Clean up."""
self.hass.stop()
def test_validate_component_config(self):
"""Test validating component configuration."""
config_schema = vol.Schema({
'comp_conf': {
'hello': str
}
}, required=True)
loader.set_component(
self.hass,
'comp_conf', MockModule('comp_conf', config_schema=config_schema))
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': None
})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': {}
})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(0):
assert not setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
'invalid': 'extra',
}
})
self.hass.data.pop(setup.DATA_SETUP)
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'comp_conf', {
'comp_conf': {
'hello': 'world',
}
})
def test_validate_platform_config(self):
"""Test validating platform configuration."""
platform_schema = PLATFORM_SCHEMA.extend({
'hello': str,
})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({
})
loader.set_component(
self.hass,
'platform_conf',
MockModule('platform_conf',
platform_schema_base=platform_schema_base))
loader.set_component(
self.hass,
'platform_conf.whatever',
MockPlatform('whatever',
platform_schema=platform_schema))
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
'invalid': 'extra',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
'platform_conf 2': {
'platform': 'whatever',
'invalid': True
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'not_existing',
'hello': 'world',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': [{
'platform': 'whatever',
'hello': 'world',
}]
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
# Any falsey platform config will be ignored (None, {}, etc)
with assert_setup_component(0) as config:
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': None
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {}
})
assert 'platform_conf' in self.hass.config.components
assert not config['platform_conf'] # empty
def test_validate_platform_config_2(self):
"""Test component PLATFORM_SCHEMA_BASE prio over PLATFORM_SCHEMA."""
platform_schema = PLATFORM_SCHEMA.extend({
'hello': str,
})
platform_schema_base = PLATFORM_SCHEMA_BASE.extend({
'hello': 'world',
})
loader.set_component(
self.hass,
'platform_conf',
MockModule('platform_conf',
platform_schema=platform_schema,
platform_schema_base=platform_schema_base))
loader.set_component(
self.hass,
'platform_conf.whatever',
MockPlatform('whatever',
platform_schema=platform_schema))
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'platform_conf', {
# fail: no extra keys allowed in platform schema
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
'invalid': 'extra',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
# pass
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
# fail: key hello violates component platform_schema_base
'platform_conf 2': {
'platform': 'whatever',
'hello': 'there'
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
def test_validate_platform_config_3(self):
"""Test fallback to component PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE.extend({
'hello': str,
})
platform_schema = PLATFORM_SCHEMA.extend({
'cheers': str,
'hello': 'world',
})
loader.set_component(
self.hass,
'platform_conf',
MockModule('platform_conf',
platform_schema=component_schema))
loader.set_component(
self.hass,
'platform_conf.whatever',
MockPlatform('whatever',
platform_schema=platform_schema))
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
# fail: no extra keys allowed
'platform': 'whatever',
'hello': 'world',
'invalid': 'extra',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
# pass
'platform_conf': {
'platform': 'whatever',
'hello': 'world',
},
# fail: key hello violates component platform_schema
'platform_conf 2': {
'platform': 'whatever',
'hello': 'there'
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
def test_validate_platform_config_4(self):
"""Test entity_namespace in PLATFORM_SCHEMA."""
component_schema = PLATFORM_SCHEMA_BASE
platform_schema = PLATFORM_SCHEMA
loader.set_component(
self.hass,
'platform_conf',
MockModule('platform_conf',
platform_schema_base=component_schema))
loader.set_component(
self.hass,
'platform_conf.whatever',
MockPlatform('whatever',
platform_schema=platform_schema))
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'platform_conf', {
'platform_conf': {
# pass: entity_namespace accepted by PLATFORM_SCHEMA
'platform': 'whatever',
'entity_namespace': 'yummy',
}
})
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('platform_conf')
def test_component_not_found(self):
"""setup_component should not crash if component doesn't exist."""
assert not setup.setup_component(self.hass, 'non_existing')
def test_component_not_double_initialized(self):
"""Test we do not set up a component twice."""
mock_setup = mock.MagicMock(return_value=True)
loader.set_component(
self.hass, 'comp', MockModule('comp', setup=mock_setup))
assert setup.setup_component(self.hass, 'comp')
assert mock_setup.called
mock_setup.reset_mock()
assert setup.setup_component(self.hass, 'comp')
assert not mock_setup.called
@mock.patch('homeassistant.util.package.install_package',
return_value=False)
def test_component_not_installed_if_requirement_fails(self, mock_install):
"""Component setup should fail if requirement can't install."""
self.hass.config.skip_pip = False
loader.set_component(
self.hass,
'comp', MockModule('comp', requirements=['package==0.0.1']))
assert not setup.setup_component(self.hass, 'comp')
assert 'comp' not in self.hass.config.components
def test_component_not_setup_twice_if_loaded_during_other_setup(self):
"""Test component setup while waiting for lock is not set up twice."""
result = []
@asyncio.coroutine
def async_setup(hass, config):
"""Tracking Setup."""
result.append(1)
loader.set_component(
self.hass,
'comp', MockModule('comp', async_setup=async_setup))
def setup_component():
"""Set up the component."""
setup.setup_component(self.hass, 'comp')
thread = threading.Thread(target=setup_component)
thread.start()
setup.setup_component(self.hass, 'comp')
thread.join()
assert len(result) == 1
def test_component_not_setup_missing_dependencies(self):
"""Test we do not set up a component if not all dependencies loaded."""
deps = ['non_existing']
loader.set_component(
self.hass, 'comp', MockModule('comp', dependencies=deps))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
loader.set_component(
self.hass, 'non_existing', MockModule('non_existing'))
assert setup.setup_component(self.hass, 'comp', {})
def test_component_failing_setup(self):
"""Test component that fails setup."""
loader.set_component(
self.hass, 'comp',
MockModule('comp', setup=lambda hass, config: False))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_exception_setup(self):
"""Test component that raises exception during setup."""
def exception_setup(hass, config):
"""Raise exception."""
raise Exception('fail!')
loader.set_component(
self.hass, 'comp', MockModule('comp', setup=exception_setup))
assert not setup.setup_component(self.hass, 'comp', {})
assert 'comp' not in self.hass.config.components
def test_component_setup_with_validation_and_dependency(self):
"""Test all config is passed to dependencies."""
def config_check_setup(hass, config):
"""Test that config is passed in."""
if config.get('comp_a', {}).get('valid', False):
return True
raise Exception('Config not passed in: {}'.format(config))
loader.set_component(
self.hass, 'comp_a',
MockModule('comp_a', setup=config_check_setup))
loader.set_component(
self.hass, 'switch.platform_a', MockPlatform('comp_b', ['comp_a']))
setup.setup_component(self.hass, 'switch', {
'comp_a': {
'valid': True
},
'switch': {
'platform': 'platform_a',
}
})
assert 'comp_a' in self.hass.config.components
def test_platform_specific_config_validation(self):
"""Test platform that specifies config."""
platform_schema = PLATFORM_SCHEMA.extend({
'valid': True,
}, extra=vol.PREVENT_EXTRA)
mock_setup = mock.MagicMock(spec_set=True)
loader.set_component(
self.hass,
'switch.platform_a',
MockPlatform(platform_schema=platform_schema,
setup_platform=mock_setup))
with assert_setup_component(0, 'switch'):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'invalid': True
}
})
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('switch')
with assert_setup_component(0):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True,
'invalid_extra': True,
}
})
assert mock_setup.call_count == 0
self.hass.data.pop(setup.DATA_SETUP)
self.hass.config.components.remove('switch')
with assert_setup_component(1):
assert setup.setup_component(self.hass, 'switch', {
'switch': {
'platform': 'platform_a',
'valid': True
}
})
assert mock_setup.call_count == 1
def test_disable_component_if_invalid_return(self):
"""Test disabling component if invalid return."""
loader.set_component(
self.hass,
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: None))
assert not setup.setup_component(self.hass, 'disabled_component')
assert loader.get_component(self.hass, 'disabled_component') is None
assert 'disabled_component' not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
loader.set_component(
self.hass,
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: False))
assert not setup.setup_component(self.hass, 'disabled_component')
assert loader.get_component(
self.hass, 'disabled_component') is not None
assert 'disabled_component' not in self.hass.config.components
self.hass.data.pop(setup.DATA_SETUP)
loader.set_component(
self.hass,
'disabled_component',
MockModule('disabled_component', setup=lambda hass, config: True))
assert setup.setup_component(self.hass, 'disabled_component')
assert loader.get_component(
self.hass, 'disabled_component') is not None
assert 'disabled_component' in self.hass.config.components
def test_all_work_done_before_start(self):
"""Test all init work done till start."""
call_order = []
def component1_setup(hass, config):
"""Set up mock component."""
discovery.discover(hass, 'test_component2',
component='test_component2')
discovery.discover(hass, 'test_component3',
component='test_component3')
return True
def component_track_setup(hass, config):
"""Set up mock component."""
call_order.append(1)
return True
loader.set_component(
self.hass,
'test_component1',
MockModule('test_component1', setup=component1_setup))
loader.set_component(
self.hass,
'test_component2',
MockModule('test_component2', setup=component_track_setup))
loader.set_component(
self.hass,
'test_component3',
MockModule('test_component3', setup=component_track_setup))
@callback
def track_start(event):
"""Track start event."""
call_order.append(2)
self.hass.bus.listen_once(EVENT_HOMEASSISTANT_START, track_start)
self.hass.add_job(setup.async_setup_component(
self.hass, 'test_component1', {}))
self.hass.block_till_done()
self.hass.start()
assert call_order == [1, 1, 2]
@asyncio.coroutine
def test_component_cannot_depend_config(hass):
"""Test config is not allowed to be a dependency."""
result = yield from setup._async_process_dependencies(
hass, None, 'test', ['config'])
assert not result
@asyncio.coroutine
def test_component_warn_slow_setup(hass):
"""Warn we log when a component setup takes a long time."""
loader.set_component(
hass, 'test_component1', MockModule('test_component1'))
with mock.patch.object(hass.loop, 'call_later', mock.MagicMock()) \
as mock_call:
result = yield from setup.async_setup_component(
hass, 'test_component1', {})
assert result
assert mock_call.called
assert len(mock_call.mock_calls) == 3
timeout, logger_method = mock_call.mock_calls[0][1][:2]
assert timeout == setup.SLOW_SETUP_WARNING
assert logger_method == setup._LOGGER.warning
assert mock_call().cancel.called
@asyncio.coroutine
def test_platform_no_warn_slow(hass):
"""Do not warn for long entity setup time."""
loader.set_component(
hass, 'test_component1',
MockModule('test_component1', platform_schema=PLATFORM_SCHEMA))
with mock.patch.object(hass.loop, 'call_later', mock.MagicMock()) \
as mock_call:
result = yield from setup.async_setup_component(
hass, 'test_component1', {})
assert result
assert not mock_call.called
async def test_when_setup_already_loaded(hass):
"""Test when setup."""
calls = []
async def mock_callback(hass, component):
"""Mock callback."""
calls.append(component)
setup.async_when_setup(hass, 'test', mock_callback)
await hass.async_block_till_done()
assert calls == []
hass.config.components.add('test')
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {
'component': 'test'
})
await hass.async_block_till_done()
assert calls == ['test']
# Event listener should be gone
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {
'component': 'test'
})
await hass.async_block_till_done()
assert calls == ['test']
# Should be called right away
setup.async_when_setup(hass, 'test', mock_callback)
await hass.async_block_till_done()
assert calls == ['test', 'test']
|
logging.py
|
"""Logging utilities."""
import asyncio
from asyncio.events import AbstractEventLoop
from functools import partial, wraps
import inspect
import logging
import threading
import traceback
from typing import Any, Callable, Optional
from .async_ import run_coroutine_threadsafe
class HideSensitiveDataFilter(logging.Filter):
"""Filter API password calls."""
def __init__(self, text: str) -> None:
"""Initialize sensitive data filter."""
super().__init__()
self.text = text
def filter(self, record: logging.LogRecord) -> bool:
"""Hide sensitive data in messages."""
record.msg = record.msg.replace(self.text, '*******')
return True
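# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# Attaching the filter to a logger masks the given secret in emitted messages; the
# logger name and secret below are made up. Kept as a comment so nothing runs on import:
#
#     example_logger = logging.getLogger("example.component")
#     example_logger.addFilter(HideSensitiveDataFilter("hunter2"))
#     example_logger.warning("token is hunter2")   # emitted as "token is *******"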
# pylint: disable=invalid-name
class AsyncHandler:
"""Logging handler wrapper to add an async layer."""
def __init__(
self, loop: AbstractEventLoop, handler: logging.Handler) -> None:
"""Initialize async logging handler wrapper."""
self.handler = handler
self.loop = loop
self._queue = asyncio.Queue(loop=loop) # type: asyncio.Queue
self._thread = threading.Thread(target=self._process)
# Delegate from handler
self.setLevel = handler.setLevel
self.setFormatter = handler.setFormatter
self.addFilter = handler.addFilter
self.removeFilter = handler.removeFilter
self.filter = handler.filter
self.flush = handler.flush
self.handle = handler.handle
self.handleError = handler.handleError
self.format = handler.format
self._thread.start()
def close(self) -> None:
"""Wrap close to handler."""
self.emit(None)
async def async_close(self, blocking: bool = False) -> None:
"""Close the handler.
When blocking=True, will wait till closed.
"""
await self._queue.put(None)
if blocking:
while self._thread.is_alive():
await asyncio.sleep(0, loop=self.loop)
def emit(self, record: Optional[logging.LogRecord]) -> None:
"""Process a record."""
ident = self.loop.__dict__.get("_thread_ident")
# inside eventloop
if ident is not None and ident == threading.get_ident():
self._queue.put_nowait(record)
# from a thread/executor
else:
self.loop.call_soon_threadsafe(self._queue.put_nowait, record)
def __repr__(self) -> str:
"""Return the string names."""
return str(self.handler)
def _process(self) -> None:
"""Process log in a thread."""
while True:
record = run_coroutine_threadsafe(
self._queue.get(), self.loop).result()
if record is None:
self.handler.close()
return
self.handler.emit(record)
def createLock(self) -> None:
"""Ignore lock stuff."""
pass
def acquire(self) -> None:
"""Ignore lock stuff."""
pass
def release(self) -> None:
"""Ignore lock stuff."""
pass
@property
def level(self) -> int:
"""Wrap property level to handler."""
return self.handler.level
@property
def formatter(self) -> Optional[logging.Formatter]:
"""Wrap property formatter to handler."""
return self.handler.formatter
@property
def name(self) -> str:
"""Wrap property set_name to handler."""
return self.handler.get_name() # type: ignore
@name.setter
def name(self, name: str) -> None:
"""Wrap property get_name to handler."""
self.handler.set_name(name) # type: ignore
def catch_log_exception(
func: Callable[..., Any],
format_err: Callable[..., Any],
*args: Any) -> Callable[[], None]:
"""Decorate an callback to catch and log exceptions."""
def log_exception(*args: Any) -> None:
module_name = inspect.getmodule(inspect.trace()[1][0]).__name__
# Do not print the wrapper in the traceback
frames = len(inspect.trace()) - 1
exc_msg = traceback.format_exc(-frames)
friendly_msg = format_err(*args)
logging.getLogger(module_name).error('%s\n%s', friendly_msg, exc_msg)
# Check for partials to properly determine if coroutine function
check_func = func
while isinstance(check_func, partial):
check_func = check_func.func
wrapper_func = None
if asyncio.iscoroutinefunction(check_func):
@wraps(func)
async def async_wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
await func(*args)
except Exception: # pylint: disable=broad-except
log_exception(*args)
wrapper_func = async_wrapper
else:
@wraps(func)
def wrapper(*args: Any) -> None:
"""Catch and log exception."""
try:
func(*args)
except Exception: # pylint: disable=broad-except
log_exception(*args)
wrapper_func = wrapper
return wrapper_func
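# --- Illustrative usage (editorial addition, not part of the original module) ---
# A minimal, self-contained sketch of catch_log_exception wrapping a plain callback:
# the wrapped call logs the formatted message plus the traceback instead of raising.
# The function name and message below are made up for illustration.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)

    def _divide(value: int) -> float:
        """Example callback that raises for value == 0."""
        return 1 / value

    safe_divide = catch_log_exception(
        _divide, lambda *args: "divide failed for args {}".format(args))
    safe_divide(0)   # logs "divide failed for args (0,)" plus the ZeroDivisionError
    safe_divide(2)   # runs normally; the wrapper discards the result and returns None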
|
gstreamer_rtsp_server.py
|
'''
* Copyright (C) 2019-2020 Intel Corporation.
*
* SPDX-License-Identifier: BSD-3-Clause
'''
from threading import Thread
from collections import namedtuple
import gi
gi.require_version('GstRtspServer', '1.0')
gi.require_version('Gst', '1.0')
# pylint: disable=wrong-import-position
from gi.repository import Gst, GstRtspServer, GLib
from vaserving.common.utils import logging
from vaserving.rtsp.gstreamer_rtsp_factory import GStreamerRtspFactory
# pylint: enable=wrong-import-position
Stream = namedtuple('stream', ['source', 'caps'])
Stream.__new__.__defaults__ = (None, None)
class GStreamerRtspServer():
def __init__(self, port):
self._logger = logging.get_logger('GSTRtspServer', is_static=True)
Gst.init(None)
self._stopped = False
self._port = port
self._server = GstRtspServer.RTSPServer()
self._server.set_service(str(port))
self._context = None
self._mainloop = None
self._mount_points = self._server.get_mount_points()
self._streams = {}
self._thread = None
self._factory = GStreamerRtspFactory(self)
self._factory.set_shared(True)
def check_if_path_exists(self, rtsp_path):
if rtsp_path in self._streams:
self._logger.error("RTSP Stream at {} already exists, use different path".format(rtsp_path))
raise Exception("RTSP Stream at {} already exists, use different path".format(rtsp_path))
def add_stream(self, identifier, rtsp_path, caps, source):
self._streams[rtsp_path] = Stream(source, caps)
self._mount_points.add_factory(rtsp_path, self._factory)
url = "rtsp:://<host ip>:{}{}".format(self._port, rtsp_path)
self._logger.info("Created RTSP Stream for instance {} at {}".format(identifier, url))
def remove_stream(self, rtsp_path):
self._logger.debug(
"Removing RTSP Stream: {}".format(rtsp_path))
if rtsp_path in self._streams:
self._mount_points.remove_factory(rtsp_path)
del self._streams[rtsp_path]
def get_source(self, rtsp_path):
if rtsp_path in self._streams:
return self._streams[rtsp_path]
return None
def _loop(self):
try:
self._mainloop.run()
except (KeyboardInterrupt, SystemExit):
pass
self._logger.debug("Exiting RTSP Main loop")
def start(self):
self._context = GLib.MainContext()
self._server.attach(self._context)
self._mainloop = GLib.MainLoop.new(self._context, False)
self._thread = Thread(target=self._loop, daemon=True)
self._thread.start()
self._logger.info("Gstreamer RTSP Server Started on port: {}".format(self._port))
def stop(self):
if not self._stopped:
self._stopped = True
self._logger.info("Stopping Gstreamer RTSP Server")
for rtsp_path in list(self._streams):
self.remove_stream(rtsp_path)
self._factory = None
self._streams = None
if self._mainloop:
self._mainloop.quit()
if self._thread:
self._thread.join()
self._mainloop = None
self._thread = None
if self._context:
self._context.unref()
self._context = None
self._server = None
self._logger.debug("Gstreamer RTSP Server Stopped")
|
test_StandardDiskBenchmark.py
|
# -*- coding: utf-8 -*-
import threading
from pip_benchmark_python.standardbenchmarks.StandardDiskBenchmark import StandardDiskBenchmark
class TestStandardDiskBenchmark:
benchmark = None
def callback(self, arg=None):
print('Is Done!')
def setup_method(self):
self.benchmark = StandardDiskBenchmark()
self.benchmark.set_up(None)
def teardown_method(self):
self.benchmark.tear_down(self.callback)
def test_execute(self):
threads = []
active_threads = threading.active_count()
for i in range(100):
threads.append(threading.Thread(
target=lambda: self.benchmark.execute(None),
))
threads[-1].start()
for th in threads: # waiting for all threads
th.join()
assert active_threads == threading.active_count()
|
test_thread_local.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import threading
import numpy as np
import mxnet as mx
from mxnet import context, attribute
from mxnet.context import Context
from mxnet.attribute import AttrScope
from mxnet.test_utils import assert_almost_equal, set_default_context
from mxnet.util import _NumpyArrayScope, set_np_shape
def test_context():
ctx_list = []
ctx_list.append(context.current_context())
def f():
set_default_context(mx.gpu(11))
ctx_list.append(context.current_context())
thread = threading.Thread(target=f)
thread.start()
thread.join()
assert Context.devtype2str[ctx_list[0].device_typeid] == "cpu"
assert ctx_list[0].device_id == 0
assert Context.devtype2str[ctx_list[1].device_typeid] == "gpu"
assert ctx_list[1].device_id == 11
e1 = threading.Event()
e2 = threading.Event()
status = [False]
def g():
with mx.cpu(10):
e2.set()
e1.wait()
if context.current_context().device_id == 10:
status[0] = True
thread = threading.Thread(target=g)
thread.start()
e2.wait()
with Context("cpu", 11):
e1.set()
thread.join()
e1.clear()
e2.clear()
assert status[0], "Spawned thread didn't set the correct context"
def test_attrscope():
attrscope_list = []
with AttrScope(y="hi", z="hey") as attrscope:
attrscope_list.append(attrscope)
def f():
with AttrScope(x="hello") as attrscope:
attrscope_list.append(attrscope)
thread = threading.Thread(target=f)
thread.start()
thread.join()
assert len(attrscope_list[0]._attr) == 2
assert attrscope_list[1]._attr["x"] == "hello"
e1 = threading.Event()
e2 = threading.Event()
status = [False]
def g():
with mx.AttrScope(x="hello"):
e2.set()
e1.wait()
if "hello" in mx.attribute.current()._attr.values():
status[0] = True
thread = threading.Thread(target=g)
thread.start()
e2.wait()
with AttrScope(x="hi"):
e1.set()
thread.join()
e1.clear()
e2.clear()
assert status[0], "Spawned thread didn't set the correct attr key values"
def test_name():
name_list = []
name_manager = mx.name.current()
name_manager.get(None, "main_thread")
name_list.append(name_manager)
def f():
with mx.name.NameManager():
name_manager = mx.name.current()
name_manager.get(None, "spawned_thread")
name_list.append(name_manager)
thread = threading.Thread(target=f)
thread.start()
thread.join()
assert "main_thread" in name_list[0]._counter, "cannot find the string `main thread` in name_list[0]._counter"
assert "spawned_thread" in name_list[1]._counter, "cannot find the string `spawned thread` in name_list[1]._counter"
e1 = threading.Event()
e2 = threading.Event()
status = [False]
def g():
with mx.name.NameManager():
e2.set()
e1.wait()
if "main_thread" not in mx.name.current()._counter:
status[0] = True
thread = threading.Thread(target=g)
thread.start()
e2.wait()
with mx.name.NameManager():
mx.name.current().get(None, "main_thread")
e1.set()
thread.join()
e1.clear()
e2.clear()
assert status[0], "Spawned thread isn't using thread local NameManager"
def test_blockscope():
class dummy_block:
pass
blockscope_list = []
status = [False]
event = threading.Event()
def f():
net = dummy_block() # BlockScope only keeps a weakref to the Block
with mx.gluon.block._block_scope(net):
x = mx.name.current().get(None, "hello")
event.wait()
if x == "dummy_block_hello0":
status[0] = True
thread = threading.Thread(target=f)
thread.start()
event.set()
thread.join()
event.clear()
assert status[0], "Spawned thread isn't using the correct blockscope namemanager"
def test_createblock():
status = [False]
def f():
net = mx.gluon.nn.Dense(2)
net.initialize()
x = net(mx.np.array([1, 2, 3]))
x.wait_to_read()
status[0] = True
thread = threading.Thread(target=f)
thread.start()
thread.join()
assert status[0], "Failed to create a layer within a thread"
def test_symbol():
status = [False]
def f():
a = mx.sym.var("a")
b = mx.sym.var("b")
a_ = mx.nd.ones((2, 2))
c_ = a_.copy()
func1 = (a + b)._bind(mx.cpu(), args={'a': a_, 'b': c_})
func1.forward()[0].wait_to_read()
status[0] = True
thread = threading.Thread(target=f)
thread.start()
thread.join()
assert status[0], "Failed to execute a symbolic graph within a thread"
def test_np_array_scope():
np_array_scope_list = []
_NumpyArrayScope._current = _NumpyArrayScope(False)
np_array_scope_list.append(_NumpyArrayScope._current)
def f():
_NumpyArrayScope._current = _NumpyArrayScope(True)
np_array_scope_list.append(_NumpyArrayScope._current)
thread = threading.Thread(target=f)
thread.start()
thread.join()
assert len(np_array_scope_list) == 2
assert not np_array_scope_list[0]._is_np_array
assert np_array_scope_list[1]._is_np_array
event = threading.Event()
status = [False]
def g():
with mx.np_array(False):
event.wait()
if not mx.is_np_array():
status[0] = True
thread = threading.Thread(target=g)
thread.start()
_NumpyArrayScope._current = _NumpyArrayScope(True)
event.set()
thread.join()
event.clear()
assert status[0], "Spawned thread didn't set status correctly"
def test_np_global_shape():
prev_np_shape = set_np_shape(2)
data = []
def f():
# scalar
data.append(mx.np.ones(shape=()))
# zero-dim
data.append(mx.np.ones(shape=(0, 1, 2)))
try:
thread = threading.Thread(target=f)
thread.start()
thread.join()
assert_almost_equal(data[0].asnumpy(), np.ones(shape=()))
assert_almost_equal(data[1].asnumpy(), np.ones(shape=(0, 1, 2)))
finally:
set_np_shape(prev_np_shape)
|
handler.py
|
import json
import threading
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from bot.core.bot_service import InstagramBot
from handlers.redis_handler import RedisQueue
class BotHandler:
bot_session = {}
def read_queue(self):
while True:
queue = RedisQueue('bot_queue')
size = queue.qsize()
if size > 0:
credentials = json.loads(queue.get(block=True))
thread = threading.Thread(target=self.handle_credentials, args=(credentials,))
thread.daemon = True
thread.start()
def handle_credentials(self, credentials):
if credentials['command'] == "START":
self.handle_start_bot(credentials)
elif credentials['command'] == "STOP":
self.handle_stop_bot(credentials)
def handle_stop_bot(self, credentials):
bot = self.get_or_create_bot(credentials['username'], credentials['password'])
if bot:
bot.stop_bot()
del self.bot_session[credentials['username']]
def handle_start_bot(self, credentials):
bot = self.bot_session.get(credentials['username'], None)
if not bot:
bot = self.get_or_create_bot(credentials['username'], credentials['password'])
if bot:
self.send_to_socket(credentials['username'], "Starting bot")
bot.run_bot()
else:
self.send_to_socket(credentials['username'], "Bot already running")
def get_or_create_bot(self, username, password):
if self.bot_session.get(username, None):
return self.bot_session[username]
try:
bot = InstagramBot(username, password)
if bot.login_status:
self.bot_session[username] = bot
return bot
return None
except Exception as e:
print(e)
def send_to_socket(self, username, message):
layer = get_channel_layer()
async_to_sync(layer.group_send)('log_' + username,
{
'type': 'log_message',
'message': message
})
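# --- Illustrative usage sketch (editorial addition, not part of the original module) ---
# read_queue() above polls the 'bot_queue' Redis-backed queue for JSON commands. A
# producer would enqueue a payload like the one below. This assumes a local Redis
# server and that RedisQueue exposes a put() counterpart to get(); both are
# assumptions, and the credentials are placeholders.
if __name__ == "__main__":
    producer_queue = RedisQueue('bot_queue')
    producer_queue.put(json.dumps({        # put() on RedisQueue is assumed here
        'command': 'START',
        'username': 'example_user',
        'password': 'example_password',
    }))
    BotHandler().read_queue()              # blocks forever, dispatching commands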
|
wdt_bad_server_test.py
|
#!/usr/bin/env python3
# In this test, a dummy Python server is created which reads everything sent to
# it, but does not write anything back. When a WDT sender connects to it, it
# should detect that the other side is making no progress and return with
# NO_PROGRESS status.
import socket
import threading
from common_utils import *
def start_server():
global bad_socket
bad_socket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
bad_socket.settimeout(0.5) # timeout of 500ms
bad_socket.bind(("", 0))
bad_socket.listen(5)
def start_server_thread():
print("Before accept")
(conn, addr) = bad_socket.accept()
print("Connected with {0}:{1}".format(addr[0], addr[1]))
while True:
if stop:
break
try:
conn.recv(1024)
# sleep for 1 ms. This limits the rate to 1MBytes/sec or less
time.sleep(0.001)
except Exception as e:
print(e)
print("server thread stopped")
bad_socket.close()
create_test_directory("/tmp")
# create 5mb random files
generate_random_files(5 * 1024 * 1024)
start_server()
port = bad_socket.getsockname()[1]
print("Started fake/bad server at port {0}".format(port))
stop = False
server_thread = threading.Thread(target=start_server_thread)
server_thread.start()
read_timeout = 500
# start a wdt sender
sender_status = run_sender(
"-read_timeout_millis={0}".format(read_timeout),
"wdt://[::1]:{0}?num_ports=1".format(port)
)
stop = True
server_thread.join()
if sender_status != 24:
print(
"Sender should exit with code NO_PROGRESS(24), but it exited "
"with {0}. Logs at {1}".format(sender_status, root_dir)
)
exit(1)
else:
good_run()
|
produceConsume.py
|
import threading
import random
import time
fruit = []
def produce():
global fruit
while True:
try:
tempfruit = random.randint(0, 100)
fruit.append(tempfruit)
print("I made fruit #{0}".format(tempfruit))
            time.sleep(random.uniform(0.0, 0.05))  # sleep up to 50 ms between items
except:
print(len(fruit))
def consume():
global fruit
while True:
try:
if len(fruit) > 0:
tempfruit = fruit.pop(0)
print("I ate fruit #{0}".format(tempfruit))
time.sleep(random.uniform(0.0000001, 0.1))
except:
print(len(fruit))
t1 = threading.Thread(target=produce)
t2 = threading.Thread(target=consume)
t3 = threading.Thread(target=consume)
t1.start()
t2.start()
t3.start()
print("The threads should now be running.")
t1.join()
t2.join()
t3.join()
print("The threads should now be done")
|